diff --git "a/5297.jsonl" "b/5297.jsonl" new file mode 100644--- /dev/null +++ "b/5297.jsonl" @@ -0,0 +1,78 @@ +{"seq_id":"35245523271","text":"import numpy as np\r\nimport xarray as xr\r\nfrom wrf_ens_tools.calc import *\r\nfrom wrf_ens_tools.post import storeIdealizedPracPef, storeIdealizedNearestNeighborFortran\r\n\r\n# Create dummy data\r\nsixhr = True; timedim = 6\r\nnbrhd = 12.; dx = 4.; sigma = 1\r\nxdim, ydim, zdim = 588, 540, 40\r\nypts, xpts = np.meshgrid(np.arange(ydim), np.arange(xdim))\r\nfake_uh_fcst = np.random.uniform(low=0., high=100., size=(timedim, ydim, xdim))\r\nfake_refl_fcst = np.zeros((timedim, zdim, ydim, xdim))\r\n\r\n# Fill one vertical column with 45 dBZ reflectivity values at arbitrary time slice\r\nfake_refl_fcst[int(timedim/2), :, int(ydim/2), int(xdim/2)] = 45.\r\n# Number of grid pts around uh pts where refl exceedance is checked\r\n# (see Sobash et al 2011)\r\nr = 25. / dx\r\n\r\n# Mask used to determine number of total pts within refl neighborhood radius\r\nmask = dist_mask(xind=int(xdim/2), yind=int(ydim/2),\r\n xpts=xpts, ypts=ypts, r=25.)\r\nmax_num_ssr = len(mask == True)\r\nfhr = 28 # random forecast hour to attribute to meta data\r\n\r\n# Get lat/lons for interpolation from SPC 211 grid\r\nds = xr.open_dataset(\"wrfoutREFd2\")\r\nlats = ds[\"XLAT\"][0]\r\nlons = ds[\"XLONG\"][0]\r\nds.close()\r\n\r\ndef test_ssr_sspf_spc_grid():\r\n # Create SSRs and SSPFs from dummy data\r\n # SSR/SSPF arrays on SPC 211 grid\r\n ssr211 = gen_surrogate_severe_reports(uh_arr=fake_uh_fcst,\r\n sim_refl_arr=fake_refl_fcst,\r\n uh_thresh=40., lats=lats,\r\n lons=lons, dx=dx, spc_grid=True)\r\n sspf211 = gen_SSPFs_from_SSRs(ssr_arr=ssr211, sigma=sigma)\r\n # Store SSPF and SSR data to netCDF files\r\n storeIdealizedPracPef(sspf_arr=sspf211, outlats=lats, outlons=lons,\r\n outpath=\"idealized_pperf_from_spc211grid.nc\",\r\n sigma=sigma, fhrs=fhr, spc_grid=True)\r\n pp_ds = xr.open_dataset(\"idealized_pperf_from_spc211grid.nc\")\r\n pperf211 = pp_ds[\"practically_perfect\"][:]\r\n num_ssrs = len(np.isclose(ssr211, 1) == True)\r\n # Ensure SSRs are generating correctly\r\n assert(num_ssrs <= max_num_ssr)\r\n # # Ensure SSPFs are valid\r\n # assert(pperf211.all() < 0.99)\r\n\r\ndef test_ssr_sspf_native_grid():\r\n # Create SSRs and SSPFs from dummy data\r\n # SSR/SSPF arrays on native WRF grid\r\n ssr = gen_surrogate_severe_reports(uh_arr=fake_uh_fcst,\r\n sim_refl_arr=fake_refl_fcst,\r\n uh_thresh=40., lats=lats, lons=lons,\r\n dx=dx, spc_grid=False)\r\n sspf = gen_SSPFs_from_SSRs(ssr_arr=ssr, sigma=sigma)\r\n # Store SSPF and SSR data to netCDF files\r\n storeIdealizedPracPef(sspf_arr=sspf, outlats=lats, outlons=lons,\r\n outpath=\"idealized_pperf_from_nativegrid.nc\",\r\n sigma=sigma, fhrs=fhr, spc_grid=False)\r\n storeIdealizedNearestNeighborFortran(ssr_arr=ssr,\r\n outpath=\"idealized_rel_ob_grid.nc\",\r\n wrfrefpath=\"wrfoutREFd2\",\r\n obvar=\"P_HYD\")\r\n num_ssrs = len(np.isclose(ssr, 1) == True)\r\n stored_ssrs = xr.open_dataset(\"idealized_rel_ob_grid.nc\")\r\n num_stored_ssrs = len(np.isclose(stored_ssrs[\"P_HYD\"][0,0]) == True)\r\n # Ensure reliability ob storage is working correctly\r\n assert(num_ssrs <= max_num_ssr)\r\n # assert(ssr == stored_ssrs[\"P_HYD\"][0,0])\r\n # stored_ssrs.close()\r\n # # Ensure SSPF is working correctly\r\n # pp_ds = xr.open_dataset(\"idealized_pperf_from_nativegrid.nc\")\r\n # pperf = pp_ds[\"practically_perfect\"][:]\r\n # pp_ds.close()\r\n # assert(sspf.all() == pperf.all())\r\n # assert(sspf < 
0.99)\r\n","repo_name":"ac0015/wrf-ens-tools","sub_path":"test/test_ssr_sspf_calc.py","file_name":"test_ssr_sspf_calc.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"14"} +{"seq_id":"49150055260","text":"import json\nimport logging\nimport requests\n\nfrom .mgr_test_case import MgrTestCase\n\nlog = logging.getLogger(__name__)\n\n\nclass TestPrometheus(MgrTestCase):\n MGRS_REQUIRED = 3\n\n def setUp(self):\n super(TestPrometheus, self).setUp()\n self.setup_mgrs()\n\n def test_file_sd_command(self):\n self._assign_ports(\"prometheus\", \"server_port\")\n self._load_module(\"prometheus\")\n\n result = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd(\n \"prometheus\", \"file_sd_config\"))\n mgr_map = self.mgr_cluster.get_mgr_map()\n self.assertEqual(len(result[0]['targets']), len(mgr_map['standbys']) + 1)\n\n\n\n def test_standby(self):\n self._assign_ports(\"prometheus\", \"server_port\")\n self._load_module(\"prometheus\")\n\n original_active = self.mgr_cluster.get_active_id()\n\n original_uri = self._get_uri(\"prometheus\")\n log.info(\"Originally running at {0}\".format(original_uri))\n\n self.mgr_cluster.mgr_fail(original_active)\n\n failed_over_uri = self._get_uri(\"prometheus\")\n log.info(\"After failover running at {0}\".format(failed_over_uri))\n\n self.assertNotEqual(original_uri, failed_over_uri)\n\n # The original active daemon should have come back up as a standby\n # and serve some html under \"/\" and an empty answer under /metrics\n r = requests.get(original_uri, allow_redirects=False)\n self.assertEqual(r.status_code, 200)\n r = requests.get(original_uri + \"metrics\", allow_redirects=False)\n self.assertEqual(r.status_code, 200)\n self.assertEqual(r.headers[\"content-type\"], \"text/plain;charset=utf-8\")\n self.assertEqual(r.headers[\"server\"], \"Ceph-Prometheus\")\n\n def test_urls(self):\n self._assign_ports(\"prometheus\", \"server_port\")\n self._load_module(\"prometheus\")\n\n base_uri = self._get_uri(\"prometheus\")\n\n # This is a very simple smoke test to check that the module can\n # give us a 200 response to requests. We're not testing that\n # the content is correct or even renders!\n\n urls = [\n \"/\",\n \"/metrics\"\n ]\n\n failures = []\n\n for url in urls:\n r = requests.get(base_uri + url, allow_redirects=False)\n if r.status_code != 200:\n failures.append(url)\n\n log.info(\"{0}: {1} ({2} bytes)\".format(\n url, r.status_code, len(r.content)\n ))\n\n self.assertListEqual(failures, [])\n","repo_name":"ceph/ceph","sub_path":"qa/tasks/mgr/test_prometheus.py","file_name":"test_prometheus.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":12580,"dataset":"github-code","pt":"14"} +{"seq_id":"44344058320","text":"class Account:\r\n\r\n def __init__(self,a,n,t,b):\r\n self.acno=a\r\n self.name=n\r\n self.type=t\r\n self.balance=b\r\n\r\n def deposit(self,a):\r\n self.balance+=a\r\n print('Rs.',a,'deposited. Current balance is: Rs.',self.balance)\r\n\r\n def withdraw(self,a):\r\n if self.balance >= a:\r\n self.balance -= a\r\n print('Rs.',a,'withdrawn. 
Current balance is: Rs.', self.balance)\r\n        else:\r\n            print('Insufficient balance to make this transaction.')\r\n\r\na = int(input('Enter account number:'))\r\nn = input('Enter name of the account holder: ')\r\nt = input('Enter account type: ')\r\nb = float(input('Enter your balance:'))\r\n\r\nobj = Account(a,n,t,b)\r\nch = '1'\r\n\r\nwhile ch!='4':\r\n    print('1.Deposit')\r\n    print('2.Withdraw')\r\n    print('3.View balance')\r\n    print('4.Exit')\r\n    ch=input('Enter choice : ')\r\n\r\n    if ch=='1':\r\n        obj.deposit(float(input('Enter amount to deposit: ')))\r\n\r\n    elif ch=='2':\r\n        obj.withdraw(float(input('Enter amount to withdraw: ')))\r\n    elif ch=='3':\r\n        print('Balance is : ',obj.balance)\r\n    elif ch=='4':\r\n        exit()\r\n    else:\r\n        print('Wrong choice')\r\n\r\n","repo_name":"billanjacob/Programminng-Lab","sub_path":"PROGRAMMING LAB-Billan/17-02-2021/CO4/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"30992399429","text":"#from oauth2client.service_account import ServiceAccountCredentials\nimport gspread\nimport json\nfrom datetime import date, datetime\nimport re\nimport time\n\nFLIGHTS_PER_PAGE = 20\n\nSCOPES = ["https://spreadsheets.google.com/feeds",\n          'https://www.googleapis.com/auth/spreadsheets',\n          "https://www.googleapis.com/auth/drive.file",\n          "https://www.googleapis.com/auth/drive",\n          'https://www.googleapis.com/auth/spreadsheets.editor']\n\nconfig = None\nsheet = None\n\ndef init():\n    global sheet\n\n    global config\n    with open('config.json') as json_file:\n        data = json.load(json_file)\n        config = data['sheets']\n\n    client = gspread.service_account(filename="keys/aims-extractor.json")\n    sheet = client.open_by_key(config['spreadsheet_id']).sheet1\n\ndef get_last_entry_date():\n    last = get_last_entry_and_subtotals()\n    return last['activity']['date']\n\ndef get_last_entry_and_subtotals():\n    print("Getting last entry from Google Sheet Logbook")\n\n    all_flight_dates = sheet.col_values(1)\n    all_sim_dates = sheet.col_values(23)\n\n    # last activity and last page subtotals\n    activity_entry = None\n    subtotals_entry = None\n\n    i = len(all_flight_dates) - 1\n    while ((not activity_entry or not subtotals_entry) and i > 0):\n\n        # 'TOTAL ACC' entry\n        if (all_flight_dates[i] == 'TOTAL ACC'):\n            match = re.search(r'(\d+)$', all_flight_dates[i-1])\n            if not match:\n                raise RuntimeError('Page number expected at %s' % (i-1))\n            last_page = int(match.group(1))\n            if (not subtotals_entry):\n                subtotals_entry = {'row': i, 'page': last_page}\n        # Date\n        elif (not re.search(r'PAGINA (\d+)$', all_flight_dates[i]) and len(all_flight_dates[i]) > 0):\n            flight_date = datetime.strptime(all_flight_dates[i], '%d-%m-%Y').date()\n            activity_entry = {'row': i + 1, 'date': flight_date}\n\n        i = i - 1\n\n    # last simulator\n    i = len(all_sim_dates) - 1\n    while (i > activity_entry['row'] - 1):\n        if len(all_sim_dates[i]) > 0:\n            sim_date = datetime.strptime(all_sim_dates[i], '%d-%m-%Y').date()\n            activity_entry = {'row': i + 1, 'date': sim_date}\n        i = i - 1\n\n    last = {'activity': activity_entry, 'subtotals': subtotals_entry}\n    print(last)\n    return last\n\ndef insert_flight(flight, last):\n    print("Insert flight:")\n    print(flight)\n\n    # Stop at future activities\n    if date.fromisoformat(flight['Date']) > date.today():\n        raise RuntimeError("Trying to import future flights (not yet taken place).")\n\n    # Skip older activities\n    if date.fromisoformat(flight['Date']) <= last['activity']['date']:\n        raise 
RuntimeError(\"Trying to import an older activity (maybe already inserted).\")\n\n row = last['activity']['row'] + 1\n\n if (row == last['subtotals']['row']):\n row = last['subtotals']['row'] + 2\n last = create_new_subtotals(last)\n\n date_range = \"\"\n row_range = \"\"\n\n # Simulator\n if len(flight['SimType']) > 0:\n rowdata = [\n flight['Date'],\n 'A' + flight['ACType'],\n flight['SimTime'],\n flight['SimType']\n ]\n date_range = \"W%s\" % (row)\n row_range = \"W%s:Z%s\" % (row,row)\n # Flight\n else:\n rowdata = [\n flight['Date'],\n flight['dep_icao'],\n flight['DepTime'],\n flight['arr_icao'],\n flight['ArrTime'],\n 'A' + flight['ACType'],\n flight['Reg'],\n '',\n '',\n flight['FltTime'], # multi pilot\n flight['FltTime'], # total time\n flight['PicName'],\n flight['TKoffsDay'],\n flight['TKoffsNight'],\n flight['LandsDay'],\n flight['LandsNight'],\n flight['NightTime'],\n flight['FltTime'], # IFR\n flight['PIC'],\n flight['CoPlt'],\n '', # Dual\n '', # Instructor\n ]\n date_range = \"A%s\" % (row)\n row_range = \"A%s:V%s\" % (row,row)\n\n print(rowdata)\n\n time.sleep(2)\n sheet.format(date_range, {\n 'numberFormat': {\n 'type': 'DATE', 'pattern': 'dd-mm-yyyy'\n }\n })\n sheet.update(row_range, [rowdata], \n value_input_option = gspread.worksheet.ValueInputOption.user_entered)\n\n last['activity']['row'] = row\n last['activity']['Date'] = date.fromisoformat(flight['Date'])\n\n return last\n\ndef create_new_subtotals(last):\n global sheet\n\n print (\"Creating new page subtotals\")\n\n page = last['subtotals']['page']\n\n source_base_row = last['subtotals']['row']\n dest_base_row = source_base_row + FLIGHTS_PER_PAGE + 2\n\n source_range = \"%s:%s\" % (source_base_row, source_base_row + 1)\n dest_range = \"%s:%s\" % (dest_base_row, dest_base_row + 1)\n\n # Check existing cell contents before overwriting\n if sheet.cell(dest_base_row, 1).value:\n raise RuntimeError(\"Refusing to overwrite subtotals header\")\n\n sheet.copy_range(source_range, dest_range, paste_type='PASTE_NORMAL', paste_orientation='NORMAL')\n\n page_cell = \"A%s\" % (dest_base_row)\n new_page = page + 1\n sheet.update(page_cell, \"PAGINA %d\" % (new_page))\n\n last['subtotals']['row'] = dest_base_row\n last['subtotals']['page'] = new_page\n\n return last\n","repo_name":"ptsmonteiro/aims-extractor","sub_path":"sheets.py","file_name":"sheets.py","file_ext":"py","file_size_in_byte":5322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"44008408276","text":"\"\"\"For processing file paths for example.\"\"\"\n#(str ->) sys -> io\n## for file paths\nfrom os.path import exists,dirname,basename,abspath,isdir,splitext ## prefer `pathlib` over `os.path`\nfrom pathlib import Path\nfrom glob import glob\nfrom roux.lib.str import replace_many, encode\n\n#\nimport subprocess\nimport sys\nimport logging\nimport shutil\n\n## for file paths\ndef basenamenoext(p):\n \"\"\"Basename without the extension.\n\n Args:\n p (str): path.\n\n Returns:\n s (str): output.\n \"\"\"\n return splitext(basename(p))[0]\n\ndef remove_exts(\n p: str,\n exts: tuple= None,\n ):\n \"\"\"Filename without the extension.\n\n Args:\n p (str): path.\n exts (tuple): extensions.\n\n Returns:\n s (str): output.\n \"\"\"\n if not isinstance(p,str):\n p=str(p)\n if exts is None:\n exts=Path(p).suffixes\n if isinstance(exts,(list,tuple)):\n e=''.join(Path(p).suffixes)\n if p.endswith(e):\n p=p[:-len(e)]\n return p\n\ndef read_ps(\n ps,\n test=True,\n ) -> list:\n \"\"\"Read a list of paths.\n \n 
Parameters:\n ps (list|str): list of paths or a string with wildcard/s.\n test (bool): testing.\n\n Returns:\n ps (list): list of paths.\n \"\"\"\n if isinstance(ps,str): \n if '*' in ps:\n ps=glob(ps)\n else:\n ps=[ps]\n ps=sorted(ps)\n if test:\n import pandas as pd\n ds1=pd.Series({p:p2time(p) if exists(p) else None for p in ps}).sort_values().dropna()\n if len(ds1)>1:\n from roux.lib.str import get_suffix\n d0=ds1.iloc[[0,-1]].to_dict()\n for k_,k,v in zip(['oldest','latest'],get_suffix(*d0.keys(),common=False),d0.values()):\n logging.info(f\"{k_}: {k}\\t{v}\")\n elif len(ds1)==0:\n logging.warning('paths do not exist.')\n return ps\n\ndef to_path(\n s,\n replacewith='_',\n verbose=False,\n coff_len_escape_replacement=100,\n ):\n \"\"\"Normalise a string to be used as a path of file.\n \n Parameters:\n s (string): input string.\n replacewith (str): replace the whitespaces or incompatible characters with.\n \n Returns:\n s (string): output string.\n \"\"\"\n import re\n s=re.sub(r'(/)\\1+',r'\\1',s) # remove multiple /'s\n if max([len(s_) for s_ in s.split('/')]) dict:\n \"\"\"\n Infer a output path for each of the paths or inputs.\n \n Parameters:\n input_paths (list) : list of input paths. Defaults to None.\n inputs (list) : list of inputs e.g. dictionaries. Defaults to None.\n output_path_base (str) : output path with a placeholder '{KEY}' to be replaced. Defaults to None.\n encode_short: (bool) : short encoded string, else long encoded string (reversible) is used. Defaults to True.\n replaces_output_path : list, dictionary or function to replace the input paths. Defaults to None.\n key_output_path (str) : key to be used to incorporate output_path variable among the inputs. Defaults to None.\n force (bool): overwrite the outputs. Defaults to False.\n verbose (bool) : show verbose. Defaults to False.\n \n Returns: \n dictionary with the output path mapped to input paths or inputs.\n \n TODOs:\n 1. Placeholders other than {KEY}.\n \"\"\"\n output_paths={}\n # path standardisation\n for i,_ in enumerate(inputs):\n for k,v in inputs[i].items():\n if k.endswith('_path') and isinstance(v,str):\n inputs[i][k]=str(Path(v))\n if k.endswith('_paths') and isinstance(v,list):\n inputs[i][k]=[str(Path(s)) for s in v]\n \n if isinstance(input_paths,list):\n ## transform input path\n l1={replace_many(p, replaces=replaces_output_path, replacewith='', ignore=False):p for p in input_paths}\n ## test collisions\n assert len(l1)==len(input_paths), 'possible duplicated output path'\n output_paths.update(l1)\n output_paths_exist=list(filter(exists,output_paths))\n if isinstance(inputs,list): \n ## infer output_path\n assert not '*' in output_path_base, output_path_base\n assert '{KEY}' in output_path_base, f\"placeholder i.e. 
'{{KEY}}' not found in output_path_base: '{output_path_base}'\"\n l2={output_path_base.format(KEY=encode(d.copy(),short=encode_short)):d.copy() for d in inputs}\n # if verbose:\n # logging.info(l2.keys())\n ## test collisions\n assert len(l2)==len(inputs), 'possible duplicated inputs or collisions of the hashes'\n ## check existing output paths \n output_paths.update(l2)\n output_paths_exist=glob(output_path_base.replace('{KEY}','*'))\n for k in output_paths:\n ## add output path in the dictionary\n if not key_output_path is None:\n output_paths[k][key_output_path]=k\n if force:\n return output_paths\n else:\n if verbose:\n logging.info(f\"output_paths: {list(output_paths.keys())}\")\n logging.info(f\"output_paths_exist: {output_paths_exist}\")\n \n # output_paths_not_exist=list(set(list(output_paths.keys())) - set(output_paths_exist))\n output_paths_not_exist=list(filter(lambda x: not exists(x),output_paths))\n if verbose:\n logging.info(f\"output_paths_not_exist: {output_paths_not_exist}\")\n if len(output_paths_not_exist) < len(output_paths):\n logging.info(f\"size of output paths changed: {len(output_paths)}->{len(output_paths_not_exist)}, because {len(output_paths)-len(output_paths_not_exist)}/{len(output_paths)} paths exist. Use force=True to overwrite.\")\n return {k:output_paths[k] for k in output_paths_not_exist}\n \ndef get_encoding(p):\n \"\"\"Get encoding of a file.\n \n Parameters:\n p (str): file path\n \n Returns:\n s (string): encoding.\n \"\"\"\n import chardet\n with open(p, 'rb') as f:\n result = chardet.detect(f.read())\n return result['encoding'] \n\n# ls\ndef get_all_subpaths(d='.',include_directories=False):\n \"\"\"Get all the subpaths.\n\n Args:\n d (str, optional): _description_. Defaults to '.'.\n include_directories (bool, optional): to include the directories. Defaults to False.\n\n Returns:\n paths (list): sub-paths.\n \"\"\"\n from glob import glob\n import os\n paths=[]\n for root, dirs, files in os.walk(d):\n if include_directories:\n for d in dirs:\n path=os.path.relpath(os.path.join(root, d), \".\")\n paths.append(path)\n for f in files:\n path=os.path.relpath(os.path.join(root, f), d)\n paths.append(path)\n paths=sorted(paths)\n return paths\n\n\ndef get_env(\n env_name: str,\n return_path: bool=False,\n ):\n \"\"\"Get the virtual environment as a dictionary.\n\n Args:\n env_name (str): name of the environment.\n\n Returns:\n d (dict): parameters of the virtual environment.\n \"\"\"\n import sys,subprocess, os\n env = os.environ.copy()\n env_name_current=sys.executable.split('anaconda3/envs/')[1].split('/')[0]\n path=sys.executable.replace(env_name_current,env_name)\n if return_path:\n return dirname(path)+'/'\n env['CONDA_PYTHON_EXE']=path\n if 'anaconda3/envs' in env[\"PATH\"]:\n env[\"PATH\"]=env[\"PATH\"].replace(env_name_current,env_name)\n elif 'anaconda' in env[\"PATH\"]:\n env[\"PATH\"]=env[\"PATH\"].replace(f\"{sys.executable.split('/anaconda3')[0]}/anaconda3/bin\",\n f\"{sys.executable.split('/anaconda3')[0]}/anaconda3/envs/{env_name}/bin\")\n else:\n env[\"PATH\"]=path.replace('/bin/python','/bin')+':'+env[\"PATH\"]\n \n return env\n\ndef runbash(s1,env=None,test=False,**kws):\n \"\"\"Run a bash command. \n\n Args:\n s1 (str): command.\n env (str): environment name.\n test (bool, optional): testing. Defaults to False.\n\n Returns:\n output: output of the `subprocess.call` function.\n\n TODOs:\n 1. logp\n 2. 
error ignoring\n \"\"\"\n if test:logging.info(s1)\n if env is None:\n logging.warning('env is not set.')\n response=subprocess.call(s1, shell=True,\n env=get_env(env) if isinstance(env,str) else env if not env is None else env,\n stderr=subprocess.DEVNULL if not test else None, \n stdout=subprocess.DEVNULL if not test else None,\n **kws)\n assert response==0, f\"Error: {s1}\"+('\\nset `test=True` for more verbose.' if not test else '')\n return response\n\ndef runbash_tmp(s1: str,\n env: str,\n df1=None,\n inp='INPUT',\n input_type='df',\n output_type='path',\n tmp_infn='in.txt',\n tmp_outfn='out.txt',\n outp=None,\n force=False,\n test=False,\n **kws):\n \"\"\"Run a bash command in `/tmp` directory.\n\n Args:\n s1 (str): command.\n env (str): environment name.\n df1 (DataFrame, optional): input dataframe. Defaults to None.\n inp (str, optional): input path. Defaults to 'INPUT'.\n input_type (str, optional): input type. Defaults to 'df'.\n output_type (str, optional): output type. Defaults to 'path'.\n tmp_infn (str, optional): temporary input file. Defaults to 'in.txt'.\n tmp_outfn (str, optional): temporary output file.. Defaults to 'out.txt'.\n outp (_type_, optional): output path. Defaults to None.\n force (bool, optional): force. Defaults to False.\n test (bool, optional): test. Defaults to False.\n\n Returns:\n output: output of the `subprocess.call` function.\n \"\"\"\n if exists(outp) and not force:\n return\n import tempfile\n with tempfile.TemporaryDirectory() as p:\n if test: p=abspath('test/')\n makedirs(p)\n tmp_inp=f\"{p}/{tmp_infn}\"\n tmp_outp=f\"{p}/{tmp_outfn}\"\n s1=replace_many(s1,{'INPUT':tmp_inp,\n 'OUTPUT':tmp_outp,\n })\n if not df1 is None:\n if input_type=='df':\n df1.to_csv(replace_many(inp,{'INPUT':tmp_inp,}),sep='\\t')\n elif input_type=='list':\n from roux.lib.set import to_list\n to_list(df1,replace_many(inp,{'INPUT':tmp_inp}))\n response=runbash(s1,env=env,\n test=test,\n **kws) \n if exists(tmp_outp):\n if output_type=='path':\n makedirs(outp)\n shutil.move(tmp_outp,outp)\n return outp\n else:\n logging.error(f\"output file not found: {outp} ({tmp_outp})\")\n \ndef create_symlink(\n p: str,\n outp: str,\n test=False,\n force=False,\n ):\n \"\"\"Create symbolic links.\n\n Args:\n p (str): input path.\n outp (str): output path.\n test (bool, optional): test. Defaults to False.\n\n Returns:\n outp (str): output path.\n \n TODOs:\n Use `pathlib`: `Path(p).symlink_to(Path(outp))`\n \"\"\"\n import os\n if not exists(p):\n logging.error(f\"skipped: file does not exists {p}\")\n return \n if exists(outp) and not force:\n if os.path.islink(outp):\n if os.readlink(outp)==abspath(p):\n logging.error(f\"skipped: symlink exists {outp}\")\n return\n else:\n logging.error(f\"skipped: wrong symlink {os.readlink(outp)} not {outp}\")\n return\n else:\n logging.error(f\"skipped: file exists {outp}\")\n return\n p,outp=abspath(p),abspath(outp)\n com=f\"ln -s {p} {outp}\"\n makedirs(outp)\n if test: print(com)\n os.system(com)\n return outp\n\ndef input_binary(q:str):\n \"\"\"Get input in binary format.\n\n Args:\n q (str): question.\n\n Returns:\n b (bool): response.\n \"\"\"\n reply=''\n while not reply in ['y','n','o']:\n reply = input(f\"{q}:\")\n if reply == 'y':\n return True\n if reply == 'n':\n return False\n return reply\n\ndef is_interactive():\n \"\"\"Check if the UI is interactive e.g. jupyter or command line. \n \"\"\"\n import __main__ as main\n return not hasattr(main, '__file__')\n\ndef is_interactive_notebook():\n \"\"\"Check if the UI is interactive e.g. 
jupyter or command line. \n \n Notes:\n\n Reference:\n \"\"\"\n return 'ipykernel.kernelapp' in sys.modules\n\ndef get_excecution_location(depth=1):\n \"\"\"Get the location of the function being executed.\n\n Args:\n depth (int, optional): Depth of the location. Defaults to 1.\n\n Returns:\n tuple (tuple): filename and line number.\n \"\"\"\n from inspect import getframeinfo, stack\n caller = getframeinfo(stack()[depth][0])\n return caller.filename,caller.lineno\n\n## time\n## logging system\ndef get_datetime(\n outstr: bool=True,\n fmt=\"%G%m%dT%H%M%S\",\n ):\n \"\"\"Get the date and time.\n\n Args:\n outstr (bool, optional): string output. Defaults to True.\n fmt (str): format of the string.\n \n Returns:\n s : date and time.\n \"\"\"\n import datetime\n time=datetime.datetime.now()\n if outstr:\n # from roux.lib.io import to_path # potential circular import\n # return to_path(str(time)).replace('-','_').replace('.','_')\n return time.strftime(fmt)\n else:\n return time\n\ndef p2time(filename: str,time_type='m'):\n \"\"\"Get the creation/modification dates of files.\n\n Args:\n filename (str): filename.\n time_type (str, optional): _description_. Defaults to 'm'.\n\n Returns:\n time (str): time.\n \"\"\"\n import os\n import datetime\n if time_type=='m':\n t = os.path.getmtime(filename)\n else:\n t = os.path.getctime(filename)\n return str(datetime.datetime.fromtimestamp(t))\n\ndef ps2time(ps: list,**kws_p2time):\n \"\"\"Get the times for a list of files. \n\n Args:\n ps (list): list of paths.\n\n Returns:\n ds (Series): paths mapped to corresponding times.\n \"\"\"\n import pandas as pd\n from glob import glob\n if isinstance(ps,str):\n if isdir(ps):\n ps=glob(f\"{ps}/*\")\n return pd.Series({p:p2time(p,**kws_p2time) for p in ps}).sort_values().reset_index().rename(columns={'index':'p',0:'time'})\n \n\ndef get_logger(program='program',argv=None,level=None,dp=None):\n \"\"\"Get the logging object.\n\n Args:\n program (str, optional): name of the program. Defaults to 'program'.\n argv (_type_, optional): arguments. Defaults to None.\n level (_type_, optional): level of logging. Defaults to None.\n dp (_type_, optional): _description_. 
Defaults to None.\n \"\"\"\n log_format='[%(asctime)s] %(levelname)s\\tfrom %(filename)s in %(funcName)s(..):%(lineno)d: %(message)s'\n# def initialize_logger(output_dir):\n cmd='_'.join([str(s) for s in argv]).replace('/','_')\n if dp is None:\n dp=''\n else:\n dp=dp+'/'\n date=get_datetime()\n logp=f\"{dp}.log_{program}_{date}_{cmd}.log\"\n #'[%(asctime)s] %(levelname)s\\tfrom %(filename)s in %(funcName)s(..):%(lineno)d: %(message)s'\n \n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n \n # create console handler and set level to info\n handler = logging.StreamHandler()\n handler.setLevel(level)\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n# # create error file handler and set level to error\n# handler = logging.FileHandler(os.path.join(output_dir, \"error.log\"),\"w\", encoding=None, delay=\"true\")\n# handler.setLevel(logging.ERROR)\n# formatter = logging.Formatter(log_format)\n# handler.setFormatter(formatter)\n# logger.addHandler(handler)\n\n # create debug file handler and set level to debug\n handler = logging.FileHandler(logp)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logp","repo_name":"rraadd88/roux","sub_path":"roux/lib/sys.py","file_name":"sys.py","file_ext":"py","file_size_in_byte":17989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"13179370065","text":"\"\"\"\n2520 is the smallest number that can be divided by each of the numbers from 1 to\n10 without any remainder. What is the smallest positive number that is evenly\ndivisible by all of the numbers from 1 to 20?\n\"\"\"\n\n# Using the information, we can assume that it will be a multiple of 2520.\n\nimport functools\n\n\ndef factorial(n):\n return functools.reduce(lambda a, b: a * b, range(1, n + 1))\n\n\nif __name__ == \"__main__\":\n number = 0\n\n while number < factorial(20):\n number += 2520\n if all(number % i == 0 for i in range(2, 20)):\n break\n\n print(number)\n\n# solution = 232792560\n","repo_name":"TedAlden/project-euler","sub_path":"005.py","file_name":"005.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"25777178064","text":"class Solution:\n def solve(self, n, goal, k, dp) -> int:\n mod = int(10 ** 9) + 7\n if n == 0 and goal == 0:\n return 1\n if n == 0 or goal == 0:\n return 0\n if dp[n][goal] != -1:\n return dp[n][goal]\n \n p = self.solve(n-1, goal-1, k, dp) * n\n np = self.solve(n, goal-1, k, dp) * max(n-k, 0)\n\n dp[n][goal] = (p + np) % mod\n\n return dp[n][goal]\n def numMusicPlaylists(self, n: int, goal: int, k: int) -> int:\n dp = [[-1 for i in range(0, goal+1)] for j in range(0, n+1)]\n return self.solve(n, goal, k, dp)\n \n","repo_name":"samsepi0x0/leetcode","sub_path":"920_number_of_music_playlists.py","file_name":"920_number_of_music_playlists.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"13469841558","text":"#!/usr/bin/env python\r\n\r\n#=====================================================================\r\n# Filename: \tthomas_M3_test.py\r\n# Date: \t\t10/23/2016\r\n# Created by: \tThomas Lazor\r\n# Version:\t\t1.0\r\n# Info:\t\tPython script for simulating control thread messages to\r\n#\t\t\tmotor_thread. 
The user will input 'left', 'right', \r\n#\t\t\t'forward', 'backward', or 'stop' to control the\r\n#\t\t\tmovement of the rover.\r\n#\r\n#=====================================================================\r\n\r\nimport fileinput\r\n\r\ndef parseCommand(conn):\r\n\tfor line in fileinput.input():\r\n\t\tif line == \"left\":\r\n\t\t\tconn.sendall(0x230001)\r\n\t\telif line == \"right\":\r\n\t\t\tconn.sendall(0x230002)\r\n\t\telif line == \"forward\":\r\n\t\t\tconn.sendall(0x230003)\r\n\t\telif line == \"backward\":\r\n\t\t\tconn.sendall(0x230004)\r\n\t\telif line == \"stop\":\r\n\t\t\tconn.sendall(0x230000)\r\n\t\telif line == \"exit\":\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tcontinue","repo_name":"tomoestreich/Embedded-Rover","sub_path":"milestones/three/thomas_M3_test.py","file_name":"thomas_M3_test.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"38627792078","text":"import utils\n\ndef main():\n print(\"Welcome to Lai-Yang Algorithm!\")\n\n print(\"----Getting the Process details for P1----\")\n utils.addDetails(1)\n print(\"----Getting the Event details for P1----\")\n utils.eventDetails(1)\n\n print(\"----Getting the Process details for P2----\")\n utils.addDetails(2)\n print(\"----Getting the Event details for P2----\")\n utils.eventDetails(2)\n\n print(\"---Calculating the Global State---\")\n utils.calculateGlobalState()\n\n reset = input(\"Do you want to reset the values? Enter Y for yes or N for No : \")\n if(reset == 'Y' or reset == 'y'):\n utils.reset()\n restart = input(\"The values have been reset, Would you like to start again? Please enter Y for Yes or N for No : \")\n if(restart == 'Y' or restart == 'y'):\n main()\n print(\"Thankyou for using Lai Yang Algorithm\")\n\n\n\n \nif __name__ == '__main__':\n main()\n","repo_name":"AnnetteEPaul/lai-yang-algorithm","sub_path":"LaiYang.py","file_name":"LaiYang.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"16667444459","text":"import argparse\nimport os\nimport random\nimport json\nimport shutil\nimport time\nimport warnings\nimport torch.cuda as cuda\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom Datasets import Generate_Dataloader\nimport numpy as np\nfrom logger import Logger\nimport Model_zoo as models\n\nimport os\nfrom torch.nn.modules.loss import _Loss\n\n\nparser = argparse.ArgumentParser(description='Tracing')\nparser.add_argument('--arch', default='vgg16_bn', type=str)\nparser.add_argument('--seed', default=1, type=int)\nparser.add_argument('--batch_size', default=64, type=int)\nparser.add_argument('--net_A', default='../New_Models_Re/checkpoint_CUB200_vgg16_bn_lr-2_sd0_itr300.pth.tar', type=str)\nparser.add_argument('--net_B', default='../New_Models_Re/checkpoint_CUB200_vgg16_bn_lr-2_sd5_itr300.pth.tar', type=str)\nparser.add_argument('--resume_Ys', default='model_checkpoints/CUB200_vgg16_bn/checkpoint_L30__a0.1_lr-4.pth.tar',\n type=str)\nparser.add_argument('--dataset', default='CUB200', type=str)\nparser.add_argument('--optim', default='Adam', type=str)\nparser.add_argument('--print_freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--gpu', default=3, type=int,\n help='GPU id to use.')\nparser.add_argument('--resumePath', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: 
none)')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--conv_layer', default=30, type=int)\nparser.add_argument('--workers', default=4, type=int)\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--epochs', default=1000, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--topk', default='[1,3]', type=str)\nparser.add_argument('--suffix', default='test', type=str)\n# parser.add_argument('--suffix', default='trainData_0.1_release_nosub', type=str)\nparser.add_argument('--in_mode', default='[1,1,1]', type=str)\n# parser.add_argument('--sub_sampler',\n# default='/home/data/lilongfei/FeatureFactorization/sub_sampler_VOC2012_crop_10par_C.npy',\n# type=str)\nparser.add_argument('--logspace', default=2, type=int)\nparser.add_argument('--lr', '--learning-rate', default=0.00001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--resume', action='store_true')\nparser.add_argument('--save_epoch', default=100, type=int)\nparser.add_argument('--val_epoch', default=10, type=int)\n# parser.add_argument('--save_per_epoch', default=False, type=bool)\nparser.add_argument('--channels', default=3, type=int)\nparser.add_argument('--sample_num', default='', type=str)\n\nargs = opt = parser.parse_args()\nif args.arch.startswith(\"vgg\"):\n convName = {\"conv4_1\": 24, \"conv4_2\": 27, \"conv4_3\": 30, \"conv5_1\": 34, \"conv5_2\": 37, \"conv5_3\": 40, \"FC6\": 0,\n \"FC7\": 3}\nelif args.arch.startswith(\"alexnet\"):\n convName = {\"conv4\": 8, \"conv5\": 10, \"FC6\": 1, \"FC7\": 4}\nelif args.arch.startswith(\"resnet\"):\n convName = {\"layer3\": 3}\nfor x, y in convName.items():\n if y == args.conv_layer:\n conv = x\nprint(\"extract feature maps from {}\\n\".format(conv))\n\nprint('parsed options:', vars(opt))\n\ntopk = json.loads(opt.topk)\nin_mode = json.loads(opt.in_mode)\n\ncuda.empty_cache()\n\n\ndef ResBlock_beforeReLU(block, x): # Only for ResNet 50 101 152!!\n identity = x\n\n out = block.conv1(x)\n out = block.bn1(out)\n out = block.relu(out)\n\n out = block.conv2(out)\n out = block.bn2(out)\n if block.__class__.__name__ != 'BasicBlock':\n out = block.relu(out)\n\n out = block.conv3(out)\n out = block.bn3(out)\n\n if block.downsample is not None:\n identity = block.downsample(x)\n\n out += identity\n return out\n\n\nclass img_to_feature(nn.Module):\n def __init__(self, fcn, f_trans=None): # fcn: vgg16_bn[:41]; f_trans: linearTest\n super(img_to_feature, self).__init__()\n self.fcn = fcn\n self.trans = f_trans\n self.eval()\n\n def forward(self, x):\n with torch.no_grad():\n x = self.fcn(x)\n if self.trans:\n out, out_n = self.trans.val_batch(x)\n return out, out_n\n\n\nclass TransClassifier(nn.Module):\n def __init__(self, ori_net, layer=0):\n super(TransClassifier, self).__init__()\n if args.arch.startswith(\"alexnet\"):\n self.features = ori_net.features[layer:]\n self.classifier = ori_net.classifier\n self.avgpool = ori_net.avgpool\n elif args.arch.startswith(\"vgg\"):\n self.features = ori_net.features[layer:]\n self.classifier = ori_net.classifier\n elif args.arch.startswith(\"resnet\"):\n self.layer3 = ori_net.layer3\n self.layer4 = ori_net.layer4\n self.avgpool = ori_net.avgpool\n self.fc = ori_net.fc\n for param in self.parameters():\n 
param.requires_grad = True\n\n def forward(self, x):\n if args.arch.startswith(\"alexnet\"):\n out = self.features(x)\n out = self.avgpool(out)\n out = out.view(out.size(0), 256 * 6 * 6)\n out = self.classifier(out)\n elif args.arch.startswith(\"vgg\"):\n out = self.features(x)\n out = out.view(out.size(0), -1).cuda(opt.gpu)\n out = self.classifier(out)\n elif args.arch.startswith(\"resnet\"):\n x = nn.ReLU(inplace=True)(x)\n out = self.layer4(x)\n out = self.avgpool(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n\nclass head_resnet(nn.Module):\n def __init__(self, ori_net):\n super(head_resnet, self).__init__()\n self.conv1 = ori_net.conv1\n self.bn1 = ori_net.bn1\n self.relu = ori_net.relu\n self.maxpool = ori_net.maxpool\n self.layer1 = ori_net.layer1\n self.layer2 = ori_net.layer2\n self.layer3 = ori_net.layer3\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3[:-1](x)\n b3_beforeR = ResBlock_beforeReLU(self.layer3[-1], x)\n return b3_beforeR\n\n\ndef load_checkpoint(resume, model):\n if os.path.isfile(resume):\n print(\"=> loading checkpoint '{}'\".format(resume))\n checkpoint = torch.load(resume, map_location=torch.device(\"cuda:{}\".format(opt.gpu)))\n state_dict = checkpoint['state_dict']\n keys = list(state_dict.keys())\n for key in keys:\n if key.find('module') != -1:\n state_dict[key.replace('module.', '')] = state_dict.pop(key)\n\n model.load_state_dict(state_dict)\n if 'best_acc1' in checkpoint:\n print(\"=> loaded checkpoint '{}' (epoch {} acc1 {})\"\n .format(resume, checkpoint['epoch'], checkpoint['best_acc1']))\n else:\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(resume))\n del checkpoint, state_dict\n\n\nclass M_Dataset(torch.utils.data.Dataset):\n def __init__(self, in_mode, suffix, channels=3):\n super(M_Dataset, self).__init__()\n self.target = torch.load('./M_Output/M_Output_target_{}.pkl'.format(suffix))\n if sum(in_mode) == channels:\n convOut = torch.load('./M_Output/M_Output_Y_sum_{}.pkl'.format(suffix))\n print(\"load ./M_Output/M_Output_Y_sum_{}.pkl\".format(suffix))\n else:\n k = 0\n for i in range(channels):\n if in_mode[i] == 1:\n tmp = torch.load('./M_Output/M_Output_Y{}_{}.pkl'.format(i, suffix))\n print(\"load ./M_Output/M_Output_Y{}_{}.pkl\".format(i, suffix))\n if k == 0:\n convOut = torch.zeros_like(tmp)\n convOut += tmp\n k += 1\n del tmp\n self.convOut = convOut\n\n def __len__(self):\n return len(self.target)\n\n def __getitem__(self, idx):\n convOut = self.convOut[idx]\n target = self.target[idx]\n\n return (convOut, target)\n\n\ndef main():\n if args.dataset.startswith(\"VOC\"):\n netA = models.__dict__[args.arch](num_classes=20).cuda(args.gpu)\n netB = models.__dict__[args.arch](num_classes=20).cuda(args.gpu)\n elif args.dataset.startswith(\"CUB\"):\n netA = models.__dict__[args.arch](num_classes=200).cuda(args.gpu)\n netB = models.__dict__[args.arch](num_classes=200).cuda(args.gpu)\n elif args.dataset.startswith(\"DOG\"):\n netA = models.__dict__[args.arch](num_classes=120).cuda(args.gpu)\n netB = models.__dict__[args.arch](num_classes=120).cuda(args.gpu)\n elif args.dataset.startswith(\"Mix\"):\n netA = models.__dict__[args.arch](num_classes=320).cuda(args.gpu)\n netB = models.__dict__[args.arch](num_classes=320).cuda(args.gpu)\n\n # load vgg model\n print(\"load model_vgg......\")\n 
load_checkpoint(opt.net_A, netA)\n    load_checkpoint(opt.net_B, netB)\n\n    if args.arch.startswith(\"alexnet\") or args.arch.startswith(\"vgg\"):\n        vggB_part = TransClassifier(netB, args.conv_layer + 1)\n        vggA_part = netA.features[:args.conv_layer + 1]\n    elif args.arch.startswith(\"resnet\"):\n        vggB_part = TransClassifier(netB)\n        vggA_part = head_resnet(netA)\n\n    # load 3 layers model\n    print(\"load model_Ys......\")\n    #######################################################\n    if args.arch.startswith(\"vgg\"):\n        if args.conv_layer <= 30:\n            input_size = output_size = torch.zeros((512, 28, 28)).shape\n        else:\n            input_size = output_size = torch.zeros((512, 14, 14)).shape\n    elif args.arch.startswith(\"alexnet\"):\n        input_size = output_size = torch.zeros((256, 13, 13)).shape\n    elif args.arch.startswith(\"resnet\"):\n        if args.arch.startswith('resnet18') or args.arch.startswith('resnet34'):\n            input_size = output_size = torch.zeros((256, 14, 14)).shape\n        else:\n            input_size = output_size = torch.zeros((1024, 14, 14)).shape\n    #######################################################\n\n    model_Ys = models.LinearTester(input_size, output_size, gpu_id=args.gpu, fix_p=True, bn=False, instance_bn=True)\n    if args.resume_Ys == \"\":\n        resume_Ys = \"./model_checkpoints/VOC2012_crop/checkpoint_L{}_{}_3.0.pth.tar\".format(args.conv_layer,\n                                                                                           args.sample_num)\n    else:\n        resume_Ys = args.resume_Ys\n\n    load_checkpoint(resume_Ys, model_Ys)\n\n    catA = img_to_feature(vggA_part, model_Ys)\n\n    # Create dataloader\n    train_loader, val_loader = \\\n        Generate_Dataloader(args.dataset, args.batch_size, args.workers,\n                            args.suffix, args.sample_num)\n\n    if args.gpu is not None:\n        catA = catA.cuda(args.gpu)\n        vggB_part = vggB_part.cuda(args.gpu)\n    else:\n        print(\"error: gpu not assigned\")\n\n    if not os.path.exists(\"./logs_convs_vgg2trans/{}_{}_{}\".format(opt.dataset, opt.sample_num, opt.arch)):\n        os.makedirs(\"./logs_convs_vgg2trans/{}_{}_{}\".format(opt.dataset, opt.sample_num, opt.arch), exist_ok=True)\n    logger_train = Logger(\n        './logs_convs_vgg2trans/{}_{}_{}/L{}_{}_{}_{}_{}/train'.format(opt.dataset, opt.sample_num, opt.arch,\n                                                                       opt.conv_layer, opt.dataset, in_mode, opt.lr,\n                                                                       opt.suffix))\n    logger_val = Logger(\n        './logs_convs_vgg2trans/{}_{}_{}/L{}_{}_{}_{}_{}/val'.format(opt.dataset, opt.sample_num, opt.arch,\n                                                                     opt.conv_layer, opt.dataset, in_mode, opt.lr,\n                                                                     opt.suffix))\n\n    # define loss function (criterion) and optimizer\n    if args.dataset == 'VOC2012':\n        criterion = nn.BCEWithLogitsLoss().cuda(args.gpu)\n    else:\n        criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n    if args.optim == 'SGD':\n        optimizer = torch.optim.SGD(vggB_part.parameters(), args.lr,\n                                    momentum=args.momentum,\n                                    weight_decay=args.weight_decay)\n    elif args.optim == 'Adam':\n        optimizer = torch.optim.Adam(vggB_part.parameters(), args.lr, weight_decay=args.weight_decay)\n\n    if opt.resume:\n        if os.path.isfile(opt.resumePath):\n            print(\"=> loading checkpoint '{}'\".format(opt.resumePath))\n            checkpoint = torch.load(opt.resumePath, map_location=torch.device(\"cuda:{}\".format(opt.gpu)))\n            args.start_epoch = checkpoint['epoch']\n            best_acc1 = checkpoint['best_acc1']\n            vggB_part.load_state_dict(checkpoint['state_dict'])\n            optimizer.load_state_dict(checkpoint['optimizer'])\n            print(\"=> loaded checkpoint '{}' (epoch {})\"\n                  .format(opt.resumePath, checkpoint['epoch']))\n        else:\n            print(\"=> no checkpoint found at '{}'\".format(opt.resumePath))\n\n    logspace_lr = torch.logspace(np.log10(args.lr), np.log10(args.lr) - args.logspace, args.epochs)\n    best_acc1 = 0\n    for epoch in 
range(args.start_epoch, args.epochs):\n for param_group in optimizer.param_groups:\n param_group['lr'] = logspace_lr[epoch]\n # train for one epoch\n train(train_loader, vggB_part, catA, criterion, optimizer, epoch, logger_train)\n if epoch % args.val_epoch == 9:\n acc1 = validate(val_loader, vggB_part, catA, criterion, epoch, logger_val)\n\n # # remember best acc@1 and save checkpoint\n # # evaluate on validation sets\n #\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if not os.path.exists(\"./check/{}_{}_{}/\".format(opt.dataset, opt.sample_num, opt.arch)):\n os.mkdir(\"./check/{}_{}_{}/\".format(opt.dataset, opt.sample_num, opt.arch))\n save_dir = save_dir_itr = './check/{}_{}_{}/checkpoint_{}_{}_L{}.pth.tar'.format(args.dataset,\n args.sample_num, opt.arch,\n args.in_mode, args.suffix,\n args.conv_layer)\n if epoch > 0 and epoch % args.save_epoch == 99:\n save_checkpoint(\n {'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': vggB_part.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(), },\n is_best,\n save_dir)\n return\n\n\ndef train(train_loader, vggB_part, catA, criterion, optimizer, epoch, logger):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top0 = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n catA.eval()\n vggB_part.train()\n\n if args.dataset == \"VOC2012\":\n Targets = torch.zeros((args.batch_size, 20))\n else:\n Targets = torch.zeros(args.batch_size)\n\n end = time.time()\n for i, (datas, target) in enumerate(train_loader):\n if args.gpu is not None:\n datas = datas.cuda(args.gpu, non_blocking=True)\n # output Ys\n output, output_n = catA(datas)\n\n if args.dataset == \"Mix_DOG120\":\n target = target + 200\n # start to finetune\n # measure Ys loading time\n data_time.update(time.time() - end)\n input = output_n[0] * in_mode[0] + output_n[1] * in_mode[1] + output_n[2] * in_mode[2]\n # print(input.grad_fn)\n # print(input[8])\n # print(output_n[0][8])\n # print(output_n[1][8])\n # print(output_n[2][8])\n # exit()\n if args.gpu is not None:\n input = input.cuda(args.gpu, non_blocking=True)\n target = target.long().cuda(args.gpu, non_blocking=True)\n output = vggB_part(input)\n\n if args.dataset == \"VOC2012\":\n loss = criterion(output, target.float())\n else:\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n if args.dataset == 'CUB':\n acc1, acc5 = accuracy(output, target, topk=(1, 1))\n elif args.dataset == 'VOC2012':\n acc = accuracy_VOC2012(output, target)\n else:\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n if args.dataset == 'VOC2012':\n top0.update(acc[0], input.size(0))\n else:\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if args.dataset == 'VOC2012':\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@0 {top0.val:.3f} ({top0.avg:.3f})\\t'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top0=top0))\n else:\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data 
{data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n\n if args.dataset == 'VOC2012':\n log_dict = {'Loss': losses.avg, 'top0_prec': top0.avg.item()}\n else:\n log_dict = {'Loss': losses.avg, 'top1_prec': top1.avg.item(), 'top5_prec': top5.avg.item()}\n set_tensorboard(log_dict, epoch, logger)\n\n\ndef validate(val_loader, vggB_part, catA, criterion, epoch, logger):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top0 = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n catA.eval()\n vggB_part.eval()\n\n with torch.no_grad():\n if args.dataset == \"VOC2012\":\n Targets = torch.zeros((args.batch_size, 20))\n else:\n Targets = torch.zeros(args.batch_size)\n\n end = time.time()\n for i, (datas, target) in enumerate(val_loader):\n if args.gpu is not None:\n datas = datas.cuda(args.gpu, non_blocking=True)\n # output Ys\n output, output_n = catA(datas)\n if args.dataset == \"Mix_DOG120\":\n target = target + 200\n input = output_n[0] * in_mode[0] + output_n[1] * in_mode[1] + output_n[2] * in_mode[2]\n\n if args.gpu is not None:\n input = input.cuda(args.gpu, non_blocking=True)\n target = target.long().cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = vggB_part(input)\n if args.dataset == \"VOC2012\":\n loss = criterion(output, target.float())\n else:\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n if args.dataset == 'CUB':\n acc1, acc5 = accuracy(output, target, topk=(1, 1))\n elif args.dataset == 'VOC2012':\n acc = accuracy_VOC2012(output, target)\n else:\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n if args.dataset == 'VOC2012':\n top0.update(acc[0], input.size(0))\n else:\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if args.dataset == 'VOC2012':\n if i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@0 {top0.val:.3f} ({top0.avg:.3f})\\t'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top0=top0))\n else:\n if i % args.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n if args.dataset == 'VOC2012':\n print(' * Acc@0 {top0.avg:.3f}'\n .format(top0=top0))\n else:\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n if args.dataset == 'VOC2012':\n log_dict = {'Loss': losses.avg, 'top0_prec': top0.avg.item()}\n set_tensorboard(log_dict, epoch, logger)\n else:\n log_dict = {'Loss': losses.avg, 'top1_prec': top1.avg.item(), 'top5_prec': top5.avg.item()}\n set_tensorboard(log_dict, epoch, logger)\n if args.dataset == 'VOC2012':\n return top0.avg\n else:\n return top1.avg\n\n\ndef set_tensorboard(log_dict, epoch, logger):\n # set for tensorboard\n info = log_dict\n\n for tag, value in info.items():\n logger.scalar_summary(tag, value, epoch + 1)\n\n return\n\n\nclass 
AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass fc_out(nn.Module):\n def __init__(self, fea, cla):\n super(fc_out, self).__init__()\n self.fea = fea\n self.cla = cla\n self.eval()\n\n def forward(self, x):\n with torch.no_grad():\n x = self.fea(x)\n x = x.view(x.size(0), -1)\n x = self.cla(x)\n return x\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename,\n './check/checkpoint_{}_{}_{}_{}_best.pth.tar'.format(args.in_mode, args.lr, args.suffix,\n args.conv_layer))\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (args.decay_factor ** (epoch // args.epoch_step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef accuracy_VOC2012(output, target):\n with torch.no_grad():\n batch_size = target.size(0)\n accur = output.gt(0.).long().eq(target.long()).float().mean()\n res = []\n res.append(accur)\n return res\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nexuslrf/knowledge_consistency","sub_path":"transClassifier.py","file_name":"transClassifier.py","file_ext":"py","file_size_in_byte":25174,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"14"} +{"seq_id":"12260019442","text":"import turtle as t\nimport random as r\n\ntim=t.Turtle()\n\ndef randomColor():\n Colors=[\"red\",\"green\",\"blue\",\"black\",\"orange\",\"cyan\",\"aqua\",\"brown\"]\n return r.choice(Colors)\n\ndef drawShape(num_sides):\n angle=360/num_sides\n color_draw=randomColor()\n tim.pencolor(color_draw)\n for _ in range(num_sides):\n tim.forward(100)\n tim.right(angle)\n\n\n\nfor num_sides in range(3,11):\n drawShape(num_sides)\n\nscreen=t.Screen()\nscreen.exitonclick()","repo_name":"Deepankar1999/100DaysOfPython","sub_path":"day18/all_shape.py","file_name":"all_shape.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"20892172947","text":"print(\"{:#^40}\".format(\" Lojas do Futuro \"))\n\nvl = float(input(\"Qual o valor da compra? R$:\"))\nforma = int(input('''Escolha a forma de pagamento\n[ 1 ] Dinheiro/Cheque\n[ 2 ] Debito/Credito a vista\n[ 3 ] Até 2x no Cartão de credito\n[ 4 ] 3x ou mais no Cartão de credito\nOpção: '''))\n\nif forma == 1:\n print(\"O valor da compra ficou R$:{:.2f}, mas terá um desconto e ficará R$:{:.2f}\".format(\n vl, vl-(vl*0.10)))\nelif forma == 2:\n print(\"O valor da compra ficou R$:{:.2f}, mas terá um desconto e ficará R$:{:.2f}\".format(\n vl, vl-(vl*0.05)))\nelif forma == 3:\n print(\"O valor da compra ficá em 2x de R$:{:.2f} Sem Juros. 
no Total de R$: {:.2f}\".format(\n        vl/2, vl))\nelif forma == 4:\n    parcela = int(input(\"Quantas parcelas? \"))\n    print(\"Sua compra será parcelada em {}x de R$: {:.2f} COM JUROS\".format(\n        parcela, (vl + (vl*0.20))/parcela))\n    print(\"Sua compra de R$: {:.2f} vai custar R$:{:.2f} no final.\".format(\n        vl, vl+(vl*0.20)))\nelse:\n    print(\"Escolha uma opção valida\")\n","repo_name":"jeanthecreator/TaskManager","sub_path":"Ex.mundo2/ex044.py","file_name":"ex044.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"19726859567","text":"#!/usr/bin/python3\n\n# Author: Dan Walsh \nimport os\nimport sys\nfrom distutils.core import setup, Extension\n\next_modules = []\nif sys.version_info < (3,):\n    default_encoding_utf8 = Extension(\"setroubleshoot.default_encoding_utf8\",\n                                      sources=[\"default_encoding.c\"]\n                                      )\n    ext_modules = [default_encoding_utf8]\n\n\nsetup(name=\"setroubleshoot\",\n      version=\"1.1\",\n      description=\"Python SELinux Troubleshooter\",\n      author=\"Dan Walsh\", author_email=\"dwalsh@redhat.com\",\n      url='',\n      download_url='',\n      license='GPLv3+',\n      platforms='posix',\n      ext_modules=ext_modules,\n      packages=[\"setroubleshoot\"])\n","repo_name":"fedora-selinux/setroubleshoot","sub_path":"framework/src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"14"} +{"seq_id":"5879939401","text":"from typing import List\nfrom src.domain.use_cases.get_user import GetUser as GetUserInterface\nfrom src.data.interfaces.user_repository import UserRepositoryInterface\nfrom src.errors.types import HttpNotFoundError, HttpBadRequestError\nfrom src.db.entities.user import User\n\n\nclass GetUser(GetUserInterface):\n    def __init__(self, user_repository: UserRepositoryInterface) -> None:\n        self.user_repository = user_repository\n\n    def get_user_by_phone(self, phone: str) -> User:\n        self.__validate_phone(phone)\n        user = self.user_repository.get_user_by_phone(phone)\n        if user is None:\n            raise HttpNotFoundError(\"User not found\")\n        return user\n\n    @classmethod\n    def __validate_phone(cls, phone: str) -> None:\n        if phone is None:\n            raise HttpBadRequestError(\"Phone number is required\")\n\n        if len(phone) > 20:\n            raise HttpBadRequestError(\"Phone number is too long\")\n\n    def get_users(self, list_of_phones: list) -> List:\n        for phone in list_of_phones:\n            self.__validate_phone(phone)\n\n        users = self.user_repository.get_users(list_of_phones)\n        if users is None:\n            raise HttpNotFoundError(\"Users not found\")\n        return users\n","repo_name":"alisson-araujo/mychat-api","sub_path":"src/data/use_cases/get_user.py","file_name":"get_user.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"12301192580","text":"def fac_rec(n):\n    if n == 2: # base case\n        return 2\n    elif n <= 1: # base case\n        return 1\n    else: # recursive case\n        return n * fac_rec(n-1)\n\n\ndef fac_iter(n):\n    res = 1\n    for i in range(2, n+1):\n        res *= i\n    return res\n\n\nif __name__ == '__main__':\n    number = 5\n    print(fac_iter(number))\n    print(fac_rec(number))\n","repo_name":"ApexTone/DataStructAlgo-Code-KMITL","sub_path":"Recursion/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}
+{"seq_id":"16117455134","text":"import time\n\nfrom bddrest.authoring import response\n\nfrom restfulpy.application import Application\nfrom restfulpy.authentication import StatefulAuthenticator\nfrom restfulpy.principal import JWTPrincipal, JWTRefreshToken\nfrom restfulpy.testing import ApplicableTestCase\n\n\nroles = ['admin', 'test']\n\n\nclass MockupAuthenticator(StatefulAuthenticator):\n def validate_credentials(self, credentials):\n raise NotImplementedError()\n\n def create_refresh_principal(self, member_id=None):\n return JWTRefreshToken(dict(\n id=member_id\n ))\n\n def create_principal(self, member_id=None, session_id=None, **kwargs):\n return JWTPrincipal(\n dict(id=1, email='test@example.com', roles=roles, sessionId='1')\n )\n\n\nclass TestRefreshTokenWithoutSSl(ApplicableTestCase):\n __application__ = Application(\n 'Application',\n None,\n authenticator=MockupAuthenticator()\n )\n\n __configuration__ = ('''\n jwt:\n max_age: .3\n refresh_token:\n max_age: 3\n secure: true\n ''')\n\n def test_refresh_token_security(self):\n principal = self.__application__.__authenticator__.create_principal()\n\n token = principal.dump().decode(\"utf-8\")\n refresh_principal = self.__application__.__authenticator__.\\\n create_refresh_principal()\n refresh_token = 'refresh-token=' + refresh_principal.dump().\\\n decode(\"utf-8\")\n assert refresh_token.startswith('refresh-token=') is True\n self._authentication_token = token\n\n time.sleep(1)\n\n with self.given(\n 'Refresh tokn can not be set in not secure connections',\n headers={'Cookie': refresh_token},\n ):\n assert response.status == 400\n\n","repo_name":"pylover/restfulpy","sub_path":"tests/test_refreshtoken_without_ssl.py","file_name":"test_refreshtoken_without_ssl.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"14"} +{"seq_id":"16158483009","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\n\r\nx = np.arange(0, 30, 0.1)\r\ns = [-math.sin(y) + 2 for y in x]\r\nc = np.sin(x)\r\nplt.plot(x, s, label='sin(x)')\r\nplt.plot(x, c, label='sin(x)')\r\nplt.ylabel('sin(x)')\r\nplt.xlabel('x')\r\nplt.title(\"Wykres funkcji sin(x) zmodyfikowany\")\r\nplt.legend()\r\nplt.show()\r\n","repo_name":"Arsonist1337/WD","sub_path":"Zadania 10/L10Z4.py","file_name":"L10Z4.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"42870196560","text":"import pandas as __pd\nimport numpy as __np\nimport matplotlib.pyplot as __plt\nfrom sklearn.preprocessing import StandardScaler as __sc\nfrom sklearn.cluster import KMeans as __km\nfrom sklearn.metrics import silhouette_score as __si\nimport pickle as __pi\n\ndef standard_scaler(df, filename, fit = True):\n df_num = df.drop(columns=[\"songname\", \"artist\", \"album\", \"id\", \"uri\", \"track_href\"])\n if fit:\n scaler = __sc()\n scaler.fit(df_num)\n filename = filename + \".sav\"\n __pi.dump(scaler, open(\"scaler/\"+filename, 'wb'))\n scaled = scaler.transform(df_num)\n scaled_df = __pd.DataFrame(scaled, columns=df_num.columns)\n return scaled_df, filename\n if fit == False:\n loaded_model = __pi.load(open(\"scaler/\"+filename, 'rb'))\n scaled = loaded_model.transform(df_num)\n scaled_df = __pd.DataFrame(scaled, columns=df_num.columns)\n return scaled_df\n \ndef clustering(df):\n K = range(2, 21)\n inertia = []\n silhouette = []\n\n for k in K:\n print(\"Training a K-Means model with {} 
neighbours! \".format(k))\n print()\n kmeans = __km(n_clusters=k,\n random_state=1234)\n kmeans.fit(df)\n filename = \"models/kmeans_\" + str(k) + \".sav\"\n with open(filename, \"wb\") as f:\n __pi.dump(kmeans,f)\n inertia.append(kmeans.inertia_)\n silhouette.append(__si(df, kmeans.predict(df)))\n\n\n fig, ax = __plt.subplots(1,2,figsize=(16,8))\n ax[0].plot(K, inertia, 'bx-')\n ax[0].set_xlabel('k')\n ax[0].set_ylabel('inertia')\n ax[0].set_xticks(__np.arange(min(K), max(K)+1, 1.0))\n ax[0].set_title('Elbow Method showing the optimal k')\n ax[1].plot(K, silhouette, 'bx-')\n ax[1].set_xlabel('k')\n ax[1].set_ylabel('silhouette score')\n ax[1].set_xticks(__np.arange(min(K), max(K)+1, 1.0))\n ax[1].set_title('Silhouette Method showing the optimal k')\n\ndef predict(df_scaled, df_original, filename):\n loaded_model = __pi.load(open(\"models/\"+filename, 'rb'))\n cluster = loaded_model.predict(df_scaled)\n column = \"cluster_\"+filename[7:-4]\n df_original[column] = cluster\n return df_original\n\ndef clustering_2(df):\n K = range(2, 21)\n inertia5 = []\n inertia10 = []\n inertia30 = []\n inertia50 = []\n silhouette5 = []\n silhouette10 = []\n silhouette30 = []\n silhouette50 = []\n init = [5, 10, 30, 50]\n \n for k in K:\n for n in init:\n print(\"Training a K-Means model with {} neighbours and {} n! \".format(k,n))\n print()\n kmeans = __km(n_clusters=k, n_init=n, random_state=1234)\n kmeans.fit(df)\n filename = \"models/kmeans_\" + str(k) + \"_\" + str(n) + \".sav\"\n with open(filename, \"wb\") as f:\n __pi.dump(kmeans,f)\n if n == 5:\n inertia5.append(kmeans.inertia_)\n silhouette5.append(__si(df, kmeans.predict(df)))\n elif n == 10:\n inertia10.append(kmeans.inertia_)\n silhouette10.append(__si(df, kmeans.predict(df)))\n elif n == 30:\n inertia30.append(kmeans.inertia_)\n silhouette30.append(__si(df, kmeans.predict(df)))\n elif n == 50:\n inertia50.append(kmeans.inertia_)\n silhouette50.append(__si(df, kmeans.predict(df)))\n \n \n\n fig, ax = __plt.subplots(1,2,figsize=(16,8))\n ax[0].plot(K, inertia5, 'bx-', K, inertia10, 'gx-', K, inertia30, 'rx-', K, inertia50, 'yx-')\n ax[0].set_xlabel('k')\n ax[0].set_ylabel('inertia')\n ax[0].set_xticks(__np.arange(min(K), max(K)+1, 1.0))\n ax[0].set_title('Elbow Method showing the optimal k, blue:5, green: 10, red: 30, yellow: 50')\n ax[1].plot(K, silhouette5, 'bx-', K, silhouette10, 'gx-', K, silhouette30, 'rx-', K, silhouette50, 'yx-')\n ax[1].set_xlabel('k')\n ax[1].set_ylabel('silhouette score')\n ax[1].set_xticks(__np.arange(min(K), max(K)+1, 1.0))\n ax[1].set_title('Silhouette Method showing the optimal k, , blue:5, green: 10, red: 30, yellow: 50')\n \n\n\n","repo_name":"MLer76149/clustering_songs","sub_path":"songcluster/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"2901334584","text":"\"\"\"General purpose Utilities.\"\"\"\n\nimport os\nimport platform\nimport random\n\n\ndef getRandomFile(directory):\n\tfiles = []\n\tfor dirpath, _, filenames in os.walk(directory):\n\t\tfor f in filenames:\n\t\t\tfiles.append(os.path.join(dirpath, f))\n\timage_path = random.choice(files)\n\treturn image_path\n\n\n\ndef isRaspberry():\n\t\"\"\"Get if the target is a Raspberry.\"\"\"\n\trv = True\n\tif platform.machine() == \"x86_64\":\n\t\trv = False\n\treturn 
rv\n","repo_name":"tiandti/MEMO","sub_path":"memo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"32794809628","text":"import pycountry\n\nall_langs = pycountry.languages\n\n\ndef chunks(l, n):\n n = max(1, n)\n return (l[i:i + n] for i in range(0, len(l), n))\n\n\ndef lang_code_to_name(lang_code):\n lang = all_langs.get(alpha_2=\"{}\".format(lang_code))\n if lang is None:\n lang = all_langs.get(alpha_3=\"{}\".format(lang_code))\n if lang is None:\n # sometimes lang codes have a dash that then specifies regional dialect - just take the first part\n lang = all_langs.get(alpha_2=\"{}\".format(lang_code.split(\"-\")[0]))\n if lang is None:\n lang = all_langs.get(alpha_2=\"{}\".format(lang_code.split(\"-\")[0]))\n if lang is None:\n if \"zh\" in lang_code:\n # this should cover lots of regional zh langs (sorry to lump them all into one!)\n lang = all_langs.get(alpha_2=\"{}\".format(\"zh\"))\n if lang is None:\n print('No language name found for {}, returning language code'.format(lang_code))\n return lang_code\n else:\n return lang_code\n return lang.name\n","repo_name":"sdtblck/youtube_subtitle_dataset","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"14"} +{"seq_id":"30409252974","text":"#Program that obtains the sensor data and plots it on a graph as a function of x,y.\n\n# Import libraries \nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom rplidar import RPLidar\n\n#Initialize the sensor through the port\nlidar = RPLidar('/dev/ttyUSB0')\n \ndatos = [] #Variable to store the measurements\ntime.sleep(5) #To avoid the error of 'Wrong body size'\n\n#Method to store the distance and angle of sensor measurements \ndef obtener_datos():\n\n for i, scan in enumerate(lidar.iter_scans()):\n print('%d: Got %d measurements' % (i, len(scan)))\n # datos.append('Escaneo %d' % i)\n for medida in scan[:len(scan)]:\n if len(datos) < 500:\n datos.append(medida[1:])\n else: \n print(len(datos))\n lidar.stop_motor()\n return datos\n\n#Method for processing sensor data and converting it into xy coordinates\ndef obtener_coordenadas():\n x = [0] \n y = [0]\n \n datos = obtener_datos()\n print(datos)\n for punto in datos:\n #the distance must be in meters and the angle must be converted to radians so that python can deal with\n x.append(((punto[1]*np.cos(((-90+punto[0])*np.pi)/180.0)))/1000) \n y.append(-((punto[1]*np.sin(((-90+punto[0])*np.pi)/180.0)))/1000)\n \n matriz = np.array([x,y])\n matriz1 = np.transpose(matriz)\n \n #Store the data in a txt file\n encabezado = 'x y'\n np.savetxt('datost_xy.txt', matriz1, fmt='%d', header=encabezado)\n\n #To plot the points on an xy plane\n plt.clf()\n plt.scatter(x,y,marker='.',norm='0.5')\n plt.pause(.1)\n plt.ylabel('y')\n plt.xlabel('x')\n plt.xlim(-8,8)\n plt.ylim(-8,8)\n plt.savefig(\"Graficat_xy_simple.jpg\",bbox_inches='tight')\n plt.show()\n paro_sensor()\n return matriz1\n\ndef paro_sensor():\n lidar.stop()\n lidar.stop_motor()\n lidar.disconnect()\n \n \n #For testing \nif __name__ == '__main__':\n 
valores0=obtener_coordenadas()","repo_name":"paulacm5/TFG-Paula-Campina-Monzon","sub_path":"Sensor/obtener_coordenadas_como_lidar.py","file_name":"obtener_coordenadas_como_lidar.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"10967681469","text":"import unittest\nfrom calculator import create_app\n\nimport json\n\n\nclass CalculatorTest(unittest.TestCase):\n def setUp(self):\n app = create_app()\n self.app = app.test_client()\n\n def tearDown(self):\n pass\n\n def test_main(self):\n uri = '/'\n message = 'Hello Flask'\n\n rv = self.app.get(uri)\n\n self.assertEqual(message, rv.data.decode('utf-8'))\n\n def test_base_sqrt(self):\n uri = '/sqrt' + '/1'\n data = {'function': 'sqrt',\n 'input': ['1'],\n 'output': ['1.0']}\n json_data = json.dumps(data, sort_keys=True)\n\n rv = self.app.get(uri)\n\n self.assertEqual(json_data, rv.data.decode('utf-8'))\n\n def test_base_power(self):\n uri = '/power?base={}&exponent={}'.format(1, 2)\n data = {'function': 'power',\n 'input': ['1', '2'],\n 'output': ['1.0']}\n json_data = json.dumps(data, sort_keys=True)\n\n rv = self.app.get(uri)\n\n self.assertEqual(json_data, rv.data.decode('utf-8'))\n","repo_name":"wsunccake/myPractice","sub_path":"restful/python/flask/calculator_2/src/unittest/python/calculator_tests.py","file_name":"calculator_tests.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"29864646521","text":"\"\"\"\nauthor: Noorvir Aulakh\ndate: 20 July 2017\n\nN.B. Parts of this file are inspired by (Mahler, 2017)\n\nMahler, Jeffrey, Jacky Liang, Sherdil Niyaz, Michael Laskey, Richard Doan, Xinyu Liu, Juan Aparicio Ojea, and\nKen Goldberg. \"Dex-Net 2.0: Deep Learning to Plan Robust Grasps with Synthetic Point Clouds and Analytic Grasp Metrics.\"\narXiv preprint arXiv:1703.09312 (2017).\n\n\n------------------------------------------------------------------------------------------------------------------------\n\nApproach Angles:\n N.B. There is some confusion with the API documentation for the two angles that define the grasp. I assume (which is\n the more likely case) that grasp_approach_angle is the angle between the vector pointing out of the gripper jaws and\n the table normal. The grasp_axis_angle is the angle between the vector between the gripper jaws and the table normal\n\nTransformation:\n The convention for variable names for rigid transformations is T_fromframe_toframe.\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom autolab_core import YamlConfig, RigidTransform, Point\nfrom meshpy import ObjFile, SceneObject, UniformPlanarWorksurfaceImageRandomVariable\nfrom dexnet.database.keys import *\nfrom perception import RenderMode\nfrom dexnet.grasping import GraspCollisionChecker, RobotGripper\nimport dexnet.database.database as db\nfrom gqcnn import Visualizer as vis2d\nfrom gqcnn import Grasp2D\nfrom grasp_ucl.utils.visualise import UCLVisualiser as vis\nfrom grasp_ucl.database.transformations import ImageTransform\n\nfrom jenks import jenks\nfrom sklearn.cluster import KMeans\nfrom natsort import natsorted, ns\nimport numpy as np\nimport cPickle as pkl\nimport logging\nimport warnings\nimport operator\nimport time\nimport os\n\n# Display logging info\nlogging.getLogger().setLevel(logging.INFO)\n\n\nclass UCLDatabaseGQCNN(object):\n \"\"\"\n Create custom data-set from GQCNN data-set (Mahler, 2017). 
Use bins (0.0, 0.2, ... 1.0) as labels to learn a grasp-\n quality function.\n \"\"\"\n\n def __init__(self, config):\n\n self.config = config\n self.metric_list = []\n self.database_dir = config['database_dir']\n self.dataset_dir = config['dataset_dir']\n self.shuffled_dataset_output_dir = config['shuffled_dataset_output_dir']\n self.dataset_output_dir = config['dataset_output_dir']\n self.dataset_cache_dir = config['dataset_cache_dir']\n self.grasp_metric = config['grasp_metric']\n self.metric_stats_filename = config['metric_stats_filename']\n self.labels = config['labels']\n self.bin_step = config['bin_step']\n self.label_threshold = config['label_threshold']\n self.num_points_per_file = config['num_points_per_file']\n\n\n def get_metric_stats(self):\n \"\"\" Get the max and min values of the grasp metric for normalisation \"\"\"\n\n filenames = os.listdir(self.dataset_dir)\n metric_filenames = [name for name in filenames if name.find(self.grasp_metric) > -1]\n\n st_time = time.time()\n # open and read all metric data\n for filename in metric_filenames:\n\n with np.load(os.path.join(self.dataset_dir, filename)) as f:\n data = f['arr_0']\n data = data[np.where(data != 0)]\n self.metric_list = np.concatenate((self.metric_list, data))\n\n self.metric_max = np.max(self.metric_list)\n self.metric_min = np.min(self.metric_list)\n\n stats_file_path = os.path.join(self.database_dir, self.grasp_metric + '_metric_stats.pkl')\n with open(stats_file_path, 'wb') as pkl_file:\n print('Writing %s stats to file: %s' % (self.grasp_metric, str(stats_file_path)))\n stats_dict = {'metric_list': self.metric_list, 'metric_max': self.metric_max, 'metric_min': self.metric_min}\n pkl.dump(stats_dict, pkl_file)\n\n print('Time taken for writing %d non-zero metric points: %s(s)\\n' % (len(self.metric_list),\n (time.time() - st_time)))\n\n\n def shuffle_pre_existing_data(self):\n \"\"\" Shuffles data in numpy files and saves them to new files\"\"\"\n\n # depth image filenames\n img_filenames = self.load_filenames(self.dataset_dir, 'depth_ims_tf_table', sort=True)\n # robust ferrai_canny label filenames\n label_filenames = self.load_filenames(self.dataset_dir, 'robust_ferrari_canny', sort=True)\n # pose filenames\n pose_filenames = self.load_filenames(self.dataset_dir, 'hand_poses', sort=True)\n\n label_data = np.empty([0])\n pose_data = np.empty([0, 4])\n\n print('Loading labels ...')\n for i, filename in enumerate(label_filenames):\n file_data = np.load(os.path.join(self.dataset_dir, filename))['arr_0']\n label_data = np.concatenate((label_data, file_data))\n\n if i % 100 == 0:\n print('Loading data from file number %d out of %d' % (i + 1, len(label_filenames)))\n\n # important not to use img_data for this\n num_data = np.shape(label_data)[0]\n \n print('Loading poses ...')\n for i, filename in enumerate(pose_filenames):\n file_data = np.load(os.path.join(self.dataset_dir, filename))['arr_0']\n pose_data = np.concatenate((pose_data, file_data), axis=0)\n\n if i % 100 == 0:\n print('Loading data from file number %d out of %d' % (i + 1, len(pose_filenames)))\n\n print('Loading images ...')\n img_data = np.zeros([num_data, 32, 32, 1])\n start_index = 0\n end_index = self.num_points_per_file\n for i, filename in enumerate(img_filenames):\n file_data = np.load(os.path.join(self.dataset_dir, filename))['arr_0']\n\n if end_index <= (num_data - 1):\n img_data[start_index:end_index, :, :, :] = file_data\n else:\n img_data[start_index:num_data, :, :, :] = file_data\n\n start_index = end_index\n end_index = end_index + 
self.num_points_per_file\n\n if i % 100 == 0:\n print('Loading data from file number %d out of %d' % (i + 1, len(img_filenames)))\n\n idx = range(num_data)\n np.random.shuffle(idx)\n\n # save to files\n start_index = 0\n end_index = self.num_points_per_file\n for i, img_filename in enumerate(img_filenames):\n\n if end_index <= (num_data - 1):\n curr_idx = idx[start_index:end_index]\n start_index = end_index\n end_index = end_index + self.num_points_per_file\n\n else:\n curr_idx = idx[start_index:]\n\n # get new data\n imgs = img_data[curr_idx]\n labels = label_data[curr_idx]\n poses = pose_data[curr_idx]\n\n # get new filenames\n label_filename = label_filenames[i]\n pose_filename = pose_filenames[i]\n\n if i % 10 == 0:\n print('Saving file %d of %d' % (i+1, np.shape(img_filenames)[0]))\n np.savez_compressed(os.path.join(self.shuffled_dataset_output_dir, img_filename), imgs)\n np.savez_compressed(os.path.join(self.shuffled_dataset_output_dir, label_filename), labels)\n np.savez_compressed(os.path.join(self.shuffled_dataset_output_dir, pose_filename), poses)\n\n # save shuffling key\n np.savez_compressed(os.path.join(self.shuffled_dataset_output_dir, 'shuffle_key'), idx)\n print('done!')\n\n\n def create_images(self, input_filename_template, output_filename_template):\n \"\"\" Modify existing images to the desired format\"\"\"\n\n # load images\n img_filenames = self.load_filenames(self.dataset_dir, input_filename_template)\n\n num_files = len(img_filenames)\n for _id, filename in enumerate(img_filenames):\n\n imgs = np.load(os.path.join(self.dataset_dir, filename))['arr_0']\n logging.info('Resampling images from file %d of %d. %d images.' % (_id, num_files, len(imgs)))\n\n # upsample\n scaled_images = ImageTransform.resample(imgs, self.config['output_img_size'])\n # scaled_images = {'arr_0': scaled_images}\n\n # save\n output_filename = output_filename_template + '_' + str(filename[-9:-4])\n output_file_path = os.path.join(self.dataset_output_dir, output_filename)\n np.savez(output_file_path, scaled_images)\n\n\n def create_clustered_labels(self, num_bins=6, normalise=True):\n \"\"\" Use Jenks Natural Breaks to bin data \"\"\"\n\n # load data\n print('Loading data...')\n metric_filenames = self.load_filenames(self.dataset_dir, self.grasp_metric)\n\n print('Starting label conversion.')\n st_time = time.time()\n\n data = np.array([])\n\n print('Loading data...')\n for i, filename in enumerate(metric_filenames):\n file_data = np.load(os.path.join(self.dataset_dir, filename))['arr_0']\n data = np.concatenate((data, file_data))\n\n if i % 100 == 0:\n print('Loading data from file number %d out of %d' % (i + 1, len(metric_filenames)))\n\n # get the bin edges using the Jenks Natural Breaks algorithm\n # bin_edges = jenks(data, num_bins)\n nzero_data_idx = np.where(data != 0)\n nzero_data = data[nzero_data_idx]\n\n if normalise:\n nzero_data = (nzero_data - np.mean(nzero_data))/np.std(nzero_data)\n # clip data to 3 standard deviations\n nzero_data = np.clip(nzero_data, -3 * np.std(nzero_data), 3 * np.std(nzero_data))\n\n print('Clustering %d data-points' % len(nzero_data))\n # use one dimensional k-means clustering to get bin-edges\n clus_st = time.time()\n km = KMeans(n_clusters=num_bins-1, n_init=20, tol=0.00001, n_jobs=-1)\n km.fit(np.reshape(nzero_data, [-1,1]))\n print('Finished clustering in %.4f seconds!' 
% (time.time() - clus_st))\n\n bin_centers = np.squeeze(km.cluster_centers_)\n data_labels = np.squeeze(km.labels_)\n bin_sorted_idx = np.argsort(bin_centers)\n labels = np.arange(0, np.shape(bin_sorted_idx)[0] + 1, 1)\n\n bin_idx_map = {}\n for i, label in enumerate(bin_sorted_idx):\n # add one because bin zero is reserved for grasps in collision\n bin_idx_map[label] = i + 1\n\n print('Binning data.')\n binned_nz_data = np.zeros((np.shape(data_labels)))\n for i, label in enumerate(data_labels):\n binned_nz_data[i] = bin_idx_map[label]\n print('Done!')\n\n # add zero elements back\n binned_data = np.zeros((np.shape(data)))\n binned_data[nzero_data_idx] = binned_nz_data\n\n # convert to one_hot representation\n print('Converting data to one-hot vector representation...')\n one_hot_data = self.create_one_hot(binned_data, labels)\n\n # save to files\n start_index = 0\n end_index = self.config['num_points_per_file']\n print('Starting file write...')\n for i, filename in enumerate(metric_filenames):\n\n # filenames\n binned_label_filename = 'binned_clus_labels_' + filename[-9:-4]\n binned_label_path = os.path.join(self.dataset_output_dir, binned_label_filename)\n\n one_hot_label_filename = 'one_hot_clus_labels_' + filename[-9:-4]\n one_hot_label_path = os.path.join(self.dataset_output_dir, one_hot_label_filename)\n\n if end_index <= (len(binned_data) - 1):\n binned_file_data = binned_data[start_index: end_index]\n one_hot_file_data = one_hot_data[start_index: end_index, :]\n\n start_index = end_index\n end_index = end_index + self.config['num_points_per_file']\n\n else:\n binned_file_data = binned_data[start_index:]\n one_hot_file_data = one_hot_data[start_index:, :]\n\n # add a extra array dimension for convenience at training time\n binned_file_data = np.expand_dims(binned_file_data, axis=1)\n\n # save\n np.savez_compressed(binned_label_path, binned_file_data)\n np.savez_compressed(one_hot_label_path, one_hot_file_data)\n\n if i % 100 == 0:\n print('Saving file number %d out of %d' % (i + 1, len(metric_filenames)))\n\n print('All labels written to file in %s(s)' % str(time.time() - st_time))\n\n\n\n def create_percentile_labels(self):\n \"\"\" Creates and saves one-hot normalised and binned grasp quality labels for each label file in GQCNN dataset\"\"\"\n\n if not hasattr(self, 'metric_max'):\n\n stats_file_path = os.path.join(self.dataset_cache_dir, self.metric_stats_filename)\n if not os.path.exists(stats_file_path):\n raise IOError('Metric statistics not found. 
Run UCLDatabaseGQCNN.get_metric_stats() first.')\n\n stats = pkl.load(open(stats_file_path, 'rb'))\n self.metric_max = stats['metric_max']\n self.metric_min = stats['metric_min']\n self.metric_list = stats['metric_list']\n self.num_metric_points = len(self.metric_list)\n self.num_seen_data_points = 0 # used to compute percentile normalisation\n\n\n filenames = os.listdir(self.dataset_dir)\n metric_filenames = [name for name in filenames if name.find(self.grasp_metric) > -1]\n metric_filenames = natsorted(metric_filenames) # sort file indices\n\n print('Starting label conversion.')\n st_time = time.time()\n\n data = np.array([])\n # load data\n print('Loading data...')\n for i, filename in enumerate(metric_filenames):\n file_data = np.load(os.path.join(self.dataset_dir, filename))['arr_0']\n data = np.concatenate((data, file_data))\n\n if i % 100 == 0:\n print('Loading data from file number %d out of %d' % (i + 1, len(metric_filenames)))\n\n\n # convert raw data to binary labels\n logging.info('Thresholding data...')\n binary_data = self.create_binary(np.copy(data), threshold=self.label_threshold)\n binary_data = self.create_one_hot(binary_data, [0, 1])\n\n # normalise\n print('Normalising data...')\n normalised_data = self.normalise(data, normalisation_type=self.config['normalisation_type'])\n\n # bin\n print('Binning data...')\n binned_data = self.bin(normalised_data, bin_step=self.bin_step)\n\n # convert to one_hot representation\n print('Converting data to one-hot vector representation...')\n one_hot_data = self.create_one_hot(binned_data, self.labels)\n\n # save to files\n start_index = 0\n end_index = self.config['num_points_per_file']\n print('Starting file write...')\n for i, filename in enumerate(metric_filenames):\n\n # filenames\n binary_label_filename = 'binary_labels_' + filename[-9:-4]\n binary_label_path = os.path.join(self.dataset_output_dir, binary_label_filename)\n\n binned_label_filename = 'binned_labels_' + filename[-9:-4]\n binned_label_path = os.path.join(self.dataset_output_dir, binned_label_filename)\n\n one_hot_label_filename = 'one_hot_labels_' + filename[-9:-4]\n one_hot_label_path = os.path.join(self.dataset_output_dir, one_hot_label_filename)\n\n if end_index <= (len(binned_data) - 1):\n binary_file_data = binary_data[start_index: end_index]\n binned_file_data = binned_data[start_index: end_index]\n one_hot_file_data = one_hot_data[start_index: end_index, :]\n\n start_index = end_index\n end_index = end_index + self.config['num_points_per_file']\n\n else:\n binary_file_data = binary_data[start_index:]\n binned_file_data = binned_data[start_index:]\n one_hot_file_data = one_hot_data[start_index:, :]\n\n # add an extra array dimension for convenience at training time\n binned_file_data = np.expand_dims(binned_file_data, axis=1)\n\n # save\n np.savez_compressed(binary_label_path, binary_file_data)\n np.savez_compressed(binned_label_path, binned_file_data)\n np.savez_compressed(one_hot_label_path, one_hot_file_data)\n\n if i % 100 == 0:\n print('Saving file number %d out of %d' % (i + 1, len(metric_filenames)))\n\n print('All labels written to file in %s(s)' % str(time.time() - st_time))\n\n\n def normalise(self, data, normalisation_type='linear'):\n \"\"\" Normalise data to the range [0,1] \"\"\"\n\n if normalisation_type == 'linear':\n normalised_data = (data - self.metric_min)/(self.metric_max - self.metric_min)\n normalised_data = np.clip(normalised_data, 0, float('inf')) # corner case for grasps in collision\n\n elif normalisation_type == 
'percentile':\n normalised_data = np.copy(data)\n # non-zero data list\n map_non_zero_org = np.where(data != 0)\n non_zero_data = data[map_non_zero_org]\n num_data_points = len(non_zero_data)\n\n # keep track of indices\n map_sorted_non_zero = np.argsort(non_zero_data)\n\n # normalise\n data_idx = np.arange(1, num_data_points + 1)\n sorted_normalised_data = data_idx / float(num_data_points)\n\n # map data back to original indices\n non_zero_normalised_data = np.zeros(num_data_points)\n non_zero_normalised_data[map_sorted_non_zero] = sorted_normalised_data\n normalised_data[map_non_zero_org] = non_zero_normalised_data\n\n elif normalisation_type == 'gamma':\n pass\n\n else:\n raise ValueError('Unknown normalisation_type %s' % normalisation_type)\n\n return normalised_data\n\n\n def visualise(self, vis_type='histogram'):\n \"\"\" Visualise different data metrics \"\"\"\n\n # load data metrics from file\n if not hasattr(self, 'metric_max'):\n stats_file_path = os.path.join(self.database_dir, self.grasp_metric + '_metric_stats.pkl')\n if not os.path.exists(stats_file_path):\n raise IOError('Metric statistics not found. Make sure UCLDatabaseGQCNN.get_metric_stats() first.')\n\n stats = pkl.load(open(stats_file_path, 'rb'))\n self.metric_max = stats['metric_max']\n self.metric_min = stats['metric_min']\n self.metric_list = stats['metric_list']\n\n filenames = os.listdir(self.dataset_dir)\n metric_filenames = [name for name in filenames if name.find(self.grasp_metric) > -1]\n\n if vis_type == 'histogram':\n bins = {}\n # histogram properties\n num_bins = self.config['vis_histogram_num_bins']\n bin_step = self.config['bin_step']\n histogram_data = [0.0] * num_bins\n\n # calculate bin edges automatically if bin_step not specified\n if bin_step == 'auto':\n bins['edges'] = list(np.linspace(self.metric_min, self.metric_max, num_bins + 1))\n else:\n bins['edges'] = list(np.arange(self.metric_min - (bin_step/2), self.metric_max + bin_step, bin_step))\n\n bins['labels'] = list(np.linspace(self.metric_min, self.metric_max, num_bins))\n\n for filename in metric_filenames:\n with np.load(os.path.join(self.dataset_dir, filename)) as f:\n data = f['arr_0']\n # binned_data, _ = self.histogram(data, num_bins, self.metric_min, self.metric_max)\n binned_data = np.histogram(data, bins['edges'])[0]\n histogram_data = map(operator.add, histogram_data, binned_data)\n\n # visualise\n vis.histogram(histogram_data, bins['labels'])\n\n\n @staticmethod\n def histogram(data, num_bins, min_bin=0, max_bin=1):\n \"\"\" Digitise data into num_bins bins in the range [0,1] \"\"\"\n\n bins_edges = np.linspace(min_bin, max_bin, num_bins)\n binned_data = np.histogram(data, bins_edges)\n return binned_data[0], binned_data[1]\n\n @staticmethod\n def bin(data, method='percentile', bin_edges=None, bin_step=0.2, min_bin=0):\n \"\"\" Digitise data into bins of step bin_step. Expects data normalise to the interval [0, 1] \"\"\"\n\n if method == 'clustering' and bin_edges is None:\n raise ValueError('Must supply bin_edges for Jenks Natural Breaks based binning. 
Exiting.')\n\n # make sure data is an array\n data = np.array(data)\n\n if method == 'percentile':\n binned_data = min_bin + np.round(data/bin_step) * bin_step\n elif method == 'clustering':\n binned_data = np.digitize(data, bin_edges)\n else:\n raise ValueError('Unknown binning method \"%s\" specified' % method)\n\n return binned_data\n\n @staticmethod\n def create_one_hot(data, labels):\n \"\"\" Create one hot labels from sorted list of labels\"\"\"\n # create array of indexes into one_hot labels\n label_map = {}\n for idx, label in enumerate(labels):\n label_map[label] = idx\n\n data_idx = np.copy(data)\n for key in label_map:\n data_idx[data == key] = label_map[key]\n\n # turn to one_hot\n one_hot_data = np.zeros([len(data), len(labels)])\n one_hot_data[np.arange(len(data)), data_idx.astype(int)] = 1\n\n return one_hot_data\n\n @staticmethod\n def create_binary(data, threshold):\n \"\"\" Thresholds input data\"\"\"\n\n binary_data = (data >= threshold).astype(np.int)\n\n return binary_data\n\n @staticmethod\n def load_filenames(directory, template, sort=True):\n \"\"\" Load a list of filenames matching the template from a given directory\"\"\"\n\n filenames = os.listdir(directory)\n matched_filenames = [name for name in filenames if name.find(template) > -1]\n\n if sort:\n matched_filenames = natsorted(matched_filenames)\n\n return matched_filenames\n\n @staticmethod\n def fit_dist(data, dist_type='normal'):\n pass\n\n\n\nclass UCLDatabaseDexnet(object):\n \"\"\"\n Create custom data-set from dex-net data-set (Mahler, 2017)\n \"\"\"\n\n\n def __init__(self, config, load_from_pkl=False):\n\n self.grasps = {}\n self.config = config\n self._setup_config()\n\n # stable poses for every object\n self.stable_poses = {}\n\n # initialise database object\n self.hdf5_db = db.Hdf5Database(os.path.join(self.database_dir, self.database_filename))\n\n dataset_name = self.hdf5_db.datasets[0].dataset_name_\n hdf5_group = self.hdf5_db.data_['datasets'][dataset_name]\n\n # initialise data-set object\n self.hdf5_ds = db.Hdf5Dataset(dataset_name, hdf5_group, cache_dir=self.dataset_cache_dir)\n\n self.gripper = RobotGripper.load(self.gripper_name, gripper_dir=self.config['grippers_dir'])\n\n\n def _setup_config(self):\n \"\"\" Read config file and setup class variables \"\"\"\n\n self.database_dir = self.config['database_dir']\n self.database_filename = self.config['database_filename']\n self.dataset_cache_dir = self.config['dataset_cache_dir']\n self.gripper_name = self.config['gripper_name']\n self.grasp_metric = self.config['grasp_metric']\n self.cache_datapoints_limit = self.config['cache_datapoints_limit']\n self.img_cache_datapoints_limit = self.config['img_cache_datapoints_limit']\n\n # params related to collision checking\n self._setup_collision_checker_params()\n\n # camera params\n self.camera_params = self.config['camera_params']\n self.num_image_samples = self.config['num_image_samples']\n self.output_img_height = self.config['output_image_params']['output_img_height']\n self.output_img_width = self.config['output_image_params']['output_img_width']\n self.output_img_crop_width = self.config['output_image_params']['output_img_crop_width']\n self.output_img_crop_height = self.config['output_image_params']['output_img_crop_height']\n\n\n def _setup_collision_checker_params(self):\n \"\"\" Setup the discrete space over which to sample the collision checking \"\"\"\n\n self.approach_steps = []\n self.approach_dist = self.config['approach_dist']\n self.delta_approach = self.config['delta_approach']\n 
self.max_approach_angle_z = np.deg2rad(self.config['max_approach_angle_z'])\n self.max_approach_angle_y = np.deg2rad(self.config['max_approach_angle_y'])\n self.min_approach_angle_y = -self.max_approach_angle_y\n self.table_mesh = ObjFile(self.config['table_mesh_filename']).read()\n\n num_samples = self.config['num_approach_samples']\n\n\n # get approach angle increments\n if self.max_approach_angle_y == self.min_approach_angle_y:\n approach_inc = 1\n elif num_samples == 1:\n approach_inc = self.max_approach_angle_y - self.min_approach_angle_y + 1\n else:\n approach_inc = (self.max_approach_angle_y - self.min_approach_angle_y) / (num_samples - 1)\n\n approach_angle = self.min_approach_angle_y\n\n # create list of approach angles to try for collision checking\n while approach_angle <= self.max_approach_angle_y:\n self.approach_steps.append(approach_angle)\n approach_angle += approach_inc\n\n\n def get_object_keys(self):\n pass\n\n\n def _get_stable_poses(self):\n \"\"\" Get the stable poses in which the object can be placed on the table\"\"\"\n for key in self.hdf5_ds.object_keys:\n self.stable_poses[key] = self.hdf5_ds.stable_poses(key)\n\n\n def _get_collision_free_grasps(self, obj, all_grasps):\n \"\"\" Filters out grasps that are in collision with the table\"\"\"\n\n num_data_points = 0\n valid_grasps = {}\n\n # initialise collision checker for unachievable grasps. GraspCollisionChecker does not have a remove object\n # method, therefore need to reinitialise Class for each obj\n collision_checker = GraspCollisionChecker(self.gripper)\n collision_checker.set_graspable_object(obj)\n\n # load stable poses for current object\n stable_poses = self.hdf5_ds.stable_poses(obj.key)\n\n for pose in stable_poses:\n\n # aligned_grasps = []\n valid_grasps[pose.id] = []\n # setup table in collision checker (does stp \"mean stable pose\"?)\n T_obj_stp = pose.T_obj_table.as_frames('obj', 'stp')\n T_obj_table = obj.mesh.get_T_surface_obj(T_obj_stp, delta=self.config['table_offset']).as_frames('obj',\n 'table')\n T_table_obj = T_obj_table.inverse()\n collision_checker.set_table(self.config['table_mesh_filename'], T_table_obj)\n\n # align all grasps with table normal\n # aligned_grasps = [grasp.perpendicular_table(pose) for grasp in all_grasps]\n\n # get aligned_grasps along with grasp metrics\n for idx, grasp in enumerate(all_grasps[0]):\n\n found_grasp = False\n grasp.metric_type = 'robust_ferrari_canny'\n grasp.metric = all_grasps[1][idx]\n grasp = grasp.perpendicular_table(pose)\n\n # angles relative to table normal (see docstring at the top)\n grasp_axis_angle, grasp_approach_angle, _ = grasp.grasp_angles_from_stp_z(pose)\n\n if np.abs(grasp_approach_angle) > self.max_approach_angle_z:\n continue\n\n # check collision along approach directions\n for angle in self.approach_steps:\n rotated_test_grasp = grasp.grasp_y_axis_offset(angle)\n in_collision = collision_checker.collides_along_approach(rotated_test_grasp,\n self.approach_dist,\n self.delta_approach)\n # break if at-least one collision free path is found\n if not in_collision:\n found_grasp = True\n break\n\n # label as bad grasp if no collision free grasp is found\n if found_grasp:\n valid_grasps[pose.id].append(grasp)\n else:\n grasp.metric = 0\n valid_grasps[pose.id].append(grasp)\n\n num_data_points += 1\n\n return valid_grasps, num_data_points\n\n\n def get_grasps(self, num_objs=float('inf')):\n \"\"\" Get grasps for the given object and gripper. 
Saves the resulting grasps to the cache\"\"\"\n\n total_data_points = 0\n current_data_points = 0\n pickle_file_num = 1\n\n abs_st_time = time.time()\n st_time = time.time()\n # get stable poses for all objects\n self._get_stable_poses()\n\n # for obj_key in self.hdf5_ds.object_keys:\n for obj_idx, obj in enumerate(self.hdf5_ds):\n\n print('Starting grasp calculation for %s' % obj.key)\n\n # get all grasps for obj\n all_grasps = self.hdf5_ds.sorted_grasps(obj.key, self.config['grasp_metric'], self.gripper_name)\n\n # get collision free grasps\n self.grasps[obj.key], num_data_points = self._get_collision_free_grasps(obj, all_grasps)\n\n current_data_points += num_data_points\n total_data_points += num_data_points\n # save grasps and metrics in cache so we don't run out of memory\n if current_data_points > self.cache_datapoints_limit or (obj_idx + 1) == self.hdf5_ds.num_objects:\n\n # cache filenames\n cache_filename = 'grasp_cache' + str(pickle_file_num) + '.pkl'\n grasp_cache_filename = os.path.join(self.dataset_cache_dir, cache_filename)\n\n with open(grasp_cache_filename, 'wb') as pkl_file:\n print('Writing pickle file %s' % cache_filename)\n print('Time taken for %d data points: %s(s)\\n' % (current_data_points, (time.time() - st_time)))\n pkl.dump(self.grasps, pkl_file)\n\n pickle_file_num += 1\n current_data_points = 0\n # flush grasps to prevent running out of memory\n self.grasps.clear()\n st_time = time.time()\n\n if obj_idx + 1 == num_objs:\n break\n\n print('Total number of data-points: %d' % total_data_points)\n print('Total number of pickle files: %d' % pickle_file_num)\n print('Total time taken: %s(s)' % str(time.time() - abs_st_time))\n\n\n def get_rendered_images(self, pickle_file_num, total_data_points, grasps=None):\n \"\"\" Get rendered images for all stable poses with valid grasps. 
Saves the resulting grasps to the cache\"\"\"\n\n if grasps is None:\n grasps = self.grasps\n elif isinstance(grasps, dict):\n pass\n elif isinstance(grasps, str):\n # load external pickle file\n grasps = pkl.load(open(grasps, 'rb'))\n else:\n raise TypeError('Unknown type for argument grasps: must be None, a dict or a pickle filename (type \"str\")')\n\n if not grasps:\n warnings.warn('W: grasps dictionary is empty!')\n\n current_data_points = 0\n # total_data_points = 0\n # pickle_file_num = 1\n\n abs_st_time = time.time()\n st_time = time.time()\n\n # store rendered images and grasps\n obj_renders = {}\n\n # only get objects for which a grasp exists\n objs = [self.hdf5_ds[key] for key in grasps.keys()]\n\n for obj_idx, obj in enumerate(objs):\n\n stable_poses = self.hdf5_ds.stable_poses(obj.key)\n\n for pose in stable_poses:\n\n obj_renders[obj.key] = {pose.id: []}\n\n # grasps for current object\n obj_grasps = grasps[obj.key][pose.id]\n\n # object pose wrt table\n T_obj_stp = pose.T_obj_table.as_frames('obj', 'stp')\n T_obj_stp = obj.mesh.get_T_surface_obj(T_obj_stp)\n\n # sample images from camera model accounting for positional uncertainty\n T_table_obj = RigidTransform(from_frame='table', to_frame='obj')\n scene_objs = {'table': SceneObject(self.table_mesh, T_table_obj)}\n uirv = UniformPlanarWorksurfaceImageRandomVariable(obj.mesh,\n [RenderMode.DEPTH_SCENE],\n 'camera',\n self.camera_params,\n stable_pose=pose,\n scene_objs=scene_objs)\n\n # sample multiple images (model randomness)\n samples = uirv.rvs(size=self.num_image_samples)\n\n for sample in samples:\n\n # store all grasps for current sample\n sample_grasps = []\n\n # get image\n depth_img = sample.renders[RenderMode.DEPTH_SCENE].image\n\n # get camera transformation\n T_stp_camera = sample.camera.object_to_camera_pose\n camera_intr = sample.camera.camera_intr\n # get center pixels\n center_x = depth_img.center[1]\n center_y = depth_img.center[0]\n \n # object in camera frame\n T_obj_camera = T_stp_camera * T_obj_stp.as_frames('obj', 'stp')\n\n corrected_camera_intr = camera_intr\n # recompute intrinsics if image is being cropped and resized\n if self.config['output_image_params']['resize']:\n scale = self.output_img_height/float(self.output_img_crop_height)\n cropped_camera_intr = camera_intr.crop(self.output_img_crop_height, self.output_img_width,\n center_x, center_y)\n corrected_camera_intr = cropped_camera_intr.resize(scale)\n\n # crop image\n depth_img = depth_img.crop(self.output_img_crop_height, self.output_img_crop_width)\n # resize image\n depth_img = depth_img.resize((self.output_img_height, self.output_img_width))\n\n for grasp in obj_grasps:\n\n # project gripper into camera (T[stp->cam]T[obj->stp])\n P_grasp_camera = grasp.project_camera(T_obj_camera, camera_intr)\n\n # take the cropping into account for grasp center\n translation_x = center_x - self.output_img_crop_width/2\n translation_y = center_y - self.output_img_crop_height/2\n scaled_grasp_center_x = scale * (P_grasp_camera.center.x - translation_x)\n scaled_grasp_center_y = scale * (P_grasp_camera.center.y - translation_y)\n # translated_grasp_center = np.array([scaled_grasp_center_x, scaled_grasp_center_y])\n\n # get grasp in image space\n # grasp_center = Point(translated_grasp_center, frame=corrected_camera_intr.frame)\n sample_grasps.append([scaled_grasp_center_x, scaled_grasp_center_y, P_grasp_camera.angle, P_grasp_camera.depth])\n\n current_data_points += 1\n total_data_points += 1\n\n pose_sample = {'image': depth_img,\n 'grasps': sample_grasps,\n 'vis': 
{'camera_intr': corrected_camera_intr}}\n obj_renders[obj.key][pose.id].append(pose_sample)\n\n if current_data_points > self.img_cache_datapoints_limit or (obj_idx + 1) == len(objs):\n # cache filenames\n cache_filename = 'image_cache' + str(pickle_file_num) + '.pkl'\n image_cache_filename = os.path.join(self.dataset_cache_dir, cache_filename)\n\n with open(image_cache_filename, 'wb') as pkl_file:\n print('Writing pickle file %s' % cache_filename)\n print('Time taken for %d data points: %s(s)\\n' % (current_data_points, (time.time() - st_time)))\n pkl.dump(obj_renders, pkl_file)\n\n pickle_file_num += 1\n current_data_points = 0\n # flush grasps to prevent running out of memory\n obj_renders.clear()\n st_time = time.time()\n\n print('Total number of data-points: %d' % total_data_points)\n print('Total number of pickle files: %d' % pickle_file_num)\n print('Total time taken: %s(s)' % str(time.time() - abs_st_time))\n\n return pickle_file_num, total_data_points\n\n # TODO: Implement get_data method to control data access and file read/write\n # def get_data(self, data_type, num_objs):\n # \"\"\" Get data and save to cache\"\"\"\n # if data_type == 'grasps':\n # cache_filename = 'grasp_cache'\n # method_call = self.get_grasps\n #\n # elif data_type == 'rendered_images':\n # cache_filename = 'image_cache'\n # method_call = self.get_rendered_images\n #\n # else:\n # raise StandardError('get_data type %s not supported' % data_type)\n #\n # total_data_points = 0\n # current_data_points = 0\n # pickle_file_num = 1\n #\n # abs_st_time = time.time()\n # st_time = time.time()\n #\n # for obj_idx, obj in enumerate(self.hdf5_ds):\n #\n # data, num_data_points = method_call()\n #\n # current_data_points += num_data_points\n # total_data_points += num_data_points\n # # save grasps and metrics in cache so we don't run out of memory\n # if current_data_points > self.cache_datapoints_limit or (obj_idx + 1) == self.hdf5_ds.num_objects:\n #\n # cache_filpath = os.path.join(self.dataset_cache_dir, cache_filename + str(pickle_file_num) + '.pkl')\n # with open(cache_filpath, 'wb') as pkl_file:\n # print('Writing pickle file %s' % cache_filename)\n # print('Time taken for %d data points: %s(s)\\n' % (current_data_points, (time.time() - st_time)))\n # pkl.dump(self.grasps, pkl_file)\n #\n # pickle_file_num += 1\n # current_data_points = 0\n # # flush grasps to prevent running out of memory\n # self.grasps.clear()\n # st_time = time.time()\n #\n # if obj_idx + 1 == num_objs:\n # break\n #\n # print('Total number of data-point: %d' % total_data_points)\n # print('Total number of pickle files: %d' % pickle_file_num)\n # print('Total time taken: %s(s)' % str(time.time() - abs_st_time))\n\n def compile_database(self):\n pass\n\n def visualise(self, images, proj_grasp_img):\n\n for image in images:\n vis2d.figure()\n vis2d.imshow(image)\n vis2d.grasp(proj_grasp_img)\n vis2d.show()\n pass\n\n\nif __name__ == '__main__':\n\n dexnet_config = YamlConfig('/home/noorvir/catkin_ws/src/grasp_ucl/cfg/generate_ucl_dexnet_dataset.yaml')\n gqcnn_config = YamlConfig('/home/noorvir/catkin_ws/src/grasp_ucl/cfg/generate_ucl_gqcnn_dataset.yaml')\n\n # UCL_DEXNET\n # ucl_denet_db = UCLDatabaseDexnet(dexnet_config)\n # ucl_denet_db.get_grasps() # get grasps\n\n # get images for stable object poses with valid grasps\n # ucl_denet_db.get_rendered_images(ucldb.grasps)\n\n # all_filenames = os.listdir(dexnet_config['dataset_cache_dir'])\n # grasp_cache_filenames =[name for name in all_filenames if name.find('grasp_cache') > -1]\n # 
grasp_cache_filenames = natsorted(grasp_cache_filenames)\n #\n # pickle_file_num = 1\n # total_data_points = 0\n # for idx, cache_file in enumerate(grasp_cache_filenames):\n # print('Getting grasp file number %d' % (idx + 1))\n # grasp_cache_file_path = os.path.join(dexnet_config['dataset_cache_dir'], cache_file)\n # pickle_file_num, total_data_points = ucl_denet_db.get_rendered_images(pickle_file_num, total_data_points, grasp_cache_file_path)\n\n # compile database - associate rendered images with grasps and metrics\n\n\n # UCL_GQCNN\n ucl_gqcnn_db = UCLDatabaseGQCNN(gqcnn_config)\n ucl_gqcnn_db.shuffle_pre_existing_data()\n # ucl_gqcnn_db.get_metric_stats() # get statistics about successful grasps\n # ucl_gqcnn_db.create_images('depth_ims_tf_table', 'depth_ims_stf_{}_table'.format(gqcnn_config['output_img_size']))\n # ucl_gqcnn_db.create_labels() # create one-hot labels for quality function\n # ucl_gqcnn_db.visualise()\n pass\n","repo_name":"noorvir/grasp_ucl","sub_path":"database/generate_ucl_dataset.py","file_name":"generate_ucl_dataset.py","file_ext":"py","file_size_in_byte":41497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"38260400136","text":"def gather_credits(*kwargs):\n max_points = kwargs[0]\n credits = 0\n curses = []\n\n for curse_name, current_credits in kwargs[1:]:\n if curse_name not in curses:\n curses.append(curse_name)\n credits += current_credits\n if credits >= max_points:\n break\n\n if max_points > credits:\n return f\"You need to enroll in more courses! You have to gather {max_points - credits} credits more.\"\n\n return f\"Enrollment finished! Maximum credits: {credits}.\\n\" \\\n f\"Courses: {', '.join(sorted(curses))}\"\n\n\nprint(gather_credits(\n 60,\n (\"Basics\", 27),\n (\"Fundamentals\", 27),\n (\"Advanced\", 30),\n (\"Web\", 30)\n))","repo_name":"krassykrastev/python","sub_path":"3.programming_advanced_sep_2023/final_exam/03.enrollment.py","file_name":"03.enrollment.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"22378863221","text":"import sys\nimport cv2\nimport time\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport numpy as np\n\nfrom config.common import bright_red, amber, white, black\n\n# from video.Video import Video\n\n###\n\n\ndef calculate_fps(start_time, end_time, frames):\n \"\"\"\n\n Calculates frames per second to be used in friendly console messages.\n\n :param start_time: time.time() integer seconds\n :param end_time: time.time() integer seconds\n :param frames: number of video frames in the time interval\n :return float fps\n \"\"\"\n elapsed_seconds = end_time - start_time\n\n fps = frames / elapsed_seconds\n\n return fps\n\n\ndef box_the_droplet(display_frame, droplet, color=bright_red, margin=5):\n \"\"\"\n Draw a box around a contour, using the specified BGR color, and with specified pixel\n margin. Default margin is 5px larger than contour bounding-box, so it doesn't hide\n the edges. 
Bright red is the default color.\n\n :param display_frame: np video frame\n :param droplet: droplet object whose OpenCV contour is boxed\n :param color: tuple, BGR color tuple\n :return: np video frame\n \"\"\"\n box_x, box_y, box_w, box_h = cv2.boundingRect(droplet.contour)\n box_point_1 = tuple([box_x - margin, box_y - margin])\n box_point_2 = tuple([box_x + box_w + margin, box_y + box_h + margin])\n cv2.rectangle(display_frame, box_point_1, box_point_2, color=color, thickness=1)\n # return display_frame, box_x, box_y, box_w, margin\n return display_frame, box_point_1, box_point_2\n\n\ndef draw_text(\n np_image, xy, text, fill=(255, 255, 255, 255), font=None, angle=0, antialias=True\n):\n \"\"\"\n Use PIL image library to draw text on a numpy image.\n Converts an opencv np image to a PIL canvas and back again.\n xy position is upper left corner of right-reading text\n Function expects a BGR color, and it will be translated to RGB for drawing.\n BGRA is acceptable for transparent text, 0-255 alpha range.\n\n :param np_image: numpy image to which text will be added\n :param xy: origin xy tuple\n :param text: text to write\n :param fill: BGR or BGRA color tuple\n :param font: PIL ImageFont font name from config/general.py\n :param angle: rotation angle for text - only 90 degree increments right now\n :param antialias: antialias flag, boolean\n\n :return: np image with added text, with the same number of channels as the input image\n \"\"\"\n\n height, width, depth = np_image.shape\n\n # Add an alpha channel if the image doesn't have one\n ALPHA_CHANNEL_ADDED = False\n if depth == 3:\n ALPHA_CHANNEL_ADDED = True\n np_image = add_alpha_channel(np_image)\n\n # Flip the color information to RGB.\n # (This isn't really needed, as PIL will ignore the color order: it doesn't know\n # that our color values are BGR. However, it makes debugging easier if I peek at\n # an intermediate stage of the process and red is red. :)\n np_image[:, :, [0, 1, 2]] = np_image[:, :, [2, 1, 0]]\n\n # max_dimension = max(width, height)\n\n # Add alpha for full opacity to supplied color if it doesn't already have an alpha value.\n if len(fill) == 3:\n BGRA_color = fill + (255,)\n else:\n BGRA_color = fill\n\n # Flip the requested text fill color to RGBA as opencv uses BGRA.\n (b, g, r, a) = BGRA_color\n RGBA_color = (r, g, b, a)\n\n # Make a PIL image from the supplied opencv image.\n _imagepil = Image.fromarray(np_image)\n\n # base = Image.open(image.convert(\"RGBA\")\n\n # Create a new image to draw text on.\n text_image = Image.new(\"RGBA\", _imagepil.size, (255, 255, 255, 0))\n\n # build a transparency mask large enough to hold the text\n # canvas_size = (max_dimension * 2, max_dimension * 2)\n # text_canvas = Image.new(\"RGBA\", pil_image.size, (0, 0, 0, 0))\n\n # Create a drawing context on the PIL image...\n draw = ImageDraw.Draw(text_image)\n\n # This is an undocumented hack in the PIL code to turn off font antialiasing.\n if not antialias:\n draw.fontmode = \"1\"\n\n # .. 
and draw the text.\n draw.text(xy, text, fill=RGBA_color, font=font)\n\n if angle % 90 == 0:\n # rotate by multiple of 90 deg is easier\n text_image = text_image.rotate(angle, center=xy)\n else:\n # For now, we're just doing multiples of 90 degrees.\n # To do odd angles, we'll need to scale up/scale down the text to smooth jaggies.\n pass\n\n # Notes here are random fodder for arbitrary rotation\n\n # # rotate an enlarged mask to minimize jaggies\n # bigger_canvas = text_image.resize((max_dim*8, max_dim*8),\n # resample=Image.BICUBIC)\n # rotated_canvas = text_image.rotate(angle).resize(\n # canvas_size, resample=Image.LANCZOS)\n #\n # # crop the mask to match image\n # canvas_xy = (max_dimension - xy[0], max_dimension - xy[1])\n # bounding_box = canvas_xy + (canvas_xy[0] + width, canvas_xy[1] + height)\n # canvas = rotated_canvas.crop(bounding_box)\n\n # paste the appropriate color, with the text transparency mask\n # color_image = Image.new('RGBA', (width, height), RGBA_color)\n # pil_image.paste(canvas)\n\n # Composite the text on top of the base image.\n composited_image = Image.alpha_composite(_imagepil, text_image)\n\n # composited_image.show() # Debug\n\n # Back to a numpy image.\n np_output_image = np.array(composited_image)\n\n # Remove the alpha channel if we added one.\n if ALPHA_CHANNEL_ADDED:\n remove_alpha_channel(np_output_image)\n\n # And flip RGB back to BGR\n np_output_image[:, :, [0, 1, 2]] = np_output_image[:, :, [2, 1, 0]]\n # Note that this preserves the alpha channel data, whereas\n # np_output_image = np_output_image[:,:,[2,1,0]]\n # does not.\n\n return np_output_image\n\n\ndef measure_text_size(np_image, text, font=None):\n \"\"\"\n Wrapper for PIL text size function.\n \"\"\"\n\n pil_image = Image.fromarray(np_image)\n draw = ImageDraw.Draw(pil_image)\n\n return draw.textsize(str(text), font)\n\n\ndef threshold_and_find_droplets(frame, threshold, border_width=None, DROPLET_SCAN=True):\n \"\"\"\n Find all the droplets in a video frame.\n\n :param frame: np video frame image\n :param threshold: int from 1-254 to use as a brightness threshold\n :param border_width: int width of border frame to blank, to eliminate edge light scatter\n\n :return: np array of found droplet contours\n :return: grayscale image after thresholding\n \"\"\"\n\n # Convert the image to grayscale.\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Block out the border to reduce false positives\n bordered_gray_frame = recolor_border(gray_frame, border_width)\n\n # Threshold to lose light scatter in image.\n thresholded_frame = threshold_image(bordered_gray_frame, threshold)\n\n if DROPLET_SCAN:\n # Find the droplets.\n droplets = cv2.findContours(\n thresholded_frame,\n mode=cv2.RETR_EXTERNAL,\n # method=cv2.CHAIN_APPROX_SIMPLE)[-2]\n method=cv2.CHAIN_APPROX_NONE,\n )[-2]\n\n return droplets, thresholded_frame\n else:\n return thresholded_frame\n\n\ndef threshold_image(source_image, threshold_value):\n \"\"\"\n Removes all values in a grayscale image with values less than supplied threshold.\n\n :param source_image: grayscale np video frame image\n :param threshold_value: int value from 1-254 to use in thresholding\n\n :return: thresholded np array video frame\n \"\"\"\n if len(source_image.shape) > 2:\n sys.exit(\n \"\\nOops. 
threshold_image wants a grayscale image, not one with a bit depth of {}.\\n\".format(\n source_image.shape[2]\n )\n )\n\n _, thresholded_frame = cv2.threshold(\n source_image, threshold_value, 255, cv2.THRESH_BINARY\n )\n\n return thresholded_frame\n\n\ndef recolor_border(source_image, border_width, border_color=(0, 0, 0)):\n \"\"\"\n Recolors the border of an image. Default border color is black.\n\n :param source_image: np array video image\n :param border_width: width of border to recolor\n :param border_color: bgr color tuple, color to apply to border\n\n :return: np image with border in requested color\n \"\"\"\n\n height, width = source_image.shape[:2]\n bw = border_width\n\n # Rectangle drawing in opencv sucks: corners are rounded, and the line width\n # straddles the dimension line. So we'll slice the data to crop the image\n # and then add a black border.\n bordered_image = source_image[bw : height - bw, bw : width - bw]\n bordered_image = cv2.copyMakeBorder(\n bordered_image, bw, bw, bw, bw, cv2.BORDER_CONSTANT, value=border_color\n )\n\n return bordered_image\n\n\ndef aggressive_droplet_frame(source_frame, droplets, more_droplets):\n\n droplet_frame = source_frame.copy()\n\n # Convert frame back to color so we can write in color on it.\n droplet_frame = cv2.cvtColor(droplet_frame, cv2.COLOR_GRAY2RGB)\n total_optimistic_area = 0\n total_conservative_area = 0\n\n h, w = droplet_frame.shape[:2]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n\n flood_connectivity = 8\n optimistic_floodfill_flags = flood_connectivity | cv2.FLOODFILL_FIXED_RANGE\n\n # Color in aggressive droplet set, which will also catch original droplets.\n for droplet in more_droplets:\n mask[:] = 0\n seed_point = tuple(droplet[0][0])\n area = cv2.floodFill(\n droplet_frame,\n mask,\n seed_point,\n amber,\n (20,) * 3,\n (20,) * 3,\n optimistic_floodfill_flags,\n )[0]\n total_optimistic_area += area\n\n # Fill original droplets with black, leaving only aggressive additions.\n for droplet in droplets.contour:\n mask[:] = 0\n seed_point = tuple(droplet[0])\n area = cv2.floodFill(\n droplet_frame,\n mask,\n seed_point,\n black,\n (20,) * 3,\n (20,) * 3,\n optimistic_floodfill_flags,\n )[0]\n total_conservative_area += area\n\n # cv2.imwrite('./saved_colorized_aggressive_image.png', droplet_frame) #Debug\n\n return droplet_frame, total_optimistic_area - total_conservative_area\n\n # gained_area = total_optimistic_area - total_area\n\n\ndef add_alpha_channel(source_image, transparent_color=None):\n \"\"\"\n Add an alpha channel to a numpy image, if it doesn't already have one.\n\n If supplied an optional BGR color, change all pixels in the image with that color\n value to completely transparent, for example, making all the black areas in an image\n transparent, in preparation for compositing.\n\n :param source_image: np array video frame\n :param transparent_color: int value for alpha channel pixels, 0-255\n :return: 4-channel np image\n \"\"\"\n height, width, depth = source_image.shape # image dimensions\n\n if depth == 3:\n # Add the alpha channel. 
Channel values are 0 to 255, transparent to opaque.\n        # If there are already 4 channels, we don't need to add one.\n        # (And if there are only 1 or 2 channels, don't do anything unpredictable.)\n        alpha_image = np.concatenate(\n            [source_image, np.full((height, width, 1), 255, dtype=np.uint8)], axis=-1\n        )\n    else:\n        alpha_image = source_image\n\n    if transparent_color:\n        # create a mask with all pixels matching the supplied transparent color\n        alpha_mask = np.all(source_image == transparent_color, axis=-1)\n        # change the alpha channel values to 0 (transparent) for all those pixels\n        alpha_image[alpha_mask, -1] = 0\n\n    return alpha_image\n\n\ndef remove_alpha_channel(source_image):\n    \"\"\"\n    Remove the alpha channel from a numpy image, if it has one. Returns the\n    unmodified image if it's only BGR and not BGRA.\n\n    :param source_image: np source image\n    :return: BGR np image\n    \"\"\"\n    height, width, depth = source_image.shape  # image dimensions\n\n    if depth == 4:\n        # Remove the alpha channel.\n        image_without_alpha = source_image[:, :, :3]\n        return image_without_alpha\n\n    # If there is no alpha channel, we don't need to remove it.\n    return source_image\n\n\n###\n","repo_name":"rlevine/droplet_video_analyzer","sub_path":"utils/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":12137,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"29544560640","text":"# creates: pt_h2.png\n\nfrom ase import Atoms\nfrom ase.io import write\n\na = 2.41  # Pt binding length\nb = 0.90  # H2 binding length\nc = 1.70  # Pt-H binding length\nL = 7.00  # width of unit cell\n\n# Setup the Atoms for the scattering region.\natoms = Atoms('Pt5H2Pt5',\n              cell=[3 * a + b + 2 * c, L, L],\n              pbc=True)\n\natoms.positions[:5, 0] = [(i - 2.5) * a for i in range(5)]\natoms.positions[-5:, 0] = [(i - 2.5) * a + b + 2 * c for i in range(4, 9)]\natoms.positions[5:7, 0] = [1.5 * a + c, 1.5 * a + c + b]\natoms.positions[:, 1:] = L / 2.\n\nwrite('pt_h2.pov', atoms, show_unit_cell=2,\n      transparent=False, display=False, run_povray=True)\n","repo_name":"ryancoleman/lotsofcoresbook2code","sub_path":"Pearls2_Chapter14/gpaw/doc/exercises/transport/pt_h2.py","file_name":"pt_h2.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"14"} +{"seq_id":"5627084781","text":"numarValori = int(input())\nvectorValori = []\nfor i in range(0, numarValori):\n    vectorValori.append(float(input()))\nprag = float(input())\n\nvectorProcesat = []\nfor i in range(0, numarValori):\n    if vectorValori[i] > prag:\n        vectorProcesat.append(1)\n    else:\n        vectorProcesat.append(0)\n    \nvectorProcesat.append(0)\n    \nnumarSecvente = 0\n\n#print(vectorProcesat)\n\nfor i in range(1, numarValori + 1):\n    if vectorProcesat[i] == 0 and vectorProcesat[i-1] == 1:\n        numarSecvente += 1\n\nprint(numarSecvente)\n    \n\n","repo_name":"AlexCvoan/eccpr-python","sub_path":"Problema 10 - Secvenţe lungi de numere mari/seccventelungi.py","file_name":"seccventelungi.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"17200465365","text":"\ndef max_subarray(numbers):\n    good = False\n    for i in numbers:\n        if i > 0:\n            good = True\n    if not good:\n        return None\n\n    best_sum = 0.\n    best_start = best_end = 0\n    current_sum = 0\n    for current_end, x in enumerate(numbers):\n        if current_sum <= 0:\n            current_start = current_end\n            current_sum = x\n        else:\n            current_sum 
+= x\n        if current_sum > best_sum:\n            best_sum = current_sum\n            best_start = current_start\n            best_end = current_end + 1\n\n    return numbers[best_start:best_end]","repo_name":"develTM/PIPR-domowe","sub_path":"lab 6/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"9074327661","text":"#ESP32 Micropython example to read an input Pin (push button)\n#let us import Pin and sleep modules\n\nprint('Hello Input Pins ;) ')\nfrom machine import Pin\nfrom time import sleep\n\n#LED is connected to D2 as output\nled = Pin(2, Pin.OUT)\n#push button is connected to Pin 13 , you can change it as needed\npush_button = Pin(13, Pin.IN)\n\n#we are good..\n#let us create a loop to always monitor the button status and drive the LED on or off\n\nwhile True:\n    logic_state = push_button.value() #read the button state into logic_state variable\n    if logic_state == True: # if button is pressed, turn on LED\n        led.value(1)\n    else: # if button is not pressed, turn off LED\n        led.value(0)","repo_name":"hilmyfachriii/-UAS-Final-Project-Python-IoT-","sub_path":"led_micropython.py","file_name":"led_micropython.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"12198195172","text":"def solution(s):\n    time = 0\n    zeros = 0\n    while True:\n        if s == '1':\n            return [time,zeros]\n        time +=1\n        zeros += s.count('0')\n        temp = s.count('0')\n        s = format(len(s) - temp,'b')\n","repo_name":"oguuk/Programmers","sub_path":"Lv2/월간 코드 챌린지 시즌1 이진 변환 반복하기.py","file_name":"월간 코드 챌린지 시즌1 이진 변환 반복하기.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"33020559712","text":"import requests\nimport json\n\n\nclass initiate_calls(object):\n    \"\"\"\n    - Class to initiate the calls\n    - Will define the initiate request method used by all the searches.\n    - Will also define the individual search methods\n    \"\"\"\n\n    def init_request(self, url, payload):\n        self.url = url\n        self.payload = payload\n        result = None\n        try:\n            r = requests.get(self.url, params=self.payload, timeout=15)\n            if r.status_code == 200:\n                result = json.loads(r.text)\n            return result, r.status_code, r.elapsed.total_seconds()\n        except Exception:\n            # return a generic error, status = 500, from IANA HTTP status code\n            return None, 500, -1\n","repo_name":"lazarustanaka11/Book-and-Song-Search","sub_path":"app/src/init_request.py","file_name":"init_request.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"74620041615","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport numpy as np\nimport argparse\nimport sys\nimport os\nimport itertools\n\ntry: # Python2\n    from urllib import urlretrieve\nexcept ImportError: #Python3\n    from urllib.request import urlretrieve\nimport subprocess\n\n# Get the directories where we launched this script from and where the\n# downsample script is located.\ntest_dir = os.path.dirname(os.path.realpath(__file__))\nscript_dir = \"{0}/../src/\".format(test_dir)\nsys.path.append(script_dir)\n\nimport downsample \n\ntol = 1e-16 # Set the tolerance of the tests. 
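The tests below only make sense for one particular downsampling semantics: `test_multiple_input` fills every k-th cell with k^3 and expects a uniform output of 1.0, which is what a block mean produces. A minimal sketch of that behaviour, assuming `downsample.downsample_grid` is a block mean over cubic blocks; `downsample_grid_sketch` is an illustrative name, not the module's actual code:

```python
# Hypothetical block-mean downsampler matching what these tests assume;
# the real downsample.downsample_grid may differ in detail.
import numpy as np

def downsample_grid_sketch(grid, output_gridsize):
    n = grid.shape[0]  # cubic input assumed, as in the tests
    if n % output_gridsize != 0:
        raise RuntimeError("output_gridsize must evenly divide the input size")
    f = n // output_gridsize  # linear shrink factor
    # Split each axis into (output_gridsize, f) blocks, then average per block.
    blocks = grid.reshape(output_gridsize, f, output_gridsize, f,
                          output_gridsize, f)
    return blocks.mean(axis=(1, 3, 5))
```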
\n\n\ndef unit_tests(grid, expected_gridsize, expected_datatype=np.float64):\n    \"\"\"\n    Tests some fundamental properties of the grids output by `downsample`.\n\n    Parameters\n    ----------\n\n    grid : `~numpy.ndarray`\n        The 3D downsampled array created by `downsample`. \n    \n    expected_gridsize : int, optional\n        The expected 1D size of output grid. Default : 128 and 64.\n\n    Returns\n    ----------\n\n    None.\n\n    Errors\n    ----------\n\n    RuntimeError\n        Raised if the output grid is not in the expected shape specified when\n        `downsample` was called.\n        Raised if any values in the output grid are not `np.float64` types. \n    \"\"\"\n\n    # Check that the grid is cubic with the expected gridsize.\n    if not grid.shape == (expected_gridsize, \n                          expected_gridsize,\n                          expected_gridsize):\n        print(\"The output grid shape was expected to be ({0}, {0}, {0})\" \\\n              .format(expected_gridsize))\n        print(\"However the shape was {0}\".format(grid.shape))\n        raise RuntimeError\n\n    # Check that every element is of the expected datatype.\n    for (i, j, k) in itertools.product(range(expected_gridsize),\n                                       range(expected_gridsize),\n                                       range(expected_gridsize)):\n        if not type(grid[i,j,k]) == expected_datatype: \n            print(\"The input data format was a `{4}`.  The output \" \n                  \"data type should be identical however for element \" \n                  \"({0},{1},{2}) it was {3}\".format(i,j,k,type(grid[i,j,k]),\n                                                    expected_datatype)) \n            raise RuntimeError\n\n\ndef test_homogenous_input(input_gridsize=128, output_gridsize=64):\n    \"\"\"\n    Tests that passing a homogenous input grid will produce a homogenous\n    output.\n\n    The test will fail if the output grid is not homogenously filled with\n    values of 1.0.\n\n    We also run a suite of unit tests (see `unit_tests` function) that will\n    return a RuntimeError if they're not passed.\n\n    Parameters\n    ----------\n\n    input_gridsize, output_gridsize : int, optional\n        1D size of the input/output grids. Default : 128 and 64.\n\n        .. note::\n            `output_gridsize` must be smaller than `input_gridsize` and divide\n            it evenly. If not, a `RuntimeError` will be raised by\n            `downsample.downsample_grid`.\n\n    Returns\n    ----------\n\n    None.\n\n    Errors\n    ----------\n\n    RuntimeError\n        Raised if the output grid contains values that are not within the range\n        0.99999 to 1.00001.\n    \"\"\"\n\n    # Generate a homogenous input grid filled with 1. \n    input_grid = np.ones((input_gridsize, input_gridsize, input_gridsize),\n                         dtype=np.float64)\n\n    # Perform the downsampling.\n    output_grid = downsample.downsample_grid(input_grid, output_gridsize)\n\n    # Find any instances where the output grid is not 1.\n    w = np.where((output_grid < 1.0-tol) | (output_grid > 1.0+tol))[0]\n\n    # Raise error.\n    if len(w) > 0:\n        print(\"We tested a homogenous input grid with every cell containing a \"\n              \"value of 1.0. We expected the output grid to contain values of \"\n              \"1.0 as well.\")\n        print(\"However cells {0} had values {1}\".format(w, output_grid[w]))\n        raise RuntimeError\n\n    # Now run some unit tests that check some properties.\n    unit_tests(output_grid, output_gridsize)\n\n\ndef test_multiple_input(input_gridsize=128, output_gridsize=64):\n    \"\"\"\n    Tests that passing an input grid where every input_gridsize/output_gridsize\n    cell is filled with a value of (input_gridsize/output_gridsize)^3 produces\n    a grid that is homogenously filled with values of 1.0. \n\n    The test will fail if the output grid is not homogenously filled with\n    values of 1.0. 
\n\n    We also run a suite of unit tests (see `unit_tests` function) that will\n    return a RuntimeError if they're not passed.\n\n    Parameters\n    ----------\n\n    input_gridsize, output_gridsize : int, optional\n        1D size of the input/output grids. Default : 128 and 64.\n\n        .. note::\n            `output_gridsize` must be smaller than `input_gridsize` and divide\n            it evenly. If not, a `RuntimeError` will be raised by\n            `downsample.downsample_grid`.\n\n    Returns\n    ----------\n\n    None.\n\n    Errors\n    ----------\n\n    RuntimeError\n        Raised if the output grid contains values that are not close to 1\n        (within tolerance defined by the global variable `tol`).\n    \"\"\"\n\n    # Ratio in grid size. \n    conversion = int(input_gridsize / output_gridsize)\n    \n    input_grid = np.zeros((input_gridsize, input_gridsize, input_gridsize))\n\n    # We fill every conversion-th cell with a value of conversion cubed.\n    for (i, j, k) in itertools.product(range(output_gridsize),\n                                       range(output_gridsize),\n                                       range(output_gridsize)):\n        input_grid[i*conversion, j*conversion, k*conversion] = conversion**3 \n\n    # Run the downsampler.\n    output_grid = downsample.downsample_grid(input_grid, output_gridsize)\n\n\n    # Find any instances where the output grid is not 1.\n    w = np.where((output_grid < 1.0-tol) | (output_grid > 1.0+tol))[0] \n    if len(w) > 0: \n        print(\"We tested an input grid with every {0} cell containing a value \"\n              \"of {1}. We expected the output grid to contain values of 1.0 \" \n              \"as well.\".format(conversion, conversion**3)) \n        print(\"However cells {0} had values {1}\".format(w, output_grid[w]))\n        raise RuntimeError\n\n    # Now run some unit tests that check some properties.\n    unit_tests(output_grid, output_gridsize)\n\n\ndef test_random(input_gridsize=128, output_gridsize=64, \n                seed=12, save_output=False):\n    \"\"\"\n    Generates an input grid of random numbers. This is then checked against a\n    saved output grid generated using the same seed.\n\n    Parameters\n    ----------\n\n    input_gridsize, output_gridsize : int, optional\n        1D size of the input/output grids. Default : 128 and 64.\n\n        .. note::\n            `output_gridsize` must be smaller than `input_gridsize` and divide\n            it evenly. If not, a `RuntimeError` will be raised by\n            `downsample.downsample_grid`.\n\n    seed : int, optional\n        Seed used for the random number generator. Default : 12.\n\n    save_output : boolean, optional\n        Dictates if we want to save the output grid as the 'correct' code. If\n        you want to test a random grid with different gridsizes/seed than the\n        default, this will need to be set to `True` for the first time.\n\n        .. warning::\n            Ensure that the code is running 100% correctly before turning this\n            variable on. Please run the tests using default parameters first.\n\n    Returns\n    ----------\n\n    None.\n\n    Errors\n    ----------\n\n    RuntimeError\n        Raised if the randomly generated input grid does not match (to a\n        tolerance defined by the global variable `tol`) the saved grid. 
\n \"\"\"\n\n # Set the RNG seed and generate an input grid.\n np.random.seed(seed)\n\n input_grid = np.random.rand(input_gridsize,\n input_gridsize,\n input_gridsize)\n\n # Run the code with the randomly generated input grid.\n output_grid = downsample.downsample_grid(input_grid, output_gridsize)\n\n # Now we want to set up the known grid.\n known_grid_name = \"{0}/known_grid_in{1}_out{2}_seed{3}.npz\" \\\n .format(test_dir, input_gridsize, output_gridsize, seed) \n\n # If we're saving a new 'correct' output grid, do so and exit.\n if save_output: \n np.savez(known_grid_name, output_grid)\n return\n\n # Otherwise read in the known grid and shape it properly.\n known_grid = (np.load(known_grid_name))[\"arr_0\"]\n known_grid.shape = (output_gridsize, output_gridsize, output_gridsize)\n\n # Find any instances where the grids disagree.\n w = np.where(abs(known_grid - output_grid) > tol)[0]\n\n # Raise error.\n if len(w) > 0:\n print(\"We compared an input grid with randomly generated data with \"\n \"random seed {0}. Reading the known input grid ({1}), we had \"\n \"cells that contained different values.\".format(seed,\n known_grid_name))\n print(\"These were cells {0} with difference {1}\".format(w,\n known_grid-output_grid[w]))\n raise RuntimeError\n\n # Now run some unit tests that check some properties.\n unit_tests(output_grid, output_gridsize)\n\n\ndef run_tests():\n \"\"\"\n Wrapper to run all the tests.\n\n Parameters\n ----------\n\n None.\n\n Returns\n ----------\n\n None.\n \"\"\"\n\n print(\"=================================\")\n print(\"Running tests\")\n print(\"=================================\")\n print(\"\")\n\n print(\"Testing a homogenous grid input\")\n test_homogenous_input()\n\n print(\"\")\n print(\"Testing an input where every input_gridsize/output_gridsize cell \" \n \"has a value of (input_gridsize/output_gridsize)^3.\")\n test_multiple_input()\n\n print(\"\")\n print(\"Testing a randomly generated grid with known seed.\")\n test_random()\n\n print(\"\")\n print(\"=================================\")\n print(\"All tests passed!\")\n print(\"=================================\")\n\n\nif __name__ == '__main__':\n\n run_tests()\n","repo_name":"jacobseiler/testing_tutorial","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10075,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"14"} +{"seq_id":"38227356180","text":"with open('input.txt', 'r') as file:\n left_count = 0\n right_count = 0\n count = 0\n while True:\n character = file.read(1)\n if character == '(':\n left_count += 1\n count += 1\n elif character == ')':\n right_count += 1\n count += 1\n if left_count == right_count and character == ')':\n count += 1\n print(count)\n break\n if not character:\n break \n","repo_name":"b1n4ryw0rm/gym","sub_path":"aoc/2015/day1_COMPLETE/part_b.py","file_name":"part_b.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"4895409971","text":"#######puzzleVerma#######\n\n\nimport sys\nimport math\nmod = 10**9+7\n\n\nLI=lambda:[int(k) for k in input().split()]\ninput = lambda: sys.stdin.readline().rstrip()\nIN=lambda:int(input())\nS=lambda:input()\nr=range\n\n\nfor t in r(IN()):\n ans=0\n s=S()\n b1=0\n b2=0\n for ele in s:\n if ele==\"(\":\n b1+=1\n elif ele==\"[\":\n b2+=1\n elif ele==\")\":\n if b1>0:\n b1-=1\n ans+=1\n else:\n if b2>0:\n b2-=1\n ans+=1\n 
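The Advent of Code record above tracks separate `left_count`/`right_count` totals and reports a position once they balance after a `)`. The usual Part Two formulation asks for the first character position at which a running floor counter reaches -1, which reads more directly with a single counter. A minimal sketch of that pattern; the `input.txt` file name is carried over from the snippet above:

```python
# Running-floor variant of the parenthesis walk (first position reaching -1).
floor = 0
with open('input.txt') as f:
    for position, ch in enumerate(f.read(), start=1):
        if ch == '(':
            floor += 1
        elif ch == ')':
            floor -= 1
        if floor == -1:  # first time we dip into the basement
            print(position)
            break
```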
print(ans)","repo_name":"puzzleVerma/Codeforces-Solution","sub_path":"1452C.py","file_name":"1452C.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"17017378590","text":"#Author:Anliu\nfrom multiprocessing import Process,Pool\nimport time\ndef Foo(n):\n time.sleep(2)\n print(\"This is a For\")\n return n + 100\ndef bar(rag):\n print(\"this is a back valuse : \",rag) #回调函数接受的参数将是调运函数的返回值。\nif __name__ == '__main__':\n pool = Pool(2)\n for n in range(10):\n #pool.apply(func=Foo,args=(n,)) #同步\n pool.apply_async(func=Foo,args=(n,),callback=bar) #异步,回调函数是bar\n #pool.join()\n pool.close()\n pool.join() #进程池中进程执行完毕后再关闭,如果注释,那么程序直接关闭。\n","repo_name":"anliu520/pyprogram","sub_path":"day034/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"29520264557","text":"import matplotlib.pyplot as plt\n\nwith open('data2.txt', 'r') as f:\n s = list(filter(lambda el: el, f.read().split('\\n')))\n fig, axs = plt.subplots(len(s) // 6, 3)\n n = len(s)\n min_x = float(s[0].split()[0])\n\n max_x = min_x\n min_y = float(s[1].split()[0])\n max_y = min_y\n\n for i in range(n // 2):\n x = [float(j) for j in s[2 * i].split()]\n y = [float(j) for j in s[2 * i + 1].split()]\n min_x = min(min_x, min(x))\n max_x = max(max_x, max(x))\n min_y = min(min_y, min(y))\n max_y = max(max_y, max(y))\n\n for i in range(n // 2):\n x = [float(j) for j in s[2 * i].split()]\n y = [float(j) for j in s[2 * i + 1].split()]\n axs[i // 3, i % 3].plot(x, y)\n\n axs[i // 3, i % 3].minorticks_on()\n axs[i // 3, i % 3].grid(which='major',\n color='k',\n linewidth=1)\n axs[i // 3, i % 3].grid(which='minor',\n color='grey',\n linestyle=':')\n\n axs[i // 3, i % 3].set_title('Frame ' + str(i + 1))\n axs[i // 3, i % 3].set_xlim((min_x, max_x))\n axs[i // 3, i % 3].set_ylim((min_y - 1, max_y + 1))\n\n plt.show()\n","repo_name":"NikolayMorgunov/MPL-lab","sub_path":"ep2.py","file_name":"ep2.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"36112744485","text":"#!/usr/bin/python3.4\n\n# Project Euler:\n# Problem #30:\n#\n# Answer: 443839\n\nimport sys\n\ndef list_digits(num):\n digs = []\n n = num\n while n > 0:\n digs.append(n%10)\n n = n // 10\n return digs\n\ndef find_range(exp):\n max_dig = 9 ** exp\n dig = 1\n while int(dig*\"9\") 1:\n exp = int(sys.argv[1])\n else:\n exp = 4\n\n ans = find_narc_numbers(exp)\n print(\"Sum:\",ans,\"=\",sum(ans))\n\nif __name__ == '__main__':\n main()\n","repo_name":"gerglion/ProjectEuler","sub_path":"Problem_30/problem30.py","file_name":"problem30.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"17757637090","text":"#! 
/usr/bin/env python\nimport os\nimport re\nimport sys\nfrom collections import defaultdict as dd\nfrom sys import argv\n#Given a file with contig lengths and locations of the selected repeat (output from identify_telomere_repeats.py), quantify the number of repeats at xx bp off the ends of the contig.\n\nscript, contig_lengths, locations, cutoff_bp = argv\n\nbare = r\"(\\w+)(.bed$)\"\nout_sub = r\"\\1\"\no = re.sub(bare, out_sub, locations)\nout_name = o + \"_\" + str(cutoff_bp) + \".txt\"\nout = open(out_name, 'w')\n\n#Determine the end boundary for each contig:\nend_boundary = dict()\ncl_h = open(contig_lengths)\nfor line in cl_h:\n    fields = line.split()\n    contig_id = fields[0].strip()\n    boundary = int(fields[1]) - int(cutoff_bp)\n    end_boundary[contig_id] = boundary\ncl_h.close()\n#Parse the results file and save results to a dictionary\ncounts_start = dd(int)\ncounts_end = dd(int)\nfl_h = open(locations)\nfor line in fl_h:\n    fields = line.split()\n    contig_id = fields[0].strip()\n    loc = int(fields[1])\n    if loc < int(cutoff_bp):\n        counts_start[contig_id] += 1\n    elif loc > end_boundary[contig_id]:\n        counts_end[contig_id] += 1\nfl_h.close()\n\nkeylist = sorted(end_boundary.keys())\nout.write(\"Used \" + cutoff_bp + \" bp to define contig ends\" + \"\\n\")\nout.write(\"contig name\" + \"\\t\" + \"number of repeats at 5' end\" + \"\\t\" + \"number of repeats at 3' end\" + \"\\n\")\nfor key in keylist:\n    out.write(key + \"\\t\" + str(counts_start[key]) + \"\\t\" + str(counts_end[key]) + \"\\n\")\nout.close()","repo_name":"harrisonlab/popgen","sub_path":"codon/how_many_repeats_regions.py","file_name":"how_many_repeats_regions.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"14"} +{"seq_id":"17678604890","text":"from collections import defaultdict\n\n\ndef build_graph(edge_list):\n    graph = defaultdict(list)\n    seen_edges = defaultdict(int)\n    for src, dst, weight in edge_list:\n        seen_edges[(src, dst, weight)] += 1\n        if seen_edges[(src, dst, weight)] > 1:  # checking for duplicated edge entries\n            continue\n        graph[src].append((dst, weight))\n        graph[dst].append((src, weight))  # remove this line if the edge list is directed\n    return graph\n\n\ndef dijkstra(graph, src, dst=None):\n    nodes = []\n    for n in graph:\n        nodes.append(n)\n        nodes += [x[0] for x in graph[n]]\n\n    q = set(nodes)\n    nodes = list(q)\n    dist = dict()\n    prev = dict()\n    for n in nodes:\n        dist[n] = float('inf')\n        prev[n] = None\n\n    dist[src] = 0\n\n    while q:\n        u = min(q, key=dist.get)\n        q.remove(u)\n\n        if dst is not None and u == dst:\n            return dist[dst], prev\n\n        for v, w in graph.get(u, ()):\n            alt = dist[u] + w\n            if alt < dist[v]:\n                dist[v] = alt\n                prev[v] = u\n\n    return dist, prev\n\n\ndef find_path(pr, node):  # generate path list based on parent pointers 'prev'\n    p = []\n    while node is not None:\n        p.append(node)\n        node = pr[node]\n    return p[::-1]\n\n\nif __name__ == \"__main__\":\n    edges = [\n        [\"A\", \"S\", 4],\n        [\"A\", \"D\", 5],\n        [\"A\", \"E\", 3],\n        (\"S\", \"B\", 5),\n        (\"S\", \"D\", 11),\n        (\"S\", \"C\", 10),\n        (\"D\", \"E\", 2),\n        (\"D\", \"F\", 5),\n        (\"D\", \"C\", 3),\n        (\"C\", \"B\", 8),\n        (\"C\", \"F\", 2),\n        (\"B\", \"F\", 9),\n        (\"F\", \"E\", 4),\n        (\"F\", \"G\", 7),\n        (\"E\", \"G\", 8)\n\n    ]\n\n    g = build_graph(edges)\n\n    print(\"=== Dijkstra ===\")\n\n    print(\"--- Single source, single destination ---\")\n    # d, prev = dijkstra(g, \"F\", \"G\")\n    # path = find_path(prev, \"E\")\n    # print(\"A -> E: distance = {}, path = {}\".format(d, 
path))\n\n d, prev = dijkstra(g, \"S\", \"G\")\n path = find_path(prev, \"G\")\n print(\"S -> G: distance = {}, path = {}\".format(d, path))\n\n print(\"--- Single source, all destinations ---\")\n ds, prev = dijkstra(g, \"S\")\n for k in ds:\n path = find_path(prev, k)\n print(\"S -> {}: distance = {}, path = {}\".format(k, ds[k], path))","repo_name":"ht38nhatphan/Learn_code_algortithm","sub_path":"cod/dijctra2.py","file_name":"dijctra2.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"72999676495","text":"import socket\nimport time\n\ninterface = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ninterface.bind(('192.168.0.2', 9991))\naddr = (\"192.168.0.2\", 9990)\nmsg = bytes([200]*200)\n#recv, r = interface.recvfrom(4096)\n#recv = list(recv)\ninterface.sendto(msg, addr)\n#print(recv)\n\n","repo_name":"yoshiV3/communication_pANDd","sub_path":"experimental feature/test_code/control_3-10.py","file_name":"control_3-10.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"7432077373","text":"import pandas as pd\nfrom geopy.geocoders import Nominatim\nimport sqlite3\n\ndef get_db():\n if \"db\" not in g:\n g.db = sqlite3.connect(\"./tanair.db\")\n return g.db\n\ndef close_db(e=None):\n db = g.pop('db', None)\n if db is not None:\n db.close()\n\n# Liste des codes OACI des aérodromes en France\ncodes_oaci = [\n \"LFRU\", # Paris Charles de Gaulle Airport\n \"LFPO\", # Paris Orly Airport\n \"LFMN\", # Nice Côte d'Azur Airport\n \"LFLL\", # Lyon-Saint-Exupéry Airport\n \"LFNG\", \"LFHO\", \"LFTW\", \"LFME\", \"LFNU\", \"LFTN\", \"LFHF\", \"LFLQ\", \"LFMT\", \"LFMS\", \"LFHD\", \"LFMN\", \"LFNV\", \"LFNH\", \"LFMV\", \"LFNT\", \n \"LFNZ\", \"LFNE\", \"LFNR\", \"LFML\", \"LFMA\", \"LFMQ\", \"LFNF\", \"LFMU\", \"LFMP\", \"LFMZ\", \"LFCM\", \"LFNX\", \"LFIF\", \"LFNB\", \"LFHL\",\n \"LFCL\", \"LFBO\", \"LFBF\", \"LFBR\", \"LFBD\", \"LFCS\", \"LFDI\", \"LFDR\", \"LFDM\", \"LFDF\", \"LFBS\", \"LFCH\", \"LFCD\", \"LFIV\", \"LFDK\", \"LFCY\", \n \"LFDC\", \"LFCZ\", \"LFBZ\", \"LFBP\", \"LFBT\", \"LFDT\", \"LFRB\", \"LFRQ\", \"LFES\", \"LFRO\", \"LFRT\", \"LFED\", \"LFEC\", \"LFRV\", \"LFEQ\", \"LFEA\",\n \"LFEB\", \"LFRD\", \"LFRU\", \"LFDY\", \"NTAA\", \"NTTM\", \"FMEE\",\n \n # Ajoutez d'autres codes OACI ici...\n]\n\n# Chargement des correspondances entre les codes OACI et les noms des aérodromes à partir d'un fichier CSV\ncorrespondances = pd.read_csv(\"correspondances_oaci.csv\", delimiter=\";\")\n\n# Fonction pour obtenir le nom de l'aérodrome à partir de son code OACI\ndef get_aerodrome_name(oaci_code):\n row = correspondances[correspondances[\"co OACI\"] == oaci_code]\n if not row.empty:\n return row[\"Nom\"].values[0]\n else:\n return None\n\ndef get_coordinates(oaci_code):\n geolocator = Nominatim(user_agent=\"aerodrome_locator\")\n location = geolocator.geocode(oaci_code + \", France\")\n if location:\n return location.latitude, location.longitude\n else:\n return None\n\n# Créer un DataFrame pour stocker les données des aérodromes\ndf = pd.DataFrame(columns=[\"code OACI\", \"Nom\", \"Latitude\", \"Longitude\"])\n\n'''\n# Remplir le DataFrame avec les coordonnées et les noms des aérodromes\nfor oaci_code in codes_oaci:\n coordinates = get_coordinates(oaci_code)\n aerodrome_name = get_aerodrome_name(oaci_code)\n if coordinates and aerodrome_name:\n df = pd.concat([df, pd.DataFrame({\n 
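The Dijkstra implementation above pulls the next vertex with `min(q, key=dist.get)`, an O(V) scan per step and O(V^2) overall. A binary heap brings that down to O((V+E) log V). Below is a sketch over the same `{node: [(neighbor, weight), ...]}` adjacency shape produced by `build_graph`; stale heap entries are skipped on pop instead of being decreased in place, and unreachable nodes are simply absent from `dist`:

```python
import heapq

def dijkstra_heap(graph, src):
    # graph: {node: [(neighbor, weight), ...]}, as built by build_graph above.
    dist = {src: 0}
    prev = {src: None}
    heap = [(0, src)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float('inf')):
            continue  # stale entry from an earlier, longer route
        for v, w in graph.get(u, ()):
            alt = d + w
            if alt < dist.get(v, float('inf')):
                dist[v], prev[v] = alt, u
                heapq.heappush(heap, (alt, v))
    return dist, prev
```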
\"code OACI\": [oaci_code],\n \"Nom\": [aerodrome_name],\n \"Latitude\": [coordinates[0]],\n \"Longitude\": [coordinates[1]]\n})], ignore_index=True)\n'''\n\nconn = sqlite3.connect('tanair.db')\ncursor = conn.cursor()\nquery = \"INSERT INTO Aerodrome (nom,latitude,longitude,codeOACI) VALUES (?,?,?,?)\"\n\nfor oaci_code in codes_oaci :\n coordinates = get_coordinates(oaci_code)\n aerodrome_name = get_aerodrome_name(oaci_code)\n cursor.execute(query,(aerodrome_name,coordinates[0],coordinates[1],oaci_code))\n print(oaci_code + \"a été ajouté avec succès\")\n conn.commit()\n\nclose_db()\n\n'''\n# Afficher le DataFrame\nprint(df)\n'''","repo_name":"Seikonolff/Projet-Tang-air","sub_path":"refresh_airports/Aerodrome_filler.py","file_name":"Aerodrome_filler.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"20569857049","text":"\"\"\"\nauthor: Wei Li\ndate: 10/19/2020\n\nhttps://leetcode.com/problems/recover-a-tree-from-preorder-traversal/\n\n1028. Recover a Tree From Preorder Traversal\n\nWe run a preorder depth first search on the root of a binary tree.\n\nAt each node in this traversal, we output D dashes (where D is the depth of this node), then we output the value of this node. (If the depth of a node is D, the depth of its immediate child is D+1. The depth of the root node is 0.)\n\nIf a node has only one child, that child is guaranteed to be the left child.\n\nGiven the output S of this traversal, recover the tree and return its root.\n\n\nExample 1:\nInput: \"1-2--3--4-5--6--7\"\nOutput: [1,2,5,3,4,6,7]\n\n 1\n 2 5\n 3 46 7 \n\nExample 2:\nInput: \"1-2--3---4-5--6---7\"\nOutput: [1,2,5,3,null,6,null,4,null,7]\n 1\n 2 5\n 3 6\n 4 7\n\nExample 3:\nInput: \"1-401--349---90--88\"\nOutput: [1,401,null,349,88,90]\n \n\nNote:\n\nThe number of nodes in the original tree is between 1 and 1000.\nEach node will have a value between 1 and 10^9.\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def recoverFromPreorder(self, S: str) -> TreeNode:\n if not S:\n return None\n \n depth_map, S, num, depth = {-1: TreeNode(-1)}, S + '-', 0, 0\n \n for i, c in enumerate(S):\n if c == '-':\n depth += 1\n else:\n num = 10 * num + int(c)\n if S[i + 1] == '-':\n parent = depth_map[depth - 1]\n node = depth_map[depth] = TreeNode(int(num))\n \n \n if parent.left:\n parent.right = node\n else:\n parent.left = node\n\n \n num, depth = 0, 0\n\n return depth_map[0]","repo_name":"zjkang/ds_algorithm","sub_path":"python/tree_bst/traversal/leetcode_1028_recover_a_tree_from_preorder_traversal_hard_frq1.py","file_name":"leetcode_1028_recover_a_tree_from_preorder_traversal_hard_frq1.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"42663475525","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom collections import defaultdict\n\ninput_file = \"input\"\n#input_file = \"test1.txt\"\n\ngrid = defaultdict(int)\ngrid2 = defaultdict(int) # for part 2\n\nwith open(input_file,'r') as f:\n for s in f.readlines():\n p1, p2 = s.split(\" -> \")\n x1,y1 = map(int,p1.split(\",\"))\n x2,y2 = map(int,p2.split(\",\"))\n if x1 == x2:\n for y in range(min(y1,y2),max(y1,y2)+1):\n grid[(x1,y)] += 1\n elif y1 == y2:\n for x in range(min(x1,x2),max(x1,x2)+1):\n grid[(x,y1)] += 1\n 
else:\n dx = x2 - x1\n dy = y2 - y1\n if dx * dy > 0:\n if dx > 0:\n for i in range(dx+1):\n grid2[x1+i,y1+i] += 1\n else: # dx = dy < 0\n for i in range(-dx+1):\n grid2[x2+i,y2+i] += 1\n else: # dx = -dy\n if dx > 0: # dy < 0\n for i in range(dx+1):\n grid2[x1+i,y1-i] += 1\n else:\n for i in range(dy+1):\n grid2[x1-i,y1+i] += 1 \n\nfor k in grid.keys(): # merge grid 1 in 2\n grid2[k] += grid[k]\n\nprint(len([p for p in grid.values() if p > 1]))\nprint(len([p for p in grid2.values() if p > 1]))","repo_name":"plut0nium/AdventOfCode","sub_path":"2021/05/day_05.py","file_name":"day_05.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"24983329481","text":"import torch\nimport subjectlist as subl\nimport os\nimport torchsrc\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef print_network(net):\n\tnum_params = 0\n\tfor param in net.parameters():\n\t\tnum_params += param.numel()\n\tprint(net)\n\tprint('Total number of parameters: %d' % num_params)\n\n# hyper parameters\nepoch_num = 1000\nbatch_size = 1\nlmk_num = 133\nlearning_rate = 0.0001\n\n# 5000 mas\n# train_root_dir = '/share4/huoy1/Deep_5000_Brain/working_dir/test_out'\n# out = '/share4/huoy1/Deep_5000_Brain/working_dir/MAS5000/testing/'\n\n#45 truth\n# train_root_dir = '/share3/huoy1/3DUnet/working_dir/test_out_lr=0.0001'\n# out = '/share4/huoy1/Deep_5000_Brain/working_dir/True45/testing'\n# test_img_dir = '/share4/huoy1/Deep_5000_Brain/testing/resampled'\n\n#1_1_1\ntrain_root_dir = '/share4/huoy1/Deep_5000_Brain/working_dir/1_1_1/test_out'\nout = '/share4/huoy1/Deep_5000_Brain/working_dir/1_1_1/testing/'\ntest_img_dir = '/share4/huoy1/Deep_5000_Brain/testing/part_1_1_1/croped'\n\n\n\n\n\n\nmkdir(out)\n\n# make img list\n\ntest_img_subs,test_img_files = subl.get_sub_list(test_img_dir)\ntest_dict = {}\ntest_dict['img_subs'] = test_img_subs\ntest_dict['img_files'] = test_img_files\n\n\n# load image\ntest_set = torchsrc.imgloaders.pytorch_loader(test_dict,num_labels=lmk_num)\ntest_loader = torch.utils.data.DataLoader(test_set,batch_size=batch_size,shuffle=True,num_workers=1)\n\n# load network\nmodel = torchsrc.models.UNet3D(in_channel=1, n_classes=lmk_num)\n# model = torchsrc.models.VNet()\n\n# print_network(model)\n#\n# load optimizor\n# optim = torch.optim.SGD(model.parameters(), lr=learning_curve() _rate, momentum=0.9)\n\n# load CUDA\ncuda = torch.cuda.is_available()\ntorch.manual_seed(1)\nif cuda:\n\ttorch.cuda.manual_seed(1)\n\tmodel = model.cuda()\n\n# load trainer\ntrainer = torchsrc.Trainer(\n\tcuda=cuda,\n\tmodel=model,\n\ttest_loader=test_loader,\n train_root_dir = train_root_dir,\n\tout=out,\n\tmax_epoch = epoch_num,\n\tbatch_size = batch_size,\n\tlmk_num = lmk_num,\n)\n\n\nprint(\"==start testing==\")\n\nstart_epoch = 0\nstart_iteration = 1\ntrainer.epoch = start_epoch\ntrainer.iteration = start_iteration\ntrainer.test_epoch()\n\n\n\n\n\n\n\n","repo_name":"MASILab/SLANT_CDMRI","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"961535240","text":"import pygame\nfrom random import uniform,randint\n\nclass Meteor(pygame.sprite.Sprite):\n\n\tdef __init__(self,groups,min_range_x=-0.5,max_range_x=1):\n\t\tself.groups=groups\n\t\tsuper().__init__(self.groups)\n\n\t\t#randomizing the meteor 
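The PyTorch test harness above uses the older `torch.cuda.is_available()` plus `model.cuda()` pattern. A device-agnostic sketch of the same setup using `torch.device` and `.to(device)` follows; the `nn.Linear` is a stand-in for the UNet3D model, which is not constructed here, so this only illustrates the idiom:

```python
import torch
from torch import nn

# Device-agnostic variant of the CUDA setup used above; nn.Linear is a
# stand-in model, assuming the goal is simply "use the GPU when present".
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(1)
if device.type == "cuda":
    torch.cuda.manual_seed_all(1)

model = nn.Linear(4, 2).to(device)
batch = torch.randn(8, 4, device=device)  # inputs must live on the same device
out = model(batch)
```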
size\n\t\tmeteor_surf=pygame.image.load(\"graphics/meteor.png\").convert_alpha()\n\t\tmeteor_size=pygame.math.Vector2(meteor_surf.get_size())*uniform(0.5,1.5)\n\t\tself.scaled_surf=pygame.transform.scale(meteor_surf,meteor_size)\n\t\tself.image=self.scaled_surf\n\n\n\t\tself.rect=self.image.get_rect(center=(0,0))\n\n\t\tself.pos=pygame.math.Vector2((randint(-100,1280+100),0))\n\t\tself.direction=pygame.math.Vector2(uniform(min_range_x,max_range_x),1)\n\t\tself.speed=randint(400,600)\n\n\t\tself.timer=0\n\t\tself.create_meteor=True\n\n\t\t#rotation logic\n\t\tself.rotation=0\n\t\tself.rotation_speed=randint(20,50)\n\n\t\t#mask\n\t\tself.mask=pygame.mask.from_surface(self.image)\n\n\n\n\tdef reset_timer(self):\n\t\tif not self.create_meteor:\n\t\t\tcurrent_time=pygame.time.get_ticks()\n\t\t\tif current_time-self.timer>500:\n\t\t\t\tself.create_meteor=True\n\n\tdef meteor_creation(self,dt):\n\t\tif self.create_meteor:\n\t\t\tself.create_meteor=False\n\t\t\tself.timer=pygame.time.get_ticks()\n\n\t\tself.pos+=self.direction*self.speed*dt\n\t\tself.rect.bottomleft=(round(self.pos.x),round(self.pos.y))\n\n\tdef rotate(self,dt):\n\t\tself.rotation+=self.rotation_speed*dt\n\t\t#rotate_surf=pygame.transform.rotate(self.scaled_surf,self.rotation)\n\t\t#this masks the quality loss\n\t\trotate_surf=pygame.transform.rotozoom(self.scaled_surf,self.rotation,1)\n\t\tself.image=rotate_surf\n\t\tself.rect=self.image.get_rect(center=self.rect.center)\n\t\tself.mask=pygame.mask.from_surface(self.image)\n\n\tdef update(self,dt):\n\t\tself.meteor_creation(dt)\n\t\tself.reset_timer()\n\t\tself.rotate(dt)\n\n","repo_name":"FindikBabmbino/Astroid-Game","sub_path":"meteor.py","file_name":"meteor.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"44618209757","text":"class Solution:\r\n def findMaxConsecutiveOnes(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n \"\"\"\r\n m= 0\r\n sn = [str(i) for i in nums]\r\n s= \"\".join(sn).split(\"0\")\r\n # print(s)\r\n for i in s:\r\n m= max( m, len(i))\r\n return m","repo_name":"sihcpro/leetcode","sub_path":"max-consecutive-ones.py","file_name":"max-consecutive-ones.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"27771283141","text":"from os import system, name\nfrom time import sleep\n\ndef divisao(x,y):\n\n if y == 0:\n return f'Divisão não por zero'\n return x / y\n\nopcao = {\n 1: lambda x,y: x + y,\n 2: lambda x,y: x - y,\n 3: lambda x,y: x * y,\n 4: divisao,\n 5: lambda x,y: x ** y,\n 6: lambda *args: exit()\n\n}\n\ndef limpa():\n\n if name == 'nt':\n system('cls')\n else:\n system('clear')\n\ndef menu():\n\n while True:\n limpa()\n print('-'*32)\n print('\\n')\n print(f'1 - Adição \\n'\\\n f'2 - Subtração \\n' \\\n f'3 - Multiplicação \\n' \\\n f'4 - Divisão \\n' \\\n f'5 - Potenciação \\n' \\\n f'6 - Sai \\n'\n )\n op = int(input('Escolha a sua opção: '))\n\n if op == 6:\n break\n\n x, y = input('Entre com os dois numeros utilizando o formato x,y: ').split(',')\n x, y = float(x), float(y)\n\n \n if op in opcao.keys():\n print(opcao[op](x,y))\n sleep(3)\n else:\n print('Opção inválida !!')\n sleep(1)\n\n\nif __name__ == '__main__':\n 
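The calculator above dispatches through `opcao`, a dict of callables keyed by menu option, which replaces a chain of if/elif branches; the same table can be probed safely with `dict.get` so an unknown key does not raise. A small self-contained sketch of that pattern (the symbols and messages here are illustrative, not from the original):

```python
# Dict-of-callables dispatch, in the spirit of the `opcao` table above.
operations = {
    '+': lambda x, y: x + y,
    '-': lambda x, y: x - y,
    '*': lambda x, y: x * y,
    '/': lambda x, y: 'cannot divide by zero' if y == 0 else x / y,
}

def apply_op(symbol, x, y):
    op = operations.get(symbol)  # None instead of KeyError on a bad key
    return op(x, y) if op else 'unknown operation'

print(apply_op('/', 8.0, 2.0))  # 4.0
print(apply_op('%', 8.0, 2.0))  # unknown operation
```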
menu()\n","repo_name":"pcaramguedes/letscode","sub_path":"calculadora02.py","file_name":"calculadora02.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"31865853097","text":"# coding: utf-8\nimport pymssql\n\n\nclass MSSQL:\n def __init__(self):\n self.host = \"localhost\"\n self.user = \"GPS\"\n self.pwd = \"mywife\"\n self.db = \"GPS\"\n\n def __GetConnect(self):\n \"\"\"\n 得到连接信息\n 返回: conn.cursor()\n \"\"\"\n if not self.db:\n raise(NameError, \"没有设置数据库信息\")\n self.conn = pymssql.connect(host=self.host, user=self.user, password=self.pwd, database=self.db, charset=\"utf8\", as_dict=True)\n cur = self.conn.cursor()\n if not cur:\n raise(NameError, \"连接数据库失败\")\n else:\n return cur\n\n def sDB(self, sql):\n \"\"\"\n 执行查询语句\n 返回的是一个包含tuple的list,list的元素是记录行,tuple的元素是每行记录的字段\n\n 调用示例:\n ms = MSSQL(host=\"localhost\",user=\"sa\",pwd=\"123456\",db=\"PythonWeiboStatistics\")\n resList = ms.sDB(\"SELECT id,NickName FROM WeiBoUser\")\n for (id,NickName) in resList:\n print str(id),NickName\n \"\"\"\n cur = self.__GetConnect()\n cur.execute(sql)\n resList = cur.fetchall()\n\n #查询完毕后必须关闭连接\n self.conn.close()\n return resList\n\n def eDB(self, sql):\n \"\"\"\n 执行非查询语句\n\n 调用示例:\n cur = self.__GetConnect()\n cur.execute(sql)\n self.conn.commit()\n self.conn.close()\n \"\"\"\n cur = self.__GetConnect()\n cur.execute(sql)\n self.conn.commit()\n self.conn.close()\n\n\nif __name__ == 'main':\n ms = MSSQL()\n res_id = ms.sDB(\"SELECT @@IDENTITY AS 'ID'\")\n print(res_id)","repo_name":"55567772/MyMail","sub_path":"Codes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"69976408336","text":"from flask import Flask, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime, date\nimport json\n\nimport pymysql\npymysql.install_as_MySQLdb()\n\napp = Flask(__name__)\n#设置连接数据库的URL\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:123@127.0.0.1:3306/book'\n#设置每次请求结束后会自动提交数据库中的改动\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n#查询时会显示原始SQL语句\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\n\n\nclass DateEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime):\n return obj.strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(obj, date):\n return obj.strftime(\"%Y-%m-%d\")\n else:\n return json.JSONEncoder.default(self, obj)\n\n\ndef to_json(inst, cls):\n d = dict()\n for c in cls.__table__.columns:\n v = getattr(inst, c.name)\n d[c.name] = v\n return json.dumps(d, cls=DateEncoder)\n\n\nclass Book(db.Model):\n # 定义表名\n __tablename__ = \"bookinfo\"\n # 定义列对象\n id = db.Column(db.Integer, primary_key=True)\n btitle = db.Column(db.String(64), unique=True)\n bpub_date = db.Column(db.DateTime, default=datetime.now)\n bread = db.Column(db.Integer, default=0)\n bcomment = db.Column(db.Integer, default=0)\n isDelete = db.Column(db.Integer, default=0)\n hero = db.relationship(\"Hero\", backref=\"book\")\n\n def __repr__(self):\n return '书籍:%s' % self.btitle\n\n @property\n def serialize(self):\n return to_json(self, self.__class__)\n\n\nclass Hero(db.Model):\n # 定义表名\n __tablename__ = \"heroinfo\"\n # 定义列对象\n id = db.Column(db.Integer, primary_key=True)\n hname = db.Column(db.String(64))\n hgender = db.Column(db.Integer)\n bcomment = db.Column(db.Integer, 
default=0)\n isDelete = db.Column(db.Integer, default=0)\n hbook_id = db.Column(db.Integer, db.ForeignKey(\"bookinfo.id\"))\n\n def __repr__(self):\n return '人物:%s' % self.btitle\n\n\n@app.route(\"/\")\ndef index():\n return jsonify(Book.query.all())\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"czl0325/flask-start","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"3096714664","text":"from get_tokens2 import get_tokens2\nimport requests\nimport pandas as pd\nimport jquantsapi\nfrom datetime import datetime, timedelta\nfrom requests import HTTPError\nimport json\n\nid_token = get_tokens2()\nprint(id_token)\ncli = jquantsapi.Client(refresh_token=id_token)\n\n\n# J-Quants API から取得するデータの期間\nstart_dt: datetime = datetime(2021, 4, 29)\nend_dt: datetime = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\nstart_dt_yyyymmdd = start_dt.strftime(\"%Y%m%d\")\nend_dt_yyyymmdd = end_dt.strftime(\"%Y%m%d\")\n\n# # 銘柄一覧\n# stock_list_load: pd.DataFrame = cli.get_list()\n\n# # 株価情報 開始から終了まで全て\n# stock_price_load: pd.DataFrame = cli.get_price_range(start_dt=start_dt, end_dt=end_dt)\n\n# # 株価情報 取引日指定Ver\n# d = datetime.now() \n# df_quotes_bydate = cli.get_prices_daily_quotes(date_yyyymmdd=d.strftime(format=\"%Y%m%d\"))\n\n# # 株式銘柄コード指定Ver\n# df_quotes_bycode = cli.get_prices_daily_quotes(code=8697)\n\n\n# 財務情報全て\nstock_fin_load: pd.DataFrame = cli.get_statements_range(start_dt=start_dt, end_dt=end_dt)\n\n# LocalCodeが7777のデータを絞り込む\n# filtered_data = stock_fin_load[stock_fin_load['LocalCode'] == 6616]\n\n# カラムを絞る\nselected_columns = [\n 'DisclosedDate',\n 'LocalCode',\n 'TypeOfDocument',\n 'TypeOfCurrentPeriod',\n 'CurrentPeriodStartDate',\n 'CurrentPeriodEndDate',\n 'NetSales',\n 'OperatingProfit',\n 'ForecastNetSales',\n 'ForecastOperatingProfit',\n 'NextYearForecastNetSales',\n 'NextYearForecastOperatingProfit'\n]\nfiltered_data = stock_fin_load[selected_columns]\n\n\n# 財務情報 code指定\ndf_fins = cli.get_fins_statements(code=8697)\n\n# 信用取引週末残高を日付範囲を指定して取得\ndf_weekly_margin: pd.DataFrame=cli.get_weekly_margin_range(start_dt=start_dt, end_dt=end_dt)\n\n\n\n# # 大容量データが返却された場合の再検索\n# # データ量により複数ページ取得できる場合があるため、pagination_keyが含まれる限り、再検索を実施\n# while \"pagination_key\" in r_get.json():\n# pagination_key = r_get.json()[\"pagination_key\"]\n# r_get = requests.get(f\"https://api.jquants.com/v1/method?query=param&pagination_key={pagination_key}\", headers=headers)\n# data += r_get.json()[\"data\"]\n \n\n# print(data)","repo_name":"tomoito/jquant","sub_path":"1_joujou_list_api.py","file_name":"1_joujou_list_api.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"9354109366","text":"from typing import List\n\nnumber: int = int(input(\"Ingrese un numero: \"))\n# nativos: int, str, float, bool\n\n\ndef convert_to_list(number: int):\n list: List[int] = []\n str_number: str = str(number)\n for i in range(len(str_number)):\n list.append(int(str_number[i]))\n return list\n\n\ndef is_step(list: List[int]):\n for i in range(len(list)):\n if list[i] == list[i + 1] + 1 or list[i] == list[i + 1] - 1:\n return True\n else:\n return False\n\n\nif is_step(convert_to_list(number)):\n print(\"El numero \" + str(number) + \" es un numero step\")\nelse:\n print(\"El numero \" + str(number) + \" no es un numero 
step\")\n","repo_name":"MateMar04/PG3_ITSVillada2022","sub_path":"1_Ejercicios/7_ej.py","file_name":"7_ej.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"37150904258","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout, login, authenticate\nfrom .models import *\nfrom .forms import *\nfrom django.contrib.auth.models import User\n\ndef home(request):\n\tuser = request.user\n\tif request.method == \"POST\":\n\t\tform_sugest = SugestionForm(request.POST or None)\n\t\tif form_sugest.is_valid():\n\t\t\tsubject = form_sugest.cleaned_data['subject']\n\t\t\tif user.is_authenticated:\n\t\t\t\tSugestion(user = user, subject=subject).save()\n\t\t\treturn redirect(connexion)\n\tform_sugest = SugestionForm()\n\tchart = CovidChart.objects.all()\n\n\treturn render(request, 'index.html', locals())\n\ndef identification(request):\n\tform = InscriptionForm(request.POST, request.FILES)\n\tif request.method == \"POST\" :\n\t\tif form.is_valid():\n\t\t\tusername = form.cleaned_data['username']\n\t\t\tfirstname = form.cleaned_data['firstname']\n\t\t\tlastname = form.cleaned_data['lastname']\n\t\t\tpassword = form.cleaned_data['password']\n\t\t\tpassword2 = form.cleaned_data['password2']\n\t\t\temail = form.cleaned_data['email']\n\t\t\tif password==password2:\n\t\t\t\tuser = User.objects.create_user(\n\t\t\t\t\tusername=username,\n\t\t\t\t\temail=email,\n\t\t\t\t\tpassword=password)\n\t\t\t\tuser.first_name, user.last_name = firstname, lastname\n\t\t\t\tuser.save()\n\t\t\t\tprint(user)\n\t\tif user:\n\t\t\tlogin(request, user)\n\t\t\treturn redirect(diagnostic)\n\tform = InscriptionForm()\n\treturn render(request, 'identification.html', locals())\n\ndef connexion(request):\n\tform_connection = ConnexionForm(request.POST)\n\ttry:\n\t\tnext_p = request.GET[\"next\"]\n\texcept:\n\t\tnext_p = \"\"\n\tif request.method == \"POST\" and form_connection.is_valid():\n\t\tusername = form_connection.cleaned_data['username']\n\t\tpassword = form_connection.cleaned_data['password']\n\t\tuser = authenticate(username=username, password=password)\n\t\tif user: # Si l'objet renvoyé n'est pas None\n\t\t\tlogin(request, user)\n\t\t\tif next_p:\n\t\t\t\treturn redirect(next_p)\n\t\t\telse:\n\t\t\t\treturn redirect(home)\n\tform_connection = ConnexionForm()\n\treturn render(request, 'connect.html', locals())\n\ndef deconnexion(request):\n\tlogout(request)\n\treturn redirect(home)\n\n@login_required()\ndef diagnostic(request, quest=1):\n\tuser = request.user\n\tquestions = Questions.objects.all()\n\tp = Paginator(questions, 1)\n\t\n\ttry:\n\t\tpagination = p.page(quest)\n\t\tquestion = pagination.object_list[0]\n\texcept Exception as e:\n\t\treturn redirect(results_diag)\n\n\tform = DiagnosticForm(request.POST or None)\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\toui = form.cleaned_data['oui']\n\t\t\tnon = form.cleaned_data['non']\n\t\t\ttry:\n\t\t\t\tresult = get_object_or_404(Diagnostic, user=user, question=question)\n\t\t\t\tresult.oui = oui\n\t\t\t\tresult.non = non\n\t\t\t\tresult.save()\n\t\t\texcept Exception as e:\n\t\t\t\tDiagnostic(user = request.user, question = question, oui=oui, non=non).save()\n\t\t\tquest += 1\n\tform = DiagnosticForm()\n\treturn render(request, 'diagnostic.html', locals())\n\n\ndef somme_liste(liste):\n\tsomme = 
0\n\tlongueur = len(liste)\n\tfor i in range(longueur):\n\t\tsomme = somme + liste[i]\n\n\treturn somme\n\ndef results_diag(request):\n\tresults = Diagnostic.objects.filter(user=request.user.id)\n\tmax_ranges = []\n\tmargin = []\n\tfor x in Questions.objects.all():\n\t\tmax_ranges.append(x.facteur)\n\n\tmax_range = somme_liste(max_ranges)*2\n\n\tfor result in results:\n\t\tif result.oui == True:\n\t\t\tmargin.append(2*result.question.facteur)\n\t\tif result.non == True:\n\t\t\tmargin.append(0*result.question.facteur)\n\n\tfinal_result = somme_liste(margin)\n\n\tmax_high = (100*max_range)/100\n\tmin_high = (80*max_range)/100\n\n\tmax_medium = (79*max_range)/100\n\tmin_medium = (50*max_range)/100\n\n\tmax_low = (49*max_range)/100\n\n\tprint(final_result)\n\n\n\n\treturn render(request, 'results.html', locals())\n\n\n\ndef about(request):\n\t\n\treturn render(request, 'about.html')\n\ndef contact(request):\n\tuser = request.user\n\tform_contact = ContactForm(request.POST or None)\n\tif request.method == 'POST':\n\t\tif form_contact.is_valid():\n\t\t\tfull_name = form_contact.cleaned_data['full_name']\n\t\t\tmail = form_contact.cleaned_data['mail']\n\t\t\tmessage = form_contact.cleaned_data['message']\n\t\t\tif user.is_authenticated:\n\t\t\t\tContactUs(user = user, full_name = full_name, mail=mail, message=message).save()\n\t\t\treturn redirect(connexion)\n\tform_contact = ContactForm()\n\n\treturn render(request, 'contact.html', locals())\n","repo_name":"ArtcalO/covid19out","sub_path":"covid19_out/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"73550557775","text":"import tweepy\nimport json\nimport re\nimport webbrowser\nimport requests\n\n# load Twitter API credentials\nwith open('twitter_credentials.json') as cred_data:\n info = json.load(cred_data)\n consumer_key = info['API_KEY']\n consumer_secret = info['API_SECRET']\n access_key = info['ACCESS_TOKEN_KEY']\n access_secret = info['ACCESS_TOKEN_SECRET']\n\n# Create the API endpoint\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\napi = tweepy.API(auth)\n\n# Mention the maximum number of tweets that you want to be extracted.\nmax_number_of_tweets_to_be_extracted = int(input('Enter the number of tweets that you want to extract: '))\n\n# Mention the hashtag that you want to look out for\nticker = input('Enter the stock ticker that you want to scrape for: ')\n\n# Function scrapes twitter API to look for tweets that have specific ticker symbol based on user input.\ndef getTweets():\n for tweet in tweepy.Cursor(api.search, q='$' + ticker, rpp=100).items(max_number_of_tweets_to_be_extracted):\n current_tweet = tweet.text.encode('utf-8')\n tweets.append(current_tweet)\n\n# Function checks if the tweet has a url and if so, it appends that link to a list.\ndef links2list():\n for tweet in tweets:\n searchString = \"(?Phttps?://[^\\s]+)\"\n link = re.search(searchString, tweet.decode(\"utf-8\"))\n if link:\n links.append(link.group(0))\n\n# Extend the shortened URL's to better detect duplicate links\ndef expandLink():\n for link in links:\n response = requests.get(link)\n link_list.append(response.url)\n\n# Remove duplicate links from list to new list\ndef removeDuplicates():\n for i in link_list:\n if i not in final_list:\n final_list.append(i)\n\n# Function opens all the links form the links list in new tabs in Google Chrome.\ndef openLinksInChrome():\n for url in final_list:\n 
webbrowser.open_new_tab(url)\n\n# Specify global empty lists.\ntweets = []\nlinks = []\nlink_list = []\nfinal_list = []\n\n# Run the code.\ngetTweets()\nlinks2list()\nexpandLink()\nremoveDuplicates()\nopenLinksInChrome()\n\n# Print a results statement that shows how many links were extracted from the specified number of tweets.\nprint('Extracted ' + str(len(links)) + ' links from ' + str(max_number_of_tweets_to_be_extracted) + ' tweets with ticker $' + ticker)\n","repo_name":"mmccarthy93/StockMarketTweets","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"25874712088","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 19 14:24:55 2016\n\n@author: Viktor\n\"\"\"\n\nfrom scipy import *\nfrom pylab import *\nimport sys\nimport scipy.special\nimport numpy as np\nimport pandas as pd\nimport csv\n\n\n\"\"\"Birds\"\"\"\n\n\n\n\n\ndef birdfile(): \n\t\n\tfile = open('bird1_1.txt', 'r')\n\treturn file\nmoves = []\ndates = []\t\nbirds1 = list(birdfile())\n\nimport datetime\n#from astral import Astral\n\nAlla_datum= []\nnew_list= []\n\nfor i in birds1:\n splits2= i.split()\n Alla_datum.append(splits2[0])\n\n#for i in Alla_datum:\n# if i not in new_list:\n# new_list.append((i))\n\n#datumz = []\n#from datetime import datetime\n#import pytz\t\t\n#import tzlocal # pip install tzlocal\n#def tz_fix(dates):\n#\tlocal_timezone = tzlocal.get_localzone() # get pytz tzinfo\n#\tfor n in dates:\n#\t\tutc_time = datetime.strptime(n, \"%Y-%m-%d\")\n#\t\tlocal_time = utc_time.replace(tzinfo=pytz.utc).astimezone(local_timezone)\n#\t\tdatumz.append(local_time)\n#\treturn datumz\n#\tlägg till timer?\n#\tspara till fil?\n#x = tz_fix(new_list)\n#print(x[0])\n\n\n#print(new_list[0],new_list[-1])\n#print(len(new_list))\n\n#city_name = 'Stockholm'\n\n#a = Astral()\n#a.solar_depression = 'civil'\n\n#city = a[city_name]\n\n#print('Information for %s/%s\\n' % (city_name, city.region))\n\n#timezone = city.timezone\n#print('Timezone: %s' % timezone)\n\n\n#print('Latitude: %.02f; Longitude: %.02f\\n' % \\\n# (city.latitude, city.longitude))\n\n#sun = []\n#for i in x:\n#\tsun.append(city.sun(date=datetime.date(i), local=True))\n\n#lista = []\n#for i in sun:\n#\ta = list(i.items())\n#\tlista.append(a)\n#\tsplits = lista.split()\n#print(splits[0])\n#print('hej!',lista[0],'hejdå!')\n\n#dawnlist = []\n#dusklist = []\n\n#import itertools as it\n#light = []\n#dawn = []\n#dusk = []\n#for i in lista:\n#\tdawn.append(i[3])\n#\tdusk.append(i[2])\n#print(dawn[0])\n#for i in dawn:\n#\ta = list(i)\n#\tdawnlist.append(a)\n#for j in dusk:\n#\tb = list(j)\n#\tdusklist.append(b)\n#print('hej!',dawnlist[2][1], dusklist[2][1])\n\n#light = list(it.zip_longest(dawnlist, dusklist))\n#print('hej', light[0], light[-1])\n\n#print('Dawn: %s' % str(sun['sunrise']))\n#print('Sunrise: %s' % str(sun['sunrise']))\n#print('Noon: %s' % str(sun['noon']))\n#print('Sunset: %s' % str(sun['sunset']))\n#print('Dusk: %s' % str(sun['dusk']))\n\n\njan = []\nfeb = []\nmar = []\napr = []\nmaj = []\n\nfor h in range(len(birds1)):\n if birds1[h][6] == '1':\n jan.append(birds1[h])\n elif birds1[h][6] == '2':\n feb.append(birds1[h])\n elif birds1[h][6] == '3':\n mar.append(birds1[h])\n elif birds1[h][6] == '4':\n apr.append(birds1[h])\n elif birds1[h][6] == '5':\n maj.append(birds1[h])\n\nmovesjan = []\nmovesfeb = []\nmovesmar = []\nmovesapr = []\nmovesmaj = []\n\nfor n in range(len(jan)-1):\n 
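`expandLink` above follows redirects with a bare `requests.get`, so one dead shortener kills the run and every target page is downloaded in full. A hedged sketch with a HEAD request, a timeout, and a fallback to the original link; note that some servers answer HEAD differently from GET, so this is a trade-off rather than a drop-in replacement:

```python
import requests

def expand_link(short_url, timeout=10):
    # Resolve redirects without fetching the body; keep the input on failure.
    try:
        response = requests.head(short_url, allow_redirects=True,
                                 timeout=timeout)
        return response.url
    except requests.RequestException:
        return short_url
```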
movesjan.append(movescheck[n])\n\nfor n in range(len(feb)-1):\n movesfeb.append(movescheck[(n+len(jan))])\n\nfor n in range(len(mar)-1):\n movesmar.append(movescheck[(n+len(jan)+len(feb))])\n\nfor n in range(len(apr)-1):\n movesapr.append(movescheck[(n+len(jan)+len(feb)+len(mar))])\n\nfor n in range(len(maj)-1):\n movesmaj.append(movescheck[(n+len(jan)+len(feb)+len(mar)+len(apr))])\n\nprint(mean(movesjan))\nprint(mean(movesfeb))\nprint(mean(movesmar))\nprint(mean(movesapr))\nprint(mean(movesmaj))\n\ntfapr = []\nfor n in range(len(apr)):\n if apr[n][8] == '2':\n if apr[n][9] == '4':\n tfapr.append(apr[n])\n\nprint(tfapr)\n\nmovestfapr = []\nfor i in (tfapr):\n splits = i.split()\n# dates.append(splits2)\n movestfapr.append(int(splits[2]))\n\n\nprint(movestfapr)\n\n\ndiffmovestfapr = diff(movestfapr)\nprint(diffmovestfapr)\n\n\n#dt = datetime.datetime.now()\n#dt = dt.replace(microsecond=0) # Returns a copy\n#dt\n#datetime.datetime(2015, 4, 24, 0, 0)\n\n#if dt == None : dt = datetime.datetime.now()\n#seconds = (dt - dt.min).seconds\n# // is a floor division, not a comment on following line:\n#rounding = (seconds+roundTo/2) // roundTo * roundTo\n#return dt + datetime.timedelta(0,rounding-seconds,-dt.microsecond)\n\n#for k in range(len(tfapr)):\n# tfapr.remove(tfapr[k][19])\n# tfapr.remove(tfapr[k][20])\n# tfapr.remove(tfapr[k][21])\n# tfapr.remove(tfapr[k][22])\n# tfapr.remove(tfapr[k][23])\n# tfapr.remove(tfapr[k][24])\n# tfapr.remove(tfapr[k][25])\n\n#3datetime.datetime(2010, 7, 6, 5, 27, 23, 662390)\n#dtwithoutseconds = dt.replace(second=0, microsecond=0)\n\n\n\n#print(tfapr) \n#tfaprs = []\n#for k in range(len(tfapr)-1):\n# tfaprs.append((tfapr[k].split(\"<--\")[19].split(\"-->\")[25]))\n\n#print(tfaprs)\n\n#http://stackoverflow.com/questions/28765563/average-values-from-a-column-on-an-hourly-timeseries\n\n#import itertools as it\n#datumbirds = list(zip(datum, movescheck))\n#print(len(datumbirds))\n#print('hej!', datumbirds[0], datumbirds[-1])","repo_name":"nijynot/birds","sub_path":"bird1/bird_old/nufan astral.py","file_name":"nufan astral.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"2871930107","text":"import all_inclusive\n\ntemp = None\nstage = 0\ncounter = 0\n\n# Overwrite temp/stage/counter in case of program restart\n# temp = 0\n# stage = 0\n# counter = 0\n\ndef callback(thermocouple_temp):\n\tglobal stage\n\tglobal counter\n\tglobal temp\n\n\tif temp is None:\n\t\ttemp = thermocouple_temp\n\telif temp <= 200:\n\t\ttemp += 0.5\n\telse:\n\t\ttemp = 200\n\n\tprint(temp, thermocouple_temp)\n\n\treturn temp\n\nall_inclusive.callback = callback\nall_inclusive.start()\n\n","repo_name":"skimberk/kiln-controller","sub_path":"control/dry.py","file_name":"dry.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"78082655","text":"import pydantic\nimport pydantic.generics\nimport typing\nimport types\n\n\nclass BaseModel(pydantic.BaseModel):\n class Config:\n extra = pydantic.Extra.forbid\n\n\nT = typing.TypeVar('T')\n\n\n# noinspection PyTypeChecker,PyTypeHints\ndef create_generic_model(\n model_name: str,\n *,\n __config__: typing.Type[pydantic.BaseConfig] = None,\n typevars: typing.List[typing.TypeVar],\n __module__: typing.Optional[str] = None,\n __validators__: typing.Dict[str, classmethod] = None,\n **field_definitions: typing.Any,\n) -> 
typing.Type[pydantic.generics.GenericModel]:\n fields = {}\n annotations = {}\n\n for f_name, f_def in field_definitions.items():\n if isinstance(f_def, tuple):\n f_annotation, f_value = f_def\n else:\n f_annotation, f_value = None, f_def\n\n if f_annotation:\n annotations[f_name] = f_annotation\n fields[f_name] = f_value\n\n namespace = {'__annotations__': annotations}\n namespace.update(fields)\n\n if len(typevars) == 1: # This is gross, but unpacking doesn't seem to work.\n generic_base = typing.Generic[typevars[0]]\n elif len(typevars) == 2:\n generic_base = typing.Generic[typevars[0], typevars[1]]\n elif len(typevars) == 3:\n generic_base = typing.Generic[typevars[0], typevars[1], typevars[2]]\n else:\n raise ValueError(f'create_generic_model takes 1-3 typevars. Got {len(typevars)}')\n\n return types.new_class(name=model_name, bases=(pydantic.generics.GenericModel, generic_base),\n exec_body=lambda ns: ns.update(namespace))\n","repo_name":"Duelers/card-scripting-schema","sub_path":"card_models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"40281426635","text":"# In this assignment you must do a Twitter search on any term\n# of your choice.\n# Deliverables:\n# 1) Print each tweet\n# 2) Print the average subjectivity of the results\n# 3) Print the average polarity of the results\n\n# Sentiment Analysis - Understanding and Extracting Feelings from Data\n## polarity -- measures how positive or negative\n## subjectivity -- measures how factual.\n\n### Be prepared to change the search term during demo.\n\nimport tweepy\nfrom textblob import TextBlob\nimport sys\n\n# function that will help with encoding/decoding\ndef uprint(*objects, sep=' ', end='\\n', file=sys.stdout): \n enc = file.encoding\n if enc == 'UTF-8':\n print(*objects, sep=sep, end=end, file=file)\n else:\n f = lambda obj: str(obj).encode(enc, errors='backslashreplace').decode(enc)\n print(*map(f, objects), sep=sep, end=end, file=file)\n\n\n# Unique code from Twitter\naccess_token = \"2546126607-MwJvheq6bGtvvJk3XsZju5XNYFWABrsvgKMBv9z\"\naccess_token_secret = \"ugAOGH8F9C7f8hFIGjHXA3lC1KeuA9kwI7WNQavf7GIgr\"\nconsumer_key = \"cKKu7YX8zB4MxdIaorIFAIUrB\"\nconsumer_secret = \"EfhAxqvWP7VoFVTxPO4GJDlKgjiqYLiLSSCxtOaDSwBHS7WFQa\"\n\n# Boilerplate code here\nauth = tweepy.OAuthHandler(consumer_key,consumer_secret)\nauth.set_access_token(access_token,access_token_secret)\napi = tweepy.API(auth)\n\n\npublic_tweets = api.search('Thanksgiving') # input search term\n\nsubjectivity = [] # make two empty lists for subjectivity and polarity\npolarity = []\n\nfor tweet in public_tweets: # loop through all the tweets that appear for search term\n uprint(tweet.text)\n analysis = TextBlob(tweet.text) \n polarity.append(analysis.sentiment[0]) # add the first value in analysis.sentiment to the polarity list, and second to subjectivity list\n subjectivity.append(analysis.sentiment[1]) \n\navg_sub = sum(subjectivity)/len(subjectivity) # divide the sum of all subjectivity scores by how many scores there are to find average\navg_pol = sum(polarity)/len(polarity) # same process: divide the sum of polarity scores by the number of polarity scores\n\nfor tweet in public_tweets: # print all tweets\n\tuprint(tweet.text)\n \nprint(\"Average subjectivity is\", avg_sub) # print the average scores\nprint(\"Average polarity is\", 
avg_pol)\n\n\n","repo_name":"angetsai/hw3","sub_path":"twitterhw3b.py","file_name":"twitterhw3b.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"25356655563","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom pyquery import PyQuery\nfrom ..items import City58Item\n\n\nclass ChuZu58Spider(scrapy.Spider):\n name = 'ChuZu58'\n allowed_domains = ['58.com']\n start_urls = ['https://zs.58.com/chuzu/']\n\n def parse(self, response):\n pyq_object = PyQuery(response.text)\n li_tags = pyq_object('body > div.mainbox > div > div.content > div.listBox > ul > li').items()\n for li in li_tags:\n if li.attr('id') == 'bottom_ad_li':\n break\n else:\n a_tag = li('div.des > h2 > a')\n items = City58Item()\n items['name'] = a_tag.text()\n items['detail_link'] = a_tag.attr('href')\n items['price'] = li('div.listliright > div.money > b').text()\n items['house_detail'], items['area'] = li('div.des > p.room.strongbox').text().split()\n items['location'] = li('div.des > p.add > a:nth-child(1)').text()\n items['location_detail'] = li('div.des > p.add > a:nth-child(2)').text().replace('...', '') # 去掉3个恶心的点\n yield items\n\n next_url = pyq_object('#bottom_ad_li > div.pager > a.next').attr('href')\n if next_url:\n yield scrapy.Request(next_url, callback=self.parse,\n meta={'dont_redirect': True},\n dont_filter=True)\n\n","repo_name":"zhenyong97/scrapy_practice","sub_path":"city_58/city_58/spiders/chuzu58.py","file_name":"chuzu58.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"29545432200","text":"def agts(queue):\n gs_N2 = queue.add('gs_N2.py', ncpus=1, walltime=2000)\n w = queue.add('frequency.py', deps=gs_N2, walltime=200)\n f = queue.add('con_freq.py', ncpus=2, deps=gs_N2, walltime=1000)\n rpa_N2 = queue.add('rpa_N2.py', deps=gs_N2,\n #queueopts='-l mem=127GB', # needed on 16 cpus\n ncpus=32, walltime=1200)\n queue.add('plot_w.py', deps=w, creates='E_w.png')\n queue.add('plot_con_freq.py', deps=f, creates='con_freq.png')\n queue.add('extrapolate.py', deps=rpa_N2, creates='extrapolate.png')\n","repo_name":"ryancoleman/lotsofcoresbook2code","sub_path":"Pearls2_Chapter14/gpaw/doc/tutorials/rpa/submit.agts.py","file_name":"submit.agts.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"14"} +{"seq_id":"17147795255","text":"\r\nfrom flask import Flask,render_template,request\r\nimport model\r\napp=Flask(__name__)\r\n\r\n@app.route(\"/\",methods=[\"GET\",\"POST\"])\r\ndef marks():\r\n global mk\r\n mk=1\r\n if request.method==\"POST\":\r\n hrs=request.form[\"hrs\"]\r\n marks_pred=model.marks_prediction(hrs)\r\n mk=marks_pred\r\n return render_template(\"index.html\",my_marks=mk)\r\n\r\nif __name__==\"__main__\":\r\n app.run(debug=True)","repo_name":"ramangoya/my_model_deployment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"1905273222","text":"import pytest\n\nfrom tests.configs import base_config\nfrom helpers.typing.common_types import ConfigsScope\n\nfrom market_data_api.market_data_downloader import MarketDataDownloader\nfrom trading import Timeframe, AssetPair, Asset, TimeRange\n\n\n@pytest.mark.parametrize(\"timeframe, candle_count\", [\n ('15m', 9),\n ('1h', 3)\n])\ndef 
test_candle_count(timeframe: str, candle_count: int, base_config: ConfigsScope) -> None:\n MarketDataDownloader.init(base_config['market_data_downloader'])\n candles = MarketDataDownloader.get_candles(\n asset_pair=AssetPair(Asset('WAVES'), Asset('USDN')),\n timeframe=Timeframe(timeframe),\n time_range=TimeRange.from_iso_format(\n from_ts='2021-03-01 00:00:00',\n to_ts='2021-03-01 02:00:00'\n ))\n assert len(candles) == candle_count\n","repo_name":"alexgryzlov/crypto-trade","sub_path":"tests/market_data_api/md_downloader_test.py","file_name":"md_downloader_test.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"14"} +{"seq_id":"38282751254","text":"import pygame\nfrom .button import Button\nfrom .constants import *\nfrom random import randint\n\n\nclass Control:\n \"\"\"\n The control represents the panel that allows the user to make certain decisions about the sorting process.\n\n Attributes:\n win (Surface): The surface to which we will display data.\n gen_btn (Button): The button that generates random data to be sorted.\n sort_btn (Button): The button that initiates the sort of the generated data.\n reset_btn (Button): The button that resets the graph and control panel back to the default setting.\n buttons (List): A list of all of the buttons on the control panel.\n selected_btn (Button): Holds the button that the user selected.\n generated (boolean): Tells us if we have generated data to be sorted.\n \"\"\"\n\n def __init__(self, win):\n self.win = win\n self.gen_btn = Button(self.win, GEN_BTN_X, GEN_BTN_Y, TYPE_GEN)\n self.sort_btn = Button(self.win, SORT_BTN_X, SORT_BTN_Y, TYPE_SORT)\n self.reset_btn = Button(self.win, RESET_BTN_X, RESET_BTN_Y, TYPE_RESET)\n self.buttons = {TYPE_GEN: self.gen_btn, TYPE_SORT: self.sort_btn, TYPE_RESET: self.reset_btn}\n self.selected_btn = None\n self.generated = False\n\n def get_data(self, number=101):\n \"\"\"\n Get a random list of numbers.\n\n Parameter:\n number (int): The number of data points in the list, default of 101.\n \"\"\"\n\n data = []\n lower = 10\n upper = PLOT_HEIGHT\n\n for i in range(number):\n rand_val = randint(lower, upper)\n data.append(rand_val)\n\n return data\n \n def draw(self):\n pygame.draw.rect(self.win, DARK_GRAY, (GRAPH_WIDTH, 0, CONTROL_WIDTH, CONTROL_HEIGHT))\n pygame.draw.rect(self.win, BLACK, (GRAPH_WIDTH, 0, CONTROL_WIDTH, CONTROL_HEIGHT), 2)\n \n def select(self, pos):\n \"\"\"\n Selects something on the control panel.\n\n Parameter:\n pos (tuple): The coordinates on the window that were selected.\n\n Return:\n boolean: True if a button was selected, False otherwise.\n \"\"\"\n\n x, y = pos\n for btn in self.buttons.values():\n if btn.x <= x <= btn.x + BUTTON_WIDTH and btn.y <= y <= btn.y + BUTTON_HEIGHT:\n self.selected_btn = btn\n\n if btn.name == TYPE_GEN:\n self.generated = True\n self.buttons[TYPE_SORT].on = True\n elif btn.name == TYPE_SORT and self.generated:\n self.turn_off([self.buttons[TYPE_RESET]])\n elif btn.name == TYPE_RESET:\n self.reset()\n\n return True\n \n return False\n \n def turn_off(self, btns=[]):\n \"\"\"\n Turn off the buttons on the control panel.\n\n If buttons are given then these are not switched off.\n\n Parameter:\n btns (List): Buttons to be ignored and not switched off.\n \"\"\"\n\n for btn in self.buttons.values():\n if btn in btns:\n continue\n else:\n btn.turn_off()\n \n def reset(self, btns=[]):\n \"\"\"\n Resets the control panel back to its default setting.\n\n If no 
specific buttons are given to reset then all are, otherwise only the specified.\n\n Parameter:\n btns (List): Holds the specific buttons to be reset. Default is an empty list.\n \"\"\"\n\n self.generated = False\n\n if not btns:\n # empty list - all buttons are reset\n for btn in self.buttons.values():\n btn.reset()\n else:\n # reset buttons given\n for btn in btns:\n btn.reset()\n \n def update(self):\n \"\"\"\n Refresh the display of the correct region on the window.\n\n This will be used to update any changes that may occur that are relevant to the\n control portion of the window.\n \"\"\"\n\n self.draw()\n\n for btn in self.buttons.values():\n btn.update()\n \n pygame.display.update()","repo_name":"Jeremy643/Sorting-Algorithm-Visualisation","sub_path":"src/window/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"1758466570","text":"import subprocess\nfrom TestHarnessTestCase import TestHarnessTestCase\n\nclass TestHarnessTester(TestHarnessTestCase):\n def testDeleted(self):\n \"\"\"\n Test that deleted tests return a failed deleted test when the extra info argument is supplied\n \"\"\"\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n self.runTests('--no-color', '-i', 'deleted', '-e')\n\n e = cm.exception\n self.assertRegex(e.output.decode('utf-8'), r'test_harness\\.deleted.*? \\[TEST DELETED TEST\\] FAILED \\(DELETED\\)')\n\n # Verify return code is DELETED related (0x83)\n self.assertIs(0x83, e.returncode)\n\n def testNoExtraInfo(self):\n \"\"\"\n Test that deleted tests do not run without the -e (extra) option\n \"\"\"\n output = self.runTests('--no-color', '-i', 'deleted').decode('utf-8')\n self.assertNotIn('tests/test_harness.deleted', output)\n","repo_name":"idaholab/moose","sub_path":"python/TestHarness/tests/test_Deleted.py","file_name":"test_Deleted.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":1339,"dataset":"github-code","pt":"14"} +{"seq_id":"13360770124","text":"from setuptools import find_packages\nfrom setuptools import setup\nimport os\n\nVERSION = '0.3.0'\n\nsetup(author='Alex Clark',\n author_email='aclark@aclark.net',\n classifiers=[\n 'Programming Language :: Python :: 2.7',\n ],\n description=\"Zope Management Interface with Bootstrap\",\n entry_points={\n 'z3c.autoinclude.plugin': 'target = plone',\n },\n keywords='Bootstrap Zope Management Interface ZMI',\n include_package_data=True,\n install_requires=[\n 'collective.monkeypatcher',\n ],\n license='GPL',\n long_description=(\n open('README.rst').read() + '\\n' + open('CHANGES.rst').read()),\n name='zope2_bootstrap',\n packages=find_packages(),\n test_suite='tests.TestCase',\n url='https://github.com/collective/zope2_bootstrap',\n version=VERSION,\n zip_safe=False, )\n","repo_name":"collective/zope2_bootstrap","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"41566990993","text":"import numpy as np\nimport json\nimport yaml\nimport operator\nimport re\nimport os\nimport ast\nimport time\nfrom abc import abstractmethod, ABC\nfrom typing import Any, Dict, Optional, Tuple, List, Callable\n\nfrom cereal import car\nfrom common.basedir import BASEDIR\nfrom common.conversions import Conversions as CV\nfrom common.kalman.simple_kalman import KF1D\nfrom common.numpy_fast 
import clip\nfrom common.params import Params, put_nonblocking, put_bool_nonblocking\nfrom common.realtime import DT_CTRL\nfrom selfdrive.car import apply_hysteresis, gen_empty_fingerprint, scale_rot_inertia, scale_tire_stiffness\nfrom selfdrive.controls.lib.desire_helper import LANE_CHANGE_SPEED_MIN\nfrom selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, V_CRUISE_UNSET, get_friction\nfrom selfdrive.controls.lib.events import Events\nfrom selfdrive.controls.lib.vehicle_model import VehicleModel\nfrom system.swaglog import cloudlog\n\nButtonType = car.CarState.ButtonEvent.Type\nGearShifter = car.CarState.GearShifter\nEventName = car.CarEvent.EventName\nTorqueFromLateralAccelCallbackType = Callable[[float, car.CarParams.LateralTorqueTuning, float, float, bool], float]\n\nMAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS\nACCEL_MAX = 2.0\nACCEL_MIN = -3.5\nFRICTION_THRESHOLD = 0.3\n\nTORQUE_PARAMS_PATH = os.path.join(BASEDIR, 'selfdrive/car/torque_data/params.yaml')\nTORQUE_OVERRIDE_PATH = os.path.join(BASEDIR, 'selfdrive/car/torque_data/override.yaml')\nTORQUE_SUBSTITUTE_PATH = os.path.join(BASEDIR, 'selfdrive/car/torque_data/substitute.yaml')\n\nGAC_DICT = {1: 1, 2: 2, 3: 3}\n\nclass FluxModel:\n # dict used to rename activation functions whose names aren't valid python identifiers\n activation_function_names = {'σ': 'sigmoid'}\n def __init__(self, params_file, zero_bias=False):\n with open(params_file, \"r\") as f:\n params = json.load(f)\n\n self.input_size = params[\"input_size\"]\n self.output_size = params[\"output_size\"]\n self.input_mean = np.array(params[\"input_mean\"], dtype=np.float32).T\n self.input_std = np.array(params[\"input_std\"], dtype=np.float32).T\n test_dict = params[\"test_dict_zero_bias\"] if zero_bias else params[\"test_dict\"]\n self.layers = []\n\n for layer_params in params[\"layers\"]:\n W = np.array(layer_params[next(key for key in layer_params.keys() if key.endswith('_W'))], dtype=np.float32).T\n b = np.array(layer_params[next(key for key in layer_params.keys() if key.endswith('_b'))], dtype=np.float32).T\n if zero_bias:\n b = np.zeros_like(b)\n activation = layer_params[\"activation\"]\n for k, v in self.activation_function_names.items():\n activation = activation.replace(k, v)\n self.layers.append((W, b, activation))\n \n self.test(test_dict)\n if not self.test_passed:\n raise ValueError(f\"NN FF model failed test: {params_file}\")\n \n # Begin activation functions.\n # These are called by name using the keys in the model json file\n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n\n def identity(self, x):\n return x\n # End activation functions\n\n def forward(self, x):\n for W, b, activation in self.layers:\n if hasattr(self, activation):\n x = getattr(self, activation)(x.dot(W) + b)\n else:\n raise ValueError(f\"Unknown activation: {activation}\")\n return x\n\n def evaluate(self, input_array):\n if len(input_array) != self.input_size:\n # This can be used to discern between different \"versions\" of the NNFF model\n # v1 has an input of 4 (v_ego, lateral_accel, lateral_jerk, roll)\n # v2 has an input of 20 (v_ego, a_ego, lateral_accel, lateral_jerk, roll, ) \n if self.input_size == 4: # leave out a_ego and anything after the first 5 values\n input_array = [input_array[0], input_array[1], input_array[2], -input_array[3]]\n else:\n raise ValueError(f\"Input array length {len(input_array)} does not match the expected length {self.input_size}\")\n \n input_array = np.array(input_array, dtype=np.float32)#.reshape(1, -1)\n\n # Rescale the input 
array using the input_mean and input_std\n input_array = (input_array - self.input_mean) / self.input_std\n\n output_array = self.forward(input_array)\n\n return float(output_array[0, 0])\n \n def test(self, test_data: dict) -> str:\n num_passed = 0\n num_failed = 0\n allowed_chars = r'^[-\\d.,\\[\\] ]+$'\n self.test_passed = False\n\n for input_str, expected_output in test_data.items():\n if not re.match(allowed_chars, input_str):\n raise ValueError(f\"Invalid characters in NN FF model testing input string: {input_str}\")\n\n input_list = ast.literal_eval(input_str)\n model_output = self.evaluate(input_list)\n\n if abs(model_output - expected_output) <= 5e-5:\n num_passed += 1\n else:\n num_failed += 1\n raise ValueError(f\"NN FF model failed test at value {input_list}: expected {expected_output}, got {model_output}\")\n\n summary_str = (\n f\"Test results: PASSED ({num_passed} inputs tested) \"\n )\n \n self.test_passed = num_failed == 0\n self.test_str = summary_str\n\n def summary(self, do_print=True):\n summary_lines = [\n \"FluxModel Summary:\",\n f\"Input size: {self.input_size}\",\n f\"Output size: {self.output_size}\",\n f\"Number of layers: {len(self.layers)}\",\n self.test_str,\n \"Layer details:\"\n ]\n\n for i, (W, b, activation) in enumerate(self.layers):\n summary_lines.append(\n f\" Layer {i + 1}: W: {W.shape}, b: {b.shape}, f: {activation}\"\n )\n \n summary_str = \"\\n\".join(summary_lines)\n\n if do_print:\n print(summary_str)\n\n return summary_str\n\ndef get_torque_params(candidate):\n with open(TORQUE_SUBSTITUTE_PATH) as f:\n sub = yaml.load(f, Loader=yaml.CSafeLoader)\n if candidate in sub:\n candidate = sub[candidate]\n\n with open(TORQUE_PARAMS_PATH) as f:\n params = yaml.load(f, Loader=yaml.CSafeLoader)\n with open(TORQUE_OVERRIDE_PATH) as f:\n override = yaml.load(f, Loader=yaml.CSafeLoader)\n\n # Ensure no overlap\n if sum([candidate in x for x in [sub, params, override]]) > 1:\n raise RuntimeError(f'{candidate} is defined twice in torque config')\n\n if candidate in override:\n out = override[candidate]\n elif candidate in params:\n out = params[candidate]\n else:\n raise NotImplementedError(f\"Did not find torque params for {candidate}\")\n return {key: out[i] for i, key in enumerate(params['legend'])}\n\n\n# generic car and radar interfaces\n\nclass CarInterfaceBase(ABC):\n def __init__(self, CP, CarController, CarState):\n self.CP = CP\n self.VM = VehicleModel(CP)\n\n self.frame = 0\n self.steering_unpressed = 0\n self.low_speed_alert = False\n self.no_steer_warning = False\n self.silent_steer_warning = True\n self.v_ego_cluster_seen = False\n self.ff_nn_model = None\n\n self.CS = None\n self.can_parsers = []\n if CarState is not None:\n self.CS = CarState(CP)\n\n self.cp = self.CS.get_can_parser(CP)\n self.cp_cam = self.CS.get_cam_can_parser(CP)\n self.cp_adas = self.CS.get_adas_can_parser(CP)\n self.cp_body = self.CS.get_body_can_parser(CP)\n self.cp_loopback = self.CS.get_loopback_can_parser(CP)\n self.can_parsers = [self.cp, self.cp_cam, self.cp_adas, self.cp_body, self.cp_loopback]\n\n self.CC = None\n if CarController is not None:\n self.CC = CarController(self.cp.dbc_name, CP, self.VM)\n \n self.param_s = Params()\n self.disengage_on_accelerator = self.param_s.get_bool(\"DisengageOnAccelerator\")\n self.enable_mads = self.param_s.get_bool(\"EnableMads\")\n self.mads_disengage_lateral_on_brake = self.param_s.get_bool(\"DisengageLateralOnBrake\")\n self.mads_ndlob = self.enable_mads and not self.mads_disengage_lateral_on_brake\n self.gear_warning = 0\n 
self.cruise_cancelled_btn = True\n self.acc_mads_combo = self.param_s.get_bool(\"AccMadsCombo\")\n self.below_speed_pause = self.param_s.get_bool(\"BelowSpeedPause\")\n self.prev_acc_mads_combo = False\n self.mads_event_lock = True\n self.gap_button_counter = 0\n self.experimental_mode_hold = False\n self.experimental_mode = self.param_s.get_bool(\"ExperimentalMode\")\n self._frame = 0\n self.op_lookup = {\"+\": operator.add, \"-\": operator.sub}\n self.gac = self.param_s.get_bool(\"GapAdjustCruise\")\n self.gac_mode = round(float(self.param_s.get(\"GapAdjustCruiseMode\", encoding=\"utf8\")))\n self.prev_gac_button = False\n self.gac_button_counter = 0\n self.gac_min = -1\n self.gac_max = -1\n self.reverse_dm_cam = self.param_s.get_bool(\"ReverseDmCam\")\n self.mads_main_toggle = self.param_s.get_bool(\"MadsCruiseMain\")\n \n def get_ff_nn(self, x):\n return self.ff_nn_model.evaluate(x)\n \n def get_nn_ff_model_path(self, car):\n return f\"/data/openpilot/selfdrive/car/torque_data/lat_models/{car}.json\"\n \n def has_nn_ff(self, car):\n model_path = self.get_nn_ff_model_path(car)\n if os.path.isfile(model_path):\n return True\n else:\n return False\n \n def initialize_ff_nn(self, car):\n cloudlog.warning(f\"Checking for lateral torque NN FF model for {car}...\")\n if self.has_nn_ff(car):\n self.ff_nn_model = FluxModel(self.get_nn_ff_model_path(car))\n cloudlog.warning(f\"Lateral torque NN FF model loaded\")\n cloudlog.warning(self.ff_nn_model.summary(do_print=False))\n return True\n else:\n cloudlog.warning(f\"No lateral torque NN FF model found for {car}\")\n return False\n\n @staticmethod\n def get_pid_accel_limits(CP, current_speed, cruise_speed):\n return ACCEL_MIN, ACCEL_MAX\n\n @classmethod\n def get_non_essential_params(cls, candidate: str):\n \"\"\"\n Parameters essential to controlling the car may be incomplete or wrong without FW versions or fingerprints.\n \"\"\"\n return cls.get_params(candidate, gen_empty_fingerprint(), list(), False, False)\n\n @classmethod\n def get_params(cls, candidate: str, fingerprint: Dict[int, Dict[int, int]], car_fw: List[car.CarParams.CarFw], experimental_long: bool, docs: bool):\n ret = CarInterfaceBase.get_std_params(candidate)\n ret = cls._get_params(ret, candidate, fingerprint, car_fw, experimental_long, docs)\n if Params().get_bool(\"EnforceTorqueLateral\"):\n ret = CarInterfaceBase.sp_configure_torque_tune(candidate, ret)\n\n # Set common params using fields set by the car interface\n # TODO: get actual value, for now starting with reasonable value for\n # civic and scaling by mass and wheelbase\n ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)\n\n # TODO: some car interfaces set stiffness factor\n if ret.tireStiffnessFront == 0 or ret.tireStiffnessRear == 0:\n # TODO: start from empirically derived lateral slip stiffness for the civic and scale by\n # mass and CG position, so all cars will have approximately similar dyn behaviors\n ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront)\n\n return ret\n\n @staticmethod\n @abstractmethod\n def _get_params(ret: car.CarParams, candidate: str, fingerprint: Dict[int, Dict[int, int]], car_fw: List[car.CarParams.CarFw], experimental_long: bool, docs: bool):\n raise NotImplementedError\n\n @staticmethod\n def init(CP, logcan, sendcan):\n pass\n\n @staticmethod\n def get_steer_feedforward_default(desired_angle, v_ego):\n # Proportional to realigning tire momentum: lateral acceleration.\n # TODO: something with 
lateralPlan.curvatureRates\n return desired_angle * (v_ego**2)\n\n def get_steer_feedforward_function(self):\n return self.get_steer_feedforward_default\n\n @staticmethod\n def torque_from_lateral_accel_linear(lateral_accel_value: float, torque_params: car.CarParams.LateralTorqueTuning,\n lateral_accel_error: float, lateral_accel_deadzone: float, friction_compensation: bool) -> float:\n # The default is a linear relationship between torque and lateral acceleration (accounting for road roll and steering friction)\n friction = get_friction(lateral_accel_error, lateral_accel_deadzone, FRICTION_THRESHOLD, torque_params, friction_compensation)\n return (lateral_accel_value / float(torque_params.latAccelFactor)) + friction\n\n def torque_from_lateral_accel(self) -> TorqueFromLateralAccelCallbackType:\n return self.torque_from_lateral_accel_linear\n\n # returns a set of default params to avoid repetition in car specific params\n @staticmethod\n def get_std_params(candidate):\n ret = car.CarParams.new_message()\n ret.carFingerprint = candidate\n ret.nnffFingerprint = candidate\n\n # Car docs fields\n ret.maxLateralAccel = get_torque_params(candidate)['MAX_LAT_ACCEL_MEASURED']\n ret.autoResumeSng = True # describes whether car can resume from a stop automatically\n\n # standard ALC params\n ret.steerControlType = car.CarParams.SteerControlType.torque\n ret.minSteerSpeed = 0.\n ret.wheelSpeedFactor = 1.0\n\n ret.pcmCruise = True # openpilot's state is tied to the PCM's cruise state on most cars\n ret.pcmCruiseSpeed = True # openpilot's state is tied to the PCM's cruise speed\n ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this\n ret.steerRatioRear = 0. # no rear steering, at least on the listed cars above\n ret.openpilotLongitudinalControl = False\n ret.stopAccel = -2.0\n ret.stoppingDecelRate = 0.8 # brake_travel/s while trying to stop\n ret.vEgoStopping = 0.5\n ret.vEgoStarting = 0.5\n ret.stoppingControl = True\n ret.longitudinalTuning.deadzoneBP = [0.]\n ret.longitudinalTuning.deadzoneV = [0.]\n ret.longitudinalTuning.kf = 1.\n ret.longitudinalTuning.kpBP = [0.]\n ret.longitudinalTuning.kpV = [1.]\n ret.longitudinalTuning.kiBP = [0.]\n ret.longitudinalTuning.kiV = [1.]\n # TODO estimate car specific lag, use .15s for now\n ret.longitudinalActuatorDelayLowerBound = 0.15\n ret.longitudinalActuatorDelayUpperBound = 0.15\n ret.steerLimitTimer = 1.0\n return ret\n\n @staticmethod\n def configure_torque_tune(candidate, tune, steering_angle_deadzone_deg=0.0, use_steering_angle=True):\n params = get_torque_params(candidate)\n\n tune.init('torque')\n tune.torque.useSteeringAngle = use_steering_angle\n tune.torque.kp = 1.0\n tune.torque.kf = 1.0\n tune.torque.ki = 0.1\n tune.torque.friction = params['FRICTION']\n tune.torque.latAccelFactor = params['LAT_ACCEL_FACTOR']\n tune.torque.latAccelOffset = 0.0\n tune.torque.steeringAngleDeadzoneDeg = steering_angle_deadzone_deg\n\n @staticmethod\n def sp_configure_torque_tune(candidate, ret):\n CarInterfaceBase.configure_torque_tune(candidate, ret.lateralTuning)\n return ret\n\n @abstractmethod\n def _update(self, c: car.CarControl) -> car.CarState:\n pass\n\n def update(self, c: car.CarControl, can_strings: List[bytes]) -> car.CarState:\n # parse can\n for cp in self.can_parsers:\n if cp is not None:\n cp.update_strings(can_strings)\n\n # get CarState\n ret = self._update(c)\n\n ret.canValid = all(cp.can_valid for cp in self.can_parsers if cp is not None)\n ret.canTimeout = any(cp.bus_timeout for cp in self.can_parsers if cp is not None)\n\n 
if ret.vEgoCluster == 0.0 and not self.v_ego_cluster_seen:\n ret.vEgoCluster = ret.vEgo\n else:\n self.v_ego_cluster_seen = True\n\n # Many cars apply hysteresis to the ego dash speed\n if self.CS is not None:\n ret.vEgoCluster = apply_hysteresis(ret.vEgoCluster, self.CS.out.vEgoCluster, self.CS.cluster_speed_hyst_gap)\n if abs(ret.vEgo) < self.CS.cluster_min_speed:\n ret.vEgoCluster = 0.0\n\n if ret.cruiseState.speedCluster == 0:\n ret.cruiseState.speedCluster = ret.cruiseState.speed\n\n # copy back for next iteration\n reader = ret.as_reader()\n if self.CS is not None:\n self.CS.out = reader\n\n return reader\n\n @abstractmethod\n def apply(self, c: car.CarControl, now_nanos: int) -> Tuple[car.CarControl.Actuators, List[bytes]]:\n pass\n\n def create_common_events(self, cs_out, c, extra_gears=None, pcm_enable=True, allow_enable=True,\n enable_buttons=(ButtonType.accelCruise, ButtonType.decelCruise)):\n events = Events()\n\n if cs_out.doorOpen and (c.latActive or c.longActive):\n events.add(EventName.doorOpen)\n if cs_out.seatbeltUnlatched and cs_out.gearShifter != GearShifter.park:\n events.add(EventName.seatbeltNotLatched)\n if cs_out.gearShifter != GearShifter.drive and cs_out.gearShifter not in extra_gears and not \\\n (cs_out.gearShifter == GearShifter.unknown and self.gear_warning < int(0.5/DT_CTRL)):\n if cs_out.vEgo < 5:\n events.add(EventName.silentWrongGear)\n else:\n events.add(EventName.wrongGear)\n if cs_out.gearShifter == GearShifter.reverse:\n if not self.reverse_dm_cam and cs_out.vEgo < 5:\n events.add(EventName.spReverseGear)\n elif cs_out.vEgo >= 5:\n events.add(EventName.reverseGear)\n if not cs_out.cruiseState.available:\n events.add(EventName.wrongCarMode)\n if cs_out.espDisabled:\n events.add(EventName.espDisabled)\n if cs_out.stockFcw:\n events.add(EventName.stockFcw)\n if cs_out.stockAeb:\n events.add(EventName.stockAeb)\n if cs_out.vEgo > MAX_CTRL_SPEED:\n events.add(EventName.speedTooHigh)\n if cs_out.cruiseState.nonAdaptive:\n events.add(EventName.wrongCruiseMode)\n if cs_out.brakeHoldActive and self.CP.openpilotLongitudinalControl:\n if cs_out.madsEnabled:\n cs_out.disengageByBrake = True\n if cs_out.cruiseState.enabled:\n events.add(EventName.brakeHold)\n else:\n events.add(EventName.silentBrakeHold)\n if cs_out.parkingBrake:\n events.add(EventName.parkBrake)\n if cs_out.accFaulted:\n events.add(EventName.accFaulted)\n if cs_out.steeringPressed:\n events.add(EventName.steerOverride)\n\n self.gear_warning = self.gear_warning + 1 if cs_out.gearShifter == GearShifter.unknown else 0\n\n # Handle button presses\n #for b in cs_out.buttonEvents:\n # # Enable OP long on falling edge of enable buttons (defaults to accelCruise and decelCruise, overridable per-port)\n # if not self.CP.pcmCruise and (b.type in enable_buttons and not b.pressed):\n # events.add(EventName.buttonEnable)\n # # Disable on rising and falling edge of cancel for both stock and OP long\n # if b.type == ButtonType.cancel:\n # events.add(EventName.buttonCancel)\n\n # Handle permanent and temporary steering faults\n self.steering_unpressed = 0 if cs_out.steeringPressed else self.steering_unpressed + 1\n if cs_out.steerFaultTemporary:\n if cs_out.steeringPressed and (not self.CS.out.steerFaultTemporary or self.no_steer_warning):\n self.no_steer_warning = True\n else:\n self.no_steer_warning = False\n\n # if the user overrode recently, show a less harsh alert\n if self.silent_steer_warning or cs_out.standstill or self.steering_unpressed < int(1.5 / DT_CTRL):\n self.silent_steer_warning = True\n 
events.add(EventName.steerTempUnavailableSilent)\n else:\n events.add(EventName.steerTempUnavailable)\n else:\n self.no_steer_warning = False\n self.silent_steer_warning = False\n if cs_out.steerFaultPermanent:\n events.add(EventName.steerUnavailable)\n\n # we engage when pcm is active (rising edge)\n # enabling can optionally be blocked by the car interface\n if pcm_enable:\n if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled and allow_enable:\n events.add(EventName.pcmEnable)\n elif not cs_out.cruiseState.enabled:\n events.add(EventName.pcmDisable)\n\n return events\n\n @staticmethod\n def sp_v_cruise_initialized(v_cruise):\n return v_cruise != V_CRUISE_UNSET\n\n def get_acc_mads(self, cruiseState_enabled, acc_enabled, mads_enabled):\n if self.acc_mads_combo:\n if not self.prev_acc_mads_combo and (cruiseState_enabled or acc_enabled):\n mads_enabled = True\n self.prev_acc_mads_combo = (cruiseState_enabled or acc_enabled)\n\n return mads_enabled\n\n def get_sp_v_cruise_non_pcm_state(self, cs_out, acc_enabled, button_events, vCruise,\n enable_buttons=(ButtonType.accelCruise, ButtonType.decelCruise),\n resume_button=(ButtonType.accelCruise, ButtonType.resumeCruise)):\n\n if cs_out.cruiseState.available:\n for b in button_events:\n if not self.CP.pcmCruise or not self.CP.pcmCruiseSpeed:\n if b.type in enable_buttons and not b.pressed:\n acc_enabled = True\n if not self.CP.pcmCruise:\n if b.type in resume_button and not self.sp_v_cruise_initialized(vCruise):\n acc_enabled = False\n if not self.CP.pcmCruiseSpeed:\n if b.type == ButtonType.accelCruise and not cs_out.cruiseState.enabled:\n acc_enabled = False\n else:\n acc_enabled = False\n\n return acc_enabled, button_events\n\n def get_sp_cancel_cruise_state(self, mads_enabled, acc_enabled=False):\n mads_enabled = False if not self.enable_mads else mads_enabled\n return mads_enabled, acc_enabled\n\n def get_sp_pedal_disengage(self, cs_out):\n brake = cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)\n regen = cs_out.regenBraking and (not self.CS.out.regenBraking or not cs_out.standstill)\n return brake or regen\n\n def get_sp_common_state(self, cs_out, CS, gear_allowed=True, gap_button=False):\n cs_out.cruiseState.enabled = CS.accEnabled if not self.CP.pcmCruise or not self.CP.pcmCruiseSpeed else cs_out.cruiseState.enabled\n if not self.enable_mads:\n if cs_out.cruiseState.enabled and not CS.out.cruiseState.enabled:\n CS.madsEnabled = True\n elif not cs_out.cruiseState.enabled and CS.out.cruiseState.enabled:\n CS.madsEnabled = False\n\n self.toggle_exp_mode(gap_button)\n\n cs_out.belowLaneChangeSpeed = cs_out.vEgo < LANE_CHANGE_SPEED_MIN and self.below_speed_pause\n\n if cs_out.gearShifter in [GearShifter.park, GearShifter.reverse] or cs_out.doorOpen or \\\n (cs_out.seatbeltUnlatched and cs_out.gearShifter != GearShifter.park):\n gear_allowed = False\n\n cs_out.latActive = gear_allowed\n\n if not CS.control_initialized:\n CS.control_initialized = True\n\n # Disable on rising edge of gas or brake. 
Also disable on brake when speed > 0.\n if (cs_out.gasPressed and not self.CS.out.gasPressed and self.disengage_on_accelerator) or \\\n (cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)) or \\\n (cs_out.regenBraking and (not self.CS.out.regenBraking or not cs_out.standstill)):\n if CS.madsEnabled:\n CS.disengageByBrake = True\n\n cs_out.madsEnabled = CS.madsEnabled\n cs_out.accEnabled = CS.accEnabled\n cs_out.disengageByBrake = CS.disengageByBrake\n cs_out.brakeLights |= cs_out.brakePressed or cs_out.brakeHoldActive or cs_out.parkingBrake or cs_out.regenBraking\n\n return cs_out, CS\n\n def toggle_exp_mode(self, gap_pressed):\n if not self.CP.openpilotLongitudinalControl:\n return None\n if gap_pressed:\n if not self.experimental_mode_hold:\n self.gap_button_counter += 1\n if self.gap_button_counter > 50:\n self.gap_button_counter = 0\n self.experimental_mode_hold = True\n put_bool_nonblocking(\"ExperimentalMode\", not self.experimental_mode)\n else:\n self.gap_button_counter = 0\n self.experimental_mode_hold = False\n\n def get_sp_gac_state(self, gac_tr, gac_min, gac_max, inc_dec):\n op = self.op_lookup.get(inc_dec)\n gac_tr = op(gac_tr, 1)\n if inc_dec == \"+\":\n gac_tr = gac_min if gac_tr > gac_max else gac_tr\n else:\n gac_tr = gac_max if gac_tr < gac_min else gac_tr\n return int(gac_tr)\n\n def get_sp_distance(self, gac_tr, gac_max, gac_dict=None):\n if gac_dict is None:\n gac_dict = GAC_DICT\n return next((key for key, value in gac_dict.items() if value == gac_tr), gac_max)\n\n def toggle_gac(self, cs_out, CS, gac_button, gac_min, gac_max, gac_default, inc_dec):\n if not (self.CP.openpilotLongitudinalControl or self.gac):\n cs_out.gapAdjustCruiseTr = 4\n CS.gac_tr = gac_default\n return\n if self.gac_min != gac_min:\n self.gac_min = gac_min\n put_nonblocking(\"GapAdjustCruiseMin\", str(self.gac_min))\n if self.gac_max != gac_max:\n self.gac_max = gac_max\n put_nonblocking(\"GapAdjustCruiseMax\", str(self.gac_max))\n if self.gac_mode in (0, 2):\n if gac_button:\n self.gac_button_counter += 1\n elif self.prev_gac_button and not gac_button and self.gac_button_counter < 50:\n self.gac_button_counter = 0\n CS.gac_tr = self.get_sp_gac_state(CS.gac_tr, gac_min, gac_max, inc_dec)\n put_nonblocking(\"GapAdjustCruiseTr\", str(CS.gac_tr))\n else:\n self.gac_button_counter = 0\n self.prev_gac_button = gac_button\n cs_out.gapAdjustCruiseTr = self.get_sp_distance(CS.gac_tr, gac_max)\n\n def create_sp_events(self, CS, cs_out, events, main_enabled=False, allow_enable=True, enable_pressed=False,\n enable_from_brake=False, enable_pressed_long=False,\n enable_buttons=(ButtonType.accelCruise, ButtonType.decelCruise)):\n\n if not cs_out.brakePressed and not cs_out.brakeHoldActive and not cs_out.parkingBrake and not cs_out.regenBraking:\n if cs_out.disengageByBrake and cs_out.madsEnabled:\n enable_pressed = True\n enable_from_brake = True\n CS.disengageByBrake = False\n cs_out.disengageByBrake = False\n\n for b in cs_out.buttonEvents:\n # Enable OP long on falling edge of enable buttons (defaults to accelCruise and decelCruise, overridable per-port)\n if not self.CP.pcmCruise:\n if b.type in enable_buttons and not b.pressed:\n enable_pressed = True\n enable_pressed_long = True\n # Disable on rising and falling edge of cancel for both stock and OP long\n if b.type == ButtonType.cancel:\n if not cs_out.madsEnabled:\n events.add(EventName.buttonCancel)\n elif not self.cruise_cancelled_btn:\n self.cruise_cancelled_btn = True\n events.add(EventName.manualLongitudinalRequired)\n 
# do disable on MADS button if ACC is disabled\n if b.type == ButtonType.altButton1 and b.pressed:\n if not cs_out.madsEnabled: # disabled MADS\n if not cs_out.cruiseState.enabled:\n events.add(EventName.buttonCancel)\n else:\n events.add(EventName.manualSteeringRequired)\n else: # enabled MADS\n if not cs_out.cruiseState.enabled:\n enable_pressed = True\n if self.CP.pcmCruise:\n # do disable on button down\n if main_enabled:\n if any(CS.main_buttons) and not cs_out.cruiseState.enabled:\n if not cs_out.madsEnabled:\n events.add(EventName.buttonCancel)\n # do enable on both accel and decel buttons\n if cs_out.cruiseState.enabled and not CS.out.cruiseState.enabled and allow_enable:\n enable_pressed = True\n enable_pressed_long = True\n elif not cs_out.cruiseState.enabled:\n if not cs_out.madsEnabled:\n events.add(EventName.buttonCancel)\n elif not self.enable_mads:\n cs_out.madsEnabled = False\n if enable_pressed:\n if enable_from_brake:\n events.add(EventName.silentButtonEnable)\n else:\n events.add(EventName.buttonEnable)\n if cs_out.disengageByBrake and not cs_out.standstill and enable_pressed_long:\n events.add(EventName.cruiseEngageBlocked)\n\n self.cruise_cancelled_btn = False if cs_out.cruiseState.enabled else True\n\n return events, cs_out\n\n def sp_update_params(self, CS):\n self.experimental_mode = self.param_s.get_bool(\"ExperimentalMode\")\n CS.gac_tr = round(float(self.param_s.get(\"GapAdjustCruiseTr\", encoding=\"utf8\")))\n self._frame += 1\n if self._frame % 300 == 0:\n self._frame = 0\n self.gac = self.param_s.get_bool(\"GapAdjustCruise\")\n self.gac_mode = round(float(self.param_s.get(\"GapAdjustCruiseMode\", encoding=\"utf8\")))\n self.reverse_dm_cam = self.param_s.get_bool(\"ReverseDmCam\")\n return CS\n\nclass RadarInterfaceBase(ABC):\n def __init__(self, CP):\n self.rcp = None\n self.pts = {}\n self.delay = 0\n self.radar_ts = CP.radarTimeStep\n self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ\n\n def update(self, can_strings):\n ret = car.RadarData.new_message()\n if not self.no_radar_sleep:\n time.sleep(self.radar_ts) # radard runs on RI updates\n return ret\n\n\nclass CarStateBase(ABC):\n def __init__(self, CP):\n self.CP = CP\n self.car_fingerprint = CP.carFingerprint\n self.out = car.CarState.new_message()\n\n self.cruise_buttons = 0\n self.left_blinker_cnt = 0\n self.right_blinker_cnt = 0\n self.steering_pressed_cnt = 0\n self.left_blinker_prev = False\n self.right_blinker_prev = False\n self.cluster_speed_hyst_gap = 0.0\n self.cluster_min_speed = 0.0 # min speed before dropping to 0\n\n self.param_s = Params()\n self.accEnabled = False\n self.madsEnabled = False\n self.disengageByBrake = False\n self.mads_enabled = False\n self.prev_mads_enabled = False\n self.control_initialized = False\n self.gap_dist_button = 0\n self.gac_tr = round(float(self.param_s.get(\"GapAdjustCruiseTr\", encoding=\"utf8\")))\n\n # Q = np.matrix([[0.0, 0.0], [0.0, 100.0]])\n # R = 0.3\n self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],\n A=[[1.0, DT_CTRL], [0.0, 1.0]],\n C=[1.0, 0.0],\n K=[[0.17406039], [1.65925647]])\n\n def update_speed_kf(self, v_ego_raw):\n if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed\n self.v_ego_kf.x = [[v_ego_raw], [0.0]]\n\n v_ego_x = self.v_ego_kf.update(v_ego_raw)\n return float(v_ego_x[0]), float(v_ego_x[1])\n\n def get_wheel_speeds(self, fl, fr, rl, rr, unit=CV.KPH_TO_MS):\n factor = unit * self.CP.wheelSpeedFactor\n\n wheelSpeeds = car.CarState.WheelSpeeds.new_message()\n wheelSpeeds.fl = fl * 
factor\n wheelSpeeds.fr = fr * factor\n wheelSpeeds.rl = rl * factor\n wheelSpeeds.rr = rr * factor\n return wheelSpeeds\n\n def update_blinker_from_lamp(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool):\n \"\"\"Update blinkers from lights. Enable output when light was seen within the last `blinker_time`\n iterations\"\"\"\n # TODO: Handle case when switching direction. Now both blinkers can be on at the same time\n self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0)\n self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0)\n return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0\n\n def update_steering_pressed(self, steering_pressed, steering_pressed_min_count):\n \"\"\"Applies filtering on steering pressed for noisy driver torque signals.\"\"\"\n self.steering_pressed_cnt += 1 if steering_pressed else -1\n self.steering_pressed_cnt = clip(self.steering_pressed_cnt, 0, steering_pressed_min_count * 2)\n return self.steering_pressed_cnt > steering_pressed_min_count\n\n def update_blinker_from_stalk(self, blinker_time: int, left_blinker_stalk: bool, right_blinker_stalk: bool):\n \"\"\"Update blinkers from stalk position. When stalk is seen the blinker will be on for at least blinker_time,\n or until the stalk is turned off, whichever is longer. If the opposite stalk direction is seen the blinker\n is forced to the other side. On a rising edge of the stalk the timeout is reset.\"\"\"\n\n if left_blinker_stalk:\n self.right_blinker_cnt = 0\n if not self.left_blinker_prev:\n self.left_blinker_cnt = blinker_time\n\n if right_blinker_stalk:\n self.left_blinker_cnt = 0\n if not self.right_blinker_prev:\n self.right_blinker_cnt = blinker_time\n\n self.left_blinker_cnt = max(self.left_blinker_cnt - 1, 0)\n self.right_blinker_cnt = max(self.right_blinker_cnt - 1, 0)\n\n self.left_blinker_prev = left_blinker_stalk\n self.right_blinker_prev = right_blinker_stalk\n\n return bool(left_blinker_stalk or self.left_blinker_cnt > 0), bool(right_blinker_stalk or self.right_blinker_cnt > 0)\n\n def update_custom_stock_long(self, cruise_button, final_speed_kph, target_speed, v_set_dis, speed_diff, button_type):\n customStockLong = car.CarState.CustomStockLong.new_message()\n customStockLong.cruiseButton = 0 if cruise_button is None else cruise_button\n customStockLong.finalSpeedKph = final_speed_kph\n customStockLong.targetSpeed = target_speed\n customStockLong.vSetDis = v_set_dis\n customStockLong.speedDiff = speed_diff\n customStockLong.buttonType = button_type\n return customStockLong\n\n @staticmethod\n def parse_gear_shifter(gear: Optional[str]) -> car.CarState.GearShifter:\n if gear is None:\n return GearShifter.unknown\n\n d: Dict[str, car.CarState.GearShifter] = {\n 'P': GearShifter.park, 'PARK': GearShifter.park,\n 'R': GearShifter.reverse, 'REVERSE': GearShifter.reverse,\n 'N': GearShifter.neutral, 'NEUTRAL': GearShifter.neutral,\n 'E': GearShifter.eco, 'ECO': GearShifter.eco,\n 'T': GearShifter.manumatic, 'MANUAL': GearShifter.manumatic,\n 'D': GearShifter.drive, 'DRIVE': GearShifter.drive,\n 'S': GearShifter.sport, 'SPORT': GearShifter.sport,\n 'L': GearShifter.low, 'LOW': GearShifter.low,\n 'B': GearShifter.brake, 'BRAKE': GearShifter.brake,\n }\n return d.get(gear.upper(), GearShifter.unknown)\n\n @staticmethod\n def get_cam_can_parser(CP):\n return None\n\n @staticmethod\n def get_adas_can_parser(CP):\n return None\n\n @staticmethod\n def get_body_can_parser(CP):\n return 
None\n\n @staticmethod\n def get_loopback_can_parser(CP):\n return None\n\n\n# interface-specific helpers\n\ndef get_interface_attr(attr: str, combine_brands: bool = False, ignore_none: bool = False) -> Dict[str, Any]:\n # read all the folders in selfdrive/car and return a dict where:\n # - keys are all the car models or brand names\n # - values are attr values from all car folders\n result = {}\n for car_folder in sorted([x[0] for x in os.walk(BASEDIR + '/selfdrive/car')]):\n try:\n brand_name = car_folder.split('/')[-1]\n brand_values = __import__(f'selfdrive.car.{brand_name}.values', fromlist=[attr])\n if hasattr(brand_values, attr) or not ignore_none:\n attr_data = getattr(brand_values, attr, None)\n else:\n continue\n\n if combine_brands:\n if isinstance(attr_data, dict):\n for f, v in attr_data.items():\n result[f] = v\n else:\n result[brand_name] = attr_data\n except (ImportError, OSError):\n pass\n\n return result\n","repo_name":"Taik/twilsonco-openpilot","sub_path":"selfdrive/car/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":34897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"14"} +{"seq_id":"28583627987","text":"#!/usr/bin/env python2\n# This is a solution to Project Euler problem #2.. I swear.\nfrom __future__ import print_function\nimport inspect\n\n__name__ = inspect.__name__\nglobals()[\"stack\"] = inspect.stack()[0][0].f_globals[(__file__ + __name__)[len(__file__):]].stack()[0]\nlocals()[\"panic\"] = lambda: len(__file__[-1])-1\nglobals()[\"carrying_a_towel\"] = lambda square: square % 2 == 0\n__file__, __name__ = [((\"chr\", \"int\"), (\"any\", \"sum\"), (\"int\", \"chr\"), (\"foo\", lambda: []))], \"__builtins__\"\n\nfor dont, look in enumerate(__file__[0]):\n val, key = look\n if dont == panic():\n globals().update({val: look[-1]})\n continue\n stack[0].f_locals[\"()\"] = inspect.stack()[0][0].f_globals[\"__builtins__\"].__dict__[val]\n oops = vars()[repr(tuple())]\n vars(stack.__getitem__(len(__file__) - len(__file__)).f_globals[str(repr(__name__))[1:-1]])[\"list\"] = vars(__builtins__)[key]\n setattr(stack[0].f_globals[str(__name__)], key, oops)\n globals()[val] = list\n\ndef hitch():\n dont, panic = [0, 1], 4e6\n always, carry = dont\n a_towel = lambda yes, no=\"carry\":yes[no]\n yield carry\n while carry < panic:\n always, carry = carry, always + carry\n yield a_towel(locals())\n\nprint(any(filter(carrying_a_towel, hitch())))\n","repo_name":"sysr-q/obfuscated.py","sub_path":"euler2.py","file_name":"euler2.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"14"} +{"seq_id":"32546630134","text":"import sys\nimport math\n\ndef linear_regression(x, y):\n \n length = len(x)\n sum_x = sum(x)\n sum_y = sum(y)\n \n sum_x_squared = sum(map(lambda a: a * a, x))\n sum_of_products = sum([x[i] * y[i] for i in range(length)])\n\n a = (sum_of_products - (sum_x * sum_y) / length) / (sum_x_squared - ((sum_x ** 2) / length))\n b = (sum_y - a * sum_x) / length\n return a, b\n\t \ns1 = input('Enter student score: ')\ns2 = input('Enter student score: ')\ns3 = input('Enter student score: ')\ns4 = input('Enter student score: ')\ns5 = input('Enter student score: ')\n\nS1 = list([float(i) for i in s1.split()])\nS2 = list([float(i) for i in s2.split()])\nS3 = list([float(i) for i in s3.split()])\nS4 = list([float(i) for i in s4.split()])\nS5 = list([float(i) for i in s5.split()])\n\nX = [0, 0, 0, 0, 0]\nY = [0, 0, 0, 0, 
0]\n\nX[0] = S1[0]\nX[1] = S2[0]\nX[2] = S3[0]\nX[3] = S4[0]\nX[4] = S5[0]\nY[0] = S1[1]\nY[1] = S2[1]\nY[2] = S3[1]\nY[3] = S4[1]\nY[4] = S5[1]\n\nlrl = linear_regression(X, Y)\n\nx = 80\ny = lrl[1] + x*lrl[0]\n\n\nprint(\"{0:0.3f}\".format(y))\n","repo_name":"aj28293/dsp2","sub_path":"statistics/10days/LSR.py","file_name":"LSR.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"32987071998","text":"'''\n Docs for the GAM_API:\n Advertiser: https://developers.google.com/ad-manager/api/reference/v202211/CompanyService\n Order: https://developers.google.com/ad-manager/api/reference/v202211/OrderService\n\n'''\n\nAPI_VERSION = \"v202308\" # API version of the Google API. Should only be changed once it is no longer supported, since Google likes to change features, which could break some functions.\n\n''' Default values for creating an order '''\n\nTRAFFICKER_ID = 245927227 # User ID of the trafficker as an integer, i.e. of the creator of the order (Ben's ID as default). \n # IMPORTANT: The primary trafficker must not appear in the list of secondary traffickers!\nSECONDARY_TRAFFICKERS = [ # User IDs of the secondary traffickers\n 245533491, # Miro\n 248265948 # Minh\n ]\nDEFAULT_STATUS= \"DRAFT\" # By default the order is created as a draft for review. It must be started manually in GAM after the review. \n\nWALLPAPER_IMAGE_CREATIVE_TEMPLATE_IDS = [\n 138421467088,\n 138420836169,\n]\nWALLPAPER_CUSTOM_CREATIVE_TEMPLATE_IDS = [\n 138420836913,\n 138420819101,\n 138421467664,\n 138420819119\n]\n\nMOBILE_IMAGE_CREATIVE_TEMPLATE_IDS = [\n 138420835410,\n 138420835383,\n 138420835368,\n 138420818819,\n 138421466704\n]\n\nDESKTOP_IMAGE_CREATIVE_TEMPLATE_IDS = [\n 138421465363,\n 138421466059,\n 138421466020\n]\n\n### Not working yet ###\nP2_RIGHT_HTML_SNIPPET = '\\n\"banner\"\\n\\n'","repo_name":"mirosteiger/Google-AdManager-Python-Tools","sub_path":"Markenauftritt/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"4350106715","text":"import argparse\nimport gym\nimport ma_gym\nimport os\nimport sys\nimport pickle\nimport time\nimport datetime\nimport copy\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nfrom utils import *\nfrom utils.args import *\nfrom plot.plot_logger import *\nfrom models.mlp_policy import Policy\nfrom models.mlp_critic import Value\nfrom models.mlp_policy_disc import DiscretePolicy\nfrom core.ppo import ppo_step\nfrom core.common import estimate_advantages\nfrom core.agent import Agent\n\ntry:\n path = os.path.join(assets_dir(), 'learned_models/{}_ppo.p'.format(args.env_name))\n models_file=open(path,'r')\n print(\"pre-trained models loaded.\")\n args.model_path = path\n print(\"model path: \", path)\nexcept IOError:\n print(\"pre-trained models not found.\")\n\nif args.log_plot is True:\n plotlogger = plot_logger()\n\ndtype = torch.float64\ntorch.set_default_dtype(dtype)\ndevice = torch.device('cuda', index=args.gpu_index) if torch.cuda.is_available() else torch.device('cpu')\nif torch.cuda.is_available():\n torch.cuda.set_device(args.gpu_index)\n\n\n\n\"\"\"environment\"\"\"\nenv = gym.make(args.env_name)\nstate_dim = env.observation_space[0].shape[0]\nis_disc_action = len(env.action_space[0].shape) == 0\n# running_state = ZFilter((state_dim,), 
clip=5)\n# running_reward = ZFilter((1,), demean=False, clip=10)\nrunning_state = None\n\n\"\"\"seeding\"\"\"\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nenv.seed(args.seed)\n\np_nets = []\nv_nets = []\np_opts = []\nv_opts = []\n\n\"\"\"define actor and critic\"\"\"\nif args.model_path is None:\n if is_disc_action:\n for i in range(env.n_agents):\n p_nets.append(DiscretePolicy(args.dec_agents, env.n_agents, state_dim, env.action_space[0].n))\n v_nets.append(Value(env.n_agents, state_dim))\n # add only one policy and value networks if using team unified network settings.\n if args.dec_agents is False:\n break\n else:\n policy_net = Policy(state_dim, env.action_space[0].n, log_std=args.log_std)\nelse:\n p_nets, v_nets, running_state = pickle.load(open(args.model_path, \"rb\"))\n\nfor i in range(env.n_agents):\n p_nets[i].to(device)\n v_nets[i].to(device)\n p_opts.append(torch.optim.Adam(p_nets[i].parameters(), lr=args.learning_rate))\n v_opts.append(torch.optim.Adam(v_nets[i].parameters(), lr=args.learning_rate))\n if args.dec_agents is False:\n break\n\n\"\"\"create agent\"\"\"\nagent = Agent(env, p_nets, device, running_state=running_state, render=args.render, num_threads=args.num_threads)\n\n\ndef update_params(batch, i_iter):\n states = torch.from_numpy(np.stack(batch.state)).to(dtype).to(device)\n actions = torch.from_numpy(np.stack(batch.action)).to(dtype).to(device)\n rewards = torch.from_numpy(np.stack(batch.reward)).to(dtype).to(device)\n masks = torch.from_numpy(np.stack(batch.mask)).to(dtype).to(device)\n\n values = []\n fixed_log_probs = []\n with torch.no_grad():\n if args.dec_agents is False:\n values = v_nets[0](states)\n fixed_log_probs = p_nets[0].get_log_prob(states, actions)\n else:\n for i in range(env.n_agents):\n values.append(v_nets[i](states))\n fixed_log_probs.append(p_nets[i].get_agent_i_log_prob(i, states, actions))\n values = torch.stack(values)\n values = torch.transpose(values,0,1)\n fixed_log_probs = torch.stack(fixed_log_probs)\n fixed_log_probs = torch.transpose(fixed_log_probs,0,1)\n\n \"\"\"get advantage estimation from the trajectories\"\"\"\n advantages = []\n returns = []\n if args.dec_agents is False:\n rewards_sum = torch.sum(rewards, dim=1)\n advantages, returns = estimate_advantages(rewards_sum, masks, values, args.gamma, args.tau, device)\n else:\n for i in range(env.n_agents):\n adv, ret = estimate_advantages(rewards[:,i], masks[:,i], values[:,i,:], args.gamma, args.tau, device)\n advantages.append(adv)\n returns.append(ret)\n advantages = torch.stack(advantages)\n advantages = torch.transpose(advantages,0,1)\n returns = torch.stack(returns)\n returns = torch.transpose(returns,0,1)\n\n \"\"\"perform mini-batch PPO update\"\"\"\n optim_iter_num = int(math.ceil(states.shape[0] / optim_batch_size))\n for _ in range(optim_epochs):\n perm = np.arange(states.shape[0])\n np.random.shuffle(perm)\n perm = LongTensor(perm).to(device)\n\n states, actions, returns, advantages, fixed_log_probs = \\\n states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), fixed_log_probs[perm].clone()\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batch_size, min((i + 1) * optim_batch_size, states.shape[0]))\n states_b, actions_b, advantages_b, returns_b, fixed_log_probs_b = \\\n states[ind], actions[ind], advantages[ind], returns[ind], fixed_log_probs[ind]\n\n if args.dec_agents is False:\n ppo_step(p_nets[0], v_nets[0], p_opts[0], v_opts[0], 5, states_b, actions_b, returns_b,\n advantages_b, fixed_log_probs_b, 
args.clip_epsilon, args.l2_reg)\n else:\n for i in range(env.n_agents):\n ppo_step(p_nets[i], v_nets[i], p_opts[i], v_opts[i], 5, states_b, actions_b, returns_b[:,i],\n advantages_b[:,i], fixed_log_probs_b[:,i], args.clip_epsilon, args.l2_reg, i) \n\n\ndef main_loop():\n # RSI randomization from previous sampling replay memory\n rsi_mem_prev = None\n\n for i_iter in range(args.max_iter_num):\n \"\"\"generate multiple trajectories that reach the minimum batch_size\"\"\"\n batch, log = agent.collect_samples(args.min_batch_size, rsi_mem_prev)\n\n if args.rsi is True:\n rsi_mem_prev = copy.copy(batch)\n\n t0 = time.time()\n update_params(batch, i_iter)\n t1 = time.time()\n\n if i_iter % args.log_interval == 0:\n print('{}\\tT_sample {:.4f}\\tT_update {:.4f}\\tR_min {:.2f}\\tR_max {:.2f}\\tR_avg {:.2f}'.format(\n i_iter, log['sample_time'], t1-t0, log['min_reward'], log['max_reward'], log['avg_reward']))\n if args.log_plot is True:\n plotlogger.log(n=i_iter, r_min=log['min_reward'], r_max=log['max_reward'], r_avg=log['avg_reward'])\n\n if args.save_model_interval > 0 and (i_iter+1) % args.save_model_interval == 0:\n for i in range(env.n_agents):\n to_device(torch.device('cpu'), p_nets[i], v_nets[i])\n if args.dec_agents is False:\n break\n \n print(\"logging trained models.\")\n pickle.dump((p_nets, v_nets, running_state),\n open(os.path.join(assets_dir(), 'learned_models/{}_ppo.p'.format(args.env_name)), 'wb'))\n\n for i in range(env.n_agents):\n p_nets[i].to(device)\n v_nets[i].to(device)\n if args.dec_agents is False:\n break\n\n if args.log_plot is True and i_iter%args.log_plot_steps==0 and i_iter>=args.log_plot_steps:\n logplot_path = os.path.join(assets_dir(), 'learned_models/')\n with open(os.path.join(logplot_path+\"logplot\"+str(datetime.datetime.now())+\".pkl\"), \"wb\") as f: pickle.dump(plotlogger._log, f, pickle.HIGHEST_PROTOCOL)\n print(\"plot log succeeded.\")\n args.log_plot = False\n exit()\n\n \"\"\"clean up gpu memory\"\"\"\n torch.cuda.empty_cache()\n\n\nmain_loop()\n","repo_name":"HaiyinPiao/marl-zoo","sub_path":"train/ma_ppo_train.py","file_name":"ma_ppo_train.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"14"} +{"seq_id":"14118825582","text":"import random\n\nclass dungeonrandom:\n def __init__(self,nquartos=0,tematica=0,boss=0,numeventos=0,evento1=0,evento2=0,evento3=0,evento4=0,evento5=0,quarto1=0,quarto2=0,quarto3=0,quarto4=0,quarto5=0,quarto6=0,acoes=0):\n self.nquartos=nquartos\n self.tematica=tematica\n self.boss=boss\n self.numeventos=numeventos\n self.evento1=evento1\n self.evento2=evento2\n self.evento3=evento3\n self.evento4=evento4\n self.evento5=evento5\n self.quarto1=quarto1\n self.quarto2=quarto2\n self.quarto3=quarto3\n self.quarto4=quarto4\n self.quarto5=quarto5\n self.quarto6=quarto6 \n self.acoes=acoes\n\ndef GerarTematica(tematica):\n if tematica==0: # fix: original checked !=0; assume 0 means no theme has been chosen yet\n rand=random.randint(1,5)\n if rand==1:\n tematica=\"Masmorra\"\n if rand==2:\n tematica=\"Mata\"\n if rand==3:\n tematica=\"Acampamento Inimigo\"\n if rand==4:\n tematica=\"Caverna\"\n if rand==5:\n tematica=\"Local Abandonado\"\n return tematica # fix: the original built the theme but never returned it\n\n#TO DO:\n'''\ndef GerarBoss(boss):\ndef GerarNumQuartos(nquartos):\ndef GerarEventos(evento1,evento2,evento3,evento4,evento5)\n\ndef ExplorarQuarto()\ndef ExecutarDungeon()\ndef 
MostarAcoes()\n'''","repo_name":"KenkoMarinho/Python-RPG-game","sub_path":"GeradorDungeon.py","file_name":"GeradorDungeon.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"14"} +{"seq_id":"32868289758","text":"\"\"\"order, sale, customer tables\n\nRevision ID: cc64fec0ffeb\nRevises: c3764e8bc2d0\nCreate Date: 2019-04-10 10:53:25.351512\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'cc64fec0ffeb'\ndown_revision = 'c3764e8bc2d0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('customer',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=20), nullable=True),\n sa.Column('lastname', sa.String(length=20), nullable=True),\n sa.Column('email', sa.String(length=50), nullable=True),\n sa.Column('hashedpass', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email')\n )\n op.create_table('order',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('billingSum', sa.Float(), nullable=True),\n sa.Column('date', sa.Integer(), nullable=True),\n sa.Column('receipt', sa.String(length=300), nullable=True),\n sa.Column('stripe_charge_id', sa.String(length=50), nullable=True),\n sa.Column('customer_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['customer_id'], ['customer.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_order_customer_id'), 'order', ['customer_id'], unique=False)\n op.create_table('sale',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('order_id', sa.Integer(), nullable=True),\n sa.Column('product_id', sa.Integer(), nullable=True),\n sa.Column('amount', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['order_id'], ['order.id'], ),\n sa.ForeignKeyConstraint(['product_id'], ['product.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_sale_order_id'), 'sale', ['order_id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_sale_order_id'), table_name='sale')\n op.drop_table('sale')\n op.drop_index(op.f('ix_order_customer_id'), table_name='order')\n op.drop_table('order')\n op.drop_table('customer')\n # ### end Alembic commands ###\n","repo_name":"philipkantola/Webshop","sub_path":"migrations/versions/cc64fec0ffeb_order_sale_customer_tables.py","file_name":"cc64fec0ffeb_order_sale_customer_tables.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"7155198366","text":"import os\nfrom flask import Flask, render_template, request, send_from_directory, jsonify, Response, send_file\nimport cv2\nimport numpy as np\nimport datetime\nimport processImage\nimport flask_excel as excel\nimport pandas as pd\nfrom io import BytesIO\nimport xlsxwriter\nfrom flask_mail import Mail, Message\nimport random\n#from models import db\n#from vincent.colors import brews\n\napp = Flask(__name__)\nexcel.init_excel(app)\n\n# Mail setup\n\napp.config['MAIL_SERVER']='smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = 'spinachfarmerdemo@gmail.com'\napp.config['MAIL_PASSWORD'] = 'eatspinach'\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\n\nmail = Mail(app)\n\n\n#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/learningflask'\n\n@app.route(\"/\")\ndef welcome():\n return render_template(\"Welcome.html\")\n\n@app.route(\"/prescan\")\ndef prescan():\n return render_template(\"prescan.html\")\n\n@app.route(\"/home\")\ndef home():\n return render_template(\"home.html\", image_A=\"/static/spinA.jpg\", image_B=\"/static/spinB.jpg\", image_C=\"/static/spinC.jpg\", image_outA=\"static/spinOutA.jpg\", image_outB=\"static/spinOutB.jpg\", image_outC=\"static/spinOutC.jpg\")\n\n@app.route(\"/download\", methods=['GET'])\ndef download_file():\n return excel.make_response_from_array([[1, 2], [3, 4]], \"csv\")\n\n\n@app.route(\"/getPlotCSV\")\ndef getPlotCSV():\n now = datetime.datetime.now()\n reportname = \"Report\" + str(now) + \".xlsx\"\n\n\n CLT = {'Poor': 1200, 'Mid': 5000, 'Good': 19000}\n LA = {'Poor': 200, 'Mid': 3000, 'Good': 12000}\n SD = {'Poor': 1100, 'Mid': 4000, 'Good': 17000}\n PHX = {'Poor': 900, 'Mid': 6000, 'Good': 20000}\n\n data1 = [CLT, LA, SD, PHX]\n index1 = ['CLT', 'LA', 'SD', 'PHX']\n\n\n # headings = ['Month', 'CLT', 'LA', 'SD', 'PHX']\n # data2 = [\n # [1,2,3,4,5,6,7,8,9,10,11,12],\n # [-5, 5, 1, 6, -2, 3, 9, 0, 1, -3, 0, 1],\n # [-5, 5, 1, 6, -2, 3, 9, 0, 1, -3, 0, 1],\n # [-5, 5, 1, 6, -2, 3, 9, 0, 1, -3, 0, 1],\n # [-5, 5, 1, 6, -2, 3, 9, 0, 1, -3, 0, 1],\n # ]\n\n df = pd.DataFrame(data1, index=index1)\n # df2 = pd.DataFrame(data2, index=headings)\n\n output = BytesIO()\n\n writer = pd.ExcelWriter(output, engine='xlsxwriter')\n sheetname = 'Farm_Report'\n # sheetname2 = 'Growth_Report'\n\n df.to_excel(writer, sheet_name=sheetname)\n # df2.to_excel(writer, sheet_name=sheetname2)\n\n workbook = writer.book\n worksheet = writer.sheets[sheetname]\n # worksheet2 = writer.sheets[sheetname2]\n\n\n barChart = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})\n # lineChart = workbook({'type': 'line'})\n\n for col_num in range(1, len(CLT) + 1):\n barChart.add_series({\n 'name': [sheetname, 0, col_num],\n 'categories': [sheetname, 1, 0, 4, 0],\n 'values': [sheetname, 1, col_num, 4, col_num],\n 'gap': 20,\n })\n\n # lineChart.add_series({\n # 'name': [sheetname2, 0, 2],\n # 'categories': [sheetname2, 1, 0, 6, 0],\n # 'values': 
[sheetname2, 1, 2, 6, 2],\n    # })\n\n    barChart.set_x_axis({'name': 'Farms'})\n    barChart.set_y_axis({'name': 'Output', 'major_gridlines': {'visible': False}})\n\n\n\n    worksheet.insert_chart('A1', barChart)\n    worksheet.insert_image('G14', '/Users/rafrank/Desktop/CapstoneCV/static/Oracle-Logo.png', {'x_scale': 0.03, 'y_scale': 0.03})\n    writer.close()\n\n    output.seek(0)\n\n    return send_file(output, attachment_filename=reportname, as_attachment=True)\n\n@app.route(\"/send_recall_notification\")\ndef send_recall_notification():\n    msg = Message('Recall Event', sender='spinachfarmerdemo@gmail.com', recipients=['frankr333@gmail.com'])\n    now = datetime.datetime.now()\n\n    msg.body = str(now) + \" --- This is a recall notification regarding your spinach order\"\n    mail.send(msg)\n    return \"Recall notification successfully sent\"\n\n\n# def qualityCheck(brownAreaSum, fullArea):\n#     brownRatio = (brownAreaSum / fullArea) * 100\n#     return round(brownRatio, 2)\n#\n# @app.route(\"/upload/\")\n# def send_image(filename):\n#     return send_from_directory(\"images\", filename)\n\n# @app.route(\"/upload\", methods=['POST'])\n# def upload():\n#     target = os.path.join(APP_ROOT, 'images')\n#     #print(target)\n#\n#     if not os.path.isdir(target):\n#         os.mkdir(target)\n#     else:\n#         print(\"Couldn't create upload directory: {}\".format(target))\n#     #print(request.files.getlist(\"file\"))\n#\n#     for upload in request.files.getlist(\"file\"):\n#         # print(upload)\n#         filename = upload.filename\n#         destination = \"/\".join([target, filename])\n#         # print(\"Target:\", target)\n#         # print(\"Accept incoming file:\", filename)\n#         # print(\"Save it to:\", destination)\n#         upload.save(destination)\n#\n#     return render_template(\"complete.html\", image_name=filename)\n\n\n###################################\n\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=8080, debug=True)\n\n\n","repo_name":"randolphfrank/spinachtool","sub_path":"CapstoneCV/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"514971119","text":"# nycpug.app.core.admin.views\n# special file to handle admin-based moderation views\n\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.core.urlresolvers import reverse\nfrom django.template import RequestContext\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic.base import TemplateView, View\n\nfrom nycpug.app.core.models import *\n\nfrom .forms import *\n\n__all__ = [\n    'ConferenceView',\n    'DashboardView',\n    'ProposalView',\n]\n\n# access mixins\nclass ModeratorMixin(View):\n    \"\"\"restrict access to just admins\"\"\"\n    @method_decorator(never_cache)\n    @method_decorator(user_passes_test(lambda user: user.has_perm('core.create_opinion')))\n    def dispatch(self, *args, **kwargs):\n        return super(ModeratorMixin, self).dispatch(*args, **kwargs)\n\n# views\n\nclass ConferenceView(TemplateView, ModeratorMixin):\n    \"\"\"index page for moderating a conference\"\"\"\n    template_name = 'admin/moderate/conference.html'\n    # query for searching over all proposals\n    PROPOSAL_QUERY = \"\"\"SELECT p.*\n        FROM core_proposal p\n        %s\n        WHERE %s\n        ORDER BY (o.id IS NOT NULL), p.created_at\n    \"\"\"\n\n    def get_context_data(self, slug, *args, **kwargs):\n        context = super(ConferenceView, 
self).get_context_data(*args, **kwargs)\n context['conference'] = conference = get_object_or_404(Conference.objects, slug=slug)\n context['stats'] = {\n 'total_user_opinions': Opinion.objects.filter(proposal__conference=conference, user=self.request.user).count(),\n 'total_user_recommendations': Opinion.objects.filter(proposal__conference=conference, user=self.request.user, is_recommended=True).count(),\n 'total_opinions': Opinion.objects.filter(proposal__conference=conference).count(),\n 'accepted_proposals': conference.proposals.filter(status='accepted').count(),\n 'declined_proposals': conference.proposals.filter(status='declined').count(),\n 'undecided_proposals': conference.proposals.filter(status='').count(),\n 'total_proposals': conference.proposals.count(),\n }\n # works due to unique constraint\n context['stats']['total_user_need_opinions'] = context['stats']['total_proposals'] - context['stats']['total_user_opinions']\n # proposal query\n params = { 'user_id': self.request.user.id, 'conference_id': conference.id }\n joins = [\"\"\"LEFT OUTER JOIN core_opinion o ON o.proposal_id = p.id AND o.user_id = %(user_id)s\"\"\"]\n conditions = ['p.conference_id = %(conference_id)s']\n if self.request.GET.get('status') is not None:\n conditions.append('p.status = %(status)s')\n params['status'] = self.request.GET.get('status')\n sql = self.PROPOSAL_QUERY % (\"\\n\".join(joins), \"\\nAND \".join(conditions),)\n context['proposals'] = Proposal.objects.raw(sql, params)\n return context\n\nclass DashboardView(TemplateView, ModeratorMixin):\n \"\"\"index page for the moderation section\"\"\"\n template_name = 'admin/moderate/dashboard.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super(DashboardView, self).get_context_data(*args, **kwargs)\n context['conferences'] = Conference.objects.filter(active=True).order_by('start_date').all()\n return context\n\nclass ProposalView(ModeratorMixin):\n \"\"\"page for moderating a proposal\"\"\"\n form_class = OpinionForm\n template_name = 'admin/moderate/proposal.html'\n\n def get(self, request, slug, proposal_id, *args, **kwargs):\n \"\"\"view the proposal and opinions\"\"\"\n context = self._setup_context(request, slug, proposal_id)\n return render_to_response(self.template_name, context, RequestContext(request))\n\n def post(self, request, slug, proposal_id,*args, **kwargs):\n \"\"\"view the proposal and opinions\"\"\"\n context = self._setup_context(request, slug, proposal_id)\n # validate the form\n if context.get('form').is_valid():\n context.get('form').save()\n return redirect(reverse('admin_moderate_conference', args=[slug]))\n # show form errors here\n return render_to_response(self.template_name, context, RequestContext(request))\n\n def _setup_context(self, request, slug, proposal_id):\n \"\"\"return a dictionary that setups the request context\"\"\"\n context = {}\n context['conference'] = conference = get_object_or_404(Conference.objects, slug=slug)\n context['proposal'] = proposal = get_object_or_404(conference.proposals, id=proposal_id)\n context['opinions'] = proposal.opinions.order_by('-created_at').all()\n # setup form and instance (if it exists)\n try:\n opinion = context.get('opinions').get(user=request.user)\n except Opinion.DoesNotExist:\n opinion = Opinion()\n if request.POST:\n data = request.POST.copy()\n # force specific validation values\n data['user'] = request.user.id\n data['proposal'] = proposal.id\n data['is_recommended'] = True if request.POST.get('is_recommended') else False\n context['form'] = 
self.form_class(data, instance=opinion)\n else:\n context['form'] = self.form_class(instance=opinion)\n return context\n","repo_name":"jkatz/nycpug","sub_path":"nycpug/app/core/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"14"} +{"seq_id":"26342723670","text":"from django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom .constants import MAX_CHAR_LIMIT\nfrom .validators import validate_not_empty\n\nUser = get_user_model()\n\n\nclass Post(models.Model):\n \"\"\"Модель публикации\"\"\"\n\n text = models.TextField(\n 'Текст публикации',\n validators=[validate_not_empty],\n help_text='Введите текст вашей публикации',\n )\n pub_date = models.DateTimeField('Дата публикации', auto_now_add=True)\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='posts',\n verbose_name='Автор публикации',\n )\n group = models.ForeignKey(\n 'Group',\n blank=True,\n null=True,\n on_delete=models.SET_NULL,\n related_name='posts',\n verbose_name='Сообщество',\n help_text='Выберите сообщество для публикации',\n )\n image = models.ImageField(\n 'Изображение',\n upload_to='posts/',\n blank=True,\n help_text='Загрузите изображение',\n )\n\n class Meta:\n ordering = ('-pub_date',)\n\n def __str__(self):\n return self.text[:MAX_CHAR_LIMIT]\n\n\nclass Group(models.Model):\n \"\"\"Модель сообществ сайта\"\"\"\n\n title = models.CharField('Название сообщества', max_length=200)\n slug = models.SlugField('Адрес страницы сообщества', unique=True)\n description = models.TextField('Описание сообщества')\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n \"\"\"Модель комментариев\"\"\"\n\n post = models.ForeignKey(\n Post,\n on_delete=models.CASCADE,\n related_name='comments',\n verbose_name='Публикация',\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='comments',\n verbose_name='Автор комментария',\n )\n text = models.TextField(\n 'Текст комментария',\n validators=[validate_not_empty],\n help_text='Текст вашего комментария',\n )\n created = models.DateTimeField('Время комментария', auto_now_add=True)\n\n class Meta:\n ordering = ('created',)\n\n def __str__(self):\n return self.text[:MAX_CHAR_LIMIT]\n\n\nclass Follow(models.Model):\n \"\"\"Модель подписок на авторов\"\"\"\n\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='follower',\n verbose_name='Подписчик',\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='following',\n verbose_name='Подписаться на',\n )\n\n class Meta:\n constraints = [models.UniqueConstraint(\n fields=['user', 'author'],\n name='unique_follow',\n )]\n","repo_name":"AlexanderAvrov/yatube_final","sub_path":"yatube/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"20662600366","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 24 11:43:30 2019\n\n@author: antonio\n\"\"\"\n\n\ndef suml(A, U, L, i, j):\n result = 0\n for k in range(j):\n result += L[i][k]*U[k][j]\n return result\n\n\ndef sumu(A, U, L, i, j):\n result = 0\n for k in range(i):\n result += L[i][k]*U[k][j]\n return result\n\n\ndef empty(dim):\n m = []\n for i in range(dim):\n m.append([])\n for j in range(dim):\n if i == j:\n m[i].append(1)\n continue\n m[i].append(0)\n 
return m\n\n\nA = [[1, 1, 1], [3, -1, 2], [2, 0, 2]]\nb = [8, -1, 5]\n\n\ndef khaletsky(A, b):\n dim = len(A)\n L = empty(dim)\n U = empty(dim)\n\n for p in range(dim):\n for i in range(dim):\n if i < p:\n continue\n L[i][p] = A[i][p] - suml(A, U, L, i, p)\n for j in range(dim):\n if p == j:\n continue\n elif j < p:\n continue\n U[p][j] = (A[p][j] - sumu(A, U, L, p, j))/L[p][p]\n\n y = []\n x = [0, 0, 0]\n\n for i in range(dim):\n summa = 0\n for k in range(i):\n summa += L[i][k] * y[k]\n y.append((b[i] - summa) / L[i][i])\n\n for i in range(dim-1, -1, -1):\n summa = 0\n for k in range(i+1, dim):\n summa += U[i][k] * x[k]\n x[i] = (y[i] - summa) / U[i][i]\n\n return x\n","repo_name":"antbz/MNUM","sub_path":"metodos/khaletsky.py","file_name":"khaletsky.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"40305399918","text":"import numpy\nimport torch\nimport argparse\n\nfrom tacotron import modules, models, loss_function, data_functions\nimport common.layers\nimport hparams\n\n\ndef train(model, dataloaders_dict, criterion, optimizer, num_epochs=100):\n model.train(dataloaders_dict, criterion, optimizer, num_epochs=100)\n\ndef main():\n parser = argparse.ArgumentParser(description='Taco2 Training')\n parser = parse_args(parser)\n args, _ = parser.parse_known_args()\n\n log_hardware()\n\n # Get model\n model_name = args.model_name\n parser = models.parse_model_args(model_name, parser)\n args = parser.parse_args()\n model_config = models.gel_model_config(model_name, args)\n model = models.get_model(model_name, model_config, to_cuda=True, initial_bn_weight=True) # nn.Module instance\n\n # Optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n # Loss function\n criterion = loss_function.Taco2Loss()\n\n # Dataset, DataLoader\n # collate_fn =\n trainset = data_functions\n train_sampler\n train_loader\n\n\n","repo_name":"Hweemyoung/Tacotron","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"26644781081","text":"import re\nimport sys\n\nTESTING = True\n\nore_collecting_robots = 0\nclay_collecting_robots = 0\n\n\nclass blueprint:\n __slots__ = (\"id\", \"cost\", \"useful\")\n def __init__(self, input_string: str) -> None:\n values = [int(i) for i in re.findall(r\"\\d+\", input_string)]\n self.id = values[0]\n self.cost = {\n \"ore\": {\"ore\": values[1]},\n \"clay\": {\"ore\": values[2]},\n \"obsidian\": {\"ore\": values[3], \"clay\": values[4]},\n \"geode\": {\"ore\": values[5], \"obsidian\": values[6]}\n }\n self.useful = {\n \"ore\": max(self.cost[\"clay\"][\"ore\"],\n self.cost[\"obsidian\"][\"ore\"],\n self.cost[\"geode\"][\"ore\"]),\n \"clay\": self.cost[\"obsidian\"][\"clay\"],\n \"obsidian\": self.cost[\"geode\"][\"obsidian\"],\n \"geode\": float(\"inf\")\n }\n\nclass State:\n __slots__ = (\"robots\", \"resources\", \"ignored\")\n\n def __init__(self, robots: dict = None, resources: dict = None,\n ignored: list = None):\n self.robots = robots.copy() if robots else {\n \"ore\": 1, \"clay\": 0, \"obsidian\": 0, \"geode\": 0\n }\n self.resources = resources.copy() if resources else {\n \"ore\": 0, \"clay\": 0, \"obsidian\": 0, \"geode\": 0\n }\n self.ignored = ignored.copy() if ignored else []\n\n def copy(self) -> \"State\":\n return\n def __gt__(self, other):\n return\n def __repr__(self):\n 
return f\"State(robots={self.robots}, resources={self.resources})\"\n\ndef part1():\n    file.seek(0)\n    # TODO: solution left unimplemented in the original\n    return\n\ndef part2():\n    file.seek(0)\n    # TODO: solution left unimplemented in the original\n    return\n\n\nif TESTING:\n    file = open(\"sampleInput.txt\", \"r\")\nelse:\n    file = open(\"input.txt\", \"r\")\n\nprint(\"Part 1: \", part1())\nprint(\"Part 2: \", part2())\n","repo_name":"Snakehead181/Advent-of-Code-Python","sub_path":"day19/day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"} +{"seq_id":"1173647206","text":"import numpy as np\n\n# ===============================================================================================\n# Global Padua Properties\n# ===============================================================================================\ndef generating_curve(n, t):\n    '''Return the x1 and x2 coordinates of the point x on a Padua generating curve\n    of order n and angle t.\n    Input:\n        n - max degree of polynomial in space of polynomials of degree at most n in two variables\n        t - angle; or spacing length along the generating curve.\n            Calculated by calc_padua_points() as equi-spaced intervals to make Padua points\n\n    Reference:\n    Bos et. al. Bivariate Lagrange interpolation at the Padua points:\n    the generating curve approach. (2006) - incorrect\n    Padua2DM: fast interpolation and cubature at the Padua\n    points in Matlab/Octave - CORRECT\n    '''\n\n    x2 = -1.0 * np.cos(n*t)\n    x1 = -1.0 * np.cos((n + 1.)*t)\n    return np.asarray([x1, x2])\n\ndef dims_padua_set(n):\n    '''Return the dimensions of the Padua set of order n.\n    Input:\n        n - max degree of polynomial in space of polynomials of degree at most n in two variables.\n    Reference:\n    Bos et. al. Bivariate Lagrange interpolation at the Padua points:\n    the generating curve approach. (2006)\n    '''\n\n    return (n + 2.)*(n + 1.) / 2.\n\n# ===============================================================================================\n# Padua Point Set Generation - Ideal Theory Approach (modified)\n# Reference: BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2007)\n# ===============================================================================================\n\ndef calc_padua_points_v2(n, sigfig=6):\n\n    '''\n    Return Pad_n, the set of Padua points on the square [-1,1]^2, and their weights.\n    Uses a modified Ideal Theory Approach.\n\n    Input:\n    n - max degree of polynomial in space of polynomials of degree at most n in two variables\n\n    Reference:\n    BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2007)\n    De Marchi, Stefan. 
Padua points: genesis, theory, computation and applications (2014)\n    : http://www.math.iit.edu/~Meshfree-methods-seminar/presentations/talk_20140115_stefano.pdf\n    '''\n    dims = int(dims_padua_set(n))\n\n    pointset = generate_padua_points(n, sigfig=sigfig)\n    weights = [padua_cubature_weights(n, pnt) for pnt in pointset]\n\n    return pointset, weights\n\ndef padua_cubature_weights(n, x):\n    ''' Wrapper function for get_weight() in order to troubleshoot theory typos in Padua literature.\n    '''\n\n    global_weight = float(n * (n + 1.0))\n\n    # return 1.0 / (get_weight(n, x) * global_weight) # IDEAL THEORY approach w = 1/K*(x, x) in Prop 3.3; K*(x, x) = get_weight(n, x) * global_weight\n    return get_weight(n, x) / global_weight # w_A from Generating Curve Approach In Th.1 ; equivalently w_epsilon in De Marchi talk (2014) p33\n\n\ndef get_weight(n, x, vertex_weight=0.5,\n               edge_weight=1.,\n               interior_weight=2.0):\n\n    ''' Return the weight of Padua point x, by classifying it as a boundary,\n    vertex or interior point.\n\n    Notes:\n    Vertex identification from the Ideal Theory approach (above Prop 3.3)\n    doesn't work, so we try an alternative technique.\n    Ref: BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2007)\n    '''\n\n    # Vertex\n    if abs(x[0]) == abs(x[1]): # modified approach to Ideal Theory approach\n        return vertex_weight\n\n    # Boundary\n    for x_coord in [x[0], x[1]]:\n        if abs(x_coord) == 1.0:\n            return edge_weight\n\n    # Interior\n    return interior_weight\n\n\ndef generate_padua_points(n, sigfig=6):\n\n    '''\n    Return Pad_n, the set of Padua points on the unit square [-1,1]^2.\n\n    Input:\n    n - max degree of polynomial in space of polynomials of degree at most n in two variables\n\n    Notes:\n    Cross-checked with Transformed Generating Curve approach (my code) and padua.py [authors]\n    Eqns (1.1) and (1.2) in ideal theory approach - modified floor function to ceiling\n    function to compute j-values in (1.1).\n    Ref: BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2007)\n    '''\n\n    # jvalues = np.arange(1, np.floor(n * 0.5) + 1. + 1.)\n    # ## Eqns (1.1, 1.2) in Ideal theory approach paper\n    # ## Generates correct point set for even n. But odd n, missing points for some k values, with x2 = -1.\n    # ## Number of missing values = np.ceil(n/2), n odd\n    # ## Corresponds to left edge boundary points (x2=-1) not being computed for odd n\n\n    jvalues = np.arange(1, np.ceil(n * 0.5) + 1. + 1.) 
# modified Ideal Theory approach\n    # Generates correct point set for odd n, and even n, but with duplicate points\n    # ## Duplicates are removed by rounding to sigfig decimal points and using set()\n\n    dims_j = len(jvalues)\n    dims_k = n + 1\n    dims_total = int(dims_padua_set(n))\n\n    point_set = np.zeros((dims_total, 2))\n\n    list_of_tuples = []\n    for idx_k in np.arange(dims_k): # 0 <= k <= n\n\n        x1_values = np.cos(idx_k * np.pi / n) * np.ones(dims_j)\n\n        if idx_k % 2 != 0: # ( k is odd)\n            x2_values = np.cos((2 * jvalues - 2) * np.pi / (n + 1.)) # broadcasting\n\n        if idx_k % 2 == 0: # ( k is even)\n            x2_values = np.cos((2 * jvalues - 1) * np.pi / (n + 1.)) # broadcasting\n\n        # modified Ideal Theory approach\n        list_of_tuples += zip(np.round(x1_values, sigfig), np.round(x2_values, sigfig))\n\n    # modified Ideal Theory approach\n    remove_duplicates = list(set(list_of_tuples))\n    point_set[:, 0], point_set[:, 1] = np.asarray(list(zip(*remove_duplicates)))  # list() so this also works on Python 3\n\n    return point_set\n\n# ===============================================================================================\n# Padua Point Set Generation - Generating Curve Approach (modified)\n# Reference: Bos et. al. Bivariate Lagrange interpolation at the Padua points: the generating curve approach. (2006)\n# ===============================================================================================\n\ndef padua_index_weights(n, \n                        edge_weight=1., \n                        vertex_weight=0.5,\n                        interior_weight=2.):\n    \n    '''Return the pairs of indices (j,m) and their weight for each point in the Padua set of order n\n    \n    Input:\n    n - max degree of polynomial in space of polynomials of degree at most n in two variables\n    Reference:\n    Bos et. al. Bivariate Lagrange interpolation at the Padua points: the generating curve approach. (2006)\n    \n    '''\n    \n    dims = int(dims_padua_set(n))\n    global_weight = (n * (n + 1.))\n    \n    # VERTEX\n    vertex_0 = np.zeros((1, 3))\n    vertex_0[:, 2] = vertex_weight * global_weight\n    \n    # VERTICAL EDGES\n    vert_edge_m = None\n    interior_indices = None\n    if n > 1:\n        vert_edge_m = np.zeros((n-1, 3))\n        vert_edge_m[:, 1] = np.arange(1, n)\n        vert_edge_m[:, 2] = edge_weight * global_weight\n    \n    interior=[]\n    for i in range(n):\n        j = i + 1.0\n        m_max = n - j \n\n        if m_max > 0 :\n            for item in range(1, int(m_max + 1)):\n                interior.append([j, float(item)])\n    \n    interior_indices = np.zeros((len(interior), 3))\n    interior_indices[:,0:2] = np.asarray(interior)\n    interior_indices[:, 2] = interior_weight * global_weight\n    \n    # VERTEX\n    vertex_n = None\n    \n    # HORIZONTAL EDGES\n    hort_edge_j= None\n    if n > 0:\n        vertex_n = np.zeros((1, 3))\n        vertex_n[0,1] = n\n        vertex_n[0,2] = vertex_weight * global_weight\n        \n        #print (\"interior_weight: \", vertex_weight * global_weight)\n\n        hort_edge_j = np.zeros((n, 3))\n        hort_edge_j[:, 0] = np.arange(1, n + 1)\n        hort_edge_j[:, 2] = edge_weight * global_weight\n        \n        #print (\"interior_weight: \", edge_weight * global_weight)\n    \n    \n    # STACK VERTICES, VERTICAL EDGES, HORIZONTAL EDGES, INTERIOR POINTS\n    index_set = np.vstack([vertex_0])\n    for item in [vertex_n, vert_edge_m, hort_edge_j, interior_indices]:\n        if item is not None:\n            index_set = np.vstack([index_set, item])\n\n    return index_set\n\ndef calc_padua_points(n):\n\n    '''\n    Return Pad_n, the set of Padua points on the square [-1,1]^2, and their weights.\n    Uses a Generating Curve Approach.\n\n    Input:\n    n - max degree of polynomial in space of polynomials of degree at most n in two variables\n    '''\n    index_weights = padua_index_weights(n)\n    pts = int(dims_padua_set(n))\n    padua_points = np.zeros((pts, 
2))\n\n    if n > 0:\n\n        for idx_pt in range(pts):\n\n            j, m = index_weights[idx_pt, 0:2]\n            arg = ((j * n) + m * (n + 1.)) * np.pi / (n * (n + 1.))\n            padua_points[idx_pt, :] = generating_curve(n, arg)\n\n    return padua_points, index_weights[:, 2]\n\ndef transform_points(padua_points):\n    '''\n    Modifications to Generating Curve Approach\n    Empirical plots show that Padua points generated by Bos et. al. in 2006\n    correspond to a global -1.0 (rotation) and swapped x1, x2 coordinates (reflection).\n\n    '''\n\n    transformed_points = np.zeros_like(padua_points)\n    transformed_points[:, 0] = -1.0 * padua_points[:, 1]\n    transformed_points[:, 1] = -1.0 * padua_points[:, 0]\n\n    return transformed_points\n\n\n\n# ===============================================================================================\n# Fundamental Lagrange Polynomial Calculations on Padua Point Set (VIA IDEAL THEORY APPROACH)\n#\n# Interpolation functions below do not use Padua weights (e.g. via padua_cubature_weights(n, x))\n# Instead, Lagrangian basis functions are computed directly as: K*(x,y) / K*(x,x).\n# \n# References:\n# Bos et. al. Bivariate Lagrange interpolation at the Padua points: the generating curve approach. (2006)\n# BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2007)\n# ===============================================================================================\n\n\ndef K_star(n, x, y):\n    \n    '''\n    Reference:\n    Proposition 3.1. in Bos et. al. BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2006)\n\n    '''\n    ans = reproducing_kernel(n, x, y) - T_n(n, x[0])*T_n(n, y[0])\n    \n    return ans\n\ndef reproducing_kernel(n, A, B):\n    ''' Return the reproducing kernel for the inner product defined on the space of polynomials on a square.\n    \n    Reference:\n    Bos et. al. Bivariate Lagrange interpolation at the Padua points: the generating curve approach. (2006)\n    In particular, refer Lemma 2 for the form of the reproducing kernel for any two points A, B on the square.\n    '''\n    \n    theta1 = np.arccos(A[0])\n    theta2 = np.arccos(A[1])\n    phi1 = np.arccos(B[0])\n    phi2 = np.arccos(B[1])\n    \n    ans=0.\n    ans += D_operator(n, theta1 + phi1, theta2 + phi2)\n    ans += D_operator(n, theta1 + phi1, theta2 - phi2)\n    ans += D_operator(n, theta1 - phi1, theta2 + phi2)\n    ans += D_operator(n, theta1 - phi1, theta2 - phi2)\n    \n    return ans\n\ndef D_operator(n, alpha, beta):\n    \n    '''Helper function for reproducing kernel function.\n    \n    Reference: Lemma 2 in\n    Bos et. al. Bivariate Lagrange interpolation at the Padua points: the generating curve approach. (2004)\n    '''\n    \n    numer = np.cos(alpha * (n + 0.5)) * np.cos(alpha * 0.5)\n    numer += -1.0 * np.cos(beta * (n + 0.5)) * np.cos(beta * 0.5)\n    \n    denom = np.cos(alpha) - np.cos(beta)\n    \n    ans = 0.5 * numer / denom\n    \n    #print(\"D operator\", ans)\n    \n    if np.isnan(ans):\n        \n        if numer == 0. and denom == 0. :\n            ans = 1.\n        \n        #print(\"D operator reset to\", ans)\n        \n        # defensive check: numer/0. with numer != 0 gives +/-inf rather than NaN,\n        # so this branch should not normally be reached\n        if denom == 0. and numer != 0:\n            print(\"Warning: D_operator denominator is zero but numerator is nonzero\")\n        \n    return ans\n\n\ndef fundamental_L_B(n, X, B):\n    ''' Return L_B(X), the coefficient for a fundamental Lagrange polynomial\n    interpolant evaluated at Padua point B for arbitrary point X. \n    \n    The polynomials L_B are indeed the fundamental Lagrange polynomials, i.e., they satisfy\n    L_B(A) = \\delta_{A,B} for A, B in Pad_n, where \\delta_{A,B} = 1 if A==B else 0\n    \n    References: \n        Theorem 2 in Bos et. al. Bivariate Lagrange interpolation at the Padua points: the generating curve approach. (2006)\n        Theorem 3.2. in Bos et. al. 
BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2007)\n        Page 33. in De Marchi talk (2017)\n    \n    '''\n    \n    scalar_coeff_2 = K_star(n, X, B) / K_star(n, B, B) # IDEAL THEORY\n    # scalar_coeff_2 = K_star(n, B, X) / K_star(n, B, B) # IDEAL THEORY change order\n    # scalar_coeff_2 = K_star(n, X, B) * weight_B # should be equivalent according to IDEAL THEORY, GEN CURVE, and De Marchi Talk (2014)\n    \n    return scalar_coeff_2\n\ndef T_n(n, x_i):\n    '''Chebyshev polynomial of the first kind of order n. Confirmed to be identical to Scipy.\n    Supports broadcasting.\n    \n    Reference:\n    Bos et. al. BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2006)\n    '''\n    \n    theta = np.arccos(x_i)\n    ans = np.cos(n * theta)\n    \n    return ans\n\ndef f_interpolant(n, x1, x2, padua_points, data_points):\n    ''' Interpolant using fundamental Lagrange polynomials at the Padua points \n    \n    Reference:\n    BIVARIATE LAGRANGE INTERPOLATION AT THE PADUA POINTS: THE IDEAL THEORY APPROACH (2007)\n    '''\n    \n    \n    L_B_vector = np.asarray([ fundamental_L_B(n, [x1, x2], B) for B in padua_points])\n    f_interpolant = np.sum(L_B_vector * data_points) \n    \n    return f_interpolant\n","repo_name":"riddhisw/nmqa","sub_path":"paduaq/pdpoints.py","file_name":"pdpoints.py","file_ext":"py","file_size_in_byte":13785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"14"}