diff --git "a/3812.jsonl" "b/3812.jsonl" new file mode 100644--- /dev/null +++ "b/3812.jsonl" @@ -0,0 +1,749 @@ +{"seq_id":"494921685","text":"import os\nimport shutil\n\n\n\nfrom files_extractor import extract_file, is_compressed_file\n\n\nclass CompressedFile:\n def __init__(self, report):\n \tself.report = report\n\n def extract_files(self, compressed_file, destination_path):\n \"\"\"\n Extract files to destination_path from compressed files that are in compressed_path \n \"\"\"\n r = False\n \n if not os.path.exists(destination_path):\n os.makedirs(destination_path)\n \n self.report.write('package file: ' + compressed_file, True, False, True)\n if os.path.isfile(compressed_file):\n if is_compressed_file(compressed_file):\n self.report.write('Extract ' + compressed_file + ' to ' + destination_path, True, False, True) \n if self.__extract__(compressed_file, destination_path):\n r = True\n if not r:\n self.report.write(compressed_file + ' is not a valid file. It must be a compressed file.', True, True, True)\n \n return r\n\n def __extract__(self, compressed_file, destination_path):\n r = False\n # create destination path\n if not os.path.exists(destination_path):\n os.makedirs(destination_path)\n # delete content of destination path\n if os.path.exists(destination_path):\n for i in os.listdir(destination_path):\n os.unlink(destination_path + '/' + i)\n # create tempdir\n temp_dir = self.create_temp_dir()\n # extract in tempdir\n if extract_file(compressed_file, temp_dir):\n # eliminate folders\n for i in os.listdir(temp_dir):\n if os.path.isfile(temp_dir + '/' + i):\n shutil.copy(temp_dir + '/' + i, destination_path)\n os.unlink(temp_dir + '/' + i)\n elif os.path.isdir(temp_dir + '/' + i):\n for f in os.listdir(temp_dir + '/' + i ):\n if os.path.isfile(temp_dir + '/' + i + '/' + f):\n shutil.copy(temp_dir + '/' + i + '/' + f, destination_path)\n os.unlink(temp_dir + '/' + i + '/' + f)\n else:\n self.report.write(f + ' is directory and its contents will be ignored.', True, True, True)\n shutil.rmtree(temp_dir + '/' + i)\n shutil.rmtree(temp_dir)\n r = True\n return r\n\n def create_temp_dir(self):\n import tempfile\n return tempfile.mkdtemp().replace('\\\\', '/')\n \n \n\n \n","sub_path":"src/xml_converter/src/reuse/files/compressed_file.py","file_name":"compressed_file.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"222011955","text":"# Standard libs\nimport argparse\nimport logging\nimport os\nimport tempfile\nfrom datetime import datetime\nfrom glob import glob\nfrom shutil import rmtree\n\n# 3rd party libs\nimport matplotlib.pyplot as plt\nimport netCDF4\nimport numpy as np\nimport pyart\nimport utm\n\nfrom nexradaws.scripts.aws import get_nexrad_data\nfrom utils import *\n\n# Sweep to take\nSWEEP = 0\n\n# Cave file\ncave_csv = 'cave_locations.csv'\n# caves = read_caves(cave_csv, 'KDFX')\n\n# Expected radar grid (azimuth and range)\nAZ_SIZE = None\nRNG_SIZE = None\nAZ = None\nRNG = None\n\n# Thresholds\nPHIDP_THRESH = 70\nREF_THRESH = 5\n\n\ndef process_files(start_date, end_date, site, data_dir, out_dir, verbose=False):\n if verbose:\n logging.basicConfig(format=\"%(asctime)s:%(levelname)s:%(message)s\", level=logging.DEBUG)\n else:\n # Set up logging\n logging.basicConfig(format=\"%(asctime)s:%(levelname)s:%(message)s\", level=logging.INFO)\n\n # Set up the directory for the radar download\n if data_dir is None:\n data_dir = tempfile.mkdtemp()\n logging.debug(\"Created temp dir: 
{0}\".format(data_dir))\n tmp_dir = True\n else:\n tmp_dir = False\n\n # Start by trying to download the data (should go quick if the data is already in the right directory)\n logging.debug(\"Starting to download data\")\n data_dirs = get_nexrad_data(site, start_date, end_date, data_dir)\n # Make a list of the files to process\n files = []\n for dir in data_dirs:\n dir = os.path.join(dir, '*')\n files = files + glob(dir)\n logging.debug(\"Processing {0} files\".format(len(files)))\n\n # Do some bookkeeping for later\n image_base_dir = os.path.join(out_dir, 'images')\n logging.debug(\"Images being written to {0}\".format(image_base_dir))\n nc_base_dir = os.path.join(out_dir, 'netcdf')\n logging.debug(\"NetCDFs being written to {0}\".format(nc_base_dir))\n\n # Iterate through the files\n first = True\n count = 0\n for f in files:\n logging.info(\"Processing {0}\".format(f))\n\n try:\n radar = pyart.io.read_nexrad_archive(f)\n except Exception as e:\n print(\"Can't open file \" + f)\n continue\n\n dt = pyart.graph.common.generate_radar_time_begin(radar) # Scan time\n slice = radar.get_slice(SWEEP)\n\n if radar.metadata['vcp_pattern'] not in [31, 32]:\n logging.warning(\"VCP other than clear air mode found, skipping file\")\n continue\n\n if dt > datetime.strptime(end_date, \"%Y%m%d-%H%M%S\"):\n break\n\n if first:\n radar_lat = radar.latitude['data'][0]\n radar_lon = radar.longitude['data'][0]\n\n cave_x = []\n cave_y = []\n # Convert cave lat/lon to x/y coord system\n x_radar, y_radar, _, _ = utm.from_latlon(radar_lat, radar_lon)\n\n caves = read_caves(cave_csv, site)\n\n for cave in caves:\n # Convert lat-lons to utm for\n x_bat, y_bat, _, _ = utm.from_latlon(float(cave['lat']), float(cave['lon']))\n # Calc relative x and y of roost\n x_rel_m = (x_bat - x_radar)\n y_rel_m = (y_bat - y_radar)\n cave['x'] = x_rel_m / 1e3\n cave['y'] = y_rel_m / 1e3\n\n cave_x.append(x_rel_m/1e3)\n cave_y.append(y_rel_m/1e3)\n # Get rid of misc crap\n del x_bat, y_bat, _\n\n # Convert lat-lons to utm forndvf0\n x_radar, y_radar, _, _ = utm.from_latlon(radar_lat, radar_lon)\n\n # Init arrays for averaging\n phi_dp_running = np.zeros_like(radar.fields['differential_phase']['data'][slice].data)\n phi_dp_weighted_running = np.zeros_like(phi_dp_running)\n phi_dp_linear_weighted_running = np.zeros_like(phi_dp_running)\n ref_linear_running = np.zeros_like(radar.fields['reflectivity']['data'][slice].data)\n ref_running = np.zeros_like(ref_linear_running)\n eta_linear_running = np.zeros_like(ref_linear_running)\n\n # Get azimuth, range, and elevation for conversion to x and y\n range_m, az_deg = np.meshgrid(radar.range['data'], radar.azimuth['data'][slice])\n az_rad = np.deg2rad(az_deg)\n elev = np.deg2rad(np.mean(radar.elevation['data'][slice]))\n\n x_m = range_m * np.cos(elev) * np.sin(az_rad)\n y_m = range_m * np.cos(elev) * np.cos(az_rad)\n\n # Get the correct order of the azimuths\n az_p = np.argsort(az_rad, axis=0)[:, 0]\n\n # Sort the x and y grids based on the order of the azimuths\n x_m = x_m[az_p, :]\n y_m = y_m[az_p, :]\n\n if first:\n RNG = range_m[0, :]\n RNG_SIZE = RNG.size\n AZ = az_rad[:, 0][az_p]\n AZ_SIZE = AZ.size\n first = False\n\n # # Apply filters and corrections to data\n logging.debug(\"Applying corrections\")\n gate_filter = pyart.filters.GateFilter(radar)\n gate_filter.exclude_below('differential_phase', PHIDP_THRESH)\n gate_filter.exclude_below('reflectivity', REF_THRESH)\n gate_filter = pyart.correct.despeckle_field(radar, 'differential_phase', gatefilter=gate_filter)\n\n # Extract the 
desired data and get it in the correct order\n phi_dp = radar.fields['differential_phase']['data'][slice][az_p, :]\n ref = radar.fields['reflectivity']['data'][slice][az_p, :]\n\n # Convert the filter to mask\n phi_dp.mask = gate_filter.gate_excluded[az_p, :]\n ref.mask = gate_filter.gate_excluded[az_p, :]\n\n # Check to make sure the data lines up with the desired size\n if phi_dp_running.shape != phi_dp.shape:\n logging.warning(\"Data size other than ({}, {}) found\".format(AZ_SIZE, RNG_SIZE))\n logging.warning(\"Data size: ({}, {})\".format(phi_dp.shape[0], phi_dp.shape[1]))\n if az_rad.size < AZ_SIZE:\n logging.debug(\"Applying pad to account for having too few azimuths\")\n diff = abs(az_rad.size - AZ_SIZE)\n phi_dp = np.pad(phi_dp, diff, mode='constant')[diff:, diff:-diff]\n ref = np.pad(ref, diff, mode='constant')[diff:, diff:-diff]\n elif az_rad.size > AZ_SIZE:\n logging.debug(\"Chopping off end of grid because too many azimuths\")\n phi_dp = phi_dp[:AZ_SIZE, :]\n ref = ref[:AZ_SIZE, :]\n else:\n logging.critical(\"SOMETHING WENT WRONG WITH THE RANGE\")\n raise Exception\n\n # Add data to the running sums\n # try:\n logging.debug(\"Added data to running sums\")\n phi_dp_running[~phi_dp.mask] += phi_dp[~phi_dp.mask]\n phi_dp_weighted_running[~phi_dp.mask] += phi_dp[~phi_dp.mask] * ref[~phi_dp.mask]\n phi_dp_linear_weighted_running[~phi_dp.mask] += phi_dp[~phi_dp.mask] * db2pow(ref[~phi_dp.mask])\n\n ref_running[~ref.mask] += ref[~ref.mask]\n ref_linear_running[~ref.mask] += db2pow(ref[~ref.mask])\n eta_linear_running[~ref.mask] += db2pow(ref[~ref.mask] + 11.6)\n\n # Make some plots\n plt.figure(figsize=(16, 8))\n plt.subplot(1, 2, 1)\n plt.xlim(-100, 100)\n plt.ylim(-100, 100)\n plt.pcolormesh(x_m * 1e-3, y_m * 1e-3, phi_dp, vmin=0, vmax=360, cmap='nipy_spectral')\n plt.colorbar()\n plt.scatter(0, 0, color='k')\n plt.scatter(cave_x, cave_y, c='y')\n\n plt.subplot(1, 2, 2)\n plt.xlim(-100, 100)\n plt.ylim(-100, 100)\n plt.pcolormesh(x_m * 1e-3, y_m * 1e-3, ref, vmin=0, vmax=50)\n plt.colorbar()\n plt.scatter(0, 0, color='k')\n\n # Create directory if needed\n image_dir = os.path.join(image_base_dir, dt.strftime('%Y/%m/'))\n if not os.path.exists(image_dir): os.makedirs(image_dir)\n image_name = os.path.join(image_dir, dt.strftime('{site}_%Y%m%d_%H%M%S.png'.format(site=site)))\n\n plt.suptitle(dt.strftime('{site} %Y%m%d-%H%M%S Elev: {elev}'.format(elev=np.rad2deg(elev), site=site)))\n logging.debug(\"Saving image: {}\".format(image_name))\n plt.savefig(image_name)\n plt.close()\n # plt.show(block=True)\n\n count += 1\n\n # If no files were processed\n if count == 0:\n return\n\n # Write out the netcdf\n logging.info(\"Preparing netCDF\")\n start_time = datetime.strptime(start_date, \"%Y%m%d-%H%M%S\")\n if not os.path.exists(nc_base_dir): os.makedirs(nc_base_dir)\n nc_name = os.path.join(nc_base_dir, start_time.strftime(\"{}_average_%Y%m%d.nc\".format(site)))\n nc = netCDF4.Dataset(nc_name, 'w')\n\n # Add the dimensions\n print(phi_dp_running.shape[1], AZ.shape)\n az = nc.createDimension('az', size=phi_dp_running.shape[0],)\n rng = nc.createDimension('rng', size=phi_dp_running.shape[1],)\n\n # Add the attributes\n attrs = {'num_scans': count,\n 'start_time': start_date,\n 'end_time': end_date,\n 'radar_lat': radar_lat,\n 'radar_lon': radar_lon,\n 'sweep_number': SWEEP,\n 'elevation': np.rad2deg(elev),\n 'site': site\n }\n nc.setncatts(attrs)\n\n # Add the variables\n var = nc.createVariable('phi_dp_sum', datatype='f8', dimensions=('az', 'rng'))\n var.setncattr('units', 'degrees')\n 
var[:] = phi_dp_running\n\n var = nc.createVariable('phi_dp_weighted_sum', datatype='f8', dimensions=('az', 'rng'))\n var[:] = phi_dp_weighted_running\n\n var = nc.createVariable('phi_dp_linear_weighted_sum', datatype='f8', dimensions=('az', 'rng'))\n var[:] = phi_dp_linear_weighted_running\n\n var = nc.createVariable('ref_linear_sum', datatype='f8', dimensions=('az', 'rng'))\n var.setncattr('units', 'mm^6/m^3')\n var[:] = ref_linear_running\n\n var = nc.createVariable('ref_sum', datatype='f8', dimensions=('az', 'rng'))\n var[:] = ref_running\n\n var = nc.createVariable('eta_linear_sum', datatype='f8', dimensions=('az', 'rng'))\n var[:] = eta_linear_running\n\n var = nc.createVariable('azimuth', datatype='f8', dimensions=('az',))\n var.setncattr('units', 'radians')\n var[:] = AZ\n\n var = nc.createVariable('range', datatype='f8', dimensions=('rng',))\n var.setncattr('units', 'm')\n var[:] = RNG\n\n nc.close()\n logging.info(\"NetCDF write successful\")\n\n # Delete the temporary folder if used\n if tmp_dir is True:\n logging.debug(\"Removing temp dir\")\n rmtree(data_dir)\n\nif __name__=='__main__':\n # Set up argument parser\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', dest='start_date', help=\"YYYYmmdd-HHMMSS\")\n parser.add_argument('-e', dest='end_date', help=\"YYYYmmdd-HHMMSS\")\n parser.add_argument('-r', dest='radar')\n parser.add_argument('-d', dest='data_dir',\n help='directory to download radar data (uses tmp dir otherwise)',\n default=None)\n parser.add_argument('-o', dest='out_dir',\n help='Directory to put images and netcdfs. Code will organize the dir structure')\n args = parser.parse_args()\n\n process_files(args.start_date, args.end_date, args.radar, args.data_dir, args.out_dir)\n\n\n\n","sub_path":"nightly_average.py","file_name":"nightly_average.py","file_ext":"py","file_size_in_byte":11166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491821998","text":"from selenium import webdriver\nimport pytest\n\n# browser = webdriver.Firefox()\n# browser.get('http://localhost:8000')\n\n\nclass TestNewVisitor:\n\n @pytest.yield_fixture\n def driverPJS(self):\n driver = webdriver.PhantomJS()\n print(\"Created PhantomJS driver\")\n driver.get(\"http://localhost:8000\")\n yield driver\n driver.quit()\n print(\"\\nDestroyed PhantomJS driver\")\n\n def test_page_title(self, driverPJS):\n # She notices the page title and header mention to-do lists\n print(\"Running test\")\n assert 'Django' in driverPJS.title\n\n\n\n # # She is invited to enter a to-do item straight away\n\n\n","sub_path":"tests/functional_tests/selenium_test.py","file_name":"selenium_test.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"450022947","text":"import flask\nimport datetime\nimport random\n\napp = flask.Flask(\"my_app\")\n\n\n@app.route(\"/like\")\n@app.route(\"/home\")\ndef first_view():\n my_name = \"adam\"\n now = datetime.datetime.now()\n time = \"{}/{}/{}\".format(now.day, now.month, now.year)\n secend = datetime.time\n rendr = flask.render_template(\"te.html\", name=my_name, time=time\n , sea=secend)\n return rendr\n\n\n@app.route(\"/\")\ndef welc(num):\n number = random.randrange(1, 10)\n if num == number:\n return \"your number is\" + number\n else:\n return \"number not match\"\n\n\n@app.route(\"/me\")\ndef sec_view():\n t2 = flask.render_template(\"t2\", )\n return t2\n\n\nif __name__ == \"__main__\":\n 
app.run(port=5000)\n","sub_path":"python+flask_work/class_21/flask1.py","file_name":"flask1.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"509954716","text":"# maximum path sum II\nf = open('input/064input.txt').read().split('\\n')\nfor x in range(len(f)):\n\tf[x] = list(map(int,f[x].split()))\nf = f[:-1] # weird empty array in the end.\n\nrow_now = len(f) - 2 # second last\n\nwhile row_now >= 0:\n\trow_down = row_now + 1\n\tfor ind in range(len(f[row_now])): f[row_now][ind] += max(f[row_down][ind],f[row_down][ind+1])\n\trow_now -= 1\n\nprint (f[0][0]) # max sum.\n\n","sub_path":"euler/py/067.py","file_name":"067.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125980529","text":"#!usr/bin/env python\nimport numpy as np\nfrom lxml import etree\nimport sys\n\n# some constant\nC2B = 2.\nagf_bs = 0.7\n\n# get filename\nprint(sys.argv)\nsim_type = str(sys.argv[1])\n\n# separate the string to get site name and setup\nsite_name = sim_type.split('_')[0]\nsetup = sim_type.split('_')[1]\n\nxml_fn = \"{:s}_config.xml\".format(sim_type)\n\n# first check the setup array to determine which pft to use\nif setup[0:2] == 'p0':\n # ED2 original\n pft_array=[2,3,4]\nelif setup[0:2] == 'p1':\n # ED2 TLP 6 PFT\n pft_array=[2,3,4,5,6,7]\nelif setup[0:2] == 'p2':\n # ED2 XXT 3 PFT\n pft_array=[2,3,4]\n\n\n\n# write the header for xml files\ns = '''\n'''\ntree = etree.fromstring(s)\n\n# dictionary to store parameters to change for all PFTs\npft_dict_all = {\n 'root_beta' : '0.01',\n }\n\n# create three pfts in xml\nxml_pfts=[]\nfor ipft, pft in enumerate(pft_array):\n xml_pfts.append(etree.SubElement(tree,\"pft\"))\n etree.SubElement(xml_pfts[ipft], \"num\").text = \"{:d}\".format(pft)\n for trait in pft_dict_all.keys():\n etree.SubElement(xml_pfts[ipft], trait).text = pft_dict_all[trait]\n\n# always modify fuse_dbh_max\nxml_ff = etree.SubElement(tree,\"fusefiss\")\netree.SubElement(xml_ff, \"fuse_dbh_max\").text = \"{:f}\".format(20.)\n\n\n\n# now setup hydro\nif (setup[1] == '0'):\n # ED2 default\n #do nothing\n pass\nelif (setup[1] == '2'):\n # ED2 XXT\n # do nothing\n # need to change qsw\n for ipft, pft in enumerate(pft_array):\n etree.SubElement(xml_pfts[ipft], \"qsw\").text = \"{:f}\".format(0.)\n\nelif (setup[1] == '1'):\n # ED2 TLP\n # first read in the h0_params.xml\n template_params_tree = etree.parse('./template_params.xml')\n template_root = template_params_tree.getroot()\n\n # loop over the pft setups and write into the new xml_pfts\n for ipft in np.arange(3):\n pft_to_copy = template_root[ipft]\n\n # write the intolerant pft\n pft_to_write = xml_pfts[ipft]\n\n params_dict = {\n 'wood_psi50' : -1.2 * 102.,\n 'wood_Kmax' : 3.3 / 102.,\n 'leaf_psi_tlp' : -1.67 * 102.,\n 'stoma_psi_b' : -1.67 * 102.,\n 'stoma_psi_c' : 3.,\n 'qsw' : 0.,\n }\n\n exist_vars = [element.tag for element in pft_to_write.iter()]\n # loop over pft_to_copy\n\n for element in pft_to_copy.iter():\n if element.tag == 'pft' or element.tag == 'init_laimax':\n # pass these two tags\n continue\n\n\n # over write the hydraulic properties\n if element.tag in params_dict.keys():\n etree.SubElement(pft_to_write, element.tag).text = \"{:f}\".format(\n params_dict[element.tag])\n elif element.tag in exist_vars:\n pass\n # no need to do anything\n else:\n etree.SubElement(pft_to_write, element.tag).text = element.text\n\n\n # write the tolerant 
pft\n params_dict = {\n 'wood_psi50' : -2.2 * 102.,\n 'wood_Kmax' : 3. / 102.,\n 'leaf_psi_tlp' : -2.83 * 102.,\n 'stoma_psi_b' : -2.83 * 102.,\n 'stoma_psi_c' : 3.5,\n 'qsw' : 0.,\n }\n\n pft_to_write = xml_pfts[ipft+3]\n\n exist_vars = [element.tag for element in pft_to_write.iter()]\n\n # loop over pft_to_copy\n for element in pft_to_copy.iter():\n if element.tag == 'pft' or element.tag == 'init_laimax':\n continue\n\n # over write the hydraulic properties\n if element.tag in params_dict.keys():\n etree.SubElement(pft_to_write, element.tag).text = \"{:f}\".format(\n params_dict[element.tag])\n elif element.tag in exist_vars:\n pass\n # no need to do anything\n else:\n etree.SubElement(pft_to_write, element.tag).text = element.text\n\n\noutput_str = etree.tostring(tree, encoding=\"UTF-8\",\n xml_declaration=True,\n pretty_print=True,\n doctype='')\n\n# write into file\nwith open(xml_fn,'wb') as f:\n f.write(output_str)\n","sub_path":"ED2-treering/run_HKK/create_xml.py","file_name":"create_xml.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499078090","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\nimport pandas as pd\nplt.style.use('ggplot')\n# from sklearn import preprocessing\n# import seaborn as sns\n\n# def plot(v,pv_anPDF,pv_anCDF,pv_emp_exp,pv_emp_log,phase):\ndef plot(v,pv_anPDF,pv_emp_exp,pv_emp_exp_95,phase):\n print(pv_emp_exp_95)\n plt.hist(pv_emp_exp,bins=100,density=True,color='mediumseagreen',alpha=1,label='Simulated PDF')\n # sns.distplot(pv_emp_exp, hist=True, kde=False, bins=100, norm_hist=True)\n plt.plot(v,pv_anPDF,color='darkred',linestyle='dashed',label='Analytical PDF')\n # plt.plot(v,pv_anCDF,'k:',label='Analytical CDF (exponential)')\n # plt.hist(pv_emp_log,bins=100,density=1,color='green',alpha=0.7,label='Simulated PDF (log-norm)')\n plt.axvline(x=pv_emp_exp_95,color='k',label='95% = £{}'.format(pv_emp_exp_95))\n plt.xlabel('Present Value £ '+ '('+phase+')')\n plt.ylabel('Frequency')\n plt.legend(loc='best',fontsize=11)\n # fig.tight_layout()\n plt.show()\n\n\ndef pv_total(pv1,pv2,pv_95,phase):\n plt.hist(pv1,bins=100,density=1,color='blue',alpha=0.6,label='Simulated PDF (exponential)')\n plt.hist(pv2,bins=100,density=1,color='green',alpha=0.7,label='Simulated PDF (log-norm)')\n plt.axvline(x=pv_95,color='k',label='95% = £{}'.format(pv_95))\n plt.xlabel('Present Value £ '+ '('+phase+')')\n plt.ylabel('Frequency')\n plt.legend(loc='best')\n plt.show()\n\n\ndef setCoverPlot(subcontrols_list,position,positionCL,positionCH,positionEL,positionEH):\n no_constraint = [float('nan') for x in range(len(subcontrols_list))]\n costL = [float('nan') for x in range(len(subcontrols_list))]\n costH = [float('nan') for x in range(len(subcontrols_list))]\n cost_efficacyL = [float('nan') for x in range(len(subcontrols_list))]\n cost_efficacyH = [float('nan') for x in range(len(subcontrols_list))]\n for i in position:\n no_constraint[i] = 1\n for i in positionCL:\n costL[i] = 2\n for i in positionCH:\n costH[i] = 3\n for i in positionEL:\n cost_efficacyL[i] = 4\n for i in positionEH:\n cost_efficacyH[i] = 5\n\n x = np.arange(len(subcontrols_list))\n y = [0,1,2,3,4,5]\n fig, ax = plt.subplots()\n ax.scatter(x,no_constraint,s=70,color='black')\n ax.scatter(x,costL,marker='D',s=60,color='black')\n ax.scatter(x,costH,marker='X',s=60,color='black')\n ax.scatter(x,cost_efficacyL,marker='h',s=80,color='black')\n 
ax.scatter(x,cost_efficacyH,marker='*',s=90,color='black')\n # plt.axhline(y=1, linestyle=':',color='k',alpha=0.2)\n # plt.axhline(y=2, linestyle=':',color='k',alpha=0.2)\n # plt.axhline(y=3, linestyle=':',color='k',alpha=0.2)\n # plt.axhline(y=4, linestyle=':',color='k',alpha=0.2)\n # plt.axhline(y=5, linestyle=':',color='k',alpha=0.2)\n\n # ax.set_ylabel('Subcontrol Selection with')\n ax.set_yticks(y)\n # ax.set_yticklabels([0,'No Constraint','Cost (Level L)','Cost (Level H)', 'Cost and Efficacy\\n(Level L,eff=0.015)', 'Cost and Efficacy\\n(Level H,eff=0.015)'])\n # ax.text(s='Cost and Efficacy\\n(Level L)', x=-6, y=3.7)\n # ax.text(s='Cost and Efficacy\\n(Level H)', x=-12.2, y=4.7)\n ax.set_yticklabels([0,'A','B','C','D','E'])\n ax.text(-1, -3, \"(A) no constraint. (B) budget constraint for subcontrols level L. (C) budget constraint for subcontrols level H. (D) budget and efficacy bound for subcontrols level L. (E) budget and efficacy bound for subcontrols level H.\", color='black', wrap=True,\n bbox=dict(facecolor='none', edgecolor='black', pad=10.0))\n\n ax.set_xlabel('CIS Subcontrols')\n # ax.set_title('Set Cover Problem Control Selection')\n ax.set_xticks(x)\n ax.set_xticklabels(subcontrols_list, rotation=90)\n fig.tight_layout()\n plt.show()\n\n\ndef setCoverEfficacyBoundPlot(subcontrols_list,efficacy_bound,pos_EL,pos_EH):\n cost_efficacyL = [[float('nan') for x in range(len(subcontrols_list))] for x in range(len(pos_EL))]\n cost_efficacyH = [[float('nan') for x in range(len(subcontrols_list))] for x in range(len(pos_EH))]\n efficacy_bound.insert(0,0)\n for i in range(len(pos_EL)):\n if i == 0:\n for x in pos_EL[i]:\n cost_efficacyL[i][x] = 1\n for y in pos_EH[i]:\n cost_efficacyH[i][y] = 1\n elif i == 1:\n for x in pos_EL[i]:\n cost_efficacyL[i][x] = 2\n for y in pos_EH[i]:\n cost_efficacyH[i][y] = 2\n elif i == 2:\n for x in pos_EL[i]:\n cost_efficacyL[i][x] = 3\n for y in pos_EH[i]:\n cost_efficacyH[i][y] = 3\n elif i == 3:\n for x in pos_EL[i]:\n cost_efficacyL[i][x] = 4\n for y in pos_EH[i]:\n cost_efficacyH[i][y] = 4\n\n x = np.arange(len(subcontrols_list))\n y = np.arange(len(efficacy_bound))\n fig, ax = plt.subplots()\n for i in range(len(cost_efficacyL)):\n L = ax.scatter(x,cost_efficacyL[i],s=70,color='steelblue')\n H = ax.scatter(x,cost_efficacyH[i],marker='x',s=60,color='black')\n # plt.axhline(y=i+1, linestyle=':',color='k',alpha=0.2)\n\n ax.set_ylabel('Efficacy Bound')\n ax.set_yticks(y)\n ax.set_yticklabels(efficacy_bound)\n ax.set_xlabel('CIS Subcontrols')\n # ax.set_title('Set cover control selection with cost and efficacy bounds')\n ax.set_xticks(x)\n ax.set_xticklabels(subcontrols_list, rotation=90)\n plt.legend((L, H), ('Level L', 'Level H'), scatterpoints=1)\n fig.tight_layout()\n plt.show()\n\n\ndef knapsackOptimisationPlot(subcontrols_list,position,levels):\n kp_selection = [float('nan') for x in range(len(subcontrols_list))]\n for (i,j) in zip(position,levels):\n if j == 0:\n kp_selection[i] = 1\n else:\n kp_selection[i] = 2\n\n x = np.arange(len(subcontrols_list))\n y = [0,1,2]\n fig, ax = plt.subplots()\n ax.bar(x,kp_selection,color=\"lightseagreen\",alpha=0.8)\n ax.set_yticks(y)\n ax.set_yticklabels([0,'Level L','Level H'])\n ax.set_xlabel('CIS Subcontrols')\n ax.set_xticks(x)\n ax.set_xticklabels(subcontrols_list,rotation=90)\n\n fig.tight_layout()\n plt.show()\n\n\ndef riskPlot(risk_noconstraint,risk_CL,risk_CH,risk_EL,risk_EH,risk_KP,budget):\n\n figure, axes = plt.subplots(1, 3)\n '''ROSI'''\n rosi = []\n 
rosi.append((risk_noconstraint[1]-risk_noconstraint[3]-risk_noconstraint[4])/risk_noconstraint[4])\n rosi.append((risk_CL[1]-risk_CL[3]-risk_CL[4])/risk_CL[4])\n rosi.append((risk_CH[1]-risk_CH[3]-risk_CH[4])/risk_CH[4])\n rosi.append((risk_EL[1]-risk_EL[3]-risk_EL[4])/risk_EL[4])\n rosi.append((risk_EH[1]-risk_EH[3]-risk_EH[4])/risk_EH[4])\n rosi.append((risk_KP[1]-risk_KP[3]-risk_KP[4])/risk_KP[4])\n # df = pd.DataFrame({'eZn^':[risk_noconstraint[3],risk_CL[3],risk_CH[3],risk_EL[3],risk_EH[3]], 'Cost':[risk_noconstraint[4],risk_CL[4],risk_CH[4],risk_EL[4],risk_EH[4]], 'ROSI':rosi})\n df1 = pd.DataFrame({'ROSI':rosi})\n ax = df1.plot(kind=\"barh\",ax=axes[2],color={\"steelblue\"})\n ax.set_yticklabels(['A','B','C','D','E','F'])\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])\n ax.legend(loc='upper center',bbox_to_anchor=(0.5,-0.09),fancybox=True,shadow=True,ncol=1,prop={'size': 9})\n\n '''Risk reduction vs cost'''\n risk_reduction = []\n risk_reduction.append((risk_noconstraint[1]-risk_noconstraint[3]))\n risk_reduction.append((risk_CL[1]-risk_CL[3]))\n risk_reduction.append((risk_CH[1]-risk_CH[3]))\n risk_reduction.append((risk_EL[1]-risk_EL[3]))\n risk_reduction.append((risk_EH[1]-risk_EH[3]))\n risk_reduction.append((risk_KP[1]-risk_KP[3]))\n cost = [risk_noconstraint[4],risk_CL[4],risk_CH[4],risk_EL[4],risk_EH[4],risk_KP[4]]\n\n\n # df = pd.DataFrame({'Residual Risk':risk_reduction})\n df2 = pd.DataFrame({'Residual Expected Impact':[risk_noconstraint[3],risk_CL[3],risk_CH[3],risk_EL[3],risk_EH[3],risk_KP[3]], 'Reduced Expected Impact':risk_reduction})\n ax = df2.plot(kind=\"barh\",stacked=True,ax=axes[0],color={\"indianred\",\"black\"})\n ax.set_yticklabels(['A','B','C','D','E','F'])\n ax.text(0, -3.5, \"(A) Set cover with no constraint. (B) Set cover with budget constraint for subcontrols level L. (C) Set cover with budget constraint for subcontrols level H. (D) Set cover with budget and efficacy bound for subcontrols level L. (E) Set cover with budget and efficacy bound for subcontrols level H. (F) Knapsack Optimisation with budget\", color='black', wrap=True,\n bbox=dict(facecolor='none', edgecolor='black', pad=5.0), fontsize=10)\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])\n\n # Put a legend below current axis\n ax.legend(loc='upper center',bbox_to_anchor=(0.5,-0.09),fancybox=True,shadow=True,ncol=1,prop={'size': 9})\n\n df3 = pd.DataFrame({'Cost':cost})\n ax = df3.plot(kind=\"barh\",ax=axes[1],color={\"gray\"})\n ax.axvline(x=budget, linestyle=':',color='k',alpha=0.2,label='Budget = £'+str(budget))\n ax.set_yticklabels(['A','B','C','D','E','F'])\n # ax.set_xlabel('Budget='+str(budget),fontsize=10)\n # ax.text(budget-10,-0.8,budget)\n # ax.text(-1.4, -2.8, \"(A) Set cover with no constraint. (B) Set cover with budget constraint for subcontrols level L. (C) Set cover with budget constraint for subcontrols level H. (D) Set cover with budget and efficacy bound for subcontrols level L. (E) Set cover with budget and efficacy bound for subcontrols level H. 
(F) Knapsack Optimisation with budget\", color='black', wrap=True,\n # bbox=dict(facecolor='none', edgecolor='black', pad=10.0))\n # ax.get_legend()\n # plt.legend()\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8])\n ax.legend(loc='upper center',bbox_to_anchor=(0.5,-0.09),fancybox=True,shadow=True,ncol=1,prop={'size': 9})\n\n plt.show()\n\n\ndef knapsackRiskPlot(risk_KP_list,budget_list):\n eZn = []\n eZn_cap = []\n cost = []\n rosi = []\n reduced_risk = []\n for i in risk_KP_list:\n eZn.append(i[1])\n eZn_cap.append(i[3])\n cost.append(i[4])\n rosi.append((i[1]-i[3]-i[4])/i[4])\n reduced_risk.append(i[1]-i[3])\n\n print(f'eZn_cap:{eZn_cap}')\n print(f'rosi:{rosi}')\n\n # df = pd.DataFrame({'eZn_cap':eZn_cap})\n # ax = df.plot.line(color={\"indianred\"})\n # plt.plot(budget_list,eZn_cap,color='indianred',label='residual')\n plt.plot(budget_list,rosi,color='steelblue',label='rosi')\n plt.plot(budget_list,reduced_risk,color='black',label='Reduced Expected Impact')\n plt.xlabel('Budget')\n\n # fig.tight_layout()\n plt.legend(loc='best')\n plt.show()\n","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":10695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"261804201","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nРабота с проф. стандартами\n\"\"\"\nfrom sqlalchemy import Column, DateTime, ForeignKey, Integer, String, text, Text, TIMESTAMP, Float, JSON, Date, Numeric, Table\nfrom sqlalchemy.orm import relationship, exc\nfrom sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta\nimport logging\n\nBase = declarative_base()\nmetadata = Base.metadata\n\n\nclass ProfStandard(Base):\n __tablename__ = 'prof_standard'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_standard_id_seq'::regclass)\"))\n code = Column(String(16), nullable=False, unique=True)\n name = Column(String(1024), nullable=False)\n date_accepted = Column(String(1024))\n tf_cnt = Column(Integer)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n\nclass ProfStandardOkso(Base):\n __tablename__ = 'prof_standard_okso'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_standard_okso_id_seq'::regclass)\"))\n id_prof_standard = Column(ForeignKey('data.prof_standard.id'), nullable=False)\n code_okso = Column(String(16), nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_standard = relationship('ProfStandard')\n\n\nclass ProfStandardOkved(Base):\n __tablename__ = 'prof_standard_okved'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_standard_okved_id_seq'::regclass)\"))\n id_prof_standard = Column(ForeignKey('data.prof_standard.id'), nullable=False)\n code_okved = Column(String(16), nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_standard = relationship('ProfStandard')\n\n\nclass ProfStandardOkz(Base):\n __tablename__ = 'prof_standard_okz'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_standard_okz_id_seq'::regclass)\"))\n id_prof_standard = Column(ForeignKey('data.prof_standard.id'), 
nullable=False)\n code_okz = Column(String(16), nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_standard = relationship('ProfStandard')\n\n\nclass ProfTf(Base):\n __tablename__ = 'prof_tf'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_tf_id_seq'::regclass)\"))\n id_prof_standard = Column(ForeignKey('data.prof_standard.id'), nullable=False)\n level = Column(String(16), nullable=False)\n num = Column(Integer, nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_standard = relationship('ProfStandard')\n\n\nclass ProfTfAccessReq(Base):\n __tablename__ = 'prof_tf_access_reqs'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_tf_access_reqs_id_seq'::regclass)\"))\n id_prof_tf = Column(ForeignKey('data.prof_tf.id'), nullable=False)\n access_req = Column(String(1024), nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_tf = relationship('ProfTf')\n\n\nclass ProfTfEducReq(Base):\n __tablename__ = 'prof_tf_educ_reqs'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_tf_educ_reqs_id_seq'::regclass)\"))\n id_prof_tf = Column(ForeignKey('data.prof_tf.id'), nullable=False)\n educ_req = Column(String(4000), nullable=False)\n remark = Column(String(4000))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_tf = relationship('ProfTf')\n\n\nclass ProfTfOkdptr(Base):\n __tablename__ = 'prof_tf_okdptr'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_tf_okdptr_id_seq'::regclass)\"))\n id_prof_tf = Column(ForeignKey('data.prof_tf.id'), nullable=False)\n code_okdptr = Column(String(32), nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_tf = relationship('ProfTf')\n\n\nclass ProfTfOkso(Base):\n __tablename__ = 'prof_tf_okso'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_tf_okso_id_seq'::regclass)\"))\n id_prof_tf = Column(ForeignKey('data.prof_tf.id'), nullable=False)\n code_okso = Column(String(32), nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_tf = relationship('ProfTf')\n\n\nclass ProfTfProfession(Base):\n __tablename__ = 'prof_tf_professions'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_tf_professions_id_seq'::regclass)\"))\n id_prof_tf = Column(ForeignKey('data.prof_tf.id'), nullable=False)\n prof = Column(String(1024), nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n prof_tf = relationship('ProfTf')\n\n\nclass ProfTfStageReq(Base):\n __tablename__ = 'prof_tf_stage_reqs'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.prof_tf_stage_reqs_id_seq'::regclass)\"))\n id_prof_tf = Column(ForeignKey('data.prof_tf.id'), nullable=False)\n stage_req = Column(String(1024), nullable=False)\n remark = Column(String(1024))\n load_date = Column(DateTime, server_default=text(\"now()\"))\n\n 
prof_tf = relationship('ProfTf')\n\n\nclass UtlSPOGosFgos(Base):\n __tablename__ = 'utl_spo_gos_fgos'\n __table_args__ = {'schema': 'data'}\n\n id = Column(Integer, primary_key=True, server_default=text(\"nextval('data.utl_spo_gos_fgos_id_seq'::regclass)\"))\n code_fgos = Column(String(32), nullable=False)\n name_fgos = Column(String(1024), nullable=False)\n code_gos = Column(String(32), nullable=False)\n name_gos = Column(String(1024), nullable=False)\n\n\n\ndef get_ps_standart(sess, vcode):\n \"\"\"\n возврат объекта проф. стандарта, если такого нет, то возвращается новый\n :param sess: сеанс\n :param vcode: код стандарта\n :return: объект, либо полный, либо только с code\n \"\"\"\n try:\n res = sess.query(ProfStandard).filter(ProfStandard.code == vcode).one()\n except exc.NoResultFound:\n res = ProfStandard(code=vcode)\n return res\n\n\ndef get_ps_okz(sess, ps, vcode):\n \"\"\"\n возврат ОКЗ для проф. стандарта, если такого нет, то возвращается новый\n :param sess: сеанс\n :param ps: проф. стандарт\n :param vcode: код ОКЗ\n :return: объект, либо полный, либо только с code\n \"\"\"\n try:\n res = sess.query(ProfStandardOkz).filter(ProfStandardOkz.prof_standard == ps,\n ProfStandardOkz.code_okz == str(vcode)).one()\n except exc.NoResultFound:\n res = ProfStandardOkz(prof_standard=ps, code_okz=str(vcode))\n return res\n\n\ndef get_ps_okved(sess, ps, vcode):\n \"\"\"\n возврат ОКВЭД проф. стандарта, если такого нет, то возвращается новый\n :param sess: сеанс\n :param ps: проф. стандарт\n :param vcode: код ОКВЭД\n :return: объект, либо полный, либо только с code\n \"\"\"\n try:\n res = sess.query(ProfStandardOkved).filter(ProfStandardOkved.prof_standard == ps,\n ProfStandardOkved.code_okved == str(vcode)).one()\n except exc.NoResultFound:\n res = ProfStandardOkved(prof_standard=ps, code_okved=str(vcode))\n return res\n\n\ndef get_ps_okso(sess, ps, vcode):\n \"\"\"\n возврат ОКСО проф. стандарта, если такого нет, то возвращается новый\n :param sess: сеанс\n :param ps: проф. стандарт\n :param vcode: код ОКСО\n :return: объект, либо полный, либо только с code\n \"\"\"\n try:\n res = sess.query(ProfStandardOkso).filter(ProfStandardOkso.prof_standard == ps,\n ProfStandardOkso.code_okso == str(vcode)).one()\n except exc.NoResultFound:\n res = ProfStandardOkso(prof_standard=ps, code_okso=str(vcode))\n return res\n\n\ndef get_ps_tf(sess, ps, vnum, lev):\n \"\"\"\n возврат ОКСО проф. стандарта, если такого нет, то возвращается новый\n :param sess: сеанс\n :param ps: проф. 
стандарт\n :param vnum: номер\n :param lev: уровень\n :return: объект, либо полный, либо только с num\n \"\"\"\n tclass = ProfTf\n try:\n res = sess.query(tclass).filter(tclass.prof_standard == ps,\n tclass.num == vnum).one()\n except exc.NoResultFound:\n res = tclass(prof_standard=ps, num=vnum)\n res.level = lev\n return res\n\n\ndef get_ps_tf_prof(sess, ps_tf, vprof):\n \"\"\"\n\n :param sess:\n :param ps_tf:\n :param vprof:\n :return:\n \"\"\"\n tclass = ProfTfProfession\n try:\n res = sess.query(tclass).filter(tclass.prof_tf == ps_tf,\n tclass.prof == vprof).one()\n except exc.NoResultFound:\n res = tclass(prof_tf=ps_tf, prof=vprof)\n return res\n\n\ndef get_ps_tf_stage(sess, ps_tf, stage):\n \"\"\"\n\n :param sess:\n :param ps_tf:\n :param stage:\n :return:\n \"\"\"\n tclass = ProfTfStageReq\n try:\n res = sess.query(tclass).filter(tclass.prof_tf == ps_tf,\n tclass.stage_req == stage).one()\n except exc.NoResultFound:\n res = tclass(prof_tf=ps_tf, stage_req=stage)\n return res\n\n\ndef get_ps_tf_okso(sess, ps_tf, okso):\n \"\"\"\n\n :param sess:\n :param ps_tf:\n :param okso:\n :return:\n \"\"\"\n tclass = ProfTfOkso\n try:\n res = sess.query(tclass).filter(tclass.prof_tf == ps_tf,\n tclass.code_okso == str(okso)).one()\n except exc.NoResultFound:\n res = tclass(prof_tf=ps_tf, code_okso=str(okso))\n return res\n\n\ndef get_ps_educ_reqs(sess, ps_tf, educ):\n \"\"\"\n\n :param sess:\n :param ps_tf:\n :param educ:\n :return:\n \"\"\"\n tclass = ProfTfEducReq\n try:\n res = sess.query(tclass).filter(tclass.prof_tf == ps_tf,\n tclass.educ_req == educ).one()\n except exc.NoResultFound:\n res = tclass(prof_tf=ps_tf, educ_req=educ)\n return res\n\n\ndef get_ps_okdptr(sess, ps_tf, okdptr):\n \"\"\"\n\n :param sess:\n :param ps_tf:\n :param okdptr:\n :return:\n \"\"\"\n tclass = ProfTfOkdptr\n try:\n res = sess.query(tclass).filter(tclass.prof_tf == ps_tf,\n tclass.code_okdptr == str(okdptr)).one()\n except exc.NoResultFound:\n res = tclass(prof_tf=ps_tf, code_okdptr=str(okdptr))\n return res\n\n\ndef get_ps_access_reqs(sess, ps_tf, access):\n \"\"\"\n :param sess:\n :param ps_tf:\n :param access:\n :return:\n \"\"\"\n tclass = ProfTfAccessReq\n try:\n res = sess.query(tclass).filter(tclass.prof_tf == ps_tf,\n tclass.access_req == str(access)).one()\n except exc.NoResultFound:\n res = tclass(prof_tf=ps_tf, access_req=str(access))\n return res\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG, format='%(lineno)d %(asctime)s %(message)s')\n","sub_path":"entity/prof_standards.py","file_name":"prof_standards.py","file_ext":"py","file_size_in_byte":12061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381149078","text":"import os\nimport time \nimport csv\n\ncols = ['t','dmv_confirmed','dmv_deaths','dmv_recovered','us_confirmed','us_deaths','us_recovered']\n\nrootdir = 'csse_covid_19_data/csse_covid_19_daily_reports'\noutfilename = f'report_{int(time.time())}.csv'\n\nwith open(outfilename, 'a') as outfile:\n outfile.write(','.join(str(x) for x in cols) + '\\n')\n writer = csv.writer(outfile)\n for subdir, dirs, filenames in os.walk(rootdir):\n for filename in sorted(filenames):\n if filename.endswith('csv'):\n with open(f'{rootdir}/{filename}') as file:\n stats = dict(zip(cols, [0] * len(cols)))\n stats['t'] = filename.split('.')[0];\n file.readline(); # chomp first line\n source = csv.reader(file)\n for source_row in source:\n country = source_row[1];\n if country != 'US':\n continue\n state = source_row[0];\n dmv = 
(state == 'Virginia' or state == 'Maryland' or state == 'District of Columbia')\n confirmed = 0\n deaths = 0\n recovered = 0\n\n try:\n confirmed = int(source_row[3]);\n deaths = int(source_row[4]);\n recovered = int(source_row[5]);\n except: \n pass\n \n if dmv:\n stats['dmv_confirmed'] += confirmed\n stats['dmv_deaths'] += deaths\n stats['dmv_recovered'] += recovered\n\n stats['us_confirmed'] += confirmed\n stats['us_deaths'] += deaths\n stats['us_recovered'] += recovered\n\n writer.writerow(str(stats[col]) for col in cols);\n","sub_path":"generate_report.py","file_name":"generate_report.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"133958106","text":"from vpython import *\nfrom math import sin, cos, radians\nimport argparse\nimport numpy as np\nimport pprint as pp\nimport matplotlib.pyplot as plt\n\n\n\n\ndef set_scene(data):\n \"\"\"\n Set Vpython Scene\n param: data = dictionary with all data\n \"\"\"\n scene.title = \"Assignment 5: Projectile motion\"\n scene.width = 800\n scene.heigth = 600\n scene.caption = \"\"\"Right button drag or Ctrl-drag to rotate \"camera\" to view scene.\n To zoom, drag with middle button or Alt/Option depressed, or use scroll wheel.\n On a two-button mouse, middle is left + right.\n Touch screen: pinch/extend to zoom, swipe or two-finger rotate.\"\"\"\n scene.forward = vector(0, -.3, -1)\n scene.x = -1\n # Set background: floor, table, etc\n\n\ndef motion_no_drag(data):\n \"\"\"\n Create animation for projectile motion with no dragging force\n param: dictionary with all data\n \"\"\"\n ball_nd = sphere(pos=vector(0, data['init_height'], 0),\n radius=1, color=color.cyan, make_trail=True)\n \n # # Follow the movement of the ball\n scene.camera.follow(ball_nd)\n\n # Create lists of x and y values of motion\n index = 1 \n y_values = [data['init_height']] #list of all y positions\n y_velocities = [data['init_y_vel']] # list of y velocities\n x_values = [0] #list of all x positions\n while y_values[index-1] > 0 or index == 1:\n new_x = x_values[index - 1] + data['init_x_vel'] * data['deltat'] # find next x position\n x_values.append(new_x) #add generated x position to list\n \n new_y_vel = y_velocities[index - 1] + data['gravity'] * data['deltat'] # find new y velocity\n y_velocities.append(new_y_vel)\n new_y = y_values[index - 1] + new_y_vel * data['deltat'] #fine new y position\n y_values.append(new_y) #add new position to the list\n index += 1\n\n #create scenery elements (ground and mountains)\n ground = box(pos=vector(x_values[-1]/2,-1,-x_values[-1]/4 + 10), color=color.green, size=vector(x_values[-1] + 20, 1, x_values[-1]/2))\n mount1 = cone(pos=vector(3 * x_values[-1] / 8 - 20,-1,-3 * x_values[-1] / 8), axis=vector(0, max(y_values), 0), radius=(3 * x_values[-1] / 8), color=color.white)\n mount2 = cone(pos=vector(x_values[-1] * .825 - 20,-1,-3 * x_values[-1] / 8), axis=vector(0, max(y_values) * 2, 0), radius=(3 * x_values[-1] / 8), color=color.white)\n #Animate\n\n #loop through lists to change the position of the ball\n pos_index = 0\n while pos_index < len(y_values):\n rate(500)\n position = vector(x_values[pos_index], y_values[pos_index], 0)\n ball_nd.pos = position\n pos_index += 1\n \n #add positions to the data dictionary to be graphed\n data['x_no_drag'] = x_values\n data['y_no_drag'] = y_values\n\n\n\ndef motion_drag(data):\n \"\"\"\n Create animation for projectile motion with dragging force\n param: data = dictionary with all data\n \"\"\"\n 
ball_nd = sphere(pos=vector(0, data['init_height'], 0),\n radius=1, color=color.magenta, make_trail=True)\n \n # # Follow the movement of the ball\n scene.camera.follow(ball_nd)\n # Create lists of x and y values of motion\n index = 1\n x_vel = data['init_x_vel']\n y_values = [data['init_height']]\n y_vel = data['init_y_vel']\n x_values = [0]\n while y_values[index-1] > 0 or index == 1:\n # new_x = x_values[index - 1] + data['init_x_vel'] * data['deltat'] # find next x position\n x_vel = x_vel + data['x_drag_accel'] * data['deltat']\n # data['x_drag_accel'] = data['x_drag_accel'] - x_vel * data['beta'] #update acceleration based on new velocity\n new_x = x_values[index - 1] + x_vel * data['deltat']\n x_values.append(new_x) #add generated x position to list\n \n y_vel = y_vel + data['y_drag_accel'] * data['deltat'] # find new y velocity\n new_y = y_values[index - 1] + y_vel * data['deltat']\n y_values.append(new_y)\n\n index += 1\n #Animate\n pos_index = 0\n while pos_index < len(y_values):\n rate(500)\n position = vector(x_values[pos_index], y_values[pos_index], 0)\n ball_nd.pos = position\n pos_index += 1\n \n #add lists of positions to the data dictionary to be graphed\n data['x_drag'] = x_values\n data['y_drag'] = y_values\n \n \ndef plot_data(data):\n \"\"\"\n Use lists of positions with and without drag\n to create a graph\n param: data = dictionary with all data\n \"\"\"\n # Create canvas with two plots on one graph\n plt.figure()\n plt.title(\"Position with and without drag force\")\n plt.plot(data[\"x_no_drag\"], data[\"y_no_drag\"], \"g-\", label=\"Position without Drag\") #plot x vs y position without drag\n plt.ylabel(\"Y Position (m)\")\n plt.xlabel(\"X Position (m)\")\n\n plt.plot(data[\"x_drag\"], data[\"y_drag\"], \"b-\", label=\"Position with Drag\") #plot x vs y position with drag\n plt.legend()\n plt.show() # display plot\n \n\n\ndef main():\n \"\"\"\n Main method\n \"\"\"\n # 1) Parse the arguments\n parser = argparse.ArgumentParser(description=\"Projectile Motion\")\n parser.add_argument(\"--velocity\", \"-v\", action=\"store\", help=\"velocity in m/s\", dest=\"velocity\", type=float, required=\"true\")\n parser.add_argument(\"--angle\", \"-a\", action=\"store\", help=\"angle in degrees\", dest=\"angle\", type=float, required=\"true\")\n parser.add_argument(\"--height\", action=\"store\", help=\"height in meters\", dest=\"height\", type=float, default=1.2)\n\n args = parser.parse_args()\n # Set Variables\n data = {} # empty dictionary for all data and variables\n data['init_height'] = args.height # y-axis \n data['init_velocity'] = args.velocity # m/s\n data['theta'] = args.angle # degrees\n\n rad_angle = radians(args.angle) # angle in radians\n data['init_x_vel'] = cos(rad_angle) * args.velocity # velocity in the x-direction\n data['init_y_vel'] = sin(rad_angle) * args.velocity # velocity in the y-direction\n\n # Constants\n data['rho'] = 1.225 # kg/m^3, density\n data['Cd'] = 0.5 # coefficient friction\n data['deltat'] = 0.005\n data['gravity'] = -9.8 # m/s^2\n\n data['ball_mass'] = 0.145 # kg\n data['ball_radius'] = 0.075 # meters\n data['ball_area'] = pi * data['ball_radius']**2\n data['alpha'] = data['rho'] * data['Cd'] * data['ball_area'] / 2.0\n data['beta'] = data['alpha'] / data['ball_mass']\n\n #acceleration when the ball experiences drag\n data['x_drag_accel'] = - data['beta'] * data['init_x_vel']\n data['y_drag_accel'] = data['gravity'] - data['beta'] * data['init_y_vel'] \n # Set Scene\n set_scene(data)\n # 2) No Drag Animation\n motion_no_drag(data)\n # 3) 
Drag Animation\n motion_drag(data)\n # 4) Plot Information: extra credit\n plot_data(data)\n\n\nif __name__ == \"__main__\":\n main()\n exit(0)\n","sub_path":"lab5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"27652530","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport six\n\nfrom requests_mock import adapter\nfrom requests_mock import response\nfrom requests_mock.tests import base\n\n\nclass ResponseTests(base.TestCase):\n\n def setUp(self):\n super(ResponseTests, self).setUp()\n self.method = 'GET'\n self.url = 'http://test.url/path'\n self.request = adapter._RequestObjectProxy._create(self.method,\n self.url,\n {})\n\n def create_response(self, **kwargs):\n return response.create_response(self.request, **kwargs)\n\n def test_create_response_body_args(self):\n self.assertRaises(RuntimeError,\n self.create_response,\n raw='abc',\n body='abc')\n\n self.assertRaises(RuntimeError,\n self.create_response,\n text='abc',\n json={'a': 1})\n\n def test_content_type(self):\n self.assertRaises(TypeError, self.create_response, text=55)\n self.assertRaises(TypeError, self.create_response, text={'a': 1})\n\n def test_text_type(self):\n self.assertRaises(TypeError, self.create_response, content=six.u('t'))\n self.assertRaises(TypeError, self.create_response, content={'a': 1})\n\n def test_json_body(self):\n data = {'a': 1}\n resp = self.create_response(json=data)\n\n self.assertEqual('{\"a\": 1}', resp.text)\n self.assertIsInstance(resp.text, six.string_types)\n self.assertIsInstance(resp.content, six.binary_type)\n self.assertEqual(data, resp.json())\n\n def test_body_body(self):\n value = 'data'\n body = six.BytesIO(six.b(value))\n resp = self.create_response(body=body)\n\n self.assertEqual(value, resp.text)\n self.assertIsInstance(resp.text, six.string_types)\n self.assertIsInstance(resp.content, six.binary_type)\n","sub_path":"requests_mock/tests/test_response.py","file_name":"test_response.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"486520232","text":"def CalcMes():\r\n soma = \"0\"\r\n contas = []\r\n nomec = []\r\n cont = 0\r\n while (soma != \"1\"):\r\n nomec.append(input(\"Crie um rótulo para a conta: \"))\r\n contas.append(float(input(\"Insira o valor: \")))\r\n cont = cont + 1\r\n soma = input(\"Para parar, digite 1!\")\r\n\r\n for i in range(cont):\r\n print(nomec[i],\":\",contas[i])\r\n result = sum(contas)\r\n print(\"O somatório das contas é igual a:\",result)\r\n \r\nCalcMes()\r\n\r\n","sub_path":"CalcMes.py","file_name":"CalcMes.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"475306494","text":"# This file is python code\n\nimport os\n\nDecider('timestamp-match')\n\ncommon_files = Glob(\"src/*.cpp\") + Glob(\"src/graphics/*.cpp\") + 
Glob(\"src/graphics/frustum/*.cpp\") + Glob(\"src/net/*.cpp\");\nserver_files = Glob(\"src/dedicated/*.cpp\")\nclient_files = Glob(\"src/main/*.cpp\")\neditor_files = Glob(\"src/editor/*.cpp\")\nloader3ds_files = Glob(\"src/loader_3ds/*.cpp\")\n\ninclude_dirs = ['src', 'src/graphics'] + os.environ['C_INCLUDE_PATH'].split(':')\n#libs = ['boost_system-gcc41-mt-1_39']\nlibs = ['SDL', 'SDL_mixer', 'GL', 'GLU', 'png', 'GLEW']\nlib_dirs = [os.environ['LD_LIBRARY_PATH'].split(':'), './lib/']\nenv = Environment(CPPPATH = include_dirs, LIBS = libs, LIBPATH = lib_dirs)\nenv.ParseConfig('pkg-config --cflags --libs sdl')\n\ncommon_flags = '-Wall -Wextra -Werror -std=c++0x -pedantic'\n\n\n\nopt = env.Clone(CCFLAGS = common_flags + ' -O3', LINKFLAGS = '-O3')\noptcommon = opt.Object(common_files)\noptclient = opt.Program('bin/client', optcommon + opt.Object(client_files))\noptserver = opt.Program('bin/server', optcommon + opt.Object(server_files))\nopteditor = opt.Program('bin/editor', optcommon + opt.Object(editor_files))\noptloader_3ds = opt.Program('bin/loader_3ds', opt.Object(loader3ds_files))\nopt.Alias('client', 'bin/client')\nopt.Alias('server', 'bin/server')\nopt.Alias('editor', 'bin/editor')\nopt.Alias('loader3ds', 'bin/loader_3ds')\n\ndbg = env.Clone(CCFLAGS = common_flags + ' -g -O0', LINKFLAGS = '-g')\ndbgcommon = dbg.Object(common_files, OBJPREFIX = 'debug-')\ndebugclient = dbg.Program('bin/debug-client', dbgcommon + dbg.Object(client_files, OBJPREFIX = 'debug-'))\ndebugserver = dbg.Program('bin/debug-server', dbgcommon + dbg.Object(server_files, OBJPREFIX = 'debug-'))\ndebugeditor = dbg.Program('bin/debug-editor', dbgcommon + dbg.Object(editor_files, OBJPREFIX = 'debug-'))\ndbg.Alias('debug', 'bin/debug-client')\ndbg.Alias('debug', 'bin/debug-server')\ndbg.Alias('debug', 'bin/debug-editor')\n\nprof = env.Clone(CCFLAGS = common_flags + ' -pg -O3 -D NDEBUG', LINKFLAGS = '-pg -O3')\nprofcommon = prof.Object(common_files, OBJPREFIX = 'profile-')\nprofileclient = prof.Program('bin/profile-client', profcommon + prof.Object(client_files, OBJPREFIX = 'profile-'))\nprofileserver = prof.Program('bin/profile-server', profcommon + prof.Object(server_files, OBJPREFIX = 'profile-'))\nprofileeditor = prof.Program('bin/profile-editor', profcommon + prof.Object(editor_files, OBJPREFIX = 'profile-'))\nprof.Alias('profile', 'bin/profile-client')\nprof.Alias('profile', 'bin/profile-server')\nprof.Alias('profile', 'bin/profile-editor')\n\n\nDefault(optclient, optserver, opteditor)\n\n\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330247770","text":"from urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nfrom discord.ext import commands\nimport json\nimport time\nimport discord\nfrom discord.ext.commands import Bot\nimport logging\n\n\nclass senate:\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def senatecount(self):\n req = Request('http://oppressive.games/power/bill.php?bill=', headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) '\n 'Version/7.0.3 Safari/7046A194A'})\n html = str(urlopen(req).read())\n s = BeautifulSoup(html, \"lxml\")\n second_req = Request('http://oppressive.games/power/senate.php', headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) '\n 'Version/7.0.3 
Safari/7046A194A'})\n senator_html = str(urlopen(second_req).read())\n s2 = BeautifulSoup(senator_html, \"lxml\").find_all('table')[1]\n senator_data = [[cell.text for cell in row(\"td\")]\n for row in s2(\"tr\")]\n senator_data = [cell for cell in senator_data if 'Democratic Party' in ''.count(cell)]\n # senator_data = [cell for cell in senator_data if 'Democratic Party' in cell]\n print(senator_data)\n await self.bot.say(str(senator_data))\n \n \ndef setup(bot):\n bot.add_cog(senate(bot))\n","sub_path":"senate.py","file_name":"senate.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"96343238","text":"from .captchaRecognizeMain import CAPTCHA_SET, captchaRecognize\nfrom ..utils import Post\nimport os\nimport random\nimport requests\n\n\n__all__ = [\"createTestSet\", \"cropImage\", \"CAPTCHA_SET_PATH\",\n \"CURRENT_DIR\", \"getRequsetCaptcha\"]\n\n\nCURRENT_DIR = os.path.dirname(__file__)\nCAPTCHA_SET_PATH = CURRENT_DIR + \"/captcha_set\"\n\n\ndef getRequsetCaptcha(headers, telephone_number, dir_path=None, captcha_name=\"captcha\"):\n ''' 获取验证码 '''\n captcha_params = {\n \"captcha_str\": telephone_number,\n }\n\n captcha_url = \"https://h5.ele.me/restapi/eus/v3/captchas\"\n\n captcha_json = Post(captcha_url, headers=headers, jsons=captcha_params).json\n captcha_hash = captcha_json[\"captcha_hash\"]\n b64data = captcha_json['captcha_image']\n filepath, extension = Post.base64decode(b64data, captcha_name, dir_path)\n return filepath, extension, captcha_hash\n\n\ndef cropImage(binary_object, letters, extension, dir_path=\".\", captcha_name=\"captcha\"):\n \"\"\" 分割图片,使用md5哈希命名 \"\"\"\n image_objects = []\n count = 0\n for letter in letters:\n # 四元组,左、上、右、下\n temp_object = binary_object.crop(\n (letter[0], 0, letter[1], binary_object.size[1]))\n image_path = \"%s/%s.%s\" % (dir_path,\n captcha_name + f\"___{count+1}\", extension)\n temp_object.convert(\"RGB\").save(image_path)\n image_objects.append(temp_object)\n count += 1\n\n return image_objects\n\n\ndef splitCaptcha(captcha_name=\"captcha\"):\n ''' 请求并分割验证码, 将结果放入captcha_set目录,需要人工筛选放入对应的子目录 '''\n headers = {\n \"referer\": \"https://h5.ele.me/login/\"\n }\n telephone_numbers = [x for x in range(10)]\n telephone_heads = [\"1581\", \"1861\", \"1355\", \"1760\"]\n\n # 构造电话号码\n telephone_number = random.choice(telephone_heads)\n for i in range(7):\n telephone_number += str(random.choice(telephone_numbers))\n\n # 请求验证码\n filepath, extension, captcha_hash = getRequsetCaptcha(headers, telephone_number,\n dir_path=CAPTCHA_SET_PATH, captcha_name=captcha_name)\n\n # 扫描验证码\n binary_object, letters, extension = captchaRecognize(\n filepath, extension, captcha_name=captcha_name)\n\n # 分割验证码字符\n cropImage(binary_object, letters, extension,\n CAPTCHA_SET_PATH, captcha_name=captcha_name)\n if len(letters) < 4:\n raise\n\n return captcha_hash\n\n\ndef createTestSet(captcha_set_path=None, captcha_set=None, captcha_numbers=1):\n ''' 创建训练数据集,目录为captcha_set_path, captcha_set为验证码可能包含的文字或者字母等的list '''\n global CAPTCHA_SET_PATH\n global CAPTCHA_SET\n\n if captcha_set_path:\n captcha_set_path = captcha_set_path\n else:\n captcha_set_path = CAPTCHA_SET_PATH\n\n if captcha_set:\n captcha_set = captcha_set\n else:\n captcha_set = CAPTCHA_SET\n\n print(captcha_set_path)\n # 创建captcha_set目录及其子目录\n if os.system(f\"mkdir '{captcha_set_path}'\"):\n for capt in captcha_set:\n dir_path = captcha_set_path + '/' + capt\n os.system(f\"mkdir '{dir_path}'\")\n\n # 
# request captcha_numbers times\n    for i in range(captcha_numbers):\n        # one request per captcha\n        splitCaptcha(f\"captcha__{str(i+1)}\")\n","sub_path":"crawlerUtils/captcha/captchaTestSetCreate.py","file_name":"captchaTestSetCreate.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14574768","text":"from phidl import LayerSet\nimport numpy as np\n\n#%%\ndef vt_layers():\n    vt_lyrs = LayerSet()\n    \n    color_mat = gds_colors()\n    \n    #gds layer name, layer number, data type, description, color from color list, dither\n    layer_data = [['m1',40,0,'wiring for stf and r1',32,'I3'],\n                  ['m1e',40,1,'wiring for stf and r1 endpoint',32,'I2'],\n                  ['m1f',40,2,'wiring for stf and r1 fill',32,'I1'],\n                  ['m1l',40,3,'wiring for stf and r1 label',32,'I1'],\n                  ['m1p',40,4,'wiring for stf and r1 pad',32,'I4'],\n                  ['stf',10,0,'superconducting thin film for spds and inductors',3,'I5'],\n                  ['stfe',10,1,'superconducting thin film endpoint',3,'I1'],\n                  ['stff',10,2,'superconducting thin film fill',3,'I1'],\n                  ['stfp',10,3,'superconducting thin film pad',3,'I3'],\n                  ['r1',30,0,'spd and loop resistor',18,'I5'],\n                  ['r1f',30,2,'spd and loop resistor fill',18,'I1'],\n                  ['r1p',30,3,'spd and loop resistor pad',18,'I3'],\n                  ['v1',50,0,'via from m1 to m2',11,'I5'],\n                  ['v1e',50,1,'v1 endpoint',11,'I1'], \n                  ['m2',41,0,'ground plane',32,'I1'],\n                  ['m2e',41,1,'m2 endpoint',32,'I1'],\n                  ['m2l',41,3,'m2 label',32,'I0'],\n                  ['m2i',41,4,'m2 invert',32,'I1'],\n                  ['m2o',41,5,'m2 offset',32,'I2'],#postprocessing will trace around this layer to allow vias\n                  ['m2m',41,6,'m2 moats',32,'I2'], \n                  ['v2',51,0,'via from m2 to jj1, jj2, or m1',11,'I6'],\n                  ['v2e',51,1,'v2 endpoint',11,'I1'],\n                  ['jj1',21,0,'jj bottom contact / m3',14,'I4'],\n                  ['jj1e',21,1,'jj bottom contact endpoint',14,'I2'],\n                  ['jj1f',21,2,'jj bottom contact fill',14,'I1'], \n                  ['jj2',22,0,'jj top contact',13,'I5'],\n                  ['jj2e',22,1,'jj top contact endpoint',13,'I2'],\n                  ['jj2f',22,2,'jj top contact fill',13,'I1'],\n                  ['v3',52,0,'via to jj top and bottom contacts',11,'I8'],\n                  ['v3e',52,1,'v3 endpoint',11,'I5'],\n                  ['m3',42,0,'jj contact metal',34,'I9'],\n                  ['m3e',42,1,'jj contact metal endpoint',34,'I1'],\n                  ['m3f',42,2,'jj contact metal fill',34,'I1'],\n                  ['m3l',42,3,'jj contact metal label',34,'I1'],\n                  ['m3p',42,4,'jj contact metal pad',34,'I4'],\n                  ['m3cs',43,6,'m3 label',34,'I0'],\n                  ['r2',31,0,'jj shunt resistor',8,'I9'],\n                  ['r2f',31,2,'jj shunt resistor fill',8,'I1'],\n                  ['r2p',31,3,'jj shunt resistor pad',8,'I3'],\n                  ['v4',54,0,'via to r2 / pad opening',16,'I1'],\n                  ['v4e',54,1,'v4 endpoint',16,'I2'], \n                  ['pkg',60,0,'SU8 packaging layer',1,'I1'], \n                  ['pkfc',60,1,'fiber core dummy layer',21,'I1'],\n                  ['ipm1',19,0,'inductor port m1',11,'I1'],\n                  ['ipj1',19,0,'inductor port jj1',11,'I1'],\n                  ['ipm3',19,0,'inductor port m3',11,'I1'],\n                  ['ipl',19,0,'inductor port labels',11,'I1'],\n                  ['pl',95,0,'pad locations',47,'I1'],\n                  ['ce',96,0,'chip edge',48,'I1'],\n                  ['dp',99,10,'data prep dummy',8,'I1'],\n                  ]\n    \n#                  ['v4',53,0,'via from m4 to m3',11,'I9'],\n#                  ['v4e',53,1,'via from m4 to m3 endpoint',11,'I1'], \n#                  ['m4',43,0,'upper metal wiring',34,'I9'],\n#                  ['m4e',43,1,'m4 endpoint',34,'I2'],\n#                  ['m4f',43,2,'m4 fill',34,'I1'],\n#                  ['m4l',43,3,'m4 label',34,'I0'],\n#                  ['m4cs',43,6,'m4 label',34,'I0'],\n#                  ['r3',32,0,'resistor / pad cap',19,'I9'],\n#                  ['r3f',32,1,'resistor / pad cap fill',16,'I1'],\n#                  ['v5',54,0,'via to r3 / pad opening',16,'I1'],\n#                  ['v5e',54,1,'v4 endpoint',11,'I2'],\n    \n    \n    num_layers = len(layer_data) \n
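# each column of color_mat holds an RGB triple in [0,1]; the loop below
# rescales it to 0-255 and formats a '#rrggbb' hex string (clamp guards rounding)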
for ii in range(num_layers): \n        color_number = layer_data[ii][4]\n        color_hex = '#{0:02x}{1:02x}{2:02x}'.format(clamp(color_mat[:,color_number-1][0]*256), clamp(color_mat[:,color_number-1][1]*256), clamp(color_mat[:,color_number-1][2]*256))\n        vt_lyrs.add_layer(name = layer_data[ii][0], gds_layer = layer_data[ii][1], gds_datatype = layer_data[ii][2],description = layer_data[ii][3], color = color_hex, inverted = False,alpha = 0.6, dither = layer_data[ii][5])\n    \n    return vt_lyrs, layer_data\n\n#%%\ndef vt_layers_post():\n    vt_lyrs = LayerSet()\n    \n    color_mat = gds_colors()\n    \n    #gds layer name, layer number, data type, description, color from color list, dither\n    layer_data = [['m1',40,0,'wiring for stf and r1',32,'I9'],\n                  ['stf',10,0,'superconducting thin film for spds and inductors',3,'I5'],\n                  ['r1',30,0,'spd and loop resistor',18,'I5'],\n                  ['v1',50,0,'via from m1 to m2',11,'I5'], \n                  ['m2',41,0,'ground plane',32,'I1'], \n                  ['v2',51,0,'via from m2 to jj1, jj2, or m1',11,'I6'],\n                  ['jj1',21,0,'jj bottom contact / m3',14,'I4'], \n                  ['jj2',22,0,'jj top contact',13,'I5'],\n                  ['v3',52,0,'via to jj top and bottom contacts',11,'I8'],\n                  ['m3',42,0,'jj contact metal',34,'I9'],\n                  ['r2',31,0,'jj shunt resistor',8,'I9'],\n                  ['v4',54,0,'via to r2 / pad opening',16,'I1'], \n                  ['pkg',60,0,'SU8 packaging layer',1,'I1'], \n                  ['ce',96,0,'chip edge',48,'I1'],\n                  ] \n    \n    num_layers = len(layer_data) \n    for ii in range(num_layers): \n        color_number = layer_data[ii][4]\n        color_hex = '#{0:02x}{1:02x}{2:02x}'.format(clamp(color_mat[:,color_number-1][0]*256), clamp(color_mat[:,color_number-1][1]*256), clamp(color_mat[:,color_number-1][2]*256))\n        vt_lyrs.add_layer(name = layer_data[ii][0], gds_layer = layer_data[ii][1], gds_datatype = layer_data[ii][2],description = layer_data[ii][3], color = color_hex, inverted = False,alpha = 0.6, dither = layer_data[ii][5])\n    \n    return vt_lyrs, layer_data\n\n#%%\ndef write_lyp(lyrs,layer_data,lyp_file_name):\n\n    num_layers = len(lyrs._layers)\n    color_mat = gds_colors()\n    \n#    gds_layers = np.zeros([num_layers,1])\n#    for ii in range(num_layers):\n#        gds_layers[ii] = layer_data[ii][1]\n#        \n#    index_array,gds_layers_sorted = np.argsort(gds_layers)\n    \n    A = '<layer-properties>\\n'\n    \n    for kk in range(num_layers):\n        \n        ii = kk#index_array[kk]\n        color_number = layer_data[ii][4]\n        color_hex = '#{0:02x}{1:02x}{2:02x}'.format(clamp(color_mat[:,color_number-1][0]*256), clamp(color_mat[:,color_number-1][1]*256), clamp(color_mat[:,color_number-1][2]*256))\n        A = A + '<properties>\\n<frame-color>'+color_hex+'</frame-color>\\n'\n        A = A + '<fill-color>'+color_hex+'</fill-color>\\n'\n        A = A + '<frame-brightness>0</frame-brightness>\\n'\n        A = A + '<fill-brightness>0</fill-brightness>\\n'\n        A = A + '<dither-pattern>'+layer_data[ii][5]+'</dither-pattern>\\n'\n        A = A + '<visible>true</visible>\\n'\n        A = A + '<transparent>false</transparent>\\n'\n        A = A + '<width>1</width>\\n'\n        A = A + '<marked>false</marked>\\n'\n        A = A + '<animation>0</animation>\\n'\n        A = A + '<name>'+str(layer_data[ii][1])+'/'+str(layer_data[ii][2])+': '+str(layer_data[ii][0])+'; '+str(layer_data[ii][3])+'</name>\\n'\n        A = A + '<source>'+str(layer_data[ii][1])+'/'+str(layer_data[ii][2])+'@1'+'</source>\\n'\n        A = A + '</properties>\\n'\n    \n    A = A + '</layer-properties>' \n    \n    print(A,file=open(lyp_file_name+'.lyp','w'))\n#    with open('vt.lyp','w') as text_file:\n#        text_file.write(A)\n    \n    return\n\n#%%\ndef gds_colors():\n    \n    ## define colors\n    #blues lightest to darkest\n    blueVec1 = np.array([145,184,219]); blue1 = blueVec1/256;\n    blueVec2 = np.array([96,161,219]); blue2 = blueVec2/256;\n    blueVec3 = np.array([24,90,149]); blue3 = blueVec3/256;\n    blueVec4 = np.array([44,73,100]); blue4 = blueVec4/256;\n    blueVec5 = np.array([4,44,80]); blue5 = blueVec5/256;\n    #reds lightest to darkest\n    redVec1 = np.array([246,177,156]); red1=redVec1/256;\n    redVec2 = np.array([246,131,98]); red2 = redVec2/256;\n
redVec3 = np.array([230,69,23]); red3 = redVec3/256;\n redVec4 = np.array([154,82,61]); red4 = redVec4/256;\n redVec5 = np.array([123,31,4]); red5 = redVec5/256;\n #greens lightest to darkest\n greenVec1 = np.array([142,223,180]); green1 = greenVec1/256;\n greenVec2 = np.array([89,223,151]); green2 = greenVec2/256;\n greenVec3 = np.array([16,162,84]); green3 = greenVec3/256;\n greenVec4 = np.array([43,109,74]); green4 = greenVec4/256;\n greenVec5 = np.array([3,87,42]); green5 = greenVec5/256;\n #yellows lightest to darkest\n yellowVec1 = np.array([246,204,156]); yellow1 = yellowVec1/256;\n yellowVec2 = np.array([246,185,98]); yellow2 = yellowVec2/256;\n yellowVec3 = np.array([230,144,23]); yellow3 = yellowVec3/256;\n yellowVec4 = np.array([154,115,61]); yellow4 = yellowVec4/256;\n yellowVec5 = np.array([123,74,4]); yellow5 = yellowVec5/256;\n \n #blue grays\n gBlueVec1 = np.array([197,199,202]); gBlue1 = gBlueVec1/256;\n gBlueVec2 = np.array([195,198,202]); gBlue2 = gBlueVec2/256;\n gBlueVec3 = np.array([142,145,149]); gBlue3 = gBlueVec3/256;\n gBlueVec4 = np.array([108,110,111]); gBlue4 = gBlueVec4/256;\n gBlueVec5 = np.array([46,73,97]); gBlue5 = gBlueVec5/256;\n #red grays\n gRedVec1 = np.array([242,237,236]); gRed1 = gRedVec1/256;\n gRedVec2 = np.array([242,235,233]); gRed2 = gRedVec2/256;\n gRedVec3 = np.array([230,231,218]); gRed3 = gRedVec3/256;\n gRedVec4 = np.array([172,167,166]); gRed4 = gRedVec4/256;\n gRedVec5 = np.array([149,88,71]); gRed5 = gRedVec5/256;\n #green grays\n gGreenVec1 = np.array([203,209,206]); gGreen1 = gGreenVec1/256;\n gGreenVec2 = np.array([201,209,204]); gGreen2 = gGreenVec2/256;\n gGreenVec3 = np.array([154,162,158]); gGreen3 = gGreenVec3/256;\n gGreenVec4 = np.array([117,122,119]); gGreen4 = gGreenVec4/256;\n gGreenVec5 = np.array([50,105,76]); gGreen5 = gGreenVec5/256;\n #yellow grays\n gYellowVec1 = np.array([242,240,236]); gYellow1 = gYellowVec1/256;\n gYellowVec2 = np.array([242,239,233]); gYellow2 = gYellowVec2/256;\n gYellowVec3 = np.array([230,225,218]); gYellow3 = gYellowVec3/256;\n gYellowVec4 = np.array([172,169,166]); gYellow4 = gYellowVec4/256;\n gYellowVec5 =np.array( [149,117,71]); gYellow5 = gYellowVec5/256;\n \n #pure grays (white to black)\n gVec1 = np.array([256,256,256]); g1 = gVec1/256;\n gVec2 = np.array([242,242,242]); g2 = gVec2/256;\n gVec3 = np.array([230,230,230]); g3 = gVec3/256;\n gVec4 = np.array([204,204,204]); g4 = gVec4/256;\n gVec5 = np.array([179,179,179]); g5 = gVec5/256;\n gVec6 = np.array([153,153,153]); g6 = gVec6/256;\n gVec7 = np.array([128,128,128]); g7 = gVec7/256;\n gVec8 = np.array([102,102,102]); g8 = gVec8/256;\n gVec9 = np.array([77,77,77]); g9 = gVec9/256;\n gVec10 = np.array([51,51,51]); g10 = gVec10/256;\n gVec11 = np.array([26,26,26]); g11 = gVec11/256;\n gVec12 = np.array([0,0,0]); g12 = gVec12/256;\n \n color_mat = np.column_stack((blue1,blue2,blue3,blue4,blue5,red1,red2,red3,red4,red5,green1,green2,green3,green4,green5,yellow1,yellow2,yellow3,yellow4,yellow5,\n gBlue1,gBlue2,gBlue3,gBlue4,gBlue5,gRed1,gRed2,gRed3,gRed4,gRed5,gGreen1,gGreen2,gGreen3,gGreen4,gGreen5,gYellow1,gYellow2,gYellow3,gYellow4,gYellow5,\n g1,g2,g3,g4,g5,g6,g7,g8,g9,g10,g11,g12))\n \n return color_mat\n\n#%%\ndef clamp(x): return int(max(0, min(x, 255)))","sub_path":"gds_backups/spd_res__gds_made_20200201/vt_util.py","file_name":"vt_util.py","file_ext":"py","file_size_in_byte":12143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"480993042","text":"import os\nimport os.path 
as osp\nimport numpy as np\n# `pip install easydict` if you don't have it\nfrom easydict import EasyDict as edict\n\n__C = edict()\n# Consumers can get config by:\n#   from wsl.config import cfg_wsl\ncfg_wsl = __C\n\n#\n# Training options\n#\n\n__C.TRAIN = edict()\n\n# Scales to use during training (can list multiple scales)\n# Each scale is the pixel size of an image's shortest side\n__C.TRAIN.SCALES = (600, )\n\n# Max pixel size of the longest side of a scaled input image\n__C.TRAIN.MAX_SIZE = 1000\n\n# Images to use per minibatch\n# If images per batch is larger than 64, the blob will exceed INT_MAX.\n__C.TRAIN.IMS_PER_BATCH = 2\n\n# TODO(YH): BATCH_SIZE is determined by IM_PER_BATCH and iter_size\n# Minibatch size (number of regions of interest [ROIs])\n# __C.TRAIN.BATCH_SIZE = 128\n\n__C.TRAIN.ROIS_PER_IM = 10000\n\n# Use horizontally-flipped images during training?\n__C.TRAIN.USE_FLIPPED = True\n\n__C.TRAIN.USE_DISTORTION = True\n__C.TRAIN.SATURATION = 1.5\n__C.TRAIN.EXPOSURE = 1.5\n\n__C.TRAIN.USE_CROP = False\n__C.TRAIN.CROP = 0.9\n\n__C.TRAIN.ROI_AU = False\n__C.TRAIN.ROI_AU_STEP = 1\n\n__C.TRAIN.CPG_CACHE = False\n__C.TRAIN.CPG_CACHE_PATH = 'data/cpg_cache/'\n\n# Overlap required between a ROI and ground-truth box in order for that ROI to\n# be used as a bounding-box regression training example\n__C.TRAIN.BBOX_THRESH = 0.5\n\n# Iterations between snapshots\n__C.TRAIN.SNAPSHOT_ITERS = 10000\n\n# solver.prototxt specifies the snapshot path prefix, this adds an optional\n# infix to yield the path: [_]_iters_XYZ.caffemodel\n__C.TRAIN.SNAPSHOT_INFIX = ''\n\n# Use a prefetch thread in roi_data_layer.layer\n# So far I haven't found this useful; likely more engineering work is required\n__C.TRAIN.USE_PREFETCH = False\n\n# Train using these proposals\n__C.TRAIN.PROPOSAL_METHOD = 'selective_search'\n\n# Make minibatches from images that have similar aspect ratios (i.e. both\n# tall and thin or both short and wide) in order to avoid wasting computation\n# on zero-padding.\n__C.TRAIN.ASPECT_GROUPING = True\n\n__C.TRAIN.PASS_IM = 0\n\n__C.TRAIN.SHUFFLE = True\n\n__C.TRAIN.GAN_STEP = 0.0\n__C.TRAIN.GAN_imdb_name = ''\n\n\n#\n# Testing options\n#\n\n__C.TEST = edict()\n\n# Scales to use during testing (can list multiple scales)\n# Each scale is the pixel size of an image's shortest side\n__C.TEST.SCALES = (600, )\n\n# Max pixel size of the longest side of a scaled input image\n__C.TEST.MAX_SIZE = 1000\n\n# Overlap threshold used for non-maximum suppression (suppress boxes with\n# IoU >= this threshold)\n__C.TEST.NMS = 0.3\n\n# Experimental: treat the (K+1) units in the cls_score layer as linear\n# predictors (trained, eg, with one-vs-rest SVMs).\n__C.TEST.SVM = False\n\n# Test using these proposals\n__C.TEST.PROPOSAL_METHOD = 'selective_search'\n\n__C.TEST.ROIS_PER_IM = 10000\n__C.TEST.USE_FLIPPED = True\n__C.TEST.BBOX = False\n\n# for grid search NMS max_per_image thresh and so on\n__C.TEST.CACHE = False\n__C.TEST.MAP = 0.0\n\n#\n# MISC\n#\n\n# The mapping from image coordinates to feature map coordinates might cause\n# some boxes that are distinct in image space to become identical in feature\n# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor\n# for identifying duplicate boxes.\n# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16\n__C.DEDUP_BOXES = 1. / 16.\n\n# Pixel mean values (BGR order) as a (1, 1, 3) array\n# We use the same pixel mean for all networks even though it's not exactly what\n# they were trained with\n# fast rcnn\n# __C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n# VGG 16\n__C.PIXEL_MEANS = np.array([[[103.939, 116.779, 123.68]]])\n# CaffeNet\n# __C.PIXEL_MEANS = np.array([[[104.00, 117.00, 123.00]]])\n\n# For reproducibility\n__C.RNG_SEED = 3\n\n# A small number that's used many times\n__C.EPS = 1e-14\n\n# Root directory of project\n__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))\n\n# Data directory\n__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))\n\n# Model directory\n__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))\n\n# Name (or path to) the matlab executable\n__C.MATLAB = 'matlab'\n\n# Place outputs under an experiments directory\n__C.EXP_DIR = 'default'\n\n# Use GPU implementation of non-maximum suppression\n__C.USE_GPU_NMS = True\n\n# Default GPU device id\n__C.GPU_ID = 0\n\n__C.CSC_DEBUG = False\n\n__C.CONTEXT = False\n__C.CONTEXT_RATIO = 1.8\n\n__C.USE_ROI_SCORE = False\n\n__C.USE_BG = False\n\n__C.SPATIAL_SCALE = 1. / 16.\n\n__C.RESIZE_MODE = 'FIT_SMALLEST'\n\n__C.USE_FEEDBACK = False\n__C.FEEDBACK_DIR = ''\n__C.FEEDBACK_NUM = 0\n\n\ndef get_vis_dir(imdb, net=None):\n    \"\"\"Return the directory where experimental artifacts are placed.\n    If the directory does not exist, it is created.\n\n    A canonical path is built using the name from an imdb and a network\n    (if not None).\n    \"\"\"\n    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'vis', __C.EXP_DIR, imdb.name))\n    if net is not None:\n        outdir = osp.join(outdir, net.name)\n    if not os.path.exists(outdir):\n        os.makedirs(outdir)\n\n
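# refresh the local 'tmp' symlink so it always points at the newest vis dir;
# whatever currently occupies that name (link, dir, or plain file) is removed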
file_path = 'tmp'\n    if os.path.islink(file_path):\n        os.remove(file_path)\n    elif os.path.isdir(file_path):\n        import shutil\n        shutil.rmtree(file_path)\n    elif os.path.isfile(file_path):\n        # It is a file\n        os.remove(file_path)\n\n    os.symlink(outdir, file_path)\n    return outdir\n\n\ndef get_output_dir(imdb, net=None):\n    \"\"\"Return the directory where experimental artifacts are placed.\n    If the directory does not exist, it is created.\n\n    A canonical path is built using the name from an imdb and a network\n    (if not None).\n    \"\"\"\n    outdir = osp.abspath(\n        osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n    if net is not None:\n        outdir = osp.join(outdir, net.name)\n    if not os.path.exists(outdir):\n        os.makedirs(outdir)\n    return outdir\n","sub_path":"lib/wsl/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"639186911","text":"from my_celery.main import app\nimport time\n\n@app.task(bind=True)\ndef t2(self,a,b):\n    # print(args)\n    print(\"++++++++++\",a + b)\n    time.sleep(5)\n    print(\"t2 end\")\n    print(self.request.id)\n    return a+b","sub_path":"celery2/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"183940689","text":"from telegram import ReplyKeyboardMarkup\n\n\nTOKEN = \"TOKEN\"\n\nmusic_tracks = ['Bot Queue Music/{}'.format(i) for i in ('Elevator Music A.mp3',\n                                                          'Elevator Music B.mp3',\n                                                          # 'It Hates Me So Much Extended.mp3',\n                                                          # 'Seduce Me.mp3',\n                                                          'TF2 Upgrade Station.mp3')]\nadmins = []\npeople = {}\nsubjects = {}\nqueue = []\npath_to_people = 'config/people.cfg'\npath_to_admins = 'config/admins.cfg'\npath_to_subjects =
'config/subjects.cfg'\npath_to_subjects_folder = 'subjects/'\nwith open(path_to_people, encoding='utf-8') as file:\n for line in file.readlines():\n line = line.split()\n people[int(line[0])] = line[1]\nwith open(path_to_admins, encoding='utf-8') as file:\n for line in file.readlines():\n admins.append(int(line))\nwith open(path_to_subjects, encoding='utf-8') as file:\n for line in file.readlines():\n line = line.split()\n namelen = int(line[0])\n subjects[' '.join(line[1:namelen+1])] = line[namelen+1:]\n\n\nmarkups = {'idle': ReplyKeyboardMarkup([['Собрать отчёт в PDF',\n 'Отправить отчёт вышестоящим инстанциям'],\n ['Встать в очередь',\n 'Выйти из очереди',\n 'Послушать музыку'],\n ['Панель админ. доступа']],\n one_time_keyboard=True,\n resize_keyboard=True),\n 'admin': ReplyKeyboardMarkup([['Получить архив с отчётами',\n 'Разослать \"письма счастья\"'],\n ['Следующий']],\n one_time_keyboard=True,\n resize_keyboard=True),\n 'gathering': ReplyKeyboardMarkup([['Конец']],\n resize_keyboard=True,\n one_time_keyboard=False),\n 'subjects': ReplyKeyboardMarkup([[subject] for subject in subjects],\n resize_keyboard=True,\n one_time_keyboard=False),\n 'letter_type_choice': ReplyKeyboardMarkup([['Шаблонные', 'Написать своё']],\n resize_keyboard=True,\n one_time_keyboard=False)\n }\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"344974691","text":"from logging import getLogger\nfrom random import choice, random\nfrom .components import *\nimport tcod\nfrom engine.ecs import Entity\n\nfrom core.ai import Wander, FollowEntity, FollowAndAttack\n\n\nclass Player(Entity):\n tile = '@'\n\n def __init__(self, x, y):\n Entity.__init__(self,\n Position(x, y),\n Moveable(),\n Renderable(x,y, self.tile),\n Controllable(),\n Inventory(),\n BlocksMovement(),\n Combat(20, 5),\n )\n\nclass Wall(Entity):\n tile = '#'\n color = tcod.white\n\n def __init__(self, x, y):\n Entity.__init__(self,\n Position(x, y),\n BlocksMovement(),\n Renderable(x, y, self.tile)\n )\n\n\nclass Floor(Entity):\n tile = '.'\n\n def __init__(self, x, y):\n Entity.__init__(self,\n Position(x, y),\n Renderable(x, y, Floor.tile)\n )\n\nclass CreditStick(Entity):\n tile = '$'\n\n def __init__(self, x, y, value=0):\n Entity.__init__(self,\n Position(x, y),\n Renderable(x, y, self.tile),\n Pickup(),\n )\n self.value = value\n\n def pickup(self, other_ent, _map):\n if Inventory in other_ent:\n inventory = other_ent.components[Inventory]\n pos = self.components[Position]\n map_ents = _map.grid[pos.y][pos.x]\n\n inventory.credits += self.value\n map_ents.remove(self)\n\n def __str__(self):\n return self.__class__.__name__\n\n CREDIT_RANGE = range(1, 30)\n\n @classmethod\n def create(cls, x, y):\n value = choice(CreditStick.CREDIT_RANGE)\n return cls(x, y, value)\n\n\nclass ItemPickup:\n\n def pickup(self, other_ent, _map):\n if Inventory in other_ent:\n inventory = other_ent.components[Inventory]\n pos = self.components[Position]\n map_ents = _map.grid[pos.y][pos.x]\n\n inventory.items.append(self)\n map_ents.remove(self)\n\n def __str__(self):\n return self.__class__.__name__\n\n\nclass Rock(Entity, ItemPickup):\n tile = '*'\n\n def __init__(self, x, y):\n Entity.__init__(self,\n Position(x, y),\n Renderable(x, y, self.tile),\n Pickup(),\n )\n\n @classmethod\n def create(cls, x, y):\n return cls(x, y)\n\nclass PlayerSpawner(Entity):\n\n def __init__(self, x, y):\n Entity.__init__(self,\n Position(x, 
y),\n            PlayerSpawn()\n        )\n\n\nclass Citizen(Entity):\n    tile = 'O'\n\n    def __init__(self, x, y, ai_class, ai_args=None):\n        ai_function = ai_class(self, *ai_args if ai_args else ())\n        super().__init__(\n            Position(x, y),\n            Moveable(),\n            AIComponent(ai_function=ai_function),\n            Renderable(x, y, self.tile),\n            BlocksMovement(),\n            Combat(10, 2),\n        )\n\n    PROB_FOLLOWER = 0.5\n\n    @classmethod\n    def create(cls, x, y, player=None):\n        #TODO: Should randomized values be strictly in map generation, or\n        # does it make sense for entities to define how they're generated?\n        if random() < cls.PROB_FOLLOWER:\n            return cls(x, y, Wander)\n        else:\n            return cls(x, y, FollowEntity, (player,))\n\n\nclass KillerRobot(Entity):\n    tile = 'r'\n\n    def __init__(self, x, y, player=None):\n        super().__init__(\n            Position(x, y),\n            Moveable(),\n            AIComponent(ai_function=FollowAndAttack(self, player)),\n            Renderable(x, y, self.tile),\n            BlocksMovement(),\n            Combat(10, 2),\n        )\n\n    @classmethod\n    def create(cls, x, y):\n        return cls(x, y)\n\nclass Terminal(Entity):\n    tile = '?'\n    log = getLogger('Terminal')\n\n    def __init__(self, x, y):\n        super().__init__(\n            Position(x, y),\n            Renderable(x, y, self.tile),\n            BlocksMovement(),\n            Interactable(self.open_terminal),\n        )\n\n    def open_terminal(self, bcast):\n        bcast.publish('open-terminal')\n","sub_path":"core/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"508273425","text":"\"\"\"\nbooruhelper.py - Required for the booru modules to work correctly\nCopyright 2014 Max Gurela\n\nLicensed under the Eiffel Forum License 2 (It's GPL compatible!).\n\"\"\"\nimport json\nimport urllib\nimport urllib2\nimport urlparse\nimport re\n\nfrom urllib import quote\n\nua_firefox = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:17.0) Gecko/17.0' \\\n             ' Firefox/17.0'\n\ndef get(*args, **kwargs):\n    return open(*args, **kwargs).read()\n\ndef open(url, query_params=None, user_agent=None, post_data=None,\n         referer=None, get_method=None, **kwargs):\n\n    if query_params is None:\n        query_params = {}\n\n    if user_agent is None:\n        user_agent = ua_firefox\n\n    query_params.update(kwargs)\n\n    url = prepare_url(url, query_params)\n\n    request = urllib2.Request(url, post_data)\n\n    if get_method is not None:\n        request.get_method = lambda: get_method\n\n    request.add_header('User-Agent', user_agent)\n\n    if referer is not None:\n        request.add_header('Referer', referer)\n\n    return urllib2.build_opener().open(request)\n\n\ndef prepare_url(url, queries):\n    if queries:\n        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)\n\n        query = dict(urlparse.parse_qsl(query))\n        query.update(queries)\n        query = urllib.urlencode(dict((to_utf8(key), to_utf8(value))\n                                      for key, value in query.iteritems()))\n\n        url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))\n\n    return url\n\ndef to_utf8(s):\n    if isinstance(s, unicode):\n        return s.encode('utf8', 'ignore')\n    else:\n        return str(s)","sub_path":"booruhelper.py","file_name":"booruhelper.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490558632","text":"\n\nimport os\nimport sys\nimport time\n\n\nos.environ['SPARK_HOME']=\"/spark/spark-1.6.0-bin-hadoop2.6/\"\n\nsys.path.append(\"/spark/spark-1.6.0-bin-hadoop2.6/python\")\n\ntry:\n    from pyspark import SparkContext\n    from pyspark import SparkConf\n    from pyspark.mllib.linalg.distributed import CoordinateMatrix\n    from pyspark.mllib.linalg.distributed import MatrixEntry\n    from pyspark.mllib.linalg import Vectors\n    import numpy as np\n\n    print (\"Successfully imported Spark Modules\")\n\n\n    if __name__ == \"__main__\":\n        start_time = time.time()\n        master = \"local\"\n        sc = SparkContext(master, \"WordCount\")\n\n        path = \"/ydata-ymusic-user-song-ratings-meta-v1_03/\"\n
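# (inferred from the tab-split indices used below) a ratings row looks like:
# user_id <TAB> song_id <TAB> rating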
data = sc.textFile(path + \"train_0_sub_100k.txt\")\n\n        data = data.repartition(8)\n        data.count()\n\n        # distribution of the ratings\n        drt = data.map(lambda x: (x.split(\"\\t\")[2], 1))\n        drt1 = drt.reduceByKey(lambda x,y: x+y).collectAsMap()\n\n        # distribution of songs by genre\n        # columns: song id, album id, artist id, genre id (tab-separated)\n        gendata = sc.textFile(path+\"song-attributes.txt\")\n        gdata = gendata.map(lambda x: (x.split(\"\\t\")[0], x.split(\"\\t\")[3]))\n\n\n        gendata1 = (gendata.map(lambda x: (x.split(\"\\t\")[3], 1))\n                    .reduceByKey(lambda x,y: x+y)\n                    .takeOrdered(10, key = lambda x: -x[1])\n                    )\n\n        ###Top 5 genre\n        data = sc.textFile(path + \"train_0.txt\")\n        data = data.repartition(8)\n        songs_ratings = data.map(lambda x: (x.split(\"\\t\")[1], 1))\n        song_attributes = (sc.textFile(path+\"song-attributes.txt\")\n                           .map(lambda x: (x.split(\"\\t\")[0], x.split(\"\\t\")[3])))\n\n        top5genre = (song_attributes.join(songs_ratings)\n                     .map(lambda x: (x[1][0], x[1][1]))\n                     .reduceByKey(lambda x,y: x+y)\n                     .takeOrdered(5, key= lambda x:-x[1]))\n\n        ###Top 5 songs\n        top5songs = (songs_ratings.reduceByKey(lambda x,y: x+y)\n                     .takeOrdered(5, key = lambda x: -x[1]))\nexcept ImportError as e:\n    print (\"Can not import Spark Modules\", e)\n    sys.exit(1)\n\n\n","sub_path":"scripts-Swetha/edacode.py","file_name":"edacode.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23385415","text":"from Jumpscale import j\n\n\nclass BuilderRocksDB(j.baseclasses.builder):\n    def build(self, reset=True, install=True):\n        self.install(reset=reset)\n\n    def install(self, reset=False):\n        # install required packages to run.\n        if self._done_check(\"install\", reset):\n            return\n        j.builders.system.python_pip.install(\n            \"http://home.maxux.net/wheelhouse/python_rocksdb-0.6.9-cp35-cp35m-manylinux1_x86_64.whl\"\n        )\n\n        self._done_set(\"install\")\n","sub_path":"JumpscaleBuildersExtra/db/BuilderRocksDB.py","file_name":"BuilderRocksDB.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316193059","text":"import random \nwinning_number = random.randint(1,100)\nguess = 1\nnum = int(input(\" Enter any number : \"))\ngame_over = False\n\nwhile not game_over:\n    if num == winning_number:\n        print(f\"You Win, and You Guessed this number in {guess} tries\")\n        game_over = True\n    else: \n        if num < winning_number:\n            print(\"Too Low\")\n            \n        else:\n            print(\"Too High\")\n        guess += 1 \n        num = int(input(\"Guess Again : \"))\n        \n        # Dry - don't repeat yourself","sub_path":"number_guessing_game2.py","file_name":"number_guessing_game2.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"401886570","text":"from setuptools import setup\nimport os\n\n# Grab line that contains version number from a file,\n# then execute it into the name space.\nversion_line = None\nwith open(os.path.join('til', 'til.py')) as f:\n    for line in f.readlines():\n        if '__version__' in line:\n            version_line = line\n            break\n\n__version__ = None\nif version_line:\n
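# version_line holds the matched source line, e.g. __version__ = '1.0.0';
# exec-ing it below binds __version__ here for the setup() call to use.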
exec(version_line)\nelse:\n raise ValueError(\"Version number not found.\")\n\nsetup(\n name=\"cmdline-til\",\n packages=[\"til\"],\n entry_points={\n \"console_scripts\": ['til = til.til:main']\n },\n version=__version__,\n description=\"Python command line to record TILs (Today I Learned) quickly.\",\n long_description=\"\"\"A python command line tool to quickly take down notes and store it in a directory in /home.\"\"\",\n author=\"Patrick Lee\",\n author_email=\"me@patricklee.nyc\",\n url=\"https://github.com/patleeman/til\",\n download_url=\"https://github.com/patleeman/til/tarball/{}\".format(__version__)\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201621007","text":"import pytest\n\nfrom dvc.exceptions import InvalidArgumentError\n\n\ndef test_file(tmp_dir, dvc):\n msg = (\n \"`--file` is currently incompatible with `-n|--name` \"\n \"and requires `--single-stage`\"\n )\n with pytest.raises(InvalidArgumentError, match=msg):\n dvc.run(fname=\"path/dvc.yaml\", name=\"my\", cmd=\"mycmd\")\n","sub_path":"tests/unit/repo/test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"276429298","text":"# -*- coding: utf-8 -*-\n\"\"\"\n flask_security.decorators\n ~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Flask-Security decorators module\n\n :copyright: (c) 2012 by Matt Wright.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom functools import wraps\n\nfrom flask import Response, abort, current_app, redirect, request, url_for\nfrom flask_login import current_user # pragma: no flakes\nfrom flask_principal import Permission, RoleNeed\nfrom werkzeug.local import LocalProxy\nfrom werkzeug.routing import BuildError\n\nfrom . import utils\n\n# Convenient references\n_security = LocalProxy(lambda: current_app.extensions['security'])\n\n\n_default_unauthorized_html = \"\"\"\n

<h1>Unauthorized</h1>\n    <p>\n    The server could not verify that you are authorized to access the URL\n    requested. You either supplied the wrong credentials (e.g. a bad password),\n    or your browser doesn't understand how to supply the credentials required.\n    </p>
\n \"\"\"\n\n\ndef _get_unauthorized_response(text=None, headers=None):\n text = text or _default_unauthorized_html\n headers = headers or {}\n return Response(text, 401, headers)\n\n\ndef _get_unauthorized_view():\n view = utils.get_url(utils.config_value('UNAUTHORIZED_VIEW'))\n if view:\n if callable(view):\n view = view()\n else:\n try:\n view = url_for(view)\n except BuildError:\n view = None\n utils.do_flash(*utils.get_message('UNAUTHORIZED'))\n redirect_to = '/'\n if (request.referrer and\n not request.referrer.split('?')[0].endswith(request.path)):\n redirect_to = request.referrer\n\n return redirect(view or redirect_to)\n abort(403)\n\n\ndef auth_required(*auth_methods):\n \"\"\"\n Decorator that protects enpoints through multiple mechanisms\n Example::\n\n @app.route('/dashboard')\n @auth_required('session')\n def dashboard():\n return 'Dashboard'\n\n :param auth_methods: Specified mechanisms.\n \"\"\"\n login_mechanisms = {\n 'session': lambda: current_user.is_authenticated\n }\n\n def wrapper(fn):\n @wraps(fn)\n def decorated_view(*args, **kwargs):\n h = {}\n mechanisms = [(method, login_mechanisms.get(method))\n for method in auth_methods]\n for method, mechanism in mechanisms:\n if mechanism and mechanism():\n return fn(*args, **kwargs)\n if _security._unauthorized_callback:\n return _security._unauthorized_callback()\n else:\n return _get_unauthorized_response(headers=h)\n return decorated_view\n return wrapper\n\n\ndef roles_required(*roles):\n \"\"\"Decorator which specifies that a user must have all the specified roles.\n Example::\n\n @app.route('/dashboard')\n @roles_required('admin', 'editor')\n def dashboard():\n return 'Dashboard'\n\n The current user must have both the `admin` role and `editor` role in order\n to view the page.\n\n :param args: The required roles.\n \"\"\"\n def wrapper(fn):\n @wraps(fn)\n def decorated_view(*args, **kwargs):\n perms = [Permission(RoleNeed(role)) for role in roles]\n for perm in perms:\n if not perm.can():\n if _security._unauthorized_callback:\n return _security._unauthorized_callback()\n else:\n return _get_unauthorized_view()\n return fn(*args, **kwargs)\n return decorated_view\n return wrapper\n\n\ndef roles_accepted(*roles):\n \"\"\"Decorator which specifies that a user must have at least one of the\n specified roles. 
def roles_accepted(*roles):\n    \"\"\"Decorator which specifies that a user must have at least one of the\n    specified roles. Example::\n\n        @app.route('/create_post')\n        @roles_accepted('editor', 'author')\n        def create_post():\n            return 'Create Post'\n\n    The current user must have either the `editor` role or `author` role in\n    order to view the page.\n\n    :param args: The possible roles.\n    \"\"\"\n    def wrapper(fn):\n        @wraps(fn)\n        def decorated_view(*args, **kwargs):\n            perm = Permission(*[RoleNeed(role) for role in roles])\n            if perm.can():\n                return fn(*args, **kwargs)\n            if _security._unauthorized_callback:\n                return _security._unauthorized_callback()\n            else:\n                return _get_unauthorized_view()\n        return decorated_view\n    return wrapper\n\n\ndef anonymous_user_required(f):\n    @wraps(f)\n    def wrapper(*args, **kwargs):\n        if current_user.is_authenticated:\n            return redirect(utils.get_url(_security.post_login_view))\n        return f(*args, **kwargs)\n    return wrapper\n","sub_path":"flask_security/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"116022576","text":"import asyncio\n\nfrom collections import namedtuple\n\nfrom aiocache import cached, RedisCache\nfrom aiocache.serializers import PickleSerializer\n\nResult = namedtuple('Result', \"content, status\")\n\nRedisCache.set_defaults(\n    namespace=\"main\",\n    db=1,\n    pool_min_size=3,\n    serializer=PickleSerializer())\n\n\n@cached(cache=RedisCache, ttl=10, key=\"key\")\nasync def decorator():\n    return Result(\"content\", 200)\n\n\nasync def global_cache():\n    cache = RedisCache()\n    obj = await cache.get(\"key\")\n\n    assert obj.content == \"content\"\n    assert obj.status == 200\n    assert cache.db == 1\n    assert cache.pool_min_size == 3\n\n\ndef test_default_cache():\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(decorator())\n    loop.run_until_complete(global_cache())\n\n    loop.run_until_complete(RedisCache(namespace=\"main\").delete(\"key\"))\n\n\nif __name__ == \"__main__\":\n    test_default_cache()\n","sub_path":"examples/config_default_cache.py","file_name":"config_default_cache.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"21888225","text":"import math\ndef isPrime(x):\n    if x < 2:\n        return False\n    temp = int(math.sqrt(x))\n    for y in range(2,temp+1):\n        if(x % y == 0):\n            return False\n    return True\n\ndef primeFactors(x):\n    answer = []\n    for tmp in range(2,x+1):\n        if(isPrime(tmp)):\n            if(x % tmp == 0):\n                while(x % tmp == 0):\n                    x = x//tmp\n                answer.append(tmp)\n    print(answer)\nprimeFactors(270)\nprimeFactors(45)\nprint(isPrime(2))\nprint(isPrime(45))\n","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"527837731","text":"load(\"@bazel_skylib//lib:collections.bzl\", \"collections\")\nload(\"@fbcode_macros//build_defs/lib:python_common.bzl\", \"python_common\")\nload(\"@fbcode_macros//build_defs/lib:visibility.bzl\", \"get_visibility\")\nload(\"@fbsource//tools/build_defs:fb_native_wrapper.bzl\", \"fb_native\")\n\ndef python_unittest(\n        name,\n        py_version = None,\n        py_flavor = \"\",\n        base_module = None,\n        main_module = None,\n        strip_libpar = True,\n        srcs = (),\n        versioned_srcs = (),\n        tags = (),\n        gen_srcs = (),\n        deps = (),\n        tests = (),\n        par_style = None,\n        emails = None,\n        external_deps = (),\n        needed_coverage = None,\n        argcomplete = None,\n        strict_tabs = None,\n        compile = None,\n        args = None,\n        env = None,\n        python = None,\n        allocator =
None,\n check_types = False,\n preload_deps = (),\n visibility = None,\n resources = (),\n jemalloc_conf = None,\n typing = False,\n typing_options = \"\",\n check_types_options = \"\",\n runtime_deps = (),\n cpp_deps = (), # ctypes targets\n helper_deps = False,\n analyze_imports = False,\n additional_coverage_targets = (),\n version_subdirs = None):\n visibility = get_visibility(visibility, name)\n\n all_attributes = python_common.convert_binary(\n is_test = True,\n fbconfig_rule_type = \"python_unittest\",\n buck_rule_type = \"python_test\",\n base_path = native.package_name(),\n name = name,\n py_version = py_version,\n py_flavor = py_flavor,\n base_module = base_module,\n main_module = main_module,\n strip_libpar = strip_libpar,\n srcs = srcs,\n versioned_srcs = versioned_srcs,\n tags = tags,\n gen_srcs = gen_srcs,\n deps = deps,\n tests = tests,\n par_style = par_style,\n emails = emails,\n external_deps = external_deps,\n needed_coverage = needed_coverage,\n argcomplete = argcomplete,\n strict_tabs = strict_tabs,\n compile = compile,\n args = args,\n env = env,\n python = python,\n allocator = allocator,\n check_types = check_types,\n preload_deps = preload_deps,\n visibility = visibility,\n resources = resources,\n jemalloc_conf = jemalloc_conf,\n typing = typing,\n typing_options = typing_options,\n check_types_options = check_types_options,\n runtime_deps = runtime_deps,\n cpp_deps = cpp_deps,\n helper_deps = helper_deps,\n analyze_imports = analyze_imports,\n additional_coverage_targets = additional_coverage_targets,\n version_subdirs = version_subdirs,\n )\n\n py_tests = []\n for attributes in all_attributes:\n fb_native.python_test(**attributes)\n py_tests.append(\n (\":\" + attributes[\"name\"], attributes.get(\"tests\")),\n )\n\n # TODO: This should probably just be test_suite? 
This rule really doesn't\n # make sense....\n # Create a genrule to wrap all the tests for easy running if a test was created\n # for multiple python versions (they'll have different names)\n if len(py_tests) > 1:\n # We are propogating tests from sub targets to this target\n gen_tests = []\n for test_target, tests_attribute in py_tests:\n gen_tests.append(test_target)\n if tests_attribute:\n gen_tests.extend(tests_attribute)\n gen_tests = collections.uniq(gen_tests)\n\n cmd = \" && \".join([\n \"echo $(location {})\".format(test_target)\n for test_target in gen_tests\n ] + [\"touch $OUT\"])\n\n fb_native.genrule(\n name = name,\n visibility = visibility,\n out = \"unused\",\n tests = gen_tests,\n cmd = cmd,\n )\n","sub_path":"infra_macros/fbcode_macros/build_defs/python_unittest.bzl","file_name":"python_unittest.bzl","file_ext":"bzl","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11054463","text":"from itertools import groupby\nfrom typing import Any, List\n\nimport sqlalchemy as sa\nfrom repka.api import BaseRepository, T\nfrom sqlalchemy import Table, UniqueConstraint\n\nfrom polytical_views.models import (\n Person,\n Tweet,\n Sentiment,\n FullSentiment,\n Feature,\n PersonScore,\n TwitterTask,\n)\n\nmetadata = sa.MetaData()\n\npeople_table = sa.Table(\n \"people\",\n metadata,\n sa.Column(\"id\", sa.Integer, primary_key=True, autoincrement=True),\n sa.Column(\"url\", sa.String),\n sa.Column(\"pic\", sa.String),\n sa.Column(\"name\", sa.String),\n)\n\ntweets_table = sa.Table(\n \"tweets\",\n metadata,\n sa.Column(\"id\", sa.Integer, primary_key=True, autoincrement=True),\n sa.Column(\n \"person_id\", sa.Integer, sa.ForeignKey(people_table.c.id), nullable=False\n ),\n sa.Column(\"url\", sa.String),\n sa.Column(\"text\", sa.String),\n sa.Column(\"created_at\", sa.DateTime),\n)\n\nfeatures_table = sa.Table(\n \"features\",\n metadata,\n sa.Column(\"id\", sa.Integer, primary_key=True, autoincrement=True),\n sa.Column(\"title\", sa.String, unique=True),\n sa.Column(\"keywords\", sa.ARRAY(sa.String)),\n sa.Column(\"vocab_to_int\", sa.JSON),\n sa.Column(\"net\", sa.JSON)\n)\n\ntweets_sentiment_table = sa.Table(\n \"tweets_sentiment\",\n metadata,\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"tweet_id\", sa.Integer, sa.ForeignKey(tweets_table.c.id)),\n sa.Column(\"feature_id\", sa.Integer, sa.ForeignKey(features_table.c.id)),\n sa.Column(\"sentiment\", sa.Integer),\n UniqueConstraint(\"tweet_id\", \"feature_id\"),\n)\n\ntwitter_tasks_table = sa.Table(\n \"twitter_task\",\n metadata,\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"result_id\", sa.Integer, sa.ForeignKey(people_table.c.id)),\n sa.Column(\"status\", sa.Integer),\n)\n\n\nclass PersonRepository(BaseRepository[Person]):\n @property\n def table(self) -> Table:\n return people_table\n\n def deserialize(self, **kwargs: Any) -> T:\n return Person(**kwargs)\n\n\nclass TweetRepository(BaseRepository[Tweet]):\n @property\n def table(self) -> Table:\n return tweets_table\n\n def deserialize(self, **kwargs: Any) -> T:\n return Tweet(**kwargs)\n\n\nclass FeatureRepository(BaseRepository[Feature]):\n @property\n def table(self) -> Table:\n return features_table\n\n def deserialize(self, **kwargs: Any) -> T:\n return Feature(**kwargs)\n\n\nclass SentimentRepository(BaseRepository[Sentiment]):\n @property\n def table(self) -> Table:\n return tweets_sentiment_table\n\n @property\n def features_table(self) -> Table:\n return 
features_table\n\n @property\n def tweets_table(self) -> Table:\n return tweets_table\n\n def deserialize(self, **kwargs: Any) -> T:\n return Sentiment(**kwargs)\n\n async def get_full_sentiments(self, tweet_id: int) -> List[FullSentiment]:\n join = sa.join(\n self.table,\n self.features_table,\n self.table.c.feature_id == self.features_table.c.id,\n )\n query = (\n sa.select(\n [*self.table.c, self.features_table.c.title.label(\"feature_name\")]\n )\n .select_from(join)\n .where(self.table.c.tweet_id == tweet_id)\n )\n return [FullSentiment(**row) async for row in self.connection.execute(query)]\n\n async def get_person_scores(self, person_id: int) -> List[PersonScore]:\n scores = await self.get_all_people_scores()\n return [score for score in scores if score.person_id == person_id]\n\n async def get_all_people_scores(self) -> List[PersonScore]:\n join = sa.join(\n sa.join(\n self.table,\n self.tweets_table,\n self.table.c.tweet_id == self.tweets_table.c.id,\n ),\n self.features_table,\n self.table.c.feature_id == self.features_table.c.id,\n )\n\n query = (\n sa.select(\n [\n self.tweets_table.c.person_id,\n self.features_table.c.id.label(\"feature_id\"),\n self.features_table.c.title.label(\"feature_name\"),\n self.table.c.sentiment,\n self.tweets_table.c.id.label(\"tweet_id\"),\n ]\n )\n .select_from(join)\n .order_by(\"person_id\", \"feature_id\")\n )\n\n rows = await self.connection.execute(query)\n rows_list = [row async for row in rows]\n\n grouped = groupby(rows_list, lambda row: (row.person_id, row.feature_id))\n\n res = []\n for (person_id, feature_id), group in grouped:\n group_list = list(group)\n\n score = PersonScore(\n person_id=person_id,\n feature_id=feature_id,\n feature_name=group_list[0].feature_name,\n score=sum(row.sentiment for row in group_list) / len(group_list),\n reasons=[row.tweet_id for row in group_list],\n )\n res.append(score)\n\n return res\n\n\nclass TwitterTaskRepository(BaseRepository[TwitterTask]):\n @property\n def table(self) -> Table:\n return twitter_tasks_table\n\n def deserialize(self, **kwargs: Any) -> T:\n return TwitterTask(**kwargs)\n\n async def get_by_result_id(self, result_id: int):\n return (await self.get_all([self.table.c.result_id == result_id]))[0]\n","sub_path":"polytical_views/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253954183","text":"from module import *\nfrom DCview import *\n\nclass DCplayer(Hand):\n def __init__(self):\n super(DCplayer, self).__init__()\n self.check_correct=[]\n\n def __str__(self):\n show=super(DCplayer, self).__str__()\n show+=\" \"\n for check in self.check_correct:\n show+=check.rjust(8)+\" \"\n return show\n\n def set_Joker(self):\n left=[]\n right=[]\n while self.joker!=0:\n print(\"You have to insert your Joker! 
\",end='')\n loc=Reader.select_loc(self, self)\n if self.cards[len(self.cards)-1].color==\"White\":\n self.white_joker=loc\n else:\n self.black_joker=loc\n left=self.cards[:loc-1]\n right=self.cards[loc-1:]\n left.append(self.cards[len(self.cards)-self.joker])\n right.remove(self.cards[len(self.cards)-self.joker])\n self.cards=left+right\n self.joker=self.joker-1\n\n def match(self, card, you):\n num=0\n import random\n while True:\n num=random.randrange(len(self.cards))\n if self.check_correct[num]==\"Covered\":\n break\n if self.cards[num-1]==card:\n print(\"\\n\\n\\n\\n\\nCPU is right\")\n self.cards[self.cards.index(card)].check=1\n else:\n print(\"\\n\\n\\n\\n\\nCPU is wrong\")\n you.wrong()\n\n def status(self):\n for i in range(len(self.cards)):\n if self.cards[i].check==0:\n self.check_correct[i]=\"Covered\"\n else:\n self.check_correct[i]=\"Opened\"\n\n def sorting(self, card):\n super(DCplayer, self).sorting(card)\n for i in range(len(self.cards)):\n self.check_correct[i]=\" \"\n self.status()\n\n\n def check_win(self, you):\n count=0\n for card in you.cards:\n if card.face_up:\n count+=1\n if count==len(you.cards):\n self.win()\n return True\n\n def check_lose(self):\n count=0\n for card in self.cards:\n if card.check==1:\n count+=1\n if count==len(self.cards):\n self.lose()\n return True\n\n def wrong(self):\n print(\"You wrong! You have to open your card.\")\n loc=Reader.select_loc(self, self)\n while True:\n if self.check_correct[loc-1]==\"Covered\":\n self.cards[loc-1].check=1\n self.sorting(None)\n break\n else:\n loc=Reader.select_loc(self, self)\n\nclass DCcpu(Hand):\n def __init__(self, cards):\n super(DCcpu, self).__init__()\n self.ans=cards\n self.check_correct=[]\n\n def choose_loc(self):#조커 위치\n loc=0\n for i in range(len(self.cards)-1):\n if int(self.cards[i+1].number)-int(self.cards[i].number)>3:\n return i\n elif int(self.cards[i+1].number)-int(self.cards[i].number)==0:\n import random\n return i+1\n else:\n import random\n loc=random.randrange(2)\n if loc==0:\n return 2\n else:\n return len(self.cards)-1\n\n def set_Joker(self):\n left=[]\n right=[]\n while self.joker!=0:\n loc=self.choose_loc()\n if self.cards[len(self.cards)-1].color==\"White\":\n self.white_joker=loc\n else:\n self.black_joker=loc\n left=self.cards[:loc-1]\n right=self.cards[loc-1:]\n left.append(self.cards[len(self.cards)-self.joker])\n right.remove(self.cards[len(self.cards)-self.joker])\n self.cards=left+right\n self.joker=self.joker-1\n \n def correct_ans(self):\n for i in range(len(self.cards)):\n for j in range(len(self.ans)):\n if self.cards[i].color==self.ans[j].color\\\n and self.cards[i].number==self.ans[j].number:\n self.ans.remove(self.ans[j])\n break\n import random\n return self.ans[random.randrange(len(self.ans))]\n\n def wrong(self):\n while True:\n import random\n loc=random.randrange(len(self.cards))\n if not self.cards[loc].face_up:\n self.cards[loc].flip()\n break\n","sub_path":"DaVinci.py","file_name":"DaVinci.py","file_ext":"py","file_size_in_byte":4428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"547056823","text":"#! 
def write2db(datatype,data,client):\n    tmp = [{\"measurement\":None,\"tags\":{},\"fields\":{},\"time\":datetime.now().isoformat()}]\n    tmp[0][\"measurement\"] = datatype[\"measurement\"]\n    for x in datatype['tags']:\n        tmp[0][\"tags\"][x] = getattr(data,x)\n    for y in datatype['fields']:\n        tmp[0][\"fields\"][y] = getattr(data,y)\n    for z in datatype['time']:\n        # override the default timestamp when the datatype names a time attribute\n        tmp[0]['time'] = getattr(data,z)\n    client.write_points(tmp)","sub_path":"plugins/db_modules.py","file_name":"db_modules.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305545161","text":"import asyncio\nimport configparser\nimport json\nfrom sqlite3 import OperationalError\n\nfrom telethon import TelegramClient\n\nclient = None\nclass TelegramConnection:\n\n    def __init__(self):\n        config = configparser.ConfigParser()\n        config.read(\"config.ini\")\n\n        self.api_id = config['Telegram']['api_id']\n        self.api_hash = str(config['Telegram']['api_hash'])\n\n        self.phone = config['Telegram']['phone']\n        self.username = config['Telegram']['username']\n\n    def set_credtional(self, api_id, api_hash, phone, username):\n        self.api_id = api_id\n        self.api_hash = api_hash\n        self.phone = phone\n        self.username = username\n\n    async def get_client(self):\n        global client\n        try:\n            if client is not None:\n                return client\n            client = TelegramClient(self.username, api_id=self.api_id, api_hash=self.api_hash)\n            await client.start()\n            return client\n        except Exception as e:\n            print(e)\n            return None\n","sub_path":"global_utils/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67873777","text":"# Copyright 2021 The Google Earth Engine Community Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# [START earthengine__apidocs__ee_data_getdownloadid]\n\"\"\"Demonstrates the ee.data.getDownloadId method.\"\"\"\n\nimport io\nimport requests\nimport ee\n\n\nee.Authenticate()\nee.Initialize()\n\n# A Sentinel-2 surface reflectance image.\nimg = ee.Image('COPERNICUS/S2_SR/20210109T185751_20210109T185931_T10SEG')\n\n# A small region within the image.\nregion = ee.Geometry.BBox(-122.0859, 37.0436, -122.0626, 37.0586)\n\n# Image chunk as a NumPy structured array.\nimport numpy\ndownload_id = ee.data.getDownloadId({\n    'image': img,\n    'bands': ['B3', 'B8', 'B11'],\n    'region': region,\n    'scale': 20,\n    'format': 'NPY'\n})\nresponse = requests.get(ee.data.makeDownloadUrl(download_id))\ndata = numpy.load(io.BytesIO(response.content))\nprint(data)\nprint(data.dtype)\n\n# Single-band GeoTIFF files wrapped in a zip file.\ndownload_id = ee.data.getDownloadId({\n    'image': img,\n    'name': 'single_band',\n    'bands': ['B3', 'B8', 'B11'],\n    'region': region\n})\nresponse = requests.get(ee.data.makeDownloadUrl(download_id))\nwith
open('single_band.zip', 'wb') as fd:\n fd.write(response.content)\n\n# Multi-band GeoTIFF file wrapped in a zip file.\ndownload_id = ee.data.getDownloadId({\n 'image': img,\n 'name': 'multi_band',\n 'bands': ['B3', 'B8', 'B11'],\n 'region': region,\n 'scale': 20,\n 'filePerBand': False\n})\nresponse = requests.get(ee.data.makeDownloadUrl(download_id))\nwith open('multi_band.zip', 'wb') as fd:\n fd.write(response.content)\n\n# Band-specific transformations.\ndownload_id = ee.data.getDownloadId({\n 'image': img,\n 'name': 'custom_single_band',\n 'bands': [\n {'id': 'B3', 'scale': 10},\n {'id': 'B8', 'scale': 10},\n {'id': 'B11', 'scale': 20}\n ],\n 'region': region\n})\nresponse = requests.get(ee.data.makeDownloadUrl(download_id))\nwith open('custom_single_band.zip', 'wb') as fd:\n fd.write(response.content)\n\n# Multi-band GeoTIFF file.\ndownload_id = ee.data.getDownloadId({\n 'image': img,\n 'bands': ['B3', 'B8', 'B11'],\n 'region': region,\n 'scale': 20,\n 'format': 'GEO_TIFF'\n})\nresponse = requests.get(ee.data.makeDownloadUrl(download_id))\nwith open('multi_band.tif', 'wb') as fd:\n fd.write(response.content)\n# [END earthengine__apidocs__ee_data_getdownloadid]\n","sub_path":"samples/python/apidocs/ee_data_getdownloadid.py","file_name":"ee_data_getdownloadid.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"464777027","text":"# Copyright (c) 2016. Zuercher Hochschule fuer Angewandte Wissenschaften\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom django.core.urlresolvers import reverse\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ungettext_lazy\n\nfrom horizon import tables\n\nfrom mistraldashboard import api\nfrom mistraldashboard.default.utils import humantime\n\n\nclass CreateDelayTolerantWorkload(tables.LinkAction):\n name = \"create\"\n verbose_name = _(\"Create Delay Tolerant Workload\")\n url = \"horizon:mistral:delayt_workloads:create\"\n classes = (\"ajax-modal\",)\n icon = \"plus\"\n\n\nclass DeleteDelayTolerantWorkload(tables.DeleteAction):\n @staticmethod\n def action_present(count):\n return ungettext_lazy(\n u\"Delete Delay Tolerant Workload\",\n u\"Delete Delay Tolerant Workloads\",\n count\n )\n\n @staticmethod\n def action_past(count):\n return ungettext_lazy(\n u\"Deleted Delay Tolerant Workload\",\n u\"Deleted Delay Tolerant Workloads\",\n count\n )\n\n def delete(self, request, delay_tolerant_workload_name):\n api.delay_tolerant_workload_delete(request,\n delay_tolerant_workload_name)\n\n\nclass WorkflowColumn(tables.Column):\n def get_link_url(self, datum):\n workflow_url = \"horizon:mistral:workflows:detail\"\n obj_id = datum.workflow_name\n return reverse(workflow_url, args=[obj_id])\n\n\nclass DelayTolerantWorkloadsTable(tables.DataTable):\n id = tables.Column(\n \"id\",\n verbose_name=_(\"ID\"),\n link=\"horizon:mistral:delayt_workloads:detail\"\n )\n name = tables.Column(\n 
\"name\",\n verbose_name=_(\"Name\")\n )\n workflow_name = WorkflowColumn(\n \"workflow_name\",\n verbose_name=_(\"Workflow\"),\n link=True\n )\n deadline = tables.Column(\n \"deadline\",\n verbose_name=_(\"Deadline\"),\n )\n job_duration = tables.Column(\n \"job_duration\",\n verbose_name=_(\"Job Duration\"),\n )\n created_at = tables.Column(\n \"created_at\",\n verbose_name=_(\"Created at\"),\n filters=[humantime]\n )\n updated_at = tables.Column(\n \"updated_at\",\n verbose_name=_(\"Updated at\"),\n filters=[humantime]\n )\n\n def get_object_id(self, datum):\n return datum.name\n\n class Meta(object):\n name = \"delay tolerant workload\"\n verbose_name = _(\"Delay Tolerant Workload\")\n table_actions = (\n tables.FilterAction,\n CreateDelayTolerantWorkload,\n DeleteDelayTolerantWorkload\n )\n row_actions = (DeleteDelayTolerantWorkload,)\n","sub_path":"mistraldashboard/delayt_workloads/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198232258","text":"import csv\nimport datetime\nimport re\nimport yaml\n\nclass Population( object ):\n \n def __init__( self ):\n\n self.entry_rx_short = re.compile( '^\\s*#(?P\\w+)\\s+(?P\\d+|\\d+\\.\\d+|\\.\\d+)\\s*(h|hr|hrs)\\s*$' )\n self.entry_rx_long = re.compile( '^\\s*(?P.*?)\\s+#(?P\\w+)\\s+(?P\\d+|\\d+\\.\\d+|\\.\\d+)\\s*(h|hr|hrs)\\s*$' )\n\n self.entries = []\n self.entries_rejected = []\n self.entries_earliest = False\n self.entries_latest = False\n\n self.tag_counts = {}\n\n def _sum_tags( self ):\n \n for e in self.entries:\n if e['tag'] not in self.tag_counts:\n self.tag_counts[e['tag']] = 0\n self.tag_counts[e['tag']] += e['effort']\n \n def _set_earliest_and_latest( self ):\n \n self.entries_earliest = min( [ e['date'] for e in self.entries ] )\n self.entries_latest = max( [e['date'] for e in self.entries ] )\n\nclass CSVPopulation( Population ):\n\n def __init__( self, path_to_export_file, path_to_filter_file ):\n super().__init__()\n \n self.path_to_export_file = path_to_export_file\n self.path_to_filter_file = path_to_filter_file\n \n with open( self.path_to_filter_file ) as f:\n self.filters = yaml.safe_load( f )\n \n with open( self.path_to_export_file, newline='' ) as f:\n rdr = csv.reader( f )\n rdr.__next__()\n for r in rdr:\n\n # 0 user_email_address\n # 1 status\n # 2 body\n # 3 occurred_on\n # 4 completed_on\n # 5 created_at\n # 6 archived_at\n \n if r[0] in self.filters['skip']:\n continue\n \n m = self.entry_rx_short.match( r[2] )\n if m:\n \n if m.group( 'tag' ) in self.filters['tags'].keys():\n filtered_tag = self.filters['tags'][m.group( 'tag' )]\n else:\n filtered_tag = m.group( 'tag' )\n \n self.entries.append( {\n 'email': r[0],\n 'date': datetime.datetime.strptime( r[4], '%Y-%m-%d' ),\n 'description': '(none)',\n 'tag': filtered_tag,\n 'effort': float( m.group( 'effort' ) )\n } )\n continue\n \n m = self.entry_rx_long.match( r[2] )\n if m:\n\n if m.group( 'tag' ) in self.filters['tags'].keys():\n filtered_tag = self.filters['tags'][m.group( 'tag' )]\n else:\n filtered_tag = m.group( 'tag' )\n \n self.entries.append( {\n 'email': r[0],\n 'date': datetime.datetime.strptime( r[4], '%Y-%m-%d' ),\n 'description': m.group( 'description' ),\n 'tag': filtered_tag,\n 'effort': float( m.group( 'effort' ) )\n } )\n continue\n \n self.entries_rejected.append( {\n 'email': r[0],\n 'date': r[4],\n 'body': r[2]\n } )\n \n self._sum_tags()\n self._set_earliest_and_latest()\n \n @property\n def source( self 
):\n return 'csv | %s | %s' % ( self.path_to_export_file, self.path_to_filter_file )\n","sub_path":"src/ireportedthis/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532277873","text":"import numpy as np \nimport os\nimport skimage.io as io\nimport skimage.transform as trans\nimport numpy as np\nimport tensorflow as tf\nfrom keras.models import *\nfrom keras.layers import *\nfrom keras.optimizers import *\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras import backend as keras\n\nsmooth = 1.\n\n# def iou(y_true, y_pred):\n# y_pred = tf.cast(y_pred[:,:,:,0],'float32')\n# y_true = tf.cast(y_true[:,:,:,0],'float32') \n \n# # pred = tf.cast(y_pred,'float32')\n# # true = tf.cast(y_true,'float32') \n \n# y_pred = keras.batch_flatten(y_pred)\n# y_true = keras.batch_flatten(y_true)\n \n# tp = y_true * y_pred\n# fp = y_true * (1 - y_pred)\n# fn = (1-y_true) * y_pred\n \n# tp = keras.sum(tp,axis=-1)\n# fp = keras.sum(fp,axis=-1)\n# fn = keras.sum(fn,axis=-1)\n \n# intersection = tp + smooth \n# union= (tp+fp+fn) + smooth\n \n# iou = intersection/union\n# return iou\n\ndef iou(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f) \n return (intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth)\n\ndef dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\ndef dice_loss(y_true, y_pred):\n return -dice_coef(y_true, y_pred)\n\ndef unet(pretrained_weights = None, batchnorm = True, input_size = (256,256,1)):\n \n inputs = Input(input_size)\n \n #Downsample Block 1\n conv1 = Conv2D(64, (3, 3), padding='same')(inputs)\n if batchnorm == True:\n conv1 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv1)\n conv1 = Activation('relu')(conv1) \n \n conv1 = Conv2D(64, (3, 3), padding='same')(conv1)\n if batchnorm == True:\n conv1 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv1)\n else:\n conv1 = conv1\n conv1 = Activation('relu')(conv1) \n \n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n #Downsample Block 2\n conv2 = Conv2D(128, (3, 3), padding='same')(pool1)\n if batchnorm == True:\n conv2 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv2)\n conv2 = Activation('relu')(conv2)\n \n conv2 = Conv2D(128, (3, 3), padding='same')(conv2)\n if batchnorm == True:\n conv2 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv2)\n else:\n conv2 = conv2\n conv2 = Activation('relu')(conv2)\n \n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n \n #Downsample Block 3\n conv3 = Conv2D(256, (3, 3), padding='same')(pool2)\n if batchnorm == True:\n conv3 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv3)\n conv3 = Activation('relu')(conv3)\n \n conv3 = Conv2D(256, (3, 3), padding='same')(conv3)\n if batchnorm == True:\n conv3 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv3)\n else:\n conv3 = conv3\n conv3 = Activation('relu')(conv3)\n \n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n #Downsample Block 4\n conv4 = Conv2D(512, (3, 3), padding='same')(pool3)\n if batchnorm == True:\n conv4 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv4)\n conv4 = Activation('relu')(conv4)\n \n conv4 = Conv2D(512, (3, 3), padding='same')(conv4)\n if batchnorm == True:\n conv4 = 
BatchNormalization(axis=3, epsilon=1.001e-5)(conv4)\n else:\n conv4 = conv4 \n conv4 = Activation('relu')(conv4)\n \n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n \n #Middle block \n conv5 = Conv2D(1024, (3, 3), padding='same')(pool4)\n if batchnorm == True:\n conv5 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv5)\n conv5 = Activation('relu')(conv5)\n \n conv5 = Conv2D(1024, (3, 3), padding='same')(conv5)\n if batchnorm == True:\n conv5 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv5)\n else:\n conv5 = conv5\n conv5 = Activation('relu')(conv5)\n \n #Upsample Block 1 \n up6 = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(conv5) \n if batchnorm == True:\n up6 = BatchNormalization(axis=3, epsilon=1.001e-5)(up6)\n up6 = Activation('relu')(up6) \n \n up6 = concatenate([up6, conv4], axis=3)\n \n conv6 = Conv2D(512, (3, 3), padding='same')(up6)\n if batchnorm == True:\n conv6 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv6)\n conv6 = Activation('relu')(conv6)\n \n conv6 = Conv2D(512, (3, 3), padding='same')(conv6)\n if batchnorm == True:\n conv6 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv6)\n conv6 = Activation('relu')(conv6)\n \n #Upsample Block 2\n up7 = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv6) \n if batchnorm == True:\n up7 = BatchNormalization(axis=3, epsilon=1.001e-5)(up7)\n up7 = Activation('relu')(up7) \n \n up7 = concatenate([up7, conv3], axis=3)\n \n conv7 = Conv2D(256, (3, 3), padding='same')(up7)\n if batchnorm == True:\n conv7 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv7)\n conv7 = Activation('relu')(conv7)\n \n conv7 = Conv2D(256, (3, 3), padding='same')(conv7)\n if batchnorm == True:\n conv7 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv7)\n conv7 = Activation('relu')(conv7)\n \n #Upsample Block 3\n up8 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv7) \n if batchnorm == True:\n up8 = BatchNormalization(axis=3, epsilon=1.001e-5)(up8)\n up8 = Activation('relu')(up8)\n \n up8 = concatenate([up8, conv2], axis=3)\n \n conv8 = Conv2D(128, (3, 3), padding='same')(up8)\n if batchnorm == True:\n conv8 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv8)\n conv8 = Activation('relu')(conv8)\n \n conv8 = Conv2D(128, (3, 3), padding='same')(conv8)\n if batchnorm == True:\n conv8 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv8)\n conv8 = Activation('relu')(conv8)\n \n #Upsample Block 4\n up9 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv8) \n if batchnorm == True:\n up9 = BatchNormalization(axis=3, epsilon=1.001e-5)(up9)\n up9 = Activation('relu')(up9)\n \n up9 = concatenate([up9, conv1], axis=3) \n \n conv9 = Conv2D(64, (3, 3), padding='same')(up9)\n if batchnorm == True:\n conv9 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv9)\n conv9 = Activation('relu')(conv9)\n \n conv9 = Conv2D(64, (3, 3), padding='same')(conv9)\n if batchnorm == True:\n conv9 = BatchNormalization(axis=3, epsilon=1.001e-5)(conv9)\n conv9 = Activation('relu')(conv9)\n \n #Output\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n\n model.compile(optimizer = Adam(lr = 1e-4), loss = dice_loss, metrics = ['accuracy', iou])\n \n #model.summary()\n\n if(pretrained_weights):\n model.load_weights(pretrained_weights)\n\n return model\n\n\n","sub_path":"unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
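The unet.py record above builds its loss from the smoothed Dice coefficient defined in dice_coef(). As a quick sanity check of that arithmetic outside the Keras graph, here is a minimal NumPy sketch; the helper name dice_coef_np and the toy masks are illustrative assumptions, not part of the dataset:

import numpy as np

smooth = 1.0  # same smoothing constant the record uses to avoid division by zero

def dice_coef_np(y_true, y_pred):
    # Flatten both masks and compute the smoothed Dice overlap,
    # mirroring dice_coef() from the unet.py record above.
    y_true_f = y_true.ravel()
    y_pred_f = y_pred.ravel()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)

a = np.array([[0.0, 1.0], [1.0, 1.0]])  # toy ground-truth mask
b = np.array([[0.0, 1.0], [1.0, 0.0]])  # toy prediction missing one pixel
print(dice_coef_np(a, a))  # identical masks: (2*3 + 1) / (3 + 3 + 1) = 1.0
print(dice_coef_np(a, b))  # partial overlap: (2*2 + 1) / (3 + 2 + 1) = 5/6 ~ 0.833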
+{"seq_id":"40753028","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPyramid views for IAM Policies (permissions)\n\n\"\"\"\nimport simplejson as json\n\nfrom boto.exception import BotoServerError\nfrom pyramid.httpexceptions import HTTPFound\nfrom pyramid.i18n import TranslationString as _\nfrom pyramid.view import view_config\n\nfrom ..constants import policies, permissions\nfrom ..forms import ChoicesManager\nfrom ..forms.policies import IAMPolicyWizardForm\nfrom ..models import Notification\nfrom ..views import BaseView, JSONResponse, TaggedItemView\n\n\nclass IAMPolicyWizardView(BaseView):\n \"\"\"Create IAM Policy wizard\"\"\"\n TEMPLATE = '../templates/policies/iam_policy_wizard.pt'\n\n def __init__(self, request):\n super(IAMPolicyWizardView, self).__init__(request)\n self.request = request\n self.ec2_conn = self.get_connection()\n self.iam_conn = self.get_connection(conn_type='iam')\n self.policy_json_endpoint = self.request.route_url('iam_policy_json')\n self.create_form = IAMPolicyWizardForm(request=self.request, formdata=self.request.params or None)\n self.target_type = self.request.params.get('type', 'user') # 'user' or 'group'\n self.target_name = self.request.params.get('id', '') # user or group name\n self.choices_manager = ChoicesManager(conn=self.ec2_conn)\n self.render_dict = dict(\n page_title=self.get_page_title(),\n create_form=self.create_form,\n policy_json_endpoint=self.policy_json_endpoint,\n policy_actions=permissions.POLICY_ACTIONS,\n controller_options=json.dumps(self.get_controller_options()),\n resource_choices=dict(\n instances=self.get_instance_choices(),\n images=self.get_image_choices(),\n volumes=self.get_volume_choices(),\n snapshots=self.get_snapshot_choices(),\n security_groups=self.get_security_group_choices(),\n key_pairs=self.get_key_pair_choices(),\n vm_types=self.get_vm_type_choices(),\n availability_zones=self.get_availability_zone_choices(),\n ),\n )\n\n @view_config(route_name='iam_policy_new', renderer=TEMPLATE, request_method='GET')\n def iam_policy_new(self):\n \"\"\"Displays the Create IAM Policy wizard\"\"\"\n return self.render_dict\n\n @view_config(route_name='iam_policy_create', renderer=TEMPLATE, request_method='POST')\n def iam_policy_create(self):\n \"\"\"Handles the POST from the Create IAM Policy wizard\"\"\"\n target_route = '{0}_view'.format(self.target_type) # 'user_view' or 'group_view'\n location = self.request.route_url(target_route, name=self.target_name) # redirect to detail page after submit\n if self.create_form.validate():\n policy_name = self.request.params.get('name')\n policy_json = self.request.params.get('policy', '{}')\n try:\n if self.target_type == 'user':\n caller = self.iam_conn.put_user_policy\n else:\n caller = self.iam_conn.put_group_policy\n caller(self.target_name, policy_name, policy_json)\n prefix = _(u'Successfully created IAM policy')\n msg = '{0} {1}'.format(prefix, policy_name)\n queue = Notification.SUCCESS\n except BotoServerError as err:\n msg = err.message\n queue = Notification.ERROR\n self.request.session.flash(msg, queue=queue)\n return HTTPFound(location=location)\n else:\n self.request.error_messages = self.create_form.get_errors_list()\n return self.render_dict\n\n def get_page_title(self):\n prefix = _(u'Add access policy for')\n return '{0} {1} {2}'.format(prefix, self.target_type.capitalize(), self.target_name)\n\n def get_controller_options(self):\n return {\n 'policyJsonEndpoint': self.policy_json_endpoint,\n 'cloudType': self.cloud_type,\n 'actionsList': self.get_all_actions(),\n }\n\n def 
get_instance_choices(self):\n resource_name = 'instance'\n arn_prefix = self.get_arn_prefix(resource_name)\n choices = [(self.get_all_choice(resource_name), _(u'All instances...'))]\n for instance in self.ec2_conn.get_only_instances():\n value = '{0}{1}'.format(arn_prefix, instance.id)\n label = TaggedItemView.get_display_name(instance)\n choices.append((value, label))\n return choices\n\n def get_vm_type_choices(self):\n resource_name = 'vmtype'\n arn_prefix = self.get_arn_prefix(resource_name)\n choices = [(self.get_all_choice(resource_name), _(u'All instance types...'))]\n vm_type_choices = self.choices_manager.instance_types(\n cloud_type=self.cloud_type, add_blank=False, add_description=False)\n for vm_type_choice in vm_type_choices:\n label = vm_type_choice[1]\n value = '{0}{1}'.format(arn_prefix, vm_type_choice[0])\n choices.append((value, label))\n return choices\n\n def get_image_choices(self):\n resource_name = 'image'\n arn_prefix = self.get_arn_prefix(resource_name)\n choices = [(self.get_all_choice(resource_name), _(u'All images...'))]\n # Set owner alias to 'self' for AWS\n owner_alias = 'self' if self.cloud_type == 'aws' else None\n owners = [owner_alias] if owner_alias else []\n images = self.ec2_conn.get_all_images(owners=owners, filters={'image-type': 'machine'})\n for image in images:\n value = '{0}{1}'.format(arn_prefix, image.id)\n label = TaggedItemView.get_display_name(image)\n choices.append((value, label))\n return choices\n\n def get_volume_choices(self):\n resource_name = 'volume'\n arn_prefix = self.get_arn_prefix(resource_name)\n choices = [(self.get_all_choice(resource_name), _(u'All volumes...'))]\n for volume in self.ec2_conn.get_all_volumes():\n value = '{0}{1}'.format(arn_prefix, volume.id)\n label = TaggedItemView.get_display_name(volume)\n choices.append((value, label))\n return choices\n\n def get_snapshot_choices(self):\n resource_name = 'snapshot'\n arn_prefix = self.get_arn_prefix(resource_name)\n choices = [(self.get_all_choice(resource_name), _(u'All snapshots...'))]\n for snapshot in self.ec2_conn.get_all_snapshots():\n value = '{0}{1}'.format(arn_prefix, snapshot.id)\n label = TaggedItemView.get_display_name(snapshot)\n choices.append((value, label))\n return choices\n\n def get_security_group_choices(self):\n resource_name = 'securitygroup'\n arn_prefix = self.get_arn_prefix(resource_name)\n choices = [(self.get_all_choice(resource_name), _(u'All security groups...'))]\n for security_group in self.ec2_conn.get_all_security_groups():\n value = '{0}{1}'.format(arn_prefix, security_group.name)\n label = '{0} ({1})'.format(security_group.name, security_group.id)\n choices.append((value, label))\n return choices\n\n def get_availability_zone_choices(self):\n resource_name = 'availabilityzone'\n arn_prefix = self.get_arn_prefix(resource_name)\n choices = [(self.get_all_choice(resource_name), _(u'All zones...'))]\n for avail_zone_choice in self.choices_manager.availability_zones(add_blank=False):\n value = '{0}{1}'.format(arn_prefix, avail_zone_choice[0])\n label = avail_zone_choice[0]\n choices.append((value, label))\n return choices\n\n def get_key_pair_choices(self):\n resource_name = 'keypair'\n arn_prefix = self.get_arn_prefix(resource_name)\n choices = [(self.get_all_choice(resource_name), _(u'All key pairs...'))]\n for key_pair in self.ec2_conn.get_all_key_pairs():\n value = '{0}{1}'.format(arn_prefix, key_pair.name)\n label = key_pair.name\n choices.append((value, label))\n return choices\n\n def get_arn_prefix(self, resource, add_all=False):\n 
region = ''\n        if self.cloud_type == 'aws':\n            region = self.region\n        return 'arn:aws:ec2:{region}::{resource}/{all}'.format(\n            region=region, resource=resource, all='*' if add_all else '')\n\n    def get_all_choice(self, resource):\n        return self.get_arn_prefix(resource, add_all=True)\n\n    @staticmethod\n    def get_all_actions():\n        actions = []\n        for namespace in permissions.POLICY_ACTIONS:\n            actions.extend(namespace.get('actions'))\n        return actions\n\n\nclass IAMPolicyWizardJsonView(BaseView):\n    \"\"\"View for returning JSON of canned policies\"\"\"\n\n    @view_config(route_name='iam_policy_json', renderer='json', request_method='GET')\n    def iam_policy_json(self):\n        policy_type = self.request.params.get('type')\n        policy_dict = policies.TYPE_POLICY_MAPPING.get(policy_type)\n        if policy_dict:\n            return dict(policy=policy_dict)\n        return JSONResponse(status=404, message=_(u'Unable to locate policy'))\n\n\n","sub_path":"koala/views/policies.py","file_name":"policies.py","file_ext":"py","file_size_in_byte":9158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170029527","text":"#!/usr/bin/env python3\nimport random\nimport sys\ntry:\n    lines = sys.argv[1]\n    try:\n        number = int(lines)\n    except ValueError as err:\n        print(err)\n        lines = 5\n    else:\n        # the range check runs only when int() succeeded, so 'number' is defined\n        if 1 <= number <= 10:\n            lines = number\n        else:\n            lines = 5\nexcept IndexError:\n    lines = 5\nguanci = ('the', 'a')\nzhuti = ('cat', 'dog', 'man', 'woman')\ndongci = ('sang', 'ran', 'jumped')\nzhuangyu = ('loudly', 'quietly', 'well', 'badly')\ni = 0\nwhile i < lines:\n    rnd = random.randint(10, 19)\n    if 10 <= rnd <= 14:\n        print(random.choice(guanci), random.choice(zhuti), random.choice(dongci), random.choice(zhuangyu))\n    else:\n        print(random.choice(guanci), random.choice(zhuti), random.choice(dongci))\n    i += 1 \n","sub_path":"chaper 1/4/aweful_poetry_2.py","file_name":"aweful_poetry_2.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145201952","text":"# -*- coding: utf-8 -*-\nimport os\nimport multiprocessing\nimport time\nimport pandas as pd\nimport re\nimport php_fn\n\n\n# 特征集\n[text_feature_set, other_feature_set] = [multiprocessing.Manager().list(), multiprocessing.Manager().list()] \n\n\n# 获取php函数名\nfn_files_path = php_fn.project_path + \"/res/php_fn/\" # txt文件所在文件夹的绝对路径\nfn_files = os.listdir(fn_files_path) # txt文件名\nfn = pd.Series([[], [], [], []], index=[fn_file[:-4] for fn_file in fn_files])\nfor fn_file in fn_files:\n    # 打开某个txt文件\n    with open(fn_files_path + fn_file, \"r\", encoding=\"UTF-8\", errors=\"ignore\") as f:\n        source = f.read().split()\n        fn[str(fn_file[:-4])] = source\n\n\ndef text_feature(sample_file):\n    \"\"\"\n    @author: hanchenchen\n    @date: 9/18/2018\n    @fn:\n    @version: 2.1\n    \"\"\"\n    tf = pd.Series([0, 0, 0, 0, 0, 0], index=[\"cmt_chars_num\", \"words_num\", \"diff_words_num\", \"longest_word_len\", \"chars_num\", \"special_chars_num\"])\n    with open(sample_file, \"r\", encoding=\"UTF-8\", errors=\"ignore\") as f:\n        source = f.read()\n    tf[\"cmt_chars_num\"] = len(source) # 注释字符数\n    # 字符串中的注释暂时当作注释,因为正常的代码中字符串极少包含注释\n    source = re.compile(\"\\/\\*[\\s\\S]*\\*\\/\").sub('', source) # 去/*...*/注释\n    source = re.compile(\"\\/\\/.*\").sub('', source) # 去//注释\n    tf[\"cmt_chars_num\"] -= len(source)\n    words = re.findall(\"[a-zA-Z]+\", source)\n    tf[\"words_num\"] = len(words) # 单词数量\n    tf[\"diff_words_num\"] = len(set(words)) # 不同单词数量\n    tf[\"longest_word_len\"] = max([len(word) for word in words]) # 最大单词长度\n    
tf[\"chars_num\"] = len(re.findall(\"\\S\", source)) # 字符数量\n tf[\"special_chars_num\"] = tf[\"chars_num\"] - len(re.findall(\"[a-zA-Z0-9]\", source)) # 特殊字符数量\n return tf\n\n\ndef other_feature(sample_file):\n \"\"\"\n @author: hanchenchen\n @date: 9/24/2018\n @function:\n @version: 2.0\n \"\"\"\n of = pd.Series([0, 0, 0, 0], index=[fn_file[:-4] for fn_file in fn_files])\n with open(sample_file, \"r\", encoding=\"UTF-8\", errors=\"ignore\") as f:\n source = f.read()\n for i in range(0, len(fn)):\n for item in fn[i]:\n of[str(fn.index[i])] += len(re.findall(item, source))\n return of\n\n\ndef analyze_feature(file_name):\n \"\"\"\n @author: hanchenchen\n @date: 9/23/2018\n @function:\n @version: 2.0\n \"\"\"\n text_feature_set.append(text_feature(file_name))\n other_feature_set.append(other_feature(file_name))\n\n\ndef main():\n \"\"\"\n @author: hanchenchen\n @date: 9/26/2018\n @function: 多线程分析样本特征\n @version: 1.0\n \"\"\" \n \n \"\"\"\n # singleprocess\n start_time = time.time() \n sample_files_path = php_fn.project_path + \"/res/samples/\"\n files = os.listdir(sample_files_path)\n for file in files:\n analyze_feature(sample_files_path + file)\n end_time = time.time()\n print(\"singleprocess needs \" + str(end_time - start_time) + \"s\")\n \"\"\"\n \n # multiprocess\n start_time = time.time() \n sample_files_path = php_fn.project_path + \"/res/samples/\"\n files = os.listdir(sample_files_path)\n pool = multiprocessing.Pool()\n for file in files:\n pool.apply_async(analyze_feature, [sample_files_path+file])\n pool.close()\n pool.join()\n end_time = time.time()\n print(\"分析样本特征成功!耗时:%.3f s\" % (end_time - start_time))\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"webshell/src/analyze_feature.py","file_name":"analyze_feature.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"177152055","text":"\"\"\"\nЗадание:\n1) В файле, содержащем фамилии студентов и их оценки, изменить на прописные буквы фамилии тех студентов, которые имеют\nсредний балл за национальной шкалой более «4».\n\"\"\"\npath_1 = \"Task_1/file.txt\"\n\n\ndef average_score(filename):\n students = []\n grades = []\n sum_grades = []\n student = []\n with open(filename, \"r\", encoding=\"utf8\") as open_file:\n for line in open_file:\n for i in line:\n if i.isdigit():\n sum_grades.append(int(i))\n grades.append(i)\n elif i.isalpha():\n student.append(i)\n if (sum(sum_grades)/len(sum_grades)) < 4:\n students.append({\"student\": \"\".join(student), \"grades\": grades})\n else:\n students.append({\"student\": (\"\".join(student)).lower(), \"grades\": grades})\n sum_grades = []\n grades = []\n student = []\n print(students)\n\n with open(filename, \"w\", encoding=\"utf8\") as write_file:\n for i in range(len(students)):\n write_file.write(f\"{students[i]['student']} - {', '.join(students[i]['grades'])}\\n\")\n\n\naverage_score(path_1)\n\n\"\"\"\n2) Из текстового файла удалить все слова, содержащие от трех до пяти символов, но при этом из каждой строки должно быть\nудалено только четное количество таких слов.\n\"\"\"\npath_2 = \"Task_2/file.txt\"\n\n\ndef delete_small_words(filename):\n lines = []\n with open(filename, \"r\", encoding=\"utf8\") as open_file:\n for line in open_file:\n current_line = line.split()\n count = 0\n for word in current_line:\n if 3 <= len(word) <= 5:\n count += 1\n\n if count % 2 == 0:\n for idx, word in enumerate(current_line):\n if 3 <= len(word) <= 5:\n current_line.pop(idx)\n 
lines.append(current_line)\n\n if count % 2 != 0:\n for idx, word in enumerate(current_line):\n if 3 <= len(word) <= 5 and count != 1:\n current_line.pop(idx)\n count -= 1\n lines.append(current_line)\n\n with open(filename, \"w\", encoding=\"utf8\") as write_file:\n for i in range(len(lines)):\n write_file.write(f\"{' '.join(x for x in lines[i])}\\n\")\n\n\ndelete_small_words(path_2)\n\n\"\"\"\n3) Из текста программы выбрать все числа (целые и вещественные) и записать их в файл g в виде: число 1 – номер строки,\nчисло 2 – номер строки и так далее.\nВ качестве выполненного ДЗ отправить ссылку на проект GitHub в котором будет находится код.\n\"\"\"\npath_3 = \"Task_3/file.txt\"\n\n\ndef numbers(filename):\n digits = []\n current_line = 0\n with open(f\"{filename}\", \"r\", encoding=\"utf-8\") as open_file:\n for line in open_file:\n numbers_list = line.split()\n current_line += 1\n\n for i in numbers_list:\n digit = ''.join([char for char in i if char.isdigit() or char == '.'])\n if digit:\n digits.append({\"number\": digit, \"line\": current_line})\n\n open_file.close()\n\n with open(\"Task_3/g.txt\", \"w\", encoding=\"utf8\") as write_file:\n for i in range(len(digits)):\n write_file.write(f\"Число {digits[i]['number']} - строка {digits[i]['line']}\\n\")\n\n\nnumbers(path_3)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"545142506","text":"import sys\nimport copy\nimport itertools\nsys.path.append('../intcode')\n\nimport intcode\n\nwith open(\"day17.input\") as file:\n program = [int(val) for val in file.read().split(',')]\n\nsystem = intcode.IntCode()\nsystem.load_program(program)\n\nin_queue = system.get_input_queue()\nout_queue = system.get_output_queue()\n\nship_map = {}\n\nsystem.run_program()\n\ndef read_map(out_queue, ship_map, do_print):\n pos = (0,0)\n robot_pos = None\n robot_dir = None\n was_newline = False\n map_done = False\n while not out_queue.empty():\n obj = chr(out_queue.get())\n if do_print:\n print(obj, end='')\n if obj == '\\n':\n if was_newline:\n map_done = True\n else:\n was_newline = True\n pos = (0, pos[1]+1)\n elif not map_done:\n was_newline = False\n if obj == '#':\n ship_map[pos] = {'visits': 0, 'allowed_visits': 1}\n elif obj == '^':\n robot_pos = pos\n robot_dir = (0,-1)\n ship_map[pos] = {'visits': 0, 'allowed_visits': 1}\n pos = (pos[0]+1, pos[1])\n return robot_pos, robot_dir\n\ndef read_map2(ship_map):\n with open(\"day17.part2.input.example\") as file:\n robot_pos = None\n robot_dir = None\n pos = (0,0)\n for line in file:\n for obj in line.rstrip():\n if obj == '#':\n ship_map[pos] = {'visits': 0, 'allowed_visits': 1}\n elif obj == '^':\n robot_pos = pos\n robot_dir = (0,-1)\n ship_map[pos] = {'visits': 0, 'allowed_visits': 1}\n pos = (pos[0]+1, pos[1])\n pos = (0, pos[1]+1)\n return robot_pos, robot_dir\n\ndef is_corner(ship_map, pos):\n total_num_paths = 0\n for step in [(1,0),(0,1)]:\n next_pos1 = (pos[0] + step[0], pos[1] + step[1])\n next_pos2 = (pos[0] - step[0], pos[1] - step[1])\n num_paths = 0\n if ((next_pos1 in ship_map and next_pos2 in ship_map) or\n (next_pos1 not in ship_map and next_pos2 not in ship_map)):\n return False\n return True\n\ndef is_end(ship_map, pos):\n num_paths = 0\n for step in [(1,0),(-1,0),(0,1),(0,-1)]:\n num_paths += 1 if (pos[0] + step[0], pos[1] + step[1]) in ship_map else 0\n return num_paths == 1\n\ndef get_nodes(ship_map, robot_pos):\n checksum = 0\n nodes = 
{robot_pos:{}}\n for pos in ship_map:\n if (((pos[0] + 1, pos[1]) in ship_map) and\n ((pos[0] - 1, pos[1]) in ship_map) and\n ((pos[0], pos[1] + 1) in ship_map) and\n ((pos[0], pos[1] - 1) in ship_map)):\n checksum += pos[0]*pos[1]\n elif is_end(ship_map, pos) or is_corner(ship_map, pos):\n nodes[pos] = None\n return nodes,checksum\n\ndef find_next_node(ship_map, nodes, node_pos, prev = None):\n search_dirs = [(1,0),(-1,0),(0,1),(0,-1)]\n for direction in search_dirs:\n for length in itertools.count(1):\n next_pos = (node_pos[0] + direction[0] * length, node_pos[1] + direction[1] * length)\n if not next_pos in ship_map:\n if length > 1:\n next_pos = (node_pos[0] + direction[0] * (length - 1), node_pos[1] + direction[1] * (length - 1))\n if next_pos != prev:\n return next_pos,direction,(length-1)\n break\n return None,None,None\n\ndef get_turn(current_direction, next_direction):\n directions = [(0,-1), (1,0), (0,1), (-1,0)]\n if ((directions.index(current_direction) + 1) % len(directions)) == directions.index(next_direction):\n return \"R\"\n else:\n return \"L\"\n\ndef get_path(ship_map, nodes, robot_pos, robot_dir):\n prev_node = None\n cur_node = robot_pos\n cur_dir = robot_dir\n path = []\n while True:\n next_node,next_dir,length = find_next_node(ship_map, nodes, cur_node, prev_node)\n if not next_node:\n return path\n\n path.append(get_turn(cur_dir, next_dir))\n path.append(str(length))\n\n nodes[cur_node] = next_node\n prev_node = cur_node\n cur_node = next_node\n cur_dir = next_dir\n\ndef input_string(string, in_queue):\n for ch in string:\n in_queue.put(ord(ch))\n in_queue.put(ord('\\n'))\n\nprogram[0] = 2\nsystem.load_program(program)\nsystem.run_program()\n\nrobot_pos, robot_dir = read_map(out_queue, ship_map, True)\n#robot_pos, robot_dir = read_map2(ship_map)\nnodes, checksum = get_nodes(ship_map, robot_pos)\nprint(\"Alignemnt calibration=%u\" % (checksum))\npath = get_path(ship_map, nodes, robot_pos, robot_dir)\nprint(''.join(path))\n\nA=\"R,8,L,4,R,4,R,10,R,8\"\nB=\"L,12,L,12,R,8,R,8\"\nC=\"R,10,R,4,R,4\"\nseq=\"A,A,B,C,B,C,B,C,C,A\"\n\ninput_string(seq, in_queue)\ninput_string(A, in_queue)\ninput_string(B, in_queue)\ninput_string(C, in_queue)\ninput_string(\"n\", in_queue)\nsystem.run_program()\nwhile not out_queue.empty():\n data = out_queue.get()\n if data > 400:\n print(data)\n else:\n print(chr(data), end='')\n","sub_path":"17/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"464506944","text":"from PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtCore import Qt, QPoint, QThread, QMutex\nfrom PyQt5.QtGui import QPixmap, QPainter, QPen, QColor\nimport cv2\nfrom Board.ImgProcess import ImgProcess\nimport threading\n\n'''暂未完成的功能(1)\nclass ColorizeThread(QThread):\n def __init__(self, colSignal, showSignal, img_sket, img_style):\n super().__init__()\n self.mutex = QMutex()\n self.colSignal = colSignal\n self.showSignal = showSignal\n self.img_sket = img_sket\n self.img_style = img_style\n\n def run(self):\n self.mutex.lock()\n self.colSignal.emit(self.img_sket, self.img_style)\n self.showSignal.emit()\n self.mutex.unlock()\n'''\n\nclass DrawingBoard(QLabel, ImgProcess):\n pen = 0\n eraser = 1\n def __init__(self, parent=None):\n super().__init__(parent)\n\n # 鼠标移动事件的起点与终点\n self.startPos = QPoint(0, 0)\n self.endPos = QPoint(0, 0)\n self.leftMousePress = False\n\n self.imgLayer = None # 线稿图层,用于显示原线稿(QPixmap对象)\n self.paintLayer = QPixmap(200, 200) # 
画板图层,用于交互涂色\n self.paintLayer.fill(Qt.transparent)\n\n self.imgLoc = (0, 0) # 图层的左上角坐标\n\n # 画笔参数\n self.penCol = \"#87CEFA\"\n self.penDiameter = 15\n\n self.using = self.pen\n\n # 信号传递器\n self.paintComplete = None\n\n def mousePressEvent(self, QMouseEvent):\n if QMouseEvent.button() == Qt.LeftButton:\n self.leftMousePress = True\n self.startPos = QMouseEvent.pos()\n self.endPos = self.startPos\n\n def mouseReleaseEvent(self, QMouseEvent):\n if QMouseEvent.button() == Qt.LeftButton:\n self.leftMousePress = False\n # 获取涂色后的线稿\n if self.imgLayer != None:\n self.downloadImg()\n\n def mouseMoveEvent(self, QMouseEvent):\n if self.leftMousePress:\n self.endPos = QMouseEvent.pos()\n self.update()\n\n def downloadImg(self):\n \"\"\"获取用户涂色后的图片并传递给上色AI\"\"\"\n\n ''' 暂未完成的功能(2)\n def colorizeThread(img_bottom, img_style):\n self.paintComplete.colorizeSignal.emit(img_bottom, img_style)\n self.paintComplete.showSignal.emit()\n '''\n\n img_bottom = self.Qimg2opencv(self.imgLayer) # 将QPixmap对象转化为opencv对象\n img_top = self.Qimg2opencv(self.paintLayer)\n img_style = self.coverImg(img_bottom.copy(), img_top.copy()) # 画板覆盖在原线稿上\n\n ''' 暂未完成的功能(1)——AI上色时左侧画板可继续涂写(QThread)\n self.colThread = ColorizeThread(\n self.paintComplete.colorizeSignal,\n self.paintComplete.showSignal,\n img_bottom, img_style)\n self.colThread.start()\n '''\n\n ''' 暂未完成的功能(2)——AI上色时左侧画板可继续涂写(threading)\n threading.Thread(target=colorizeThread, args=(img_bottom, img_style), daemon=True).start()\n '''\n\n self.paintComplete.waitSignal.emit()\n self.paintComplete.colorizeSignal.emit(img_bottom, img_style)\n self.paintComplete.showSignal.emit()\n\n def revealImg(self):\n def checkPos(pos):\n '''检查画笔坐标是否越界'''\n if pos.x() < self.imgLoc[0]:\n return False\n if pos.x() > self.imgLoc[0] + self.paintLayer.width():\n return False\n if pos.y() < self.imgLoc[1]:\n return False\n if pos.y() > self.imgLoc[1] + self.paintLayer.height():\n return False\n return True\n\n # 图片适应画板大小\n scale_x = (self.width() - 80) / self.imgLayer.width()\n scale_y = (self.height() - 80) / self.imgLayer.height()\n scale = min(scale_x, scale_y)\n\n size = self.imgLayer.size()\n self.imgLayer = self.imgLayer.scaled(scale * size)\n self.paintLayer = self.paintLayer.scaled(scale * size)\n\n # 图片居中,记录左上角坐标\n x = int((self.width() - self.imgLayer.width()) / 2)\n y = int((self.height() - self.imgLayer.height()) / 2)\n self.imgLoc = (x, y)\n\n '''在画板图层涂色'''\n qp = QPainter(self.paintLayer)\n qp.begin(self.paintLayer)\n\n # 设置画笔\n if self.using == self.pen:\n col = QColor(self.penCol)\n elif self.using == self.eraser:\n col = QColor(Qt.white)\n pen = QPen(col, self.penDiameter, Qt.SolidLine)\n qp.setPen(pen)\n\n # 沿轨迹涂色\n if self.startPos != self.endPos and checkPos(self.startPos) and checkPos(self.endPos):\n diff = QPoint(self.imgLoc[0], self.imgLoc[1])\n qp.drawLine(self.startPos - diff, self.endPos - diff)\n\n qp.end()\n\n self.startPos = self.endPos\n\n '''重新显示线稿图层与画板图层'''\n painter = QPainter(self)\n painter.begin(self)\n\n x, y = self.imgLoc[:]\n painter.drawPixmap(x, y, self.imgLayer)\n painter.drawPixmap(x, y, self.paintLayer)\n\n painter.end()\n\n def paintEvent(self, QPaintEvent):\n super().paintEvent(QPaintEvent)\n\n if self.imgLayer != None:\n self.revealImg()\n\n def loadImg(self, fpath):\n img = cv2.imread(fpath, 1)\n pixmap = self.opencv2Qimg(img)\n\n self.imgLayer = pixmap\n\n self.paintLayer.fill(Qt.transparent)\n self.paintLayer = self.paintLayer.scaled(\n self.imgLayer.width(), self.imgLayer.height())\n\n 
self.update()","sub_path":"Board/DrawingBoard.py","file_name":"DrawingBoard.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"56010744","text":"# -*- coding: utf-8 -*-\n\"\"\"Application configuration.\n\nMost configuration is set via environment variables.\n\nFor local development, use a .env file to set\nenvironment variables.\n\"\"\"\nimport os\n\nfrom environs import Env\n\nENV = Env()\nENV.read_env()\n\nPROJECT_ROOT: str = os.path.join(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir), os.pardir\n)\nTEST_PATH: str = os.path.join(PROJECT_ROOT, \"tests\")\nFLASK_ENV: str = ENV.str(\"FLASK_ENV\", default=\"production\")\nDEBUG: bool = FLASK_ENV == \"development\" # if flask environment is development set debug to True\nSQLALCHEMY_DATABASE_URI = ENV.str(\"DATABASE_URL\")\nSECRET_KEY = ENV.str(\"SECRET_KEY\")\nBCRYPT_LOG_ROUNDS = ENV.int(\"BCRYPT_LOG_ROUNDS\", default=13)\nDEBUG_TB_ENABLED = DEBUG\nDEBUG_TB_INTERCEPT_REDIRECTS: bool = False\nCACHE_TYPE: str = \"simple\" # Can be \"memcached\", \"redis\", etc.\nSQLALCHEMY_TRACK_MODIFICATIONS: bool = False\nWEBPACK_MANIFEST_PATH: str = \"webpack/manifest.json\"\nJWT_SECRET_KEY: str = ENV.str(\n \"JWT_SECRET_KEY\", default=SECRET_KEY\n) # If this is not set, we use the flask SECRET_KEY value instead.\nREFRESH_EXP_LENGTH: int = ENV.int(\"REFRESH_EXP_LENGTH\", default=30)\nACCESS_EXP_LENGTH: int = ENV.int(\"ACCESS_EXP_LENGTH\", default=10)\n","sub_path":"src/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"480352052","text":"import sys\n\nfrom typing import Optional, List, Generator\n\nif sys.version_info >= (3, 8):\n from typing import TypedDict # pylint: disable=no-name-in-module\nelse:\n from typing_extensions import TypedDict\n\nfrom .base import Detector\nfrom ..filth.known import KnownFilth\n\nKnownFilthItem = TypedDict(\n 'KnownFilthItem',\n {'match': str, 'match_end': Optional[str], 'limit': Optional[int], 'filth_type': Optional[str]},\n total=False,\n)\n\n\nclass KnownFilthDetector(Detector):\n \"\"\"Use some predefined phrases to label the text.\n\n This is useful if you have found that some particular\n type of PII occurs regularly or you want to compare\n scrubadub with already selected PII.\n \"\"\"\n\n filth_cls = KnownFilth\n name = 'known'\n\n def __init__(self, known_filth_items: Optional[List[KnownFilthItem]] = None, **kwargs):\n super().__init__(**kwargs)\n if known_filth_items is None:\n known_filth_items = []\n self._known_filth_items = known_filth_items\n\n def _find_all(\n self,\n text: str,\n substr: str,\n comparison_type: Optional[str] = None,\n document_name: Optional[str] = None\n ) -> Generator[KnownFilth, None, None]:\n \"\"\"Yield filth for each match to substr in text.\"\"\"\n substr_len = len(substr)\n start_location = text.find(substr)\n\n while start_location >= 0:\n yield KnownFilth(\n start_location,\n start_location + substr_len,\n text[start_location:start_location + substr_len],\n comparison_type=comparison_type,\n detector_name=self.name,\n document_name=document_name,\n )\n start_location = text.find(\n substr,\n start_location + substr_len\n )\n\n def _find_all_between(\n self,\n text: str,\n substr_start: str,\n substr_end: str,\n limit: int = 150,\n comparison_type: Optional[str] = None,\n document_name: Optional[str] = None\n ) -> 
Generator[KnownFilth, None, None]:\n \"\"\"Yield filth for text between (and including)\n substr_start and substr_end, but only if the text\n between the two is less than limit characters.\n \"\"\"\n substr_start_len = len(substr_start)\n substr_end_len = len(substr_end)\n start_location = text.find(substr_start)\n\n while start_location >= 0:\n end_location = text.find(\n substr_end,\n start_location + substr_start_len,\n start_location + substr_start_len + limit + substr_end_len\n )\n if end_location >= 0:\n yield KnownFilth(\n start_location,\n end_location + substr_end_len,\n text[start_location:end_location + substr_end_len],\n comparison_type=comparison_type,\n detector_name=self.name,\n document_name=document_name,\n )\n next_search_start = end_location + substr_end_len\n else:\n next_search_start = start_location + substr_start_len\n\n start_location = text.find(substr_start, next_search_start)\n\n def iter_filth(\n self,\n text: str,\n document_name: Optional[str] = None\n ) -> Generator[KnownFilth, None, None]:\n \"\"\"Iterate over the predefined PII list and yield\n filth instances.\"\"\"\n for pii_item in self._known_filth_items:\n # could also implement other types in here too\n if 'match' in pii_item and 'match_end' in pii_item and pii_item['match_end'] is not None:\n for found_item in self._find_all_between(\n text,\n pii_item['match'],\n pii_item['match_end'],\n limit=int(pii_item.get('limit', 150) or 150),\n comparison_type=pii_item.get('filth_type', None),\n document_name=document_name,\n ):\n yield found_item\n elif 'match' in pii_item:\n for found_item in self._find_all(\n text,\n pii_item['match'],\n comparison_type=pii_item.get('filth_type', None),\n document_name=document_name,\n ):\n yield found_item\n else:\n raise ValueError(\n \"Unknown keys in predefined PII item: \"\n \"{}\".format(pii_item.keys())\n )\n","sub_path":"scrubadub/detectors/known.py","file_name":"known.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444569582","text":"from qlib.web import CMESP500Scraper\nfrom datetime import datetime\n\n\nif __name__ == '__main__':\n run_date = datetime.today().strftime('%Y%m%d')\n\n scraper = CMESP500Scraper()\n scraper.load_all()\n\n data_dir = r'/home/yue/study/OptionStrategies/data/CMESP500'\n\n print(\"Loaded {} futures contracts\".format(scraper.futures_data.shape[0]))\n scraper.futures_data.to_csv('{}/CMESP500Futures_{}.csv'.format(data_dir, run_date), index=False)\n\n print(\"Loaded {} options contracts\".format(scraper.options_data.shape[0]))\n scraper.options_data.to_csv('{}/CMESP500Options_{}.csv'.format(data_dir, run_date), index=False)\n","sub_path":"python-package/scripts/download_cme_sp500.py","file_name":"download_cme_sp500.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351422048","text":"'''Codificação run-lenght. Escreva uma função recursiva que implemente a técnica de\ncompressão run-lenght descrita no exercício anterior. Sua função deve receber uma lista ou\numa string como seu único parâmetro. Ela deve retornar a lista compactada em run-lenght\ncomo seu único resultado. 
Inclua um programa principal que leia uma string do usuário, a\ncompacte e exiba o resultado codificado em run-lenght.'''\n\ndef mapear(x):\n if x.isdigit() == False:\n return x \n\ndef is_null(str):\n if str.isdigit() == False:\n return True\n else:\n return False\n\ndef cod_run_lenght(string):\n if type(string) == str:\n resultado = map(mapear,filter(is_null,string))\n resultado = list(resultado)\n return cod_run_lenght([resultado,[]])\n\n elif len(string[0]) != 0:\n x = string[0]\n index = string[1]\n letra = x[0]\n if index == []:\n index.append(letra)\n index.append(0)\n if index[-2] == letra:\n index[-1] = index[-1]+1\n if index[-2] != letra:\n index.append(letra)\n index.append(1)\n x.pop(0)\n return cod_run_lenght([x,index])\n else:\n return string[1]\n\nprint(f'Decodificada = {cod_run_lenght(\"AAAAAABBAAACCAABVBB\")}')","sub_path":"Lista 8/EBSS-AER-Alg-08-Ex-11.py","file_name":"EBSS-AER-Alg-08-Ex-11.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165797670","text":"from pandas2pygal import pandas_to_pygal_Bar, colour_dict\nimport pandas2pygal\nimport pandas\nfrom pygal.style import Style\n\ndata_path = \"./data/Sample - Superstore Sales (Excel).xlsx\"\ndata = pandas.read_excel(data_path, sheet_name = \"Orders\")\nprint(data.columns)\n\npyg = pandas_to_pygal_Bar(\n data = data,\n groupby1 = 'Region',\n aggregate = 'Sales',\n colourstyle= colour_dict[\"RedBlueStyle\"],\n decimal_places=0,\n print_values = False,\n rounded_bars = 0,\n title = \"Test Bar Chart\",\n value_suffix = \"\",\n x_label_rotation = 0,\n legend_at_bottom=True,\n legend_at_bottom_columns = 3,\n horizontal = False,\n agg_type = \"sum\")\n\npyg.render_in_browser()","sub_path":"test_pandas2pygal.py","file_name":"test_pandas2pygal.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"242448332","text":"\"\"\"\nThis class models the form on contact page\nThe form consists of some input fields.\n\"\"\"\n\nfrom .Base_Page import Base_Page\nimport conf.locators_conf as locators\nfrom utils.Wrapit import Wrapit\n\nclass Contact_Form_Object:\n \"Page object for the contact Form\"\n\n #locators\n #contact_name_field = locators.contact_name_field\n FORM_EMAIL_ID = locators.FORM_EMAIL_ID\n FORM_ACCOUNT_NUMBER = locators.FORM_ACCOUNT_NUMBER\n FORM_EXPIRY_DATE = locators.FORM_EXPIRY_DATE\n FORM_CVV = locators.FORM_CVV\n FORM_ZIP_CODE = locators.FORM_ZIP_CODE\n FORM_REMEMBER_ME = locators.FORM_REMEMBER_ME\n FORM_MOBILE = locators.FORM_MOBILE\n FORM_SUBMIT = locators.FORM_SUBMIT\n\n @Wrapit._exceptionHandler\n def set_name(self,name):\n \"Set the name on the Kick start form\"\n result_flag = self.set_text(self.contact_name_field,name)\n self.conditional_write(result_flag,\n positive='Set the name to: %s'%name,\n negative='Failed to set the name in the form',\n level='debug')\n\n return result_flag\n\n @Wrapit._exceptionHandler\n def set_email(self,email):\n \"Set the email on the form\"\n result_flag = self.set_text(self.FORM_EMAIL_ID,email)\n self.conditional_write(result_flag,\n positive='Set the email to: %s'%email,\n negative='Failed to set the email in the form',\n level='debug')\n\n return result_flag\n\n @Wrapit._exceptionHandler\n def set_account_number(self,account_number):\n \"Set the account number on the form\"\n result_flag = self.set_text(self.FORM_ACCOUNT_NUMBER,account_number)\n self.conditional_write(result_flag,\n 
positive='Set the account number to: %s'%account_number,\n negative='Failed to set the account number in the form',\n level='debug')\n\n return result_flag\n\n @Wrapit._exceptionHandler\n def set_expiry_date(self,expiry_date):\n \"Set the expiry date on the form\"\n result_flag = self.set_text(self.FORM_EXPIRY_DATE,expiry_date)\n self.conditional_write(result_flag,\n positive='Set the expiry date to: %s'%expiry_date,\n negative='Failed to set the expiry date in the form',\n level='debug')\n\n return result_flag\n\n @Wrapit._exceptionHandler\n def set_cvv(self,cvv):\n \"Set the cvv on the form\"\n result_flag = self.set_text(self.FORM_CVV,cvv)\n self.conditional_write(result_flag,\n positive='Set the cvv to: %s'%cvv,\n negative='Failed to set the cvv in the form',\n level='debug')\n\n return result_flag\n\n @Wrapit._exceptionHandler\n def set_zip_code(self,zip_code):\n \"Set the zip code on the form\"\n result_flag = self.set_text(self.FORM_ZIP_CODE,zip_code)\n self.conditional_write(result_flag,\n positive='Set the zip code to: %s'%zip_code,\n negative='Failed to set the zip code in the form',\n level='debug')\n\n return result_flag\n\n @Wrapit._exceptionHandler\n def set_mobile(self,mobile):\n \"Set the mobile number on the form\"\n result_flag = self.set_text(self.FORM_MOBILE,mobile)\n self.conditional_write(result_flag,\n positive='Set the mobile number to: %s'%mobile,\n negative='Failed to set the mobile number in the form',\n level='debug')\n\n return result_flag\n\n @Wrapit._screenshot\n def click_pay_button(self):\n \"Click the pay button\"\n result_flag = self.click_element(self.FORM_SUBMIT)\n self.conditional_write(result_flag,\n positive=\"Clicked on the pay button\",\n negative=\"Could not click on the pay button\")\n\n return result_flag","sub_path":"page_objects/contact_form_object.py","file_name":"contact_form_object.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"291895124","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import xrange\n\nfrom util import log\nfrom pprint import pprint\n\nfrom model import Model\nfrom input_ops import create_input_ops\n\nimport os\nimport time\nimport h5py\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\n\n\nclass Trainer(object):\n\n def __init__(self,\n config,\n dataset_train,\n dataset_test):\n self.config = config\n hyper_parameter_str = config.dataset+'_lr_'+str(config.learning_rate)\n self.train_dir = './train_dir/%s-%s-%s' % (\n config.prefix,\n hyper_parameter_str,\n time.strftime(\"%Y%m%d-%H%M%S\")\n )\n\n if not os.path.exists(self.train_dir):\n os.makedirs(self.train_dir)\n log.infov(\"Train Dir: %s\", self.train_dir)\n\n # --- input ops ---\n self.batch_size = config.batch_size\n\n _, self.batch_train = create_input_ops(dataset_train, self.batch_size,\n is_training=True)\n _, self.batch_test = create_input_ops(dataset_test, self.batch_size,\n is_training=False)\n\n # --- create model ---\n self.model = Model(config)\n\n # --- optimizer ---\n self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)\n self.learning_rate = config.learning_rate\n if config.lr_weight_decay:\n self.learning_rate = tf.train.exponential_decay(\n config.learning_rate,\n global_step=self.global_step,\n decay_steps=10000,\n decay_rate=0.5,\n staircase=True,\n name='decaying_learning_rate'\n )\n\n self.check_op = 
tf.no_op()\n\n # --- checkpoint and monitoring ---\n log.warn(\"********* var ********** \")\n slim.model_analyzer.analyze_vars(tf.trainable_variables(), print_info=True)\n\n self.g_optimizer = tf.contrib.layers.optimize_loss(\n loss=self.model.loss,\n global_step=self.global_step,\n learning_rate=self.learning_rate,\n optimizer=tf.train.AdamOptimizer,\n clip_gradients=20.0,\n name='g_optimizer_loss',\n )\n\n self.summary_op = tf.summary.merge_all()\n\n self.saver = tf.train.Saver(max_to_keep=1000)\n self.summary_writer = tf.summary.FileWriter(self.train_dir)\n\n self.checkpoint_secs = 600 # 10 min\n\n self.supervisor = tf.train.Supervisor(\n logdir=self.train_dir,\n is_chief=True,\n saver=None,\n summary_op=None,\n summary_writer=self.summary_writer,\n save_summaries_secs=300,\n save_model_secs=self.checkpoint_secs,\n global_step=self.global_step,\n )\n\n session_config = tf.ConfigProto(\n allow_soft_placement=True,\n gpu_options=tf.GPUOptions(allow_growth=True),\n device_count={'GPU': 1},\n )\n self.session = self.supervisor.prepare_or_wait_for_session(config=session_config)\n\n self.ckpt_path = config.checkpoint\n if self.ckpt_path is not None:\n log.info(\"Checkpoint path: %s\", self.ckpt_path)\n self.saver.restore(self.session, self.ckpt_path)\n log.info(\"Loaded the pretrain parameters from the provided checkpoint path\")\n\n def train(self, dataset):\n log.infov(\"Training Starts!\")\n pprint(self.batch_train)\n\n max_steps = 100000\n\n output_save_step = 1000\n\n for s in xrange(max_steps):\n step, summary, x, loss, loss_g_update, loss_z_update, step_time = \\\n self.run_single_step(self.batch_train, dataset, step=s, is_train=True)\n\n if s % 10 == 0:\n self.log_step_message(step, loss, loss_g_update, loss_z_update, step_time)\n\n self.summary_writer.add_summary(summary, global_step=step)\n\n if s % output_save_step == 0:\n log.infov(\"Saved checkpoint at %d\", s)\n save_path = self.saver.save(self.session,\n os.path.join(self.train_dir, 'model'),\n global_step=step)\n if self.config.dump_result:\n f = h5py.File(os.path.join(self.train_dir, 'dump_result_'+str(s)+'.hdf5'), 'w')\n f['image'] = x\n f.close()\n\n def run_single_step(self, batch, dataset, step=None, is_train=True):\n _start_time = time.time()\n\n batch_chunk = self.session.run(batch)\n\n # Optmize the generator {{{\n # ========\n fetch = [self.global_step, self.summary_op, self.model.loss,\n self.model.x_recon, self.check_op, self.g_optimizer]\n\n fetch_values = self.session.run(\n fetch, feed_dict=self.model.get_feed_dict(batch_chunk, step=step)\n )\n [step, summary, loss, x] = fetch_values[:4]\n # }}}\n\n # Optimize the latent vectors {{{\n fetch = [self.model.z, self.model.z_grad, self.model.loss]\n\n fetch_values = self.session.run(\n fetch, feed_dict=self.model.get_feed_dict(batch_chunk, step=step)\n )\n\n [z, z_grad, loss_g_update] = fetch_values\n\n z_update = z - self.config.alpha * z_grad[0]\n norm = np.sqrt(np.sum(z_update ** 2, axis=1))\n z_update_norm = z_update / norm[:, np.newaxis]\n\n loss_z_update = self.session.run(\n self.model.loss, feed_dict={self.model.x: batch_chunk['image'], self.model.z: z_update_norm}\n )\n for i in range(len(batch_chunk['id'])):\n dataset.set_data(batch_chunk['id'][i], z_update_norm[i, :])\n # }}}\n\n _end_time = time.time()\n\n return step, summary, x, loss, loss_g_update, loss_z_update, (_end_time - _start_time)\n\n def run_test(self, batch, is_train=False, repeat_times=8):\n\n batch_chunk = self.session.run(batch)\n\n loss = self.session.run(\n self.model.loss, 
feed_dict=self.model.get_feed_dict(batch_chunk, is_training=False)\n        )\n\n        return loss\n\n    def log_step_message(self, step, loss, loss_g_update,\n                         loss_z_update, step_time, is_train=True):\n        if step_time == 0:\n            step_time = 0.001\n        log_fn = (is_train and log.info or log.infov)\n        log_fn((\" [{split_mode:5s} step {step:4d}] \" +\n                \"Loss: {loss:.5f} \" +\n                \"G update: {loss_g_update:.5f} \" +\n                \"Z update: {loss_z_update:.5f} \" +\n                \"({sec_per_batch:.3f} sec/batch, {instance_per_sec:.3f} instances/sec) \"\n                ).format(split_mode=(is_train and 'train' or 'val'),\n                         step=step,\n                         loss=loss,\n                         loss_z_update=loss_z_update,\n                         loss_g_update=loss_g_update,\n                         sec_per_batch=step_time,\n                         instance_per_sec=self.batch_size / step_time\n                         )\n               )\n\n\ndef check_data_path(path):\n    if os.path.isfile(os.path.join(path, 'data.hy')) \\\n           and os.path.isfile(os.path.join(path, 'id.txt')):\n        return True\n    else:\n        return False\n\n\ndef main():\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--batch_size', type=int, default=16)\n    parser.add_argument('--prefix', type=str, default='default')\n    parser.add_argument('--checkpoint', type=str, default=None)\n    parser.add_argument('--dataset', type=str, default='MNIST', choices=['MNIST', 'SVHN', 'CIFAR10'])\n    parser.add_argument('--learning_rate', type=float, default=1e-4)\n    parser.add_argument('--alpha', type=float, default=1.0)\n    parser.add_argument('--lr_weight_decay', action='store_true', default=False)\n    parser.add_argument('--dump_result', action='store_true', default=False)\n    config = parser.parse_args()\n\n    if config.dataset == 'MNIST':\n        import datasets.mnist as dataset\n    elif config.dataset == 'SVHN':\n        import datasets.svhn as dataset\n    elif config.dataset == 'CIFAR10':\n        import datasets.cifar10 as dataset\n    else:\n        raise ValueError(config.dataset)\n\n    config.conv_info = dataset.get_conv_info()\n    config.deconv_info = dataset.get_deconv_info()\n    dataset_train, dataset_test = dataset.create_default_splits()\n\n    m, l = dataset_train.get_data(dataset_train.ids[0])\n    config.data_info = np.concatenate([np.asarray(m.shape), np.asarray(l.shape)])\n\n    trainer = Trainer(config,\n                      dataset_train, dataset_test)\n\n    log.warning(\"dataset: %s, learning_rate: %f\",\n                config.dataset, config.learning_rate)\n    trainer.train(dataset_train)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Generative-Latent-Optimization-Tensorflow-master/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":8930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438414206","text":"import json\nfrom unittest import TestCase, main\nfrom unittest.mock import patch, MagicMock\n\nfrom opengraph.opengraph import OpenGraph\n\nHTML = \"\"\"\n<html>\n    <head>\n        <title>The Rock (1996)</title>\n        <meta property=\"og:title\" content=\"The Rock\" />\n        <meta property=\"og:type\" content=\"movie\" />\n        <meta property=\"og:url\" content=\"http://www.imdb.com/title/tt0117500/\" />\n        <meta property=\"og:image\" content=\"http://ia.media-imdb.com/images/rock.jpg\" />\n        <meta property=\"og:description\" content=\"movie description\" />\n    </head>\n    <body>\n        hello world\n    </body>\n</html>\n\"\"\"\n\n\nHTML_WITH_MISSING_REQUIRED_ATTRS = \"\"\"\n<html>\n    <head>\n        <title>The Rock (1996)</title>\n        <meta property=\"og:title\" content=\"The Rock\" />\n        <meta property=\"og:type\" content=\"movie\" />\n        <meta property=\"og:url\" content=\"http://www.imdb.com/title/tt0117500/\" />\n    </head>\n    <body>\n        hello world\n    </body>\n</html>
\n \n\n\"\"\"\n\n\nclass OpenGraphTests(TestCase):\n def test_parser(self):\n og = OpenGraph()\n og.parser(HTML)\n\n self.assertTrue(og.is_valid())\n self.assertDictEqual(\n og,\n {\n \"_url\": None,\n \"description\": \"movie description\",\n \"image\": \"http://ia.media-imdb.com/images/rock.jpg\",\n \"scrape\": False,\n \"title\": \"The Rock\",\n \"type\": \"movie\",\n \"url\": \"http://www.imdb.com/title/tt0117500/\"\n }\n )\n\n def test_parser_with_missing_required_attrs(self):\n og = OpenGraph()\n og.parser(HTML_WITH_MISSING_REQUIRED_ATTRS)\n\n self.assertFalse(og.is_valid())\n self.assertDictEqual(\n og,\n {\n \"_url\": None,\n \"scrape\": False,\n \"title\": \"The Rock\",\n \"type\": \"movie\",\n \"url\": \"http://www.imdb.com/title/tt0117500/\"\n }\n )\n\n def test_convert_to_json(self):\n og = OpenGraph()\n og.parser(HTML)\n\n json_encoded = og.to_json()\n\n self.assertIsInstance(json_encoded, str)\n\n decoded = json.loads(json_encoded)\n\n self.assertDictEqual(\n decoded,\n {\n \"_url\": None,\n \"description\": \"movie description\",\n \"image\": \"http://ia.media-imdb.com/images/rock.jpg\",\n \"scrape\": False,\n \"title\": \"The Rock\",\n \"type\": \"movie\",\n \"url\": \"http://www.imdb.com/title/tt0117500/\"\n }\n )\n\n def test_is_valid(self):\n og = OpenGraph()\n og.parser(HTML)\n\n self.assertTrue(og.is_valid())\n\n def test_is_valid_with_missing_required_attrs(self):\n og = OpenGraph()\n og.parser(HTML_WITH_MISSING_REQUIRED_ATTRS)\n\n self.assertFalse(og.is_valid())\n\n @patch(\"opengraph.opengraph.urlopen\")\n def test_open_url(self, urlopen_mock):\n read_mock = MagicMock()\n read_mock.read.return_value = HTML\n urlopen_mock.side_effect = lambda url: read_mock\n\n og = OpenGraph(url=\"http://www.example.com\")\n\n self.assertTrue(og.is_valid())\n self.assertDictEqual(\n og,\n {\n \"_url\": \"http://www.example.com\",\n \"description\": \"movie description\",\n \"image\": \"http://ia.media-imdb.com/images/rock.jpg\",\n \"scrape\": False,\n \"title\": \"The Rock\",\n \"type\": \"movie\",\n \"url\": \"http://www.imdb.com/title/tt0117500/\"\n }\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tests/test_opengraph.py","file_name":"test_opengraph.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451702165","text":"import csv\nimport numpy\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nnum_iter = 500\nnum_reps = 75\nmax_allowed_qual = 5000\n\nfile = 'probabilistic_selection_test_500_iterations.csv'\n\nall_repetitions_data = []\n\nnum_lowtier_rules = 8\nnum_hightier_rules = 5\n\nchunk_size = 100\n\nrule_applications_a = []\nrule_acceptance_a = []\nrule_effectiveness_a = []\nrule_selection_chance_a = []\nrule_proportions_a = []\n\nfor i in range(0, int(num_iter/chunk_size)):\n rule_applications_a.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))\n rule_acceptance_a.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))\n rule_effectiveness_a.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))\n\nwith open(file, 'r') as sim_data_file:\n csv_reader = csv.DictReader(sim_data_file)\n\n valid_reps = []\n for row in csv_reader:\n if int(row['iteration']) == num_iter and float(row['current solution quality']) < max_allowed_qual and len(valid_reps) < num_reps:\n valid_reps.append(row['repetition'])\n\n print('')\n print(valid_reps)\n print('')\n print(len(valid_reps))\n print('')\n\n sim_data_file.seek(0)\n\n 
next(csv_reader)\n\n data_list = list(csv_reader)\n current_data_list_index = 0\n\n for repetition_index in range(0, len(valid_reps)):\n current_rep_num = valid_reps[repetition_index]\n current_rep_data = []\n\n for i in range(0, num_iter):\n current_rep_data.append([])\n\n for data_index in range(current_data_list_index, len(data_list)):\n row = data_list[data_index]\n current_data_list_index += 1\n if row['repetition'] == current_rep_num:\n rep = int(current_rep_num)\n iter = int(row['iteration'])\n tier = row['rule tier']\n rule = int(row['rule number'])\n acceptance = int(row['rule acceptance'])\n\n quality_before = float(row['quality before rule'])\n quality_after = float(row['quality after rule'])\n quality_change = quality_after - quality_before\n\n current_rep_data[int(row['iteration'])-1].append({'rep': rep,\n 'iter': iter,\n 'tier': tier,\n 'rule': rule,\n 'acceptance': acceptance,\n 'quality_change': quality_change})\n\n elif row['repetition'] in valid_reps:\n current_data_list_index -= 1\n break\n\n all_repetitions_data.append(current_rep_data)\n\nfor i in range(0, len(all_repetitions_data)):\n for j in range(0, len(all_repetitions_data[i])):\n iteration = all_repetitions_data[i][j][0]\n chunk = int((iteration['iter'] - 1) / chunk_size)\n\n if iteration['tier'] == 'low':\n rule_index = iteration['rule'] - 1\n elif iteration['tier'] == 'high':\n rule_index = iteration['rule'] - 2 + num_lowtier_rules\n\n rule_applications_a[chunk][rule_index] += 1\n\n if iteration['acceptance'] == 1:\n rule_acceptance_a[chunk][rule_index] += 1\n\nrule_effectiveness_a = numpy.divide(rule_acceptance_a, rule_applications_a)\nrule_selection_chance_a = numpy.divide(rule_applications_a, len(all_repetitions_data)*chunk_size)\n\nfor i in range(0, len(rule_acceptance_a)):\n total_accepted = sum(rule_acceptance_a[i])\n rule_proportions_a.append(numpy.divide(rule_acceptance_a[i], total_accepted))\n\nerror_a = []\n\nfor chunk in rule_proportions_a:\n error_a.append(stats.sem(chunk))\n\n# print(rule_applications_a)\n# print(rule_acceptance_a)\n# print(rule_effectiveness_a)\n# print(rule_selection_chance_a)\n# print(rule_proportions_a)\n# print('')\n\nfile = 'probabilistic_selection_test_500_iterations.csv'\n\nall_repetitions_data = []\n\nrule_applications_b = []\nrule_acceptance_b = []\nrule_effectiveness_b = []\nrule_selection_chance_b = []\nrule_proportions_b = []\n\nfor i in range(0, int(num_iter/chunk_size)):\n rule_applications_b.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))\n rule_acceptance_b.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))\n rule_effectiveness_b.append(numpy.zeros(num_lowtier_rules+num_hightier_rules))\n\nwith open(file, 'r') as sim_data_file:\n csv_reader = csv.DictReader(sim_data_file)\n\n valid_reps = []\n for row in csv_reader:\n if int(row['iteration']) == num_iter and float(row['current solution quality']) < max_allowed_qual and len(valid_reps) < num_reps:\n valid_reps.append(row['repetition'])\n\n print('')\n print(valid_reps)\n print('')\n print(len(valid_reps))\n print('')\n\n sim_data_file.seek(0)\n\n next(csv_reader)\n\n data_list = list(csv_reader)\n current_data_list_index = 0\n\n for repetition_index in range(0, len(valid_reps)):\n current_rep_num = valid_reps[repetition_index]\n current_rep_data = []\n\n for i in range(0, num_iter):\n current_rep_data.append([])\n\n for data_index in range(current_data_list_index, len(data_list)):\n row = data_list[data_index]\n current_data_list_index += 1\n if row['repetition'] == current_rep_num:\n rep = 
int(current_rep_num)\n iter = int(row['iteration'])\n tier = row['rule tier']\n rule = int(row['rule number'])\n acceptance = int(row['rule acceptance'])\n\n quality_before = float(row['quality before rule'])\n quality_after = float(row['quality after rule'])\n quality_change = quality_after - quality_before\n\n current_rep_data[int(row['iteration'])-1].append({'rep': rep,\n 'iter': iter,\n 'tier': tier,\n 'rule': rule,\n 'acceptance': acceptance,\n 'quality_change': quality_change})\n\n elif row['repetition'] in valid_reps:\n current_data_list_index -= 1\n break\n\n all_repetitions_data.append(current_rep_data)\n\nfor i in range(0, len(all_repetitions_data)):\n for j in range(0, len(all_repetitions_data[i])):\n iteration = all_repetitions_data[i][j][0]\n chunk = int((iteration['iter'] - 1) / chunk_size)\n\n if iteration['tier'] == 'low':\n rule_index = iteration['rule'] - 1\n elif iteration['tier'] == 'high':\n rule_index = iteration['rule'] - 2 + num_lowtier_rules\n\n rule_applications_b[chunk][rule_index] += 1\n\n if iteration['acceptance'] == 1:\n rule_acceptance_b[chunk][rule_index] += 1\n\nrule_effectiveness_b = numpy.divide(rule_acceptance_b, rule_applications_b)\nrule_selection_chance_b = numpy.divide(rule_applications_b, len(all_repetitions_data)*chunk_size)\n\nfor i in range(0, len(rule_acceptance_b)):\n total_accepted = sum(rule_acceptance_b[i])\n rule_proportions_b.append(numpy.divide(rule_acceptance_b[i], total_accepted))\n\nerror_b = []\n\nfor chunk in rule_proportions_b:\n error_b.append(stats.sem(chunk))\n\n# print(rule_applications_b)\n# print(rule_acceptance_b)\n# print(rule_effectiveness_b)\n\n#######################################################################################################################\n#######################################################################################################################\n\nchunk_labels = ('1', '2', '3', '4', '5')\ny_pos = numpy.arange(len(chunk_labels))\nbar_width = 0.35\n\n# for rule in range(0, num_lowtier_rules + num_hightier_rules):\n# effectiveness_a = []\n# effectiveness_b = []\n# for chunk in rule_proportions_a:\n# effectiveness_a.append(chunk[rule])\n# for chunk in rule_proportions_b:\n# effectiveness_b.append(chunk[rule])\n# plt.bar(y_pos, effectiveness_a, bar_width, color='g', align='center', alpha=0.5, label='Random Selection')\n# # plt.bar(y_pos+bar_width, effectiveness_b, bar_width, color='c', align='center', alpha=0.5, label='Probabilistic Selection')\n# # plt.errorbar(y_pos, effectiveness_a, yerr=error_a, color='g', alpha=0.5, fmt='o')\n# # plt.errorbar(y_pos + bar_width, effectiveness_b, yerr=error_b, color='c', alpha=0.5, fmt='o')\n# plt.xticks(y_pos, chunk_labels)\n# plt.ylim(0, 0.75)\n# plt.grid()\n# plt.xlabel('Iteration Chunk (Every 100 Iter.)')\n# plt.ylabel('Acceptance Rate of Applied Rule')\n# plt.legend(loc=1)\n#\n # if rule < 8:\n # plt.title('Lower-Tier Rule: ' + str(rule+1))\n # else:\n # plt.title('Higher-Tier Rule: ' + str(rule-7))\n# print(effectiveness_a)\n# print(effectiveness_b)\n# plt.show()\n\nall_rule_proportions = []\n\nfor rule in range(0, num_lowtier_rules + num_hightier_rules):\n proportion = []\n for chunk in rule_proportions_a:\n proportion.append(chunk[rule])\n all_rule_proportions.append(proportion)\n\nprint(all_rule_proportions)\n\ncolors = [(0.8, 0, 0), (0, 0.8, 0), (0, 0, 0.8), (0.8, 0.8, 0), (0.8, 0, 0.8), (0, 0.8, 0.8), (0.8, 0.4, 0.4), (0.4, 0.8, 0.4),\n (0.4, 0.4, 0.8), (0.8, 0.2, 0.4), (0.2, 0.2, 0), (0.8, 1.0, 0.4), (0.9, 0.6, 0.2)]\nlast_bottom = 
numpy.zeros(len(rule_proportions_a))\n\nfor rule_index in range(0, len(all_rule_proportions)):\n rule = all_rule_proportions[rule_index]\n if rule_index < 8:\n rule_name = \"LT Rule: \"+ str(rule_index+1)\n else:\n rule_name = \"HT Rule: \"+str(rule_index-7)\n plt.bar(y_pos, rule, bar_width, color=colors[rule_index], bottom=last_bottom, align='center', alpha=0.5, label=rule_name)\n plt.xticks(y_pos, chunk_labels)\n plt.ylim(0, 1.0)\n plt.xlabel('Iteration Chunk (Every 100 Iter.)')\n plt.ylabel('Proportion')\n plt.title('Proportion of Each Rule Within All Accepted Rules per Chunk (Random Rule Selection)')\n plt.legend(loc=1)\n\n last_bottom += rule\n\nplt.grid()\nplt.show()\n\n#######################################################################################################################\n#######################################################################################################################\n\nlumped_proportions = numpy.zeros(5)\nbest_rules = [4, 6, 7, 10, 12]\n\nfor rule_index in range(0, len(all_rule_proportions)):\n if rule_index not in best_rules:\n for chunk_index in range(len(rule_proportions_a)):\n lumped_proportions[chunk_index] += all_rule_proportions[rule_index][chunk_index]\n print(lumped_proportions)\n\nlumped_and_best_proportions = []\nlumped_and_best_proportions.append(lumped_proportions)\n\nfor index in best_rules:\n lumped_and_best_proportions.append(all_rule_proportions[index])\n\ncolors = [(0.5, 0.4, 0.2), (0, 0.6, 0), (0, 0.2, 0.5), (0.7, 0.2, 0.1), (0.4, 0.8, 0.2), (0.8, 0.5, 0)]\nlast_bottom = numpy.zeros(len(rule_proportions_a))\n\nfor rule_index in range(0, len(lumped_and_best_proportions)):\n rule = lumped_and_best_proportions[rule_index]\n if rule_index == 0:\n rule_name = \"OTHER\"\n elif rule_index == 1:\n rule_name = 'LT Rule 5'\n elif rule_index == 2:\n rule_name = 'LT Rule 7'\n elif rule_index == 3:\n rule_name = 'LT Rule 8'\n elif rule_index == 4:\n rule_name = 'HT Rule 3'\n elif rule_index == 5:\n rule_name = 'HT Rule 5'\n plt.bar(y_pos, rule, bar_width, color=colors[rule_index], bottom=last_bottom, align='center', alpha=0.5,\n label=rule_name)\n plt.xticks(y_pos, chunk_labels)\n plt.ylim(0, 1.0)\n plt.xlabel('Iteration Chunk (Every 100 Iter.)')\n plt.ylabel('Proportion')\n plt.title('Proportion of Each Rule Within All Accepted Rules per Chunk (Probabilistic Rule Selection)')\n plt.legend(loc=1)\n\n last_bottom += rule\n\nplt.grid()\nplt.show()\n\n#######################################################################################################################\n#######################################################################################################################\n\nlower_tier_proportions = numpy.zeros(5)\nhigher_tier_proportions = numpy.zeros(5)\n\nfor rule_index in range(len(all_rule_proportions)):\n if rule_index < 8:\n for chunk_index in range(len(all_rule_proportions[rule_index])):\n lower_tier_proportions[chunk_index] += all_rule_proportions[rule_index][chunk_index]\n print(lower_tier_proportions)\n elif rule_index >= 8:\n for chunk_index in range(len(all_rule_proportions[rule_index])):\n higher_tier_proportions[chunk_index] += all_rule_proportions[rule_index][chunk_index]\n print(higher_tier_proportions)\n\ncombined_tiers_proportions = []\ncombined_tiers_proportions.append(lower_tier_proportions)\ncombined_tiers_proportions.append(higher_tier_proportions)\n\ncolors = [(0.8, 0.8, 0), (0, 0.2, 0.8)]\n\nlast_bottom = numpy.zeros(len(rule_proportions_a))\n\nfor tier_index in 
range(len(combined_tiers_proportions)):\n tier = combined_tiers_proportions[tier_index]\n if tier_index == 0:\n tier_name = \"Lower-Tier\"\n elif tier_index == 1:\n tier_name = \"Higher-Tier\"\n plt.bar(y_pos, tier, bar_width, color=colors[tier_index], bottom=last_bottom, align='center', alpha=0.5, label=tier_name)\n plt.xticks(y_pos, chunk_labels)\n plt.ylim(0, 1.0)\n plt.xlabel('Iteration Chunk (Every 100 Iter.)')\n plt.ylabel('Proportion')\n plt.title('Proportion of Each Rule Tier Within All Accepted Rules per Chunk (Probabilistic Rule Selection)')\n plt.legend(loc=1)\n\n last_bottom += tier\n\nplt.grid()\nplt.show()\n","sub_path":"tests/Revised Simulations/rule_effectiveness_test.py","file_name":"rule_effectiveness_test.py","file_ext":"py","file_size_in_byte":13875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528014061","text":"import Color\nimport Dimension\nimport Position\nimport Block\nimport Board\n\n\ndef let_all_full_rows_explode(board):\n \"\"\"\n Let all the blocks in all the full rows on the given board explode.\n - The function starts with examining the given board collecting all the\n blocks in all rows that are completely filled. Hereafter, it will let\n each of these blocks explode exactly once in ascending order of their\n position.\n - If part of a full row has already exploded because of explosions of\n (electrified) blocks in lower rows, other blocks in that row will still\n explode (even if the row is no longer completely filled).\n - If a fragile block in a full row has already exploded because of explosions\n of (electrified) blocks in lower rows, the replacing blocks will not explode\n on their own. They may, however, explode as a result of other electrified\n blocks exploding.\n - The function returns the score resulting from all explosions.\n ASSUMPTIONS\n - The given board is a proper board.\n NOTE\n - This function is already provided (you do not have to work out this function yourself).\n \"\"\"\n assert Board.is_proper_board(board)\n blocks_to_explode = []\n full_rows_sorted = list(Board.get_all_full_rows(board))\n list.sort(full_rows_sorted)\n for row in full_rows_sorted:\n list.extend(blocks_to_explode, Board.get_all_blocks_in_row(board, row))\n total_score = 0\n for block in blocks_to_explode:\n if Board.contains_block(board, block):\n total_score += Board.let_explode(board, block)\n return total_score\n\n\ndef adjust_score(score, level, score_from_explosions, nb_full_rows, nb_columns):\n \"\"\"\n Return the new score and the new level in view of the given score, the given\n level, the score resulting from explosions that have taken place, the total\n number of full rows in which these explosions took place and the number of\n columns on the board.\n NOTE\n - This function is already provided (you do not have to work out this function yourself). 
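The plotting loops above build stacked bars by carrying a running bottom offset from one series to the next. A minimal sketch of that pattern with made-up proportions (three series over five chunks):

import numpy as np
import matplotlib.pyplot as plt

# Made-up proportions: 3 series stacked over 5 chunks (columns sum to 1).
proportions = np.array([[0.5, 0.4, 0.3, 0.2, 0.1],
                        [0.3, 0.3, 0.4, 0.5, 0.5],
                        [0.2, 0.3, 0.3, 0.3, 0.4]])
x = np.arange(proportions.shape[1])

bottom = np.zeros(proportions.shape[1])
for i, series in enumerate(proportions):
    # Each series starts where the previous ones ended, the same
    # running "last_bottom" trick the script uses.
    plt.bar(x, series, 0.35, bottom=bottom, label='series %d' % (i + 1))
    bottom += series

plt.xticks(x, ['1', '2', '3', '4', '5'])
plt.ylim(0, 1.0)
plt.legend(loc=1)
plt.show()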
Its details are irrelevant.\n \"\"\"\n\n def treshold_for_level(level):\n if level == 1:\n return 11 * nb_columns\n else:\n return treshold_for_level(level - 1) + (10 + level) * nb_columns * level\n\n extra_score = score_from_explosions * nb_full_rows * level\n score += extra_score\n if score > treshold_for_level(level):\n level += 1\n return (score, level)\n\n\ndef stabilize_board(level, score, board):\n \"\"\"\n Stabilize the given board and return the updated level and score in view of\n the given level and given score.\n - The function continuously lets all blocks on the given board fall down,\n followed by explosions of all full rows, until the board is stable.\n - The function returns a tuple (l,s) in which l is the new level and s is\n the new score in view of the given level and given score.\n ASSUMPTIONS\n - The given level is a positive integer number.\n - The given score is a non-negative integer number.\n - The given board is a proper board.\n NOTE\n - This function is already provided (you do not have to work out this function yourself).\n \"\"\"\n assert isinstance(level, int) and (level >= 1)\n assert isinstance(score, int) and (score >= 0)\n assert Board.is_proper_board(board)\n Board.let_all_blocks_fall(board)\n nb_full_rows = len(Board.get_all_full_rows(board))\n while (nb_full_rows > 0):\n if nb_full_rows > 0:\n score_from_explosions = let_all_full_rows_explode(board)\n score, level = \\\n adjust_score(score, level, score_from_explosions, nb_full_rows,\n Dimension.get_nb_of_columns(Board.get_dimension(board)))\n Board.let_all_blocks_fall(board)\n nb_full_rows = len(Board.get_all_full_rows(board))\n return (level, score)\n\n\ndef get_all_possible_moves(board):\n all_possible_moves = []\n for block in Board.get_all_blocks(board):\n i = 1\n check1 = False\n check2 = False\n while 1:\n if Board.can_move_over(board, block, i) and not check1:\n all_possible_moves.append((block, i))\n else:\n check1 = True\n if Board.can_move_over(board, block, -i) and not check2:\n all_possible_moves.append((block, -i))\n else:\n check2 = True\n if check1 and check2:\n break\n i += 1\n return all_possible_moves\n\n\ndef play_greedy(blocks, dimension=(8, 10)):\n \"\"\"\n Play the game in a greedy way on a board with the given dimension,\n using the given blocks to fill the bottom row in each step of the game.\n The function repeatedly shifts all blocks up one row, adds new blocks to the\n bottom row and stabilizes the board, computes the move that yields the highest\n score, and makes that move.\n - The given blocks are collected in a list of which each element is a list of\n blocks to fill the bottom row once.\n - The function computes and executes in each step the move yielding the\n highest score.\n - The function returns a tuple consisting of the total score after all\n the given blocks have been used or as soon as the game has come to an end,\n followed by a list of all the moves that have been made. 
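The treshold_for_level helper above is defined recursively. An equivalent iterative form (a sketch assuming only the recurrence shown, T(1) = 11*C and T(L) = T(L-1) + (10+L)*C*L) makes the level thresholds concrete:

def threshold_for_level(level, nb_columns):
    # Iterative equivalent of the recursive treshold_for_level above.
    total = 11 * nb_columns
    for lvl in range(2, level + 1):
        total += (10 + lvl) * nb_columns * lvl
    return total

# e.g. with 8 columns: level 1 -> 88, level 2 -> 280, level 3 -> 592
print([threshold_for_level(lvl, 8) for lvl in (1, 2, 3)])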
Each move in the\n latter list is a tuple containing the block to move, followed by the\n distance over which that block has been moved.\n ASSUMPTIONS\n - The given dimension is a proper dimension.\n - Each element in the list of blocks ((blocks[I]) is a sequence that can be\n used to fill the bottom row once in a valid way (i.e., no overlapping\n positions, no remaining gap larger than half the number of columns after\n a complete fill, ...)\n - The elements in the list of blocks (blocks[I]) are used in the order from left\n to right.\n - Each basic element in the list of blocks ((blocks[I][J]) is a tuple\n involving a (leftmost) position in the bottom row of the board followed by\n a proper block for a board with the given dimension.\n \"\"\"\n if not blocks:\n return 0, []\n\n board = Board.make_board(dimension)\n level = 1\n score = 0\n total_moves = []\n while blocks:\n Board.insert_bottom_row(board, blocks.pop(0))\n level, score = stabilize_board(level, score, board)\n all_possible_moves = get_all_possible_moves(board)\n max_score = 0\n best_moves = []\n for move in all_possible_moves:\n board_copy = Board.copy_board(board)\n Board.move_block_horizontally(board_copy, move[0], move[1])\n temp_level, temp_score = stabilize_board(level, score, board_copy)\n if temp_score > max_score:\n max_score = temp_score\n best_moves = [move]\n elif temp_score == max_score:\n best_moves.append(move)\n if len(best_moves) > 1:\n lowest_block_moves = []\n lowest_row = Dimension.get_nb_of_rows(dimension)\n lowest_column = Dimension.get_nb_of_columns(dimension)\n for move in best_moves:\n pos = Board.get_leftmost_position_of(board, move[0])\n row_nb = Position.nb_of_row(dimension, Position.get_row(pos))\n col = Position.get_column(pos)\n if row_nb <= lowest_row:\n lowest_row = row_nb\n if col < lowest_column:\n lowest_column = col\n lowest_block_moves = [move]\n elif col == lowest_column:\n lowest_block_moves.append(move)\n if len(lowest_block_moves) > 1:\n lowest_move = Dimension.get_nb_of_columns(dimension)\n for move in lowest_block_moves:\n if move[1] < lowest_move:\n lowest_move = move[1]\n best_moves = [move]\n else:\n best_moves = lowest_block_moves\n total_moves.append(best_moves[0])\n Board.move_block_horizontally(board, best_moves[0][0], best_moves[0][1])\n level, score = stabilize_board(level, score, board)\n return score, total_moves\n\n\ndef get_top_moves(board, blocks, min_score=100, max_nb_moves=10, level=1, score=0):\n \"\"\"\n Compute the best possible moves to play the game on the given board starting from\n the given level and the given score using the given blocks to fill the bottom row\n in each step of the game to reach a score at least as high as the given minimal\n score in no more than the given maximum number of moves.\n Play starts with moving all blocks up one row, adding new blocks to the bottom\n row and stabilizing the board.\n - The given blocks are collected in a list of which each element is a list of\n blocks to fill the bottom row once.\n - The function returns None if the given minimal score cannot be reached.\n Otherwise, the function returns a list of all the moves to reach at least\n the minimal score. 
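play_greedy above scores every legal move on a copy of the board and keeps the argmax, breaking ties by position and then by distance. The Board module itself is not shown in this file; a domain-independent sketch of that one-ply greedy pattern, with hypothetical apply_move/evaluate helpers standing in for Board.move_block_horizontally and stabilize_board:

import copy

def greedy_step(state, legal_moves, apply_move, evaluate):
    """Try every move on a copy of the state and return the best (move, score)."""
    best_move, best_score = None, float('-inf')
    for move in legal_moves:
        trial = copy.deepcopy(state)   # never disturb the real state
        apply_move(trial, move)        # apply_move mutates the trial state in place
        score = evaluate(trial)        # evaluate returns a comparable score
        if score > best_score:
            best_move, best_score = move, score
    return best_move, best_score

# Toy usage: state is a list, a move appends a number, the score is the sum.
move, score = greedy_step([1, 2], [3, 10, -5],
                          lambda s, m: s.append(m), lambda s: sum(s))
print(move, score)  # 10 13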
Each move in the latter list is a tuple containing\n the lefmost position of the block to move, followed by the block itself,\n followed by the distance over which that block has to be moved.\n The position of the block is taken at the time of the move, which may obviously\n differ from the initial position taken by that block on the board.\n - If several solutions exist to reach at least the minimal score, the function\n returns the shortest of them in terms of number of moves. If several\n shortest solutions exist, the function returns the solution that is less\n than all other solutions of the same length using Python's operator to compare\n lists.\n - Upon exit, the given board and the given list of blocks must be in the same\n state they were in upon entry.\n ASSUMPTIONS\n - The given board is a proper and stable board.\n - Each element in the list of blocks ((blocks[I]) is a sequence that can be\n used to fill the bottom row once in a valid way (i.e., no overlapping\n positions, no remaining gap larger than half the number of columns after\n a complete fill, ...)\n - The elements in the list of blocks (blocks[I]) are used in the order from left\n to right.\n - Each basic element in the list of blocks ((blocks[I][J]) is a tuple\n involving a (leftmost) position in the bottom row of the board followed by\n a proper block for a board with the given dimension.\n - The given minimal score is a non-negative integer number.\n - The given maximum number of moves is an integer number. If it is negative,\n the function must return None.\n - The given level is a positive integer number.\n - The given score is a non-negative integer number.\n NOTE:\n - This function must use the given functions let_all_full_rows_explode\n and stabilize_board each time all rows of the board must explode,\n respectively the board must be stabilized.\n \"\"\"\n if max_nb_moves < 0:\n return None\n if max_nb_moves == 0 or not blocks:\n if min_score > 0:\n return None\n else:\n return []\n\n blocks = blocks.copy()\n cboard = Board.copy_board(board)\n if blocks:\n Board.insert_bottom_row(cboard, blocks.pop(0))\n else:\n return []\n level, score = stabilize_board(level, score, cboard)\n best_moves = []\n max_score = 0\n\n for move in get_all_possible_moves(cboard):\n temp_moves = []\n copy = Board.copy_board(cboard)\n pos = Board.get_leftmost_position_of(copy, move[0])\n Board.move_block_horizontally(copy, move[0], move[1])\n if not Board.is_empty_row(copy, 'X'):\n continue\n temp_level, temp_score = stabilize_board(level, score, copy)\n\n if max_nb_moves > 1 and temp_score < min_score:\n temp_moves = get_top_moves(copy, blocks.copy(), min_score, max_nb_moves-1, level, temp_score)\n if temp_moves:\n moves_test = temp_moves.copy()\n blocks_copy = blocks.copy()\n while moves_test:\n Board.insert_bottom_row(copy, blocks_copy.pop(0))\n temp_level, temp_score = stabilize_board(temp_level, temp_score, copy)\n cache_move = moves_test.pop(0)\n Board.move_block_horizontally(copy, Board.get_block_at(copy, cache_move[0]), cache_move[2])\n temp_level, temp_score = stabilize_board(temp_level, temp_score, copy)\n\n if temp_score >= min_score:\n if temp_moves:\n best_moves = [[(pos,) + move] + temp_moves]\n else:\n best_moves = [[(pos,) + move]]\n max_score = temp_score\n\n if max_score < min_score:\n return None\n\n if len(best_moves) == 0:\n return None\n\n if len(best_moves) > 1:\n min = max_nb_moves\n for i in best_moves:\n if len(i) < min:\n min = len(i)\n best_moves = [i]\n elif len(i) == min and i not in best_moves:\n 
best_moves.append(i)\n\n if len(best_moves) > 1:\n min = None\n for i in best_moves:\n if not min or i < min:\n min = i\n\n return best_moves[0]\n\n\n\n\n\ndef let_player_move_block(board):\n \"\"\"\n Let the player move one of the blocks on the given board.\n ASSUMPTIONS\n - The given board is a proper board.\n - The bottom row of the given board is not empty.\n \"\"\"\n assert Board.is_proper_board(board)\n assert not Board.is_empty_row(board, \"a\")\n block_to_move = None\n distance_to_move_over = None\n while (block_to_move is None) or (distance_to_move_over is None):\n players_position = \\\n input(\"Some position of block to move: \").split(',')\n if (len(players_position) > 1) and str.isdigit(players_position[1]):\n players_position[1] = eval(players_position[1])\n players_position = tuple(players_position)\n if not Position.is_proper_position(players_position):\n print(\" ---> A proper position consists of a letter, a comma and some digits!\")\n elif not Position.is_within_boundaries(Board.get_dimension(board), players_position):\n print(\" ---> The position is outside the boundaries of the board!\")\n elif Board.is_free_at(board, players_position):\n print(\" ---> No block at the given position\")\n else:\n the_block = Board.get_block_at(board, players_position)\n players_distance = int(input(\"Enter distance to move block over : \"))\n if (not isinstance(players_distance, int)) or (players_distance == 0):\n print(\" ---> The distance must be a non-zero integer number.!\")\n elif not Board.can_move_over(board, the_block, players_distance):\n print(\" ---> The given block cannot move over the given distance\")\n else:\n block_to_move = the_block\n distance_to_move_over = players_distance\n Board.move_block_horizontally(board, block_to_move, distance_to_move_over)\n\n\ndef play_keyboard(blocks = [], nb_rows=10, nb_columns=8):\n \"\"\"\n Function to play the game on a board with the given number of rows and the\n given number of columns via the keyboard, using the given blocks to fill\n the bottom row.\n - The given blocks are collected in a list of which each element is a list of\n blocks to fill the bottom row once. The function will first use elements from\n that list until the list is exhausted. 
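let_player_move_block above loops until both a block and a distance pass validation, re-prompting on every failure. The same ask-until-valid pattern in isolation (a sketch; the prompt and predicate are placeholders, not taken from the game):

def ask_int(prompt, predicate, error):
    # Re-prompt until the input parses as an int and satisfies the predicate.
    while True:
        raw = input(prompt)
        try:
            value = int(raw)
        except ValueError:
            print(error)
            continue
        if predicate(value):
            return value
        print(error)

# e.g. a non-zero distance, like the game requires:
# distance = ask_int("Enter distance to move block over : ",
#                    lambda v: v != 0,
#                    " ---> The distance must be a non-zero integer number!")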
From that point on, the function will\n generate blocks to fill the bottom row in a random way.\n ASSUMPTIONS\n - The given number of rows and the given number of columns are integer numbers\n greater than 1.\n \"\"\"\n assert (nb_rows > 1) and (nb_columns > 1)\n score = 0\n level = 1\n the_board = Board.make_board((nb_rows, nb_columns))\n while Board.is_empty_row(the_board, \"X\"):\n if len(blocks) > 0:\n Board.insert_bottom_row(the_board,blocks.pop(0))\n else:\n Board.push_all_blocks_up(the_board)\n max_block_length = max(2, \\\n round(nb_columns / 4) if level <= 3 else \\\n round(nb_columns / 3) if level <= 6 else\n round(nb_columns / 2))\n Board.fill_bottom_row(the_board, max_block_length)\n level, score = stabilize_board(level, score, the_board)\n Board.print_board(the_board)\n let_player_move_block(the_board)\n level, score = stabilize_board(level, score, the_board)\n print(\"Score: \", score, \"[level: \", level, \"]\")\n print(\"Einde spel!\")\n\n\nif __name__ == '__main__':\n # You are free to change the content of blocks_to_fill\n block1_1 = Block.make_block(1, color=Color.RED)\n block1_2 = Block.make_block(3, color=Color.RED)\n block1_3 = Block.make_block(4, color=Color.RED)\n block2_1 = Block.make_block(1, color=Color.BLUE)\n block2_2 = Block.make_block(2, color=Color.BLUE)\n block2_3 = Block.make_block(4, color=Color.BLUE)\n block3_1 = Block.make_block(3, color=Color.MAGENTA)\n block3_2 = Block.make_block(1, color=Color.MAGENTA)\n block3_3 = Block.make_block(2, color=Color.MAGENTA)\n block3_4 = Block.make_block(1, color=Color.MAGENTA)\n block4_1 = Block.make_block(3, color=Color.GREEN)\n block4_2 = Block.make_block(3, color=Color.GREEN)\n block4_3 = Block.make_block(1, color=Color.GREEN)\n blocks_to_fill = \\\n [[((\"a\", 1), block1_1), ((\"a\", 2), block1_2)], #((\"a\", 5), block1_3)],\n [((\"a\", 1), block2_1), ((\"a\", 3), block2_2), ((\"a\", 5), block2_3)],\n [((\"a\", 1), block3_1), ((\"a\", 5), block3_2), ((\"a\", 6), block3_3), ((\"a\", 8), block3_4)],\n [((\"a\", 1), block4_1), ((\"a\", 5), block4_2), ((\"a\", 8), block4_3)]\n ]\n play_keyboard(blocks_to_fill,nb_columns=9)\n","sub_path":"Game-backup.py","file_name":"Game-backup.py","file_ext":"py","file_size_in_byte":18449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"428535844","text":"\"\"\"Gateway for accessing the Discourse API (for forums)\"\"\"\n\nimport json\nimport re\nfrom urllib import urlencode\n\nfrom google.appengine.api import urlfetch\nfrom google.appengine.ext import ndb\n\n\nclass Error(Exception):\n pass\n\n\nclass DiscourseAPIClient(object):\n \"\"\"An API client for interacting with Discourse\"\"\"\n\n def __init__(self, discourse_url, api_key, api_username='system'):\n self._discourse_url = discourse_url\n self._api_key = api_key\n self._api_username = api_username\n\n @ndb.tasklet\n def _getRequest(self, req_string, params=None, payload=None):\n response = yield self._sendDiscourseRequest(req_string, params,\n payload, 'GET')\n raise ndb.Return(response)\n\n @ndb.tasklet\n def _putRequest(self, req_string, params=None, payload=None):\n response = yield self._sendDiscourseRequest(req_string, params,\n payload, 'PUT')\n raise ndb.Return(response)\n\n @ndb.tasklet\n def _postRequest(self, req_string, params=None, payload=None):\n response = yield self._sendDiscourseRequest(req_string, params,\n payload, 'POST')\n raise ndb.Return(response)\n\n @ndb.tasklet\n def _deleteRequest(self, req_string, params=None, payload=None):\n response = yield 
self._sendDiscourseRequest(req_string, params,\n payload, 'DELETE')\n raise ndb.Return(response)\n\n @ndb.tasklet\n def _sendDiscourseRequest(self, req_string, params, payload, method):\n if payload is None:\n payload = {}\n if params is None:\n params = {}\n\n if method == 'GET' or method == 'DELETE':\n params.update({'api_key': self._api_key,\n 'api_username': self._api_username})\n else:\n payload.update({'api_key': self._api_key,\n 'api_username': self._api_username})\n\n if params:\n url = '%s%s?%s' % (self._discourse_url, req_string,\n urlencode(params))\n else:\n url = '%s%s' % (self._discourse_url, req_string)\n\n response = yield ndb.get_context().urlfetch(\n url=url, payload=urlencode(payload), method=method,\n headers={'Content-Type': 'application/x-www-form-urlencoded'}\n )\n\n if response.status_code != 200:\n raise Error(\"%s request to %s returned a code of %d\" %\n (method, req_string, response.status_code))\n\n raise ndb.Return(json.loads(response.content))\n\n # USER ACTIONS\n\n @ndb.tasklet\n def getUserByEmail(self, user_email):\n \"\"\"Finds a user with the given email\n\n This method takes a user email and returns a future which resolves to\n the Discourse user with that email address, if they exist. If no user\n is found, None is returned.\n \"\"\"\n users = yield self._getRequest('admin/users/list/active.json',\n params={'filter': user_email,\n 'show_emails': 'true'})\n\n for user in users:\n if user['email'].lower() == user_email.lower():\n raise ndb.Return(user)\n\n raise ndb.Return(None)\n\n @ndb.tasklet\n def createUser(self, name, email, password, username, external_id=None):\n \"\"\"Create a Discourse account\n\n This method takes a user object and returns the Discourse API response\n containing the user information for that user.\n \"\"\"\n\n # user = yield self.getUserByEmail(email)\n # if user:\n # raise ndb.Return(user)\n\n payload = {\n 'username': username,\n 'email': email,\n 'name': name,\n 'password': password,\n }\n\n if external_id:\n payload['external_id'] = external_id\n\n response = yield self._postRequest('users/', payload=payload)\n raise ndb.Return(response)\n\n @ndb.tasklet\n def deleteUser(self, email):\n user = yield self.getUserByEmail(email)\n if user is None:\n raise ndb.Return(None)\n\n response = yield self._deleteRequest('admin/users/%s.json' % user['id'])\n raise ndb.Return(response)\n\n # CATEGORY ACTIONS\n\n @ndb.tasklet\n def getCategoryByName(self, category_name):\n categories = yield self._getRequest('categories.json')\n\n for category in categories['category_list']['categories']:\n if category['name'] == category_name:\n raise ndb.Return(category)\n\n raise ndb.Return(None)\n\n @ndb.tasklet\n def createCategory(self, category_name, parent_category_name=None,\n **kwargs):\n \"\"\"Create a category\"\"\"\n category = yield self.getCategoryByName(category_name)\n if category:\n raise ndb.Return(None)\n\n defaults = {\n 'color': 'FFFFFF',\n 'text_color': '000000'\n }\n\n payload = {\n 'name': category_name,\n 'allow_badges': True\n }\n\n payload.update(defaults)\n\n for k, v in kwargs.iteritems():\n payload[k] = v\n\n if parent_category_name:\n parent_category = yield \\\n self.getCategoryByName(parent_category_name)\n payload['parent_category_id'] = parent_category['id']\n\n response = yield self._postRequest('categories', payload=payload)\n raise ndb.Return(response)\n\n @ndb.tasklet\n def deleteCategory(self, category_name):\n category = yield self.getCategoryByName(category_name)\n if not category:\n raise ndb.Return(None)\n\n 
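The request helper above places the API credentials in the query string for GET/DELETE and in the form body for PUT/POST. A sketch of the same placement rule outside App Engine, using the third-party requests library instead of urlfetch/ndb (an assumption, not how this client is deployed):

import requests

def send_discourse_request(base_url, path, method, api_key, api_username,
                           params=None, payload=None):
    """Same credential-placement rule as the client above, as a plain-requests sketch."""
    params = dict(params or {})
    payload = dict(payload or {})
    creds = {'api_key': api_key, 'api_username': api_username}

    if method in ('GET', 'DELETE'):
        params.update(creds)    # credentials travel in the query string
    else:
        payload.update(creds)   # ...or in the form body for PUT/POST

    resp = requests.request(method, base_url + path, params=params, data=payload,
                            headers={'Content-Type': 'application/x-www-form-urlencoded'})
    resp.raise_for_status()
    return resp.json()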
response = yield self._deleteRequest('categories/%s' % category['slug'])\n raise ndb.Return(response)\n\n # GROUP ACTIONS\n\n @ndb.tasklet\n def addUserToGroup(self, user_email, group_name):\n \"\"\"Adds the given account to the Discourse group with the given name\"\"\"\n\n user = yield self.getUserByEmail(user_email)\n if not user:\n raise Error(\"Unable to find user with email %s\" % user_email)\n\n groups = yield self._getRequest('admin/groups.json')\n\n group_id = None\n for group in groups:\n if group['name'] == group_name:\n group_id = group['id']\n break\n else:\n raise Error(\"Group named %s not found\" % group_name)\n\n payload = {\n 'usernames': user['username']\n }\n\n result = yield self._putRequest('admin/groups/%s/members.json' %\n group_id, payload=payload)\n raise ndb.Return(result)\n\n @ndb.tasklet\n def removeUserFromGroup(self, user_email, group_name):\n \"\"\"Removes an account from a group\"\"\"\n\n user = yield self.getUserByEmail(user_email)\n if not user:\n raise Error(\"Unable to find user with email %s\" % user_email)\n\n group = yield self.getGroupByName(group_name)\n if not group:\n raise Error(\"Group named %s not found\" % group_name)\n\n result = yield self._deleteRequest('admin/groups/%s/members.json' % group['id'],\n params={'user_id': user['id']})\n raise ndb.Return(result)\n\n @ndb.tasklet\n def createGroup(self, group_name, **kwargs):\n \"\"\"Creates a group with the given name on Discourse\"\"\"\n\n groups = yield self._getRequest('admin/groups.json')\n\n for group in groups:\n if group['name'] == group_name:\n raise ndb.Return(None)\n # raise Error(\"Group named %s already exists!\" % group_name)\n\n payload = {\n 'name': group_name\n }\n\n for k, v in kwargs.iteritems():\n payload[k] = v\n\n response = yield self._postRequest('admin/groups', payload=payload)\n raise ndb.Return(response)\n\n @ndb.tasklet\n def deleteGroup(self, group_name):\n group = yield self.getGroupByName(group_name)\n if not group:\n raise ndb.Return(None)\n\n response = yield self._deleteRequest('admin/groups/%s' % group['id'])\n raise ndb.Return(response)\n\n @ndb.tasklet\n def getGroupByName(self, group_name):\n groups = yield self._getRequest('admin/groups.json')\n\n for group in groups:\n if group['name'] == group_name:\n raise ndb.Return(group)\n\n raise ndb.Return(None)\n\n # CONTENT ACTIONS\n\n @ndb.tasklet\n def createPost(self, text, title, category_name, **kwargs):\n \"\"\"Creates a post\"\"\"\n\n category = yield self.getCategoryByName(category_name)\n\n payload = {\n 'raw': text,\n 'title': title,\n 'category': category['id']\n }\n\n for k, v in kwargs.iteritems():\n payload[k] = v\n\n response = yield self._postRequest('posts', payload=payload)\n","sub_path":"src/discourse.py","file_name":"discourse.py","file_ext":"py","file_size_in_byte":9000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"470679868","text":"import autograd.numpy as np\nimport so3\nimport parse\n\n\ndef GetIMU():\n TS, GYRO, TEMP, MAG, ACCEL = [], [], [], [], []\n for entry in parse.ParseLog(open(\"../rustlerlog-BMPauR\")):\n if entry[0] == 'img':\n continue\n elif entry[0] != 'imu':\n continue\n _, ts, gyro, mag, accel = entry\n TS.append(ts)\n # gyro (rads/sec)\n GYRO.append(np.array(gyro[:3], np.float32) * np.pi / (180 * 14.375))\n TEMP.append(gyro[3])\n # magnetometer (in some random uT/LSB count units)\n MAG.append(mag.astype(np.float32))\n # accel (m/s^2)\n ACCEL.append(accel.astype(np.float32) * 9.81 / 256.0)\n return (np.array(TS), 
np.array(GYRO), np.array(TEMP),\n            np.array(MAG), np.array(ACCEL))\n\n\nTS, GYRO, TEMP, MAG, ACCEL = GetIMU()\n\n\ndef magcal_residual(MAG, a, mb):\n    \"\"\" residual from all observations given magnetometer eccentricity and bias\"\"\"\n\n    A = np.array([\n        [a[0], a[1], a[2]],\n        [0, a[3], a[4]],\n        [0, 0, a[5]]\n    ])\n\n    mag = np.dot(MAG - mb, A)\n    return np.mean(np.abs(1 - np.einsum('ji,ji->j', mag, mag)))\n\n\ndef magcal_residual2(a, mb, gb, gs):\n    \"\"\" residual from all observations given magnetometer eccentricity, bias,\n    gyro bias, and gyro scale\"\"\"\n\n    A = np.array([\n        [a[0], a[1], a[2]],\n        [0, a[3], a[4]],\n        [0, 0, a[5]]\n    ])\n\n    mag = np.dot(MAG - mb, A)\n    dt = TS[1:] - TS[:-1]\n    w = gs * (GYRO[1:] - gb)\n    C = so3.tensorexp(w.T * dt)\n    rot_mag = np.einsum('ijl,lj->li', C, mag[:-1])\n    return np.mean(np.abs(1 - np.einsum('ji,ji->j', mag[1:], rot_mag)))\n\n# (x-u)^T A (x-u) - 1\n# x^T A (x-u) - u^T A (x-u) - 1\n# x^T A x - x^T A u - u^T A x + u^T A u - 1\n# x^T A x - (2 u^T A) x + (u^T A u - 1) = 0\n\n# i wonder if a more numerically stable parameterization would be to use 1/diag\n# ...nope\n","sub_path":"py/calibrate.py","file_name":"calibrate.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"306573186","text":"import numpy as np\r\ndef forward(A,B,pi,T,N,o):\r\n    alpha=np.zeros((T,N))\r\n    for i in range(N):\r\n        alpha[0][i]=pi[i]*B[i][o[0]]\r\n    for t in range(T-1):\r\n        for i in range(N):\r\n            sum_temp = 0\r\n            for j in range(N):\r\n                sum_temp=sum_temp+alpha[t][j]*A[j][i]\r\n            alpha[t+1][i]=sum_temp*B[i][o[t+1]]\r\n    p_o_lamda=alpha[T-1].sum(axis=0)\r\n    return p_o_lamda,alpha\r\ndef backward(A,B,pi,T,N,o):\r\n    beta=np.ones((T,N))\r\n    for t in range(T-1,0,-1):\r\n        for i in range(N):\r\n            beta[t-1][i]=0\r\n            for j in range(N):\r\n                beta[t-1][i]=beta[t-1][i]+A[i][j]*B[j][o[t]]*beta[t][j]\r\n    p_o_lamda=0\r\n    for i in range(N):\r\n        p_o_lamda=p_o_lamda+pi[i]*B[i][o[0]]*beta[0][i]\r\n    return p_o_lamda,beta\r\nif __name__ == '__main__':\r\n    A=[[0.5,0.1,0.4],[0.3,0.5,0.2],[0.2,0.2,0.6]]\r\n    B=[[0.5,0.5],[0.4,0.6],[0.7,0.3]]\r\n    pi=[0.2,0.3,0.5]\r\n    T=8\r\n    N=3\r\n    # red = 0, white = 1\r\n    o=[0,1,0,0,1,0,1,1]\r\n    forward_p,alpha=forward(A,B,pi,T,N,o)\r\n    backward_p,beta=backward(A,B,pi,T,N,o)\r\n    # compute P(i4=q3|O,λ)\r\n    i=3-1\r\n    t=4-1\r\n    densum=0\r\n    for j in range(N):\r\n        densum=densum+alpha[t][j]*beta[t][j]\r\n    gamma43=alpha[t][i]*beta[t][i]/densum\r\n    print('P(i_4=q_3|O,λ) =',gamma43)","sub_path":"forward_and_backward.py","file_name":"forward_and_backward.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448741308","text":"from logic import TruthTable\r\n\r\nproposition1 = input(\"Enter Proposition 1:\\n\")\r\nproposition2 = input(\"Enter Proposition 2:\\n\")\r\n\r\nmyTable1 = TruthTable(['p', 'q'], [proposition1])\r\nmyTable2 = TruthTable(['p', 'q'], [proposition2])\r\n\r\nprint(myTable1.table)\r\nprint(myTable2.table)\r\n\r\nif myTable1.table == myTable2.table:\r\n    print(\"The Propositions are equivalent.\")\r\nelse:\r\n    print(\"The Propositions are not equivalent.\")\r\n","sub_path":"Lab9/CSE015/Lab2Problem2.py","file_name":"Lab2Problem2.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14226365","text":"import argparse\nimport codecs\nimport logging\n\nlogger = 
logging.getLogger(__name__)\n\n\ndef main(args, loglevel):\n # set up logging\n logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=loglevel)\n\n file = codecs.open(args.file, 'r', 'utf-8')\n total = 0\n\n if args.output.endswith('/'):\n outfile_prefix = args.output + args.prefix\n else:\n outfile_prefix = args.output + '/' + args.prefix\n\n outfiles = [codecs.open('%s%s' % (outfile_prefix, i), 'w', 'utf-8') for i in xrange(args.number)]\n\n article = ''\n for line in file:\n article += line\n if line == '\\n':\n outf = outfiles[total % args.number]\n outf.write(article)\n total += 1\n article = ''\n\n file.close()\n [f.close() for f in outfiles]\n logger.info('Finished partititoning %d articles into %d partitions' % (total, args.number))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"Partitions a file generated by WikiExtractor\",\n fromfile_prefix_chars='@')\n\n parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Path to the file with plaintext articles generated by WikiExtractor\",\n required=True,\n type=str\n )\n\n parser.add_argument(\n \"-n\",\n \"--number\",\n help=\"number of partitions\",\n required=True,\n type=int\n )\n\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"Path to the output directory\",\n required=True,\n type=str\n )\n\n parser.add_argument(\n \"-p\",\n \"--prefix\",\n help=\"Prefix for output files (default: partition-)\",\n default=\"partition-\",\n type=str\n )\n\n args = parser.parse_args()\n\n loglevel = logging.INFO\n if args.verbose:\n loglevel = logging.DEBUG\n\n main(args, loglevel)\n","sub_path":"scripts/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"168991974","text":"\"\"\"\nAdvent of Code 2020\nDay: 14\nPuzzle: 1\nLanguage: Python\n\"\"\"\n\n# incorrect guesses: 142232120440\n\nimport re\n\n\ndef add_mask(num, mask):\n res = list()\n for idx, mask_digit in enumerate(mask):\n if mask_digit == \"X\":\n res.append(num[idx])\n elif mask_digit == \"0\":\n res.append(\"0\")\n else:\n res.append(\"1\")\n\n return int(\"\".join(res), 2)\n\n\ndef main():\n infile_path = \"../../../data/2020_day_14.txt\"\n with open(infile_path, \"r\") as infile:\n data = infile.readlines()\n\n mask = None\n mem = dict()\n\n for line in data:\n if line.startswith(\"mask\"):\n mask = line.split(\"=\")[1].strip()\n else:\n # mem\n num = bin(int(line.split(\"=\")[1].strip()))[2:].zfill(36)\n target = int(re.search(r\"\\[(\\d+)\\]\", line).group(1))\n res = add_mask(num, mask)\n mem[target] = res\n\n print(sum(mem.values()))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/advent_of_python/2020_puzzles/day14puzzle1.py","file_name":"day14puzzle1.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"324237491","text":"import numpy as np\nimport pandas as pd\n\ndef keepindications_removeexclusionstt(metr,dat,lkupTab,include_or_exclude, lookbac, tempsort_dayx, b, uflagx):\n print('initiating keepIremoveE')\n print(len(dat))\n print('number unique MRNs')\n print(dat.MRN.nunique()) #\n \n if (include_or_exclude=='exclude'):\n ex_dat=dat[(dat.dys_difftime_test_code<=lookbac) & (dat.dys_difftime_test_code>=0)]\n if metr in ['dexa','narc']:\n 
ex_dat.loc[ex_dat.dys_difffrom_rCODE>tempsort_dayx, 'rCODE']='ERASEDplaceholder' # ERASEDplaceholder will not (likely) wind up in reference table cell, whereas None actually can\n elif metr == 'vitd':\n ex_dat.loc[ex_dat.dys_difffrom_rCODE>365, 'rCODE']='ERASEDplaceholder'\n elif len(lkupTab[lkupTab['class']=='CPT'])>0:\n raise ValueError('the comparison btwn main service code, and prior service codes is invalid. That is to say, prior service codes are not being considered as they should be, given that ref table has a CPT code in it')\n else:\n pass\n print('starting vect_mrn')\n deb1=ex_dat\n vect_mrn_pd=vect_mrn(metr, ex_dat, lkupTab, b, 'exclude', uflagx)\n debA = vect_mrn_pd[0]\n debB = vect_mrn_pd[1]\n deb2=vect_mrn_pd[-1]\n \n ### return only those from the dat.pd which are NOT in the vect_mrn result \n #(those in time window w/o valid indications, \n #or any with valid indications that date from beyond lookback period/or result after service performed)\n dat=dat[~dat['MRN'].isin(vect_mrn_pd[-1])]\n \n return (deb1, debA, debB, deb2, dat)\n elif (include_or_exclude=='include'):\n if metr in ['dexa','narc']:\n# in_dat = dat[dat.dys_difffrom_rCODE<=tempsort_dayx]\n in_dat = dat\n in_dat.loc[in_dat.dys_difffrom_rCODE>tempsort_dayx, 'rCODE']='ERASEDplaceholder'\n elif metr == 'vitd':\n in_dat = dat\n in_dat.loc[in_dat.dys_difffrom_rCODE>365, 'rCODE']='ERASEDplaceholder'\n elif len(lkupTab[lkupTab['class']=='CPT'])>0:\n raise ValueError('the comparison btwn main service code, and prior service codes is invalid.')\n else:\n in_dat = dat\n vect_mrn_pd=vect_mrn(metr, in_dat, lkupTab, b, 'include', uflagx)\n dat=dat[dat['MRN'].isin(vect_mrn_pd)] \n return dat\n else:\n raise ValueError(\"Set 3rd ARG to 'include' or 'exclude'\")\n\n\ndef vect_mrn(met, loc_pd, lkupTa, b, i_or_e, uflagxx): \n lkupTa_0= lkupTa[lkupTa.startWith==0]\n lkupTa_1= lkupTa[lkupTa.startWith==1]\n if len(lkupTa_1[lkupTa_1['class']!='ICD'])>0:\n raise ValueError('reference table has a startWith stem that has class other than ICD')\n else:\n pass \n \n if uflagxx == False: \n vect_mr=np.unique(pd.concat([loc_pd['MRN'][loc_pd.ccslev.astype(str).isin(lkupTa_0['subcode'][lkupTa_0['class']=='CCS'])],\n ## following line changed from TEST_CODE to rCODE, for the column of procedures/services merged on queried service\n loc_pd['MRN'][loc_pd.rCODE.isin(lkupTa_0['subcode'][lkupTa_0['class']=='CPT'])],\n loc_pd['MRN'][loc_pd.hcclev.astype(str).isin(lkupTa_0['subcode'][lkupTa_0['class']=='HCC'])],\n loc_pd['MRN'][loc_pd['ICD{}_subcode'.format(b)].isin(lkupTa_0['subcode'][lkupTa_0['class']=='ICD'])]]))\n elif uflagxx == True:\n vect_mr=np.unique(pd.concat([loc_pd['MRN'][loc_pd.rCODE.isin(lkupTa_0['subcode'][lkupTa_0['class']=='CPT'])],\n loc_pd['MRN'][loc_pd['ICD{}_subcode'.format(b)].isin(lkupTa_0['subcode'][lkupTa_0['class']=='ICD'])]]))\n deba=[loc_pd[loc_pd.rCODE.isin(lkupTa_0['subcode'][lkupTa_0['class']=='CPT'])],\n loc_pd[loc_pd['ICD{}_subcode'.format(b)].isin(lkupTa_0['subcode'][lkupTa_0['class']=='ICD'])]]\n else:\n raise KeyError('uflagxx in keepinclusionsremoveexclusions()/vect_mr() is incompatible with algorithm design')\n \n print('starting the startWith chunk')\n# if (np.sum(lkupTa_1['startWith'][lkupTa_1['key']==met])>0): ## This is the original rule for entering startWith chunk\n if len(lkupTa_1)>0:\n print('entered startWith chunk')\n lkupTa_1.loc[:,'length']=lkupTa_1['subcode'].apply(lambda x: len(x))\n print('the stems that are length zero: {}'.format(len(lkupTa_1[lkupTa_1.length==0])))\n 
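The "startWith" chunk entered above matches ICD codes by stem: truncate every code to the stem length, then test membership. A self-contained pandas sketch of that prefix matching, with made-up codes and stems (not values from the reference table):

import pandas as pd

df = pd.DataFrame({'MRN':  [1, 2, 3, 4],
                   'ICD': ['E11.9', 'M81.0', 'E10.1', 'Z79.4']})
stems = ['E11', 'M81']          # "startWith" stems, all the same length here

n = len(stems[0])
matched = df[df['ICD'].str[:n].isin(stems)]   # truncate, then isin, as in vect_mrn
print(matched['MRN'].unique())                # [1 2]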
print(lkupTa_1[lkupTa_1['length']==0])\n lkupTa_1=lkupTa_1[lkupTa_1.length>0]\n lengthsx=lkupTa_1.groupby('length').count()\n lengthsx.reset_index(inplace=True)\n lengthsx=lengthsx['length']\n testerbean=0\n debb=deba\n for n in lengthsx:\n print('stem length is: {}'.format(n))\n t_stCodes=list(lkupTa_1.loc[lkupTa_1.length==n, 'subcode'])\n # create temp dataframe that is light\n t_loc_pd=loc_pd[['MRN','ICD{}_subcode'.format(b)]]\n # create third column\n t_loc_pd.loc[:,'trunc']=t_loc_pd.loc[:,'ICD{}_subcode'.format(b)].str[:n]\n # add those MRNs to the main vect_mr\n vect_mr=np.append(vect_mr,np.unique(t_loc_pd['MRN'][t_loc_pd['trunc'].isin(t_stCodes)]))\n if testerbean ==0:\n debb = t_loc_pd[t_loc_pd['trunc'].isin(t_stCodes)]\n testerbean+=1\n else:\n debb = pd.concat([debb, t_loc_pd[t_loc_pd['trunc'].isin(t_stCodes)]])\n print('keepin/ex stemming cycle completed x1')\n# else:\n # raise KeyError('startWith Chunk is using incorrect uflagxx')\n #######################################\n ########################################\n print('done with startWith chunk, now parsing for uniques')\n \n # new line, just for cleanness - restrict to unique MRN\n vect_mr = np.unique(vect_mr)\n print('done with vect_mrn')\n return (deba, debb, vect_mr)\n","sub_path":"Scripts/keepind_ex_testingreportable_20170710.py","file_name":"keepind_ex_testingreportable_20170710.py","file_ext":"py","file_size_in_byte":6044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444936546","text":"from django.shortcuts import render, render_to_response\nimport json\nimport urllib2\n\n# Create your views here.\n\n\ndef home(request):\n return render(request, \"index.html\")\n\n\ndef about(request):\n codeschool = urllib2.urlopen('https://www.codeschool.com/users/dmmoody.json')\n codeschool_data = json.load(codeschool)\n\n treehouse = urllib2.urlopen('http://teamtreehouse.com/duanemoody.json')\n treehouse_data = json.load(treehouse)\n badge_count = 0\n points = treehouse_data['points']['total']\n course_dict = {}\n activity_date = []\n for i in treehouse_data['badges']:\n badge_count += 1\n activity_date.append(i['earned_date'])\n if i['courses']:\n if i['courses'][0]['title'] in course_dict:\n course_dict[i['courses'][0]['title']].append([i['courses'][1]['title'], i['icon_url']])\n else:\n course_dict[i['courses'][0]['title']] = [[i['courses'][1]['title'], i['icon_url']]]\n activity_date = json.dumps(activity_date)\n return render_to_response('about.html', {'treehouse_data': treehouse_data,\n 'course_dict': course_dict,\n 'points': points,\n 'badge_count': badge_count,\n 'activity_date': activity_date,\n 'codeschool_data': codeschool_data})\n\ndef projects(request):\n github = urllib2.urlopen('https://api.github.com/users/dmmoody/repos')\n github_data = json.load(github)\n return render_to_response('projects.html', {'github_data': github_data})","sub_path":"dmmoody/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"173199171","text":"# ftp.retrbinary(\"RETR \" + file ,open(\"pub/sistemas/tup/downloads/\" + file, 'wb').write)\nimport pandas as pd\nimport urllib.request\nimport io\nfrom zipfile import ZipFile\nfrom ftplib import FTP\n\n\n# AQUI TALVEZ POSSA SER RETIRADO PRA NÃO PRECISAR ABRIR DUAS VEZES O FTP\ndef getfullfilename(year: int, month: int):\n TabelaUnificada = 'TabelaUnificada_{}{}'.format(year,month)\n ftp = 
FTP(\"ftp2.datasus.gov.br\")\n ftp.login()\n ftp.cwd(\"pub/sistemas/tup/downloads/\")\n filenames = ftp.nlst()\n result = list(filter(lambda x: x.startswith(TabelaUnificada), filenames))\n ftp.close()\n return result[0]\n\ndef download(file: str, year: int, month: int, cache: bool=True) -> object:\n TabelaUnificadaName = getfullfilename(year, month)\n mysock = urllib.request.urlopen('ftp://ftp2.datasus.gov.br/pub/sistemas/tup/downloads/' + TabelaUnificadaName)\n memfile = io.BytesIO(mysock.read())\n with ZipFile(memfile, 'r') as myzip:\n f = myzip.open(file + '.txt')\n col = myzip.open(file + '_layout.txt')\n colunas, content = col.read(), f.read()\n colunas, content = colunas.decode(\"unicode_escape\"), content.decode(\"unicode_escape\")\n dfcol = (pd.DataFrame([x.split(',') for x in colunas.split('\\r\\n')]))\n dfcol = dfcol.rename(columns=dfcol.iloc[0]).drop([0]).dropna()\n\n df = (pd.DataFrame([x.split('\\r\\n') for x in content.split('\\r\\n')]))\n FinalDF = pd.DataFrame(columns=dfcol['Coluna'].tolist())\n \n # AQUI TEM QUE ACHAR UMA MANEIRA MAIS EFETIVA QUE O FOR\n for i in dfcol.index:\n inicio = int(dfcol.loc[i]['Inicio'])\n fim = int(dfcol.loc[i]['Fim'])\n row = []\n for s in range(len(df)):\n row.append(df.loc[s][0][inicio-1:fim])\n FinalDF[dfcol.loc[i]['Coluna']] = row\n return FinalDF\n","sub_path":"pysus/online_data/SIGTAP.py","file_name":"SIGTAP.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156312038","text":"import gi\nimport dbus\nimport dbus.service\nimport dbus.mainloop.glib\nimport time\n\nfrom dbus.mainloop.glib import DBusGMainLoop\nfrom dbus.exceptions import DBusException\nfrom dbus.types import ByteArray\n\nfrom g13gui.applet.loopbackdisplaydevice import LoopbackDisplayDevice\nfrom g13gui.bitwidgets.display import Display\nfrom g13gui.bitwidgets.screen import Screen\n\ngi.require_version('GLib', '2.0')\nfrom gi.repository import GLib\n\n\nBUTTONS = [\n 'L1', 'L2', 'L3', 'L4'\n]\n\n\nclass Applet(dbus.service.Object):\n BUS_INTERFACE = 'com.theonelab.g13.Applet'\n BUS_PATH = '/com/theonelab/g13/Applet'\n\n def __init__(self, name):\n dbus.service.Object.__init__(self, dbus.SessionBus(),\n Applet.BUS_PATH)\n\n self._name = name\n self._dd = LoopbackDisplayDevice()\n self._d = Display(self._dd)\n self._s = Screen(self._d)\n self._s.hide()\n\n self._registered = False\n self._manager = None\n\n def register(self):\n try:\n self._manager = self._bus.get_object(\n 'com.theonelab.g13.AppletManager',\n '/com/theonelab/g13/AppletManager')\n except DBusException:\n self._manager = None\n return True\n\n self._manager.Register(self._name)\n self._registered = True\n\n GLib.idle_add(self.onRegistered)\n GLib.timeout_add_seconds(1, self._ping)\n\n return False\n\n def _ping(self):\n if self._manager:\n result = False\n\n try:\n result = self._manager.Ping()\n except DBusException as err:\n print('Lost connection with AppletManager: %s' % err)\n self._registered = False\n GLib.idle_add(self.onUnregistered)\n GLib.timeout_add_seconds(1, self.register)\n return False\n\n if not result:\n print('Lost registration with AppletManager')\n self._registered = False\n GLib.idle_add(self.onUnregistered)\n GLib.timeout_add_seconds(1, self.register)\n return False\n\n return True\n\n def run(self):\n self._bus = dbus.SessionBus()\n\n GLib.timeout_add_seconds(1, self.register)\n\n loop = GLib.MainLoop()\n loop.run()\n\n @property\n def name(self):\n return self._name\n\n @property\n def 
displayDevice(self):\n return self._dd\n\n @property\n def display(self):\n return self._d\n\n @property\n def screen(self):\n return self._s\n\n @property\n def manager(self):\n return self._manager\n\n def onKeyPressed(self, timestamp, key):\n pass\n\n def onKeyReleased(self, timestamp, key):\n pass\n\n def onShown(self, timestamp):\n pass\n\n def onHidden(self):\n pass\n\n def onRegistered(self):\n pass\n\n def onUnregistered(self):\n pass\n\n def maybePresentScreen(self):\n if self.screen.visible and self._manager:\n self.screen.nextFrame()\n frame = self.displayDevice.frame\n frame = ByteArray(frame)\n self._manager.Present(frame, byte_arrays=True)\n\n @dbus.service.method(BUS_INTERFACE,\n in_signature='d', out_signature='ay',\n byte_arrays=True)\n def Present(self, timestamp):\n self.screen.show()\n self.onShown(timestamp)\n self.screen.nextFrame()\n return ByteArray(self.displayDevice.frame)\n\n @dbus.service.method(BUS_INTERFACE)\n def Unpresent(self):\n self.screen.hide()\n self.onHidden()\n\n def _setButtonPressed(self, state, button):\n if button in BUTTONS:\n buttonIdx = BUTTONS.index(button)\n button = self._s.buttonBar.button(buttonIdx)\n if button:\n button.pressed = state\n\n @dbus.service.method(BUS_INTERFACE,\n in_signature='di', out_signature='ay',\n byte_arrays=True)\n def KeyPressed(self, timestamp, key):\n self.onKeyPressed(timestamp, key)\n self._setButtonPressed(True, key)\n self.screen.nextFrame()\n return ByteArray(self.displayDevice.frame)\n\n @dbus.service.method(BUS_INTERFACE,\n in_signature='di', out_signature='ay',\n byte_arrays=True)\n def KeyReleased(self, timestamp, key):\n self.onKeyReleased(timestamp, key)\n self._setButtonPressed(False, key)\n self.screen.nextFrame()\n return ByteArray(self.displayDevice.frame)\n\n\ndef RunApplet(cls, *args, **kwargs):\n DBusGMainLoop(set_as_default=True)\n applet = cls(*args, **kwargs)\n applet.run()\n","sub_path":"g13gui/applet/applet.py","file_name":"applet.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"266664849","text":"from itertools import combinations\nn=int(input())\npairs=[]\nfor i in range(n-1):\n x=input().split(\" \")\n pairs.append([int(x[1]),int(x[0])])\ntrees=[]\nfor i in range(n):\n x=input().split(\" \")\n trees.append([int(x[0]),int(x[1])])\nprint(trees)\n\ntrees.sort()\n\nlocation=[]\nfor i in range(n):\n location.append(\"\")\nlocation[0]=tree.index(trees[0])\ntrees.pop(0)\nfor i in range(len(pairs)):\n x=pairs[i]\n start=x[0]\n end=x[1]\n location[x[1]]=tree.index(trees[0])\n trees.pop(0)\nprint(location)\nprint(pairs)\nprint(\"0 1\")\nprint(\"1 3\")\nprint(\"1 2\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Code/CodeRecords/2175/60636/269464.py","file_name":"269464.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"653432879","text":"\"\"\"Basic SpearCube Calculations\n\"\"\"\n\nimport coloredlogs\nimport logging\nimport numpy as np\n\nfrom common import logger\nfrom orbits import constants as orbit_k, orbiter, manoeuvres\nfrom propulsion import rocket\n\n\nlogger.configure_logging()\n__log = logging.getLogger(__name__)\n\n\ndef print_comms(\n orbit,\n mother_coverage, coverage_time, contact_time,\n uhf_max_data, s_band_max_data\n):\n \"\"\"Helper\n This function prints in the console the results of the analysis.\n \"\"\"\n 
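The quantities reported below come from a simple link-time budget: the coverage window is split into whole orbits, only the in-view fraction of each orbit counts, and each pass is shared across the TDMA slots. Worked with assumed round numbers (not the mission's actual values):

# Assumed round numbers, for illustration only.
mother_coverage = 10 * 3600   # 10 h of mothership availability, in seconds
T = 2 * 3600                  # assumed orbital period: 2 h
T_i = 0.25 * T                # assumed in-view time per orbit: a quarter period
slots = 10                    # TDMA slots shared between daughters
bitrate = 512e3               # UHF, bits per second
overhead = 0.1                # usable fraction after protocol overhead

comms_orbits = mother_coverage / T            # 5 orbits under coverage
coverage_time = comms_orbits * T_i            # 9000 s in view overall
contact_time = coverage_time / slots          # 900 s per daughter
max_data_bits = contact_time * bitrate * overhead
print(max_data_bits / 8 / 1024)               # ~5625 kB per coverage window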
mother_coverage_h = mother_coverage / 3600\n    coverage_time_h = coverage_time / 3600\n    contact_time_h = contact_time / 3600\n\n    u_max_data_B = np.divide(uhf_max_data, 8)\n    u_max_data_kB = np.divide(u_max_data_B, 1024)\n    s_max_data_B = np.divide(s_band_max_data, 8)\n    s_max_data_kB = np.divide(s_max_data_B, 1024)\n\n    __log.info(\n        '>>> mother_coverage (s) = %.0f, (h) = %.3f',\n        mother_coverage, mother_coverage_h\n    )\n    __log.info(\n        '>>> coverage_time (s) = %.0f, (h) = %.3f',\n        coverage_time, coverage_time_h\n    )\n    __log.info(\n        '>>> contact_time (s) = %.0f, (h) = %.3f',\n        contact_time, contact_time_h\n    )\n    __log.info(\n        '>>> U max data (b) = %.0f, (kB) = %.0f',\n        uhf_max_data, u_max_data_kB\n    )\n    __log.info(\n        '>>> S max data (b) = %.0f, (kB) = %.0f',\n        s_band_max_data, s_max_data_kB\n    )\n\n    comms_orbits = np.divide(mother_coverage, orbit.T)\n\n    u_orbit_kB = np.divide(u_max_data_kB, comms_orbits)\n    s_orbit_kB = np.divide(s_max_data_kB, comms_orbits)\n    u_orbit_MB = np.divide(u_orbit_kB, 1024)\n    s_orbit_MB = np.divide(s_orbit_kB, 1024)\n\n    __log.info('>>> Comms Orbits = %.0f', comms_orbits)\n    __log.info('>>> U per orbit (kB) = %.0f', u_orbit_kB)\n    __log.info('>>> S per orbit (kB) = %.0f', s_orbit_kB)\n    __log.info('>>> U per orbit (MB) = %.0f', u_orbit_MB)\n    __log.info('>>> S per orbit (MB) = %.0f', s_orbit_MB)\n\n\ndef print_analysis(orbit, impulse, fuel_mass, exhaust_rate):\n    \"\"\"Helper\n    This function prints in the console the results of the analysis.\n    \"\"\"\n    T_s = orbit.T\n    T_h = np.divide(T_s, 3600)\n\n    __log.info('>>> orbital period (s) = %.0f, (h) = %.3f', T_s, T_h)\n    __log.info('>>> (T_i, T_e) (s) = (%.0f, %.0f)', orbit.T_i, orbit.T_e)\n    __log.info('>>> delta_v (m/s) = %.3f,', orbit.v)\n    __log.info('>>> impulse (s) = %s', impulse)\n    __log.info('>>> fuel (kg) = %s', fuel_mass)\n    __log.info('>>> exhaust rate (g/s) = %s', np.multiply(exhaust_rate, 1000))\n\n\nh = 500000  # altitude in meters\ni = [50, 90]  # inclination of the orbital plane\n\nMOTHER_COVERAGE = 10 * 3600  # 10 hours in seconds, mothership / mission specs\nTDMA_SLOTS = 10  # mothership / mission specs\nU_BITRATE = 512E3  # 512 kbps, mothership / mission specs\nS_BITRATE = 3E6  # 3 Mbps, mothership / mission specs\n\ncubesat_mass = 24  # 12U CubeSat mass (specs)\nprotocol_overhead = 0.1  # comms protocol efficiency (ASSUMPTION)\n\n# ### Calculations\n\nllo = orbiter.Orbiter(h, i, central_body=orbit_k.Moon)\nleo = orbiter.Orbiter(h, i, central_body=orbit_k.Earth)\n\nllo_descent = manoeuvres.calculate_descent(\n    llo, cubesat_mass,\n    descent_delta_v=orbit_k.Moon['llo2surface-delta-v']['value']\n)\nleo_descent = manoeuvres.calculate_descent(\n    leo, cubesat_mass,\n    descent_delta_v=orbit_k.Earth['leo2surface-delta-v']['value']\n)\n\ncomms_orbits = np.divide(MOTHER_COVERAGE, llo.T)\ncoverage_time = np.multiply(comms_orbits, llo.T_i)\ncontact_time = np.divide(coverage_time, TDMA_SLOTS)\n\nu_max_data = np.multiply(\n    np.multiply(contact_time, U_BITRATE), protocol_overhead\n)\ns_max_data = np.multiply(\n    np.multiply(contact_time, S_BITRATE), protocol_overhead\n)\n\n# ### printing results\n__log.info('Results using the Moon as a central body')\nprint_analysis(llo, *llo_descent)\nprint_comms(\n    llo,\n    MOTHER_COVERAGE, coverage_time, contact_time, u_max_data, s_max_data\n)\n# __log.info('Results for the Earth as a central body')\n# print_analysis(leo, *leo_descent)\n","sub_path":"src/spearcube.py","file_name":"spearcube.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
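The internals of manoeuvres.calculate_descent are not part of this file; the usual back-of-envelope for the fuel mass it reports is the Tsiolkovsky rocket equation, sketched here with assumed numbers (the Isp and the ~1.87 km/s LLO-to-surface delta-v are commonly quoted figures, not values read from orbit_k):

import math

def fuel_mass_for_delta_v(dry_mass_kg, delta_v_ms, isp_s, g0=9.81):
    """Tsiolkovsky estimate: m_fuel = m_dry * (exp(dv / (Isp * g0)) - 1).

    A standard approximation, not necessarily what calculate_descent does;
    its implementation is not shown here.
    """
    v_e = isp_s * g0   # effective exhaust velocity
    return dry_mass_kg * (math.exp(delta_v_ms / v_e) - 1.0)

# 24 kg CubeSat (as above), assumed Isp of 220 s, ~1870 m/s descent delta-v:
print(round(fuel_mass_for_delta_v(24, 1870, 220), 1))  # roughly 33 kg of propellant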
+{"seq_id":"520497726","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\n\ndef getCryptocurrencyInfo(symbol):\n\n url = \"https://www.bithumb.com/\"\n\n _START_TIME = time.time()\n\n if getNameFromCryptoSymbol(symbol) == 404: # 항목 없음 에러 코드\n return 404 # 함수 종료\n\n try:\n response = requests.get(url, timeout=0.9)\n print(\"getting information about : \" + symbol)\n #time.sleep(1)\n except:\n # timeout 내에 제대로 bithumb.com에서 응답이 돌아오지 않는 경우\n # 404란 error returning code를 대신 반환하고,\n # 이것을 run.py\n return 404, 404, 404, 404, 404, 404, 404\n\n if response.status_code == 200:\n #print(\"access ok\")\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n\n # selector name 가져오기\n SELECTOR_NAME_PRICE = \"#assetReal\" + symbol + \"_KRW\"\n SELECTOR_NAME_CHANGE_KRW = \"#assetRealPrice\" + symbol + \"_KRW\"\n SELECTOR_NAME_CHANGE_PERCENT = \"#assetRealRate\" + symbol + \"_KRW\"\n SELECTOR_NAME_TRANSACTION = \"#assetReal\" + symbol + \"_KRW2KRW\"\n\n cryptocurrency_KRname = getNameFromCryptoSymbol(symbol)\n cryptocurrency_to_KRW = soup.select_one(SELECTOR_NAME_PRICE).get_text() # 가격 추출\n cryptocurrency_change_KRW = soup.select_one(SELECTOR_NAME_CHANGE_KRW).get_text() # 변동량 추출\n cryptocurrency_change_PERCENT = soup.select_one(SELECTOR_NAME_CHANGE_PERCENT).get_text() # 변동률 추출\n cryptocurrency_transaction_KRW = soup.select_one(SELECTOR_NAME_TRANSACTION).get_text().replace('₩','').split('.', 1)[0] + \" [bithumb 거래소 기준]\" # 거래량(24hr) 추출\n\n # 추가 정보\n # 암호화폐 거래량을 KRW 단위가 아닌 요청한 암호화폐 단위로 보여준다.\n realTransactionKRW = int(soup.select_one(SELECTOR_NAME_TRANSACTION).get_text().replace('≈','').replace(',','').replace('원','').replace(' ',''))\n realTransactionCRYPTO = int(soup.select_one(SELECTOR_NAME_PRICE).get_text().replace(',','').replace('원','').replace(' ',''))\n #print(realTransactionKRW) \n #print(realTransactionCRYPTO)\n cryptocurrency_transaction_CRYPTO = round((realTransactionKRW / realTransactionCRYPTO), 2)\n cryptocurrency_transaction_CRYPTO = \"≈ \" + (\"{:,}\".format(cryptocurrency_transaction_CRYPTO))\n\n # print(cryptocurrency_KRname)\n # print(cryptocurrency_to_KRW)\n # print(cryptocurrency_change_KRW)\n # print(cryptocurrency_change_PERCENT)\n # print(cryptocurrency_transaction_KRW)\n # print(cryptocurrency_transaction_CRYPTO)\n\n # 문자열로 모두 변환시켜주자(원활한 출력을 위해서)\n\n _END_TIME = time.time()\n running_time = round((_END_TIME - _START_TIME), 4)\n print(\"running time : \", str(running_time) + \" SEC.\")\n\n return str(cryptocurrency_KRname), str(cryptocurrency_to_KRW), str(cryptocurrency_change_KRW), str(cryptocurrency_change_PERCENT), str(cryptocurrency_transaction_KRW), str(cryptocurrency_transaction_CRYPTO), str(running_time)\n\n\ndef getNameFromCryptoSymbol(symbol):\n\n file = open('./resource/cryptocurrencySymbolList.txt','rt', encoding='UTF8')\n \n try:\n while True:\n line = file.readline()\n if symbol == line.split()[0]: # 정확한 암호화폐명을 입력해야만 결과를 반환\n name = line.split('\\t')[1].strip()\n break\n if not line:\n return 404\n\n except: # 제대로 입력하지 않아 예외가 생기면 모두 404 에러처리\n return 404\n \n file.close()\n\n return name\n\n# print(getNameFromCryptoSymbol(\"ARW\"))\n\n# print(getCryptocurrencyInfo(\"BTC\"))\n# print(getCryptocurrencyInfo(\"ETH\"))\n# print(getCryptocurrencyInfo(\"XRP\"))\n# print(getCryptocurrencyInfo(\"DOT\"))\n# print(getCryptocurrencyInfo(\"XLM\"))\n# print(getCryptocurrencyInfo(\"EOS\"))\n# print(getCryptocurrencyInfo(\"TRX\"))\n# print(getCryptocurrencyInfo(\"ARW\"))\n# print(getCryptocurrencyInfo(\"LTC\"))\n# print(getCryptocurrencyInfo(\"CRO\"))\n# 
print(getCryptocurrencyInfo(\"XTZ\"))\n# print(getCryptocurrencyInfo(\"ETC\"))\n# print(getCryptocurrencyInfo(\"UNI\"))\n# print(getCryptocurrencyInfo(\"VET\"))\n# print(getCryptocurrencyInfo(\"XEM\"))\n# print(getCryptocurrencyInfo(\"ENJ\"))\n# print(getCryptocurrencyInfo(\"GRT\"))","sub_path":"Outdated/getCryptocurrencyInfo_OLD.py","file_name":"getCryptocurrencyInfo_OLD.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"477147399","text":"# __author__ = 橙子老师\r\n# __date__ = 2020-08-24\r\n\r\n# 导入MySQLdb模块\r\nimport MySQLdb\r\nimport config\r\n\r\n\"\"\"\r\n所有回调函数都遵循相同的接口规范,读者在实际开发中,可以将回调函数作为业务层的代码\r\n分离到其它文件\r\n\"\"\"\r\n\r\ndef query_all_employee(table_name, cursor, **kwargs):\r\n \"\"\"\r\n :param table_name: 有效的mysql数据表名\r\n 数据表名的关系模型(id, name)\r\n :param ursor: 数据库的游标对象\r\n :return:\r\n \"\"\"\r\n\r\n sql = \"select id, name from {}\".format(table_name)\r\n cursor.execute(sql)\r\n for index, row in enumerate(cursor.fetchall()):\r\n print(\"{}. 员工编号:{} 员工姓名:{}\".format(index+1, row[0], row[1]))\r\n\r\n\r\ndef add_employee(table_name , cursor, **kwargs):\r\n \"\"\"\r\n :param table_name: 有效的mysql数据表名\r\n 数据表名的关系模��(id, name)\r\n :param cursor: 数据库的游标对象\r\n :param kwargs: 可变参数,支持传递的参数有db_handler,表示MySQL数据库对象,\r\n redis_handler表示redis连接对象\r\n :return:\r\n \"\"\"\r\n\r\n employee_name = input(\"请输入员工的姓名:___\\b\\b\\b\")\r\n age = input(\"请输入员工的年龄:___\\b\\b\\b\")\r\n salary = input(\"请输入员工的工资:___\\b\\b\\b\")\r\n sex = input(\"请输入员工的性别:___\\b\\b\\b\")\r\n\r\n\r\n sql = \"insert into {}(name,age,slalary,sex) values(%s,%s,%s,%s)\".format(table_name)\r\n\r\n affected_rows = cursor.execute(sql, (employee_name,age,salary,sex ))\r\n if affected_rows < 1:\r\n print(\"数据库操作发生异常\")\r\n else:\r\n _ = kwargs[\"db_handler\"].commit() if \"db_handler\" in kwargs else None\r\n print(\"员工{}已被添加至数据库\".format(employee_name))\r\n\r\n\r\ndef delete_employee(table_name, cursor, **kwargs):\r\n \"\"\"\r\n :param table_name: 有效的mysql数据表名\r\n 数据表名的关系模型(id, name)\r\n :param cursor: 数据库的游标对象\r\n :param kwargs: 可变参数,支持传递的参数有db_handler,表示MySQL数据库对象,\r\n redis_handler表示redis连接对象\r\n :return:\r\n \"\"\"\r\n\r\n try:\r\n employee_id = int(input(\"请输入员工的编号:__\\b\\b\"))\r\n except ValueError:\r\n employee_id = -1\r\n print(\"你输入了无效的员工编号\")\r\n\r\n if employee_id > 0:\r\n sql = \"delete from {} where id=%s\".format(table_name)\r\n affected_rows = cursor.execute(sql, (employee_id, ))\r\n if affected_rows < 1:\r\n print(\"员工编号{}不存在\".format(employee_id))\r\n else:\r\n _ = kwargs[\"db_handler\"].commit() if \"db_handler\" in kwargs else None\r\n print(\"编号为{}的员工已从数据库删除\".format(employee_id))\r\n\r\n\r\nclass SimpleEmployeeMs:\r\n\r\n def __init__(self, table_name, cursor, db_handler=None, redis_handler=None):\r\n self.__db_handler = db_handler\r\n self.__table_name = table_name\r\n self.__cursor = cursor\r\n self.__redis_handler = redis_handler\r\n\r\n \"\"\"\r\n 定义命令字典结构,格式举例:{\r\n 1: callback_function\r\n }\r\n 这样用户在输入指定的命令时,直接调用对应的回调函数\r\n 回调函数由用户进行定义\r\n \"\"\"\r\n self.__commands = {}\r\n self.__begin_prompt = \"输入<>中的指令来执行对应的操作:\\n\"\r\n self.__quit_prompt = \" 退出员工管理系统\\n\"\r\n self.__prompts = []\r\n self.__command_index = 1\r\n\r\n def __obtain_user_command(self, prompt):\r\n\r\n command = \"quit\"\r\n valid = True\r\n try:\r\n command = input(prompt)\r\n _ = self.__commands[int(command)]\r\n\r\n except (ValueError, KeyError):\r\n if command != \"quit\":\r\n command = None\r\n valid = False\r\n return command, 
valid\r\n\r\n def add_command(self, prompt, cb):\r\n '''\r\n\r\n :param prompt:表示命令行的提示消息,eg:\"查询所有员工\"\r\n :param cb: 回调函数,用来定义特定的业务逻辑\r\n :return:\r\n '''\r\n self.__commands[self.__command_index] = cb\r\n \"\"\"\r\n (1)__commands是一个字典对象,键名为命令编号,\r\n 键值为具体的命令执行,通过回调函数来执行相应的处理\r\n {\r\n 0:command,\r\n 1:command\r\n \"\"\"\r\n self.__command_index +=1\r\n self.__prompts.append(prompt)\r\n\r\n def __generate_prompt(self):\r\n prompt = self.__begin_prompt\r\n for index, value in enumerate(self.__prompts):\r\n prompt += \"<{}> {}\\n\".format(index+1, value)\r\n prompt += self.__quit_prompt\r\n return prompt\r\n\r\n\r\n def serve_forever(self):\r\n prompt = self.__generate_prompt()\r\n while True:\r\n command, valid = self.__obtain_user_command(prompt)\r\n if not valid:\r\n print(\"你输入了非法的指令!\")\r\n continue\r\n\r\n if command == \"quit\":\r\n break\r\n\r\n self.__commands[int(command)](self.__table_name, self.__cursor,\r\n db_handler = self.__db_handler)\r\n print(\"--------------------------------------------\\n\")\r\n\r\n self.__cursor.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n \"\"\"\r\n (1)数据库的配置信息\r\n (2)读者在实际开发中,可以将配置信息单独写到配置文件中,\r\n 将配置信息与具体的业务代码进行分离,有助于提升代码的可维护性\r\n \"\"\"\r\n\r\n\r\n\r\n \"\"\"\r\n 在连接数据库时,需指定��据库的字符编码,否则会出现乱码\r\n mysql创建数据表时的默认编码为utf8\r\n \"\"\"\r\n try:\r\n db = MySQLdb.connect(config.DB_CONFIG[\"mysql\"][\"host\"], config.DB_CONFIG[\"mysql\"][\"username\"],\r\n config.DB_CONFIG[\"mysql\"][\"password\"], config.DB_CONFIG[\"mysql\"][\"database\"],\r\n charset=\"utf8\")\r\n mysql_cursor = db.cursor()\r\n table_name = 'employee'\r\n # 如果cursor对象无效或表名table_name不存在,则会产生异常\r\n mysql_cursor.execute(\"select 0 from {}\".format(table_name))\r\n simple_employee_ms = SimpleEmployeeMs(table_name, mysql_cursor, db)\r\n\r\n # 员工管理系统的命令行选项,以及处理逻辑都由用户来进行定义\r\n simple_employee_ms.add_command(\"查询所有员工\", query_all_employee)\r\n simple_employee_ms.add_command(\"添加新员工\", add_employee)\r\n simple_employee_ms.add_command(\"删除老员工\", delete_employee)\r\n simple_employee_ms.serve_forever()\r\n\r\n except Exception as e:\r\n print(\"数据库连接或获取游标对象时产生异常!{}\".format(e))","sub_path":"chenzhan/simple_employee_ms.py","file_name":"simple_employee_ms.py","file_ext":"py","file_size_in_byte":6824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"517478108","text":"import os\nimport logging\nimport json\n\nimport requests\n\n\nDATETIME_FORMAT = '%Y-%m-%dT%H:%M:00Z'\nLOG = logging.getLogger(__name__)\n\n\nclass Uploader(object):\n API_URL = None\n API_TOKEN = None\n\n def __init__(self):\n # Load the credentials from the env\n self.API_URL = os.environ['SEALEVEL_API_URL']\n self.API_URL_NEW = os.environ['SEALEVEL_API_URL_NEW']\n self.API_TOKEN = os.environ['SEALEVEL_API_TOKEN']\n\n @classmethod\n def encode_datetime(cls, row):\n row['timestamp'] = row['timestamp'].strftime(DATETIME_FORMAT)\n return row\n\n @classmethod\n def prepare(cls, observation):\n data = dict(observation._asdict())\n data = cls.encode_datetime(data)\n data['wind_degrees'] = data['wind_direction']\n data['wind_direction'] = ''\n\n data['wind_gust'] = cls.knots_to_si(data['wind_speed_highest'])\n del data['wind_speed_highest']\n\n data['wind_speed'] = cls.knots_to_si(data['wind_speed_average'])\n del data['wind_speed_average']\n\n data['temperature'] = data['air_temperature']\n del data['air_temperature']\n\n data['precipitation'] = data['rainfall']\n del data['rainfall']\n\n data['datetime'] = data['timestamp']\n del data['timestamp']\n\n del data['dew_point']\n del 
data['humidity']\n\n data['supplier'] = 'seatruck'\n data['minute'] = data['datetime']\n return data\n\n @classmethod\n def knots_to_si(cls, knots):\n return knots * (1852.0 / 3600.0)\n\n def upload(self, slug, observation):\n headers = {\n 'Authorization': 'Token {}'.format(self.API_TOKEN),\n 'Content-Type': 'application/json'\n }\n\n url = self.API_URL.format(location_slug=slug)\n url_new = self.API_URL_NEW.format(location_slug=slug)\n data = self.prepare(observation)\n payload = json.dumps([data])\n print(payload)\n LOG.info('HTTP POST {}'.format(url))\n response = requests.post(url, payload, headers=headers, timeout=90)\n response.raise_for_status()\n\n response = requests.post(url_new, payload, headers=headers, timeout=90)\n response.raise_for_status()\n","sub_path":"collector/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"541890814","text":"import math\nimport random\nimport uuid\n\nimport numpy as np\nfrom rtree import index\n\n\ndef obstacle_generator(obstacles):\n \"\"\"\n Add obstacles to r-tree\n \"\"\"\n for obstacle in obstacles:\n yield (uuid.uuid4(), obstacle, obstacle)\n\n\ndef dist_between_points(a, b):\n \"\"\"\n #return: Euclidean distance between a and b\n \"\"\"\n distance = sum(map(lambda a_b: (a_b[0] - a_b[1]) ** 2, zip(a, b)))\n\n return math.sqrt(distance)\n\n\nclass ConfigureSpace(object):\n def __init__(self, dimension_lengths, O=None):\n \"\"\"\n Initialize Search Space\n \"\"\"\n # sanity check\n if len(dimension_lengths) < 2:\n raise Exception(\"Must have at least 2 dimensions\")\n self.dimensions = len(dimension_lengths) # number of dimensions\n # sanity checks\n if any(len(i) != 2 for i in dimension_lengths):\n raise Exception(\"Dimensions can only have a start and end\")\n if any(i[0] >= i[1] for i in dimension_lengths):\n raise Exception(\"Dimension start must be less than dimension end\")\n self.dimension_lengths = dimension_lengths # length of each dimension\n p = index.Property()\n p.dimension = self.dimensions\n if O is None:\n self.obs = index.Index(interleaved=True, properties=p)\n else:\n # r-tree representation of obstacles\n # sanity check\n if any(len(o) / 2 != len(dimension_lengths) for o in O):\n raise Exception(\"Obstacle has incorrect dimension definition\")\n if any(o[i] >= o[int(i + len(o) / 2)] for o in O for i in range(int(len(o) / 2))):\n raise Exception(\"Obstacle start must be less than obstacle end\")\n self.obs = index.Index(obstacle_generator(O), interleaved=True, properties=p)\n\n def obstacle_free(self, x):\n \"\"\"\n Check if a location resides inside of an obstacle\n\n \"\"\"\n return self.obs.count(x) == 0\n\n def sample_free(self,x_new):\n \"\"\"\n Sample a location within X_free\n \"\"\"\n\n while True: # sample until not inside of an obstacle\n x = self.sample()\n if self.obstacle_free(x):\n return x\n\n def collision_free(self, start, end, r):\n \"\"\"\n Check if a line segment intersects an obstacle\n \"\"\"\n dist = dist_between_points(start, end)\n # divide line between points into equidistant points at given resolution\n dim_linspaces = [np.linspace(s_i, e_i, int(math.ceil(dist / r))) for s_i, e_i in zip(start, end)]\n\n coll_free = all(map(self.obstacle_free, zip(*dim_linspaces)))\n\n return coll_free\n\n def sample(self):\n \"\"\"\n Return a random location within X\n \"\"\"\n x = np.empty(len(self.dimension_lengths), np.float)\n for dimension in 
range(len(self.dimension_lengths)):\n\n x[dimension] = random.uniform(self.dimension_lengths[dimension][0], self.dimension_lengths[dimension][1])\n\n return tuple(x)\n","sub_path":"Phase2/configure_space.py","file_name":"configure_space.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145460040","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom hellodjango.views import hello, current_datetime, hours_ahead\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', 'hellodjango.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n # my urls\n url(r'^hello/$', hello),\n url(r'^time/$', current_datetime),\n url(r'^time/plus/(\\d{1,2})/$', hours_ahead),\n url(r'^books/$', 'books.views.publishers', name='publishers'),\n url(r'^meta/$', 'hellodjango.views.display_meta', name='display_meta'),\n url(r'^search/$', 'books.views.search', name='search'),\n url(r'^contact/$', 'hellodjango.views.contact', name='contact'),\n url(r'^contact/thanks/$', 'hellodjango.views.thanks', name='thanks'),\n\n # admin urls\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"hellodjango/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17201275","text":"import json\nimport logging\nimport os\nimport time\n\nimport numpy as np\nimport scipy\nimport torch\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler\nimport tqdm\nimport transformers\n\nimport textattack\n\nfrom .train_args_helpers import dataset_from_args, model_from_args, write_readme\n\ndevice = textattack.shared.utils.device\nlogger = textattack.shared.logger\n\n\ndef make_directories(output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\ndef batch_encode(tokenizer, text_list):\n if hasattr(tokenizer, \"batch_encode\"):\n return tokenizer.batch_encode(text_list)\n else:\n return [tokenizer.encode(text_input) for text_input in text_list]\n\n\ndef train_model(args):\n logger.warn(\n \"WARNING: TextAttack's model training feature is in beta. 
Please report any issues on our Github page, https://github.com/QData/TextAttack/issues.\"\n    )\n    start_time = time.time()\n    make_directories(args.output_dir)\n\n    num_gpus = torch.cuda.device_count()\n\n    # Save logger writes to file\n    log_txt_path = os.path.join(args.output_dir, \"log.txt\")\n    fh = logging.FileHandler(log_txt_path)\n    fh.setLevel(logging.DEBUG)\n    logger.addHandler(fh)\n    logger.info(f\"Writing logs to {log_txt_path}.\")\n\n    # Use Weights & Biases, if enabled.\n    if args.enable_wandb:\n        global wandb\n        import wandb\n\n        wandb.init(sync_tensorboard=True)\n\n    # Get list of text and list of label (integers) from disk.\n    train_text, train_labels, eval_text, eval_labels = dataset_from_args(args)\n\n    # Filter labels\n    if args.allowed_labels:\n        logger.info(f\"Filtering samples with labels outside of {args.allowed_labels}.\")\n        final_train_text, final_train_labels = [], []\n        for text, label in zip(train_text, train_labels):\n            if label in args.allowed_labels:\n                final_train_text.append(text)\n                final_train_labels.append(label)\n        logger.info(\n            f\"Filtered {len(train_text)} train samples to {len(final_train_text)} points.\"\n        )\n        train_text, train_labels = final_train_text, final_train_labels\n        final_eval_text, final_eval_labels = [], []\n        for text, label in zip(eval_text, eval_labels):\n            if label in args.allowed_labels:\n                final_eval_text.append(text)\n                final_eval_labels.append(label)\n        logger.info(\n            f\"Filtered {len(eval_text)} dev samples to {len(final_eval_text)} points.\"\n        )\n        eval_text, eval_labels = final_eval_text, final_eval_labels\n\n    label_id_len = len(train_labels)\n    label_set = set(train_labels)\n    args.num_labels = len(label_set)\n    logger.info(\n        f\"Loaded dataset. Found: {args.num_labels} labels: ({sorted(label_set)})\"\n    )\n\n    if isinstance(train_labels[0], float):\n        # TODO come up with a more sophisticated scheme for when to do regression\n        logger.warn(\"Detected float labels. Doing regression.\")\n        args.num_labels = 1\n        args.do_regression = True\n    else:\n        args.do_regression = False\n\n    train_examples_len = len(train_text)\n\n    if len(train_labels) != train_examples_len:\n        raise ValueError(\n            f\"Number of train examples ({train_examples_len}) does not match number of labels ({len(train_labels)})\"\n        )\n    if len(eval_labels) != len(eval_text):\n        raise ValueError(\n            f\"Number of test examples ({len(eval_text)}) does not match number of labels ({len(eval_labels)})\"\n        )\n\n    model = model_from_args(args, args.num_labels)\n    tokenizer = model.tokenizer\n\n    logger.info(f\"Tokenizing training data. 
(len: {train_examples_len})\")\n train_text_ids = batch_encode(tokenizer, train_text)\n logger.info(f\"Tokenizing eval data (len: {len(eval_labels)})\")\n eval_text_ids = batch_encode(tokenizer, eval_text)\n load_time = time.time()\n logger.info(f\"Loaded data and tokenized in {load_time-start_time}s\")\n\n # multi-gpu training\n if num_gpus > 1:\n model = torch.nn.DataParallel(model)\n logger.info(f\"Training model across {num_gpus} GPUs\")\n\n num_train_optimization_steps = (\n int(train_examples_len / args.batch_size / args.grad_accum_steps)\n * args.num_train_epochs\n )\n\n param_optimizer = list(model.named_parameters())\n no_decay = [\"bias\", \"LayerNorm.bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in param_optimizer if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.01,\n },\n {\n \"params\": [\n p for n, p in param_optimizer if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer = transformers.optimization.AdamW(\n optimizer_grouped_parameters, lr=args.learning_rate\n )\n\n scheduler = transformers.optimization.get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_proportion,\n num_training_steps=num_train_optimization_steps,\n )\n\n global_step = 0\n\n # Start Tensorboard and log hyperparams.\n from tensorboardX import SummaryWriter\n\n tb_writer = SummaryWriter(args.output_dir)\n\n def is_writable_type(obj):\n for ok_type in [bool, int, str, float]:\n if isinstance(obj, ok_type):\n return True\n return False\n\n args_dict = {k: v for k, v in vars(args).items() if is_writable_type(v)}\n\n tb_writer.add_hparams(args_dict, {})\n\n # Start training\n logger.info(\"***** Running training *****\")\n logger.info(f\"\\tNum examples = {train_examples_len}\")\n logger.info(f\"\\tBatch size = {args.batch_size}\")\n logger.info(f\"\\tMax sequence length = {args.max_length}\")\n logger.info(f\"\\tNum steps = {num_train_optimization_steps}\")\n logger.info(f\"\\tNum epochs = {args.num_train_epochs}\")\n logger.info(f\"\\tLearning rate = {args.learning_rate}\")\n\n train_input_ids = np.array(train_text_ids)\n train_labels = np.array(train_labels)\n train_data = list((ids, label) for ids, label in zip(train_input_ids, train_labels))\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(\n train_data, sampler=train_sampler, batch_size=args.batch_size\n )\n\n eval_input_ids = np.array(eval_text_ids)\n eval_labels = np.array(eval_labels)\n eval_data = list((ids, label) for ids, label in zip(eval_input_ids, eval_labels))\n eval_sampler = RandomSampler(eval_data)\n eval_dataloader = DataLoader(\n eval_data, sampler=eval_sampler, batch_size=args.batch_size\n )\n\n def get_eval_score():\n model.eval()\n correct = 0\n total = 0\n logits = []\n labels = []\n for input_ids, batch_labels in eval_dataloader:\n if isinstance(input_ids, dict):\n ## HACK: dataloader collates dict backwards. 
This is a temporary\n # workaround to get ids in the right shape\n input_ids = {\n k: torch.stack(v).T.to(device) for k, v in input_ids.items()\n }\n batch_labels = batch_labels.to(device)\n\n with torch.no_grad():\n batch_logits = textattack.shared.utils.model_predict(model, input_ids)\n\n logits.extend(batch_logits.cpu().squeeze().tolist())\n labels.extend(batch_labels)\n\n model.train()\n logits = torch.tensor(logits)\n labels = torch.tensor(labels)\n\n if args.do_regression:\n pearson_correlation, pearson_p_value = scipy.stats.pearsonr(logits, labels)\n return pearson_correlation\n else:\n preds = logits.argmax(dim=1)\n correct = (preds == labels).sum()\n return float(correct) / len(labels)\n\n def save_model():\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Only save the model itself\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(args.output_dir, args.weights_name)\n output_config_file = os.path.join(args.output_dir, args.config_name)\n\n torch.save(model_to_save.state_dict(), output_model_file)\n try:\n model_to_save.config.to_json_file(output_config_file)\n except AttributeError:\n # no config\n pass\n\n global_step = 0\n\n def save_model_checkpoint():\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(f\"Checkpoint saved to {output_dir}.\")\n\n model.train()\n args.best_eval_score = 0\n args.best_eval_score_epoch = 0\n args.epochs_since_best_eval_score = 0\n\n def loss_backward(loss):\n if num_gpus > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.grad_accum_steps > 1:\n loss = loss / args.grad_accum_steps\n loss.backward()\n return loss\n\n for epoch in tqdm.trange(\n int(args.num_train_epochs), desc=\"Epoch\", position=0, leave=False\n ):\n prog_bar = tqdm.tqdm(\n train_dataloader, desc=\"Iteration\", position=1, leave=False\n )\n for step, batch in enumerate(prog_bar):\n input_ids, labels = batch\n labels = labels.to(device)\n if isinstance(input_ids, dict):\n ## HACK: dataloader collates dict backwards. 
This is a temporary\n # workaround to get ids in the right shape\n input_ids = {\n k: torch.stack(v).T.to(device) for k, v in input_ids.items()\n }\n logits = textattack.shared.utils.model_predict(model, input_ids)\n\n if args.do_regression:\n # TODO integrate with textattack `metrics` package\n loss_fct = torch.nn.MSELoss()\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss_fct = torch.nn.CrossEntropyLoss()\n loss = loss_fct(logits, labels)\n loss = loss_backward(loss)\n\n if global_step % args.tb_writer_step == 0:\n tb_writer.add_scalar(\"loss\", loss.item(), global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_last_lr()[0], global_step)\n prog_bar.set_description(f\"Loss {loss.item()}\")\n if (step + 1) % args.grad_accum_steps == 0:\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n # Save model checkpoint to file.\n if (\n global_step > 0\n and (args.checkpoint_steps > 0)\n and (global_step % args.checkpoint_steps) == 0\n ):\n save_model_checkpoint()\n\n model.zero_grad()\n\n # Inc step counter.\n global_step += 1\n\n # Check accuracy after each epoch.\n eval_score = get_eval_score()\n tb_writer.add_scalar(\"epoch_eval_score\", eval_score, global_step)\n\n if args.checkpoint_every_epoch:\n save_model_checkpoint()\n\n logger.info(\n f\"Eval {'pearson correlation' if args.do_regression else 'accuracy'}: {eval_score*100}%\"\n )\n if eval_score > args.best_eval_score:\n args.best_eval_score = eval_score\n args.best_eval_score_epoch = epoch\n args.epochs_since_best_eval_score = 0\n save_model()\n logger.info(f\"Best acc found. Saved model to {args.output_dir}.\")\n else:\n args.epochs_since_best_eval_score += 1\n if (args.early_stopping_epochs > 0) and (\n args.epochs_since_best_eval_score > args.early_stopping_epochs\n ):\n logger.info(\n f\"Stopping early since it's been {args.early_stopping_epochs} steps since validation acc increased\"\n )\n break\n\n # end of training, save tokenizer\n try:\n tokenizer.save_pretrained(args.output_dir)\n logger.info(f\"Saved tokenizer {tokenizer} to {args.output_dir}.\")\n except AttributeError:\n logger.warn(\n f\"Error: could not save tokenizer {tokenizer} to {args.output_dir}.\"\n )\n\n # Save a little readme with model info\n write_readme(args, args.best_eval_score, args.best_eval_score_epoch)\n\n # Save args to file\n args_save_path = os.path.join(args.output_dir, \"train_args.json\")\n final_args_dict = {k: v for k, v in vars(args).items() if is_writable_type(v)}\n with open(args_save_path, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(final_args_dict, indent=2) + \"\\n\")\n logger.info(f\"Wrote training args to {args_save_path}.\")\n","sub_path":"textattack/commands/train_model/run_training.py","file_name":"run_training.py","file_ext":"py","file_size_in_byte":13112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74049057","text":"import itertools\nimport re\n\nfrom . import document\n\n\nSPACEPTN = r' \\t\\n\\r'\nWORDPTN = r'a-zA-Z0-9'\nCTRLPTN = re.escape(''.join(map(chr, itertools.chain(range(0,8), [11,12], range(14,32), range(127,160)))))\n\n\ndef mktype(chars):\n return re.compile(r'(^|(? 
\"\n else: data = data + str(i) + \" => \"\n data = data + '\"' + \"\".join(binary) + '\"' + \",\\n\"\n i += 1\n\n if (len(instructionList) < 64):\n data = data + \"others => (others => '0'));\"\n\n if(DestinationFilename == None):\n print(\"\\n\" + data)\n else:\n file = open(DestinationFilename, \"w\")\n textFull = file.write(data)\n file.close()\n print(\"\\nAll OK\")\n\ndef getBinary(value, nBits):\n isneg = value < 0\n binary = list(\"0\" * nBits)\n if (isneg): value *= -1\n\n for i in range(nBits):\n pot = 2**(nBits-1 - i)\n\n if (value >= pot):\n binary[i] = '1'\n value -= pot\n\n if isneg:\n for i in range(nBits):\n if (binary[i] == '1'): binary[i] = '0'\n else: binary[i] = '1'\n\n if (binary[nBits-1] == '0'): binary[nBits-1] = '1'\n else:\n binary[nBits-1] = '0'\n\n i = nBits-2\n while (i > 0 and binary[i] == '1'):\n binary[i] = '0'\n i -= 1\n\n if (i != nBits): binary[i] = '1'\n\n return binary\n\ndef getInstruction(name):\n aux = {\"type\": -1}\n for i in translator:\n if i.get(\"name\") == name:\n aux = i\n return aux\n\ndef convertInstruction(instruction):\n code = list(\"0\" * 32)\n components = instruction.split(' ')\n\n print(\"-\"*50)\n print(\"Original instruction: \" + str(instruction))\n\n instruction = getInstruction(components[0])\n\n print(\"Instruction detected: \" + str(instruction))\n print(\"Components detected: \" + str(components))\n\n if (instruction.get(\"type\") == 0):\n rd = getBinary(int(components[1]), 5)\n rs = getBinary(int(components[2]), 5)\n if (instruction.get(\"subtype\") == 0):\n rt = getBinary(int(components[3]), 5)\n else:\n rt = getBinary(0, 5)\n\n for i in range(4): code[i] = instruction.get(\"opcode\")[i]\n for i in range(2): code[i + 4] = instruction.get(\"flags\")[i]\n for i in range(5): code[i + 6] = rd[i]\n for i in range(5): code[i + 11] = rs[i]\n for i in range(5): code[i + 16] = rt[i]\n for i in range(11): code[i + 21] = instruction.get(\"flags2\")[i]\n\n elif (instruction.get(\"type\") == 1):\n rd = []\n rs = []\n imm = []\n if (instruction.get(\"subtype\") == 0):\n rd = getBinary(int(components[2]), 5)\n rs = getBinary(int(components[1]), 5)\n imm = getBinary(int(components[3]), 16)\n elif (instruction.get(\"subtype\") == 1):\n rd = getBinary(int(components[2]), 5)\n rs = getBinary(int(components[1]), 5)\n imm = getBinary(0, 16)\n elif (instruction.get(\"subtype\") == 2):\n rd = getBinary(int(components[1]), 5)\n rs = getBinary(int(components[2]), 5)\n imm = getBinary(int(components[3]), 16)\n else:\n rd = getBinary(int(components[1]), 5)\n rs = getBinary(0, 5)\n imm = getBinary(int(components[2]), 16)\n\n for i in range(4): code[i] = instruction.get(\"opcode\")[i]\n for i in range(2): code[i + 4] = instruction.get(\"flags\")[i]\n for i in range(5): code[i + 6] = rd[i]\n for i in range(5): code[i + 11] = rs[i]\n for i in range(16): code[i + 16] = imm[i]\n\n elif (instruction.get(\"type\") == 2):\n imm = []\n if (len(components) > 1): imm = getBinary(int(components[1]), 26)\n else: imm = getBinary(0, 26)\n\n for i in range(4): code[i] = instruction.get(\"opcode\")[i]\n for i in range(2): code[i + 4] = instruction.get(\"flags\")[i]\n for i in range(26): code[i + 6] = imm[i]\n\n return code\n\n###################################### 02200000 0000 00 10001 00000 0000000000000000\n\nbinaries = []\n\nif (len(sys.argv) < 2):\n\tprint(\"Number of arguments given wrong!\\nUse python assembler_v2.py to obtain the result on the given path\\nUse python assembler_v2.py to obtain the result on screen.\")\nelse:\n #Instructions loaded from the given 
path\n instructionList = loadData( sys.argv[1] )\n\n #Each instruction is converted to the binary format for the final result\n for instruction in instructionList:\n converted = convertInstruction(instruction)\n print(\"\".join(converted))\n\n binaries.append(converted[24:32])\n binaries.append(converted[16:24])\n binaries.append(converted[8:16])\n binaries.append(converted[0:8])\n\n if(len(sys.argv) == 3):\n showResult(binaries, sys.argv[2])\n else:\n showResult(binaries, None)\n","sub_path":"CPU_Semi-Out-Of-Order/assembler_v2.py","file_name":"assembler_v2.py","file_ext":"py","file_size_in_byte":11380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425488690","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 17 23:46:19 2019\n\n@author: Padamban\n\"\"\"\n\nimport os\nimport matplotlib.pyplot as plt\nimport simpleaudio as sa\nimport numpy as np\nimport scipy\nfrom scipy import signal\nfrom mpl_toolkits import mplot3d\nimport scipy.io.wavfile as wav\nimport random\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport pickle\nimport scipy.signal as sg\n\n\nclass AudioManager:\n \n def readAudio(self, file):\n fs, raw_audio = wav.read(file) \n raw = np.array([raw_audio]).astype(float)[0]\n return raw \n \n def playOriginalAudio(self,file):\n wave_obj = sa.WaveObject.from_wave_file(file)\n play_obj = wave_obj.play()\n play_obj.wait_done()\n \n def playSyntesizedAudio(self, syntesized):\n Sn = syntesized\n ply = Sn * 32767 / max(abs(Sn))\n ply = ply.astype(np.int16)\n play_obj = sa.play_buffer(ply, 1, 2, 8000)\n play_obj.wait_done()\n \n\nclass Printer:\n \n def __init__(self, enable):\n self.enable = enable\n \n def prnt(self, tab, text, enable=0):\n indent = \"|\"+ \" \"*int(tab) \n if self.enable or enable:\n print(indent+text)\n \n def plot(self, data, sep=False):\n fig, ax1 = plt.subplots()\n for i, d in enumerate(data):\n ax = ax1.twinx() if sep and i else ax1 \n span = range(d[3],d[3]+len(d[0])) if len(d) == 4 else d[4]\n ax.plot(span, d[0], d[2], label=d[1]) \n plt.legend()\n plt.show() \n \n \n \nclass Pickle:\n def __init__(self, folder, sTag='', lTag=''):\n self.folder = folder\n self.saveTag = sTag\n self.loadTag = lTag\n\n def path(self, name, tag=''):\n return self.folder + '/' + tag + str(name) +'.pkl'\n \n def save(self, name, data, oTag=None):\n tag = self.saveTag if oTag==None else oTag\n pickle_out = open( self.path(name, tag) ,\"wb\")\n pickle.dump(data, pickle_out)\n pickle_out.close()\n\n def load(self, name, oTag=None):\n tag = self.loadTag if oTag==None else oTag\n pickle_in = open(self.path(name, tag), 'rb')\n data = pickle.load(pickle_in)\n pickle_in.close()\n return data \n \n def SaveData(self, data, oTag=None):\n self.save('raw', data.raw, oTag)\n self.save('gain', data.gain, oTag)\n self.save('pitch', data.pitch, oTag)\n self.save('power', data.power, oTag)\n self.save('lpc', data.lpc, oTag)\n\n\n def LoadData(self, oTag=None):\n data = SpeachData(); \n data.raw = self.load('raw', oTag)\n data.gain = self.load('gain', oTag)\n data.power = self.load('power', oTag)\n data.pitch = self.load('pitch', oTag) \n data.lpc = self.load('lpc', oTag) \n return data\n\n \n def SaveEncoded(self, data, oTag=None):\n self.save('binaries', data.binaries, oTag)\n self.save('maxgain', data.maxGain, oTag)\n\n\n def LoadEncoded(self, oTag=None):\n data = EncodedData(); \n data.binaries = self.load('binaries', oTag)\n data.maxGain = self.load('maxgain', oTag) \n return data\n \n def SaveDecoded(self, 
data, oTag=None):\n self.save('qlpc', data.lpc, oTag)\n self.save('qpitch', data.pitch, oTag)\n self.save('qgain', data.gain, oTag)\n\n\n def LoadDecoded(self, oTag=None):\n data = DecodedData(); \n data.lpc = self.load('qlpc', oTag)\n data.gain = self.load('qgain', oTag) \n data.pitch = self.load('qpitch', oTag) \n return data\n\n def getFileSize(self, file):\n return os.path.getsize(file)\n\n\n\nclass Math:\n \n def autocorrelation(self, x) : \n xp = x-np.mean(x)\n f = np.fft.fft(xp)\n p = np.array([np.real(v)**2+np.imag(v)**2 for v in f]) \n pi = np.fft.ifft(p)\n c = np.real(pi)[:int(x.size/2)]/np.sum(xp**2)\n return c\n \n def normalize(self, d):\n return 2*(d - np.min(d))/np.ptp(d)-1\n \n \nclass SpeachData:\n raw=[]\n pitch=[]\n power=[]\n lpc=[]\n gain=[] \n \n\n \nclass EncodedData:\n binaries = ''\n maxGain = 0\n\nclass DecodedData:\n lpc = []\n gain = []\n pitch = []\n\n\n\n ","sub_path":"P1/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14848109","text":"fname = input(\"Enter file name: \")\nfh = open(fname)\nlst = list()\nfor line in fh:\n line=line.rstrip()\n words=line.split()\n for each in words:\n if each not in lst:\n lst.append(each)\nlst.sort()\nprint(lst)\n","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"57185148","text":"#!/usr/bin/env python3\n\"\"\"\nDefines a function that builds an identity block\nusing Keras\n\"\"\"\nimport tensorflow.keras as K\n\n\ndef identity_block(A_prev, filters):\n \"\"\"\n Builds an identity block using Keras\n \"\"\"\n F11, F3, F12 = filters\n init = K.initializers.he_normal()\n activation = K.activations.relu\n C11 = K.layers.Conv2D(filters=F11,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=init)(A_prev)\n Batch_Norm11 = K.layers.BatchNormalization(axis=3)(C11)\n ReLU11 = K.layers.Activation(activation)(Batch_Norm11)\n C3 = K.layers.Conv2D(filters=F3,\n kernel_size=(3, 3),\n padding='same',\n kernel_initializer=init)(ReLU11)\n Batch_Norm3 = K.layers.BatchNormalization(axis=3)(C3)\n ReLU3 = K.layers.Activation(activation)(Batch_Norm3)\n C12 = K.layers.Conv2D(filters=F12,\n kernel_size=(1, 1),\n padding='same',\n kernel_initializer=init)(ReLU3)\n Batch_Norm12 = K.layers.BatchNormalization(axis=3)(C12)\n Addition = K.layers.Add()([Batch_Norm12, A_prev])\n output = K.layers.Activation(activation)(Addition)\n return output\n","sub_path":"supervised_learning/0x08-deep_cnns/2-identity_block.py","file_name":"2-identity_block.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491875204","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 15 15:47:20 2019\nPlots all the concentration files inside the folders\n@author: sr802\n\"\"\"\n\nimport sys\nimport os\nimport numpy as np\nimport warnings\nimport glob\nwarnings.filterwarnings(\"ignore\")\nimport matplotlib.pyplot as plt\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../../')) #This falls into Utilities path\nimport Lammps.core_functions as cf\n\n\n# Hydrodynamic radius using kirkwood expression using poly_analysis.py on the \n# Equilibration system(See for instance the radius.dat inside \n# 
6.High_concentration/6.F_sequential/E_8.0_S_1.0/Conc_dist)\n\nRh=[2.758531,2.755166] #LJ,GLJ \ncf.set_plot_appearance()\n\n\ndirectories=glob.glob('*/')\n\nname={\"u\":\"Solute\",\"v\":\"Solvent\",\"t\":\"Solution\"}\ncolors={\"u\":\"red\",\"v\":\"blue\",\"t\":\"black\"}\nltype={\"u\":\"-\",\"v\":\"-\",\"t\":\"--\"}\n\nplt.close('all')\n\nfor counter,directory in enumerate(directories):\n    files=glob.glob('%s/prof_*.dat'%directory)\n    fig,ax=plt.subplots()\n    files=sorted(files)[::-1]\n    \n    for f in files:\n        key=f.split(\"/\")[-1].split('_')[1][0]\n        print(key)\n        data=cf.read_data_file(f).values\n        plt.plot(data[:,1],data[:,3],color=colors[key],label=name[key],linestyle=ltype[key])\n    \n    plt.legend(loc='lower right')\n    ax.set_ylabel(r'$c[\\sigma^{-3}]$')\n    ax.set_xlabel(r'$r[\\sigma]$')\n    ax.set_xlim(0,9)\n    ax.set_ylim(0,ax.get_ylim()[1])\n#    ax.axvline(x=Rh[counter], ymin=0, ymax=1,ls='-.',c='black')\n    fig.tight_layout()\n    fig.savefig('%s.pdf'%directory.split('/')[0])\n\nplt.show()","sub_path":"Lammps/PDP/Plots/concentration_distributions.py","file_name":"concentration_distributions.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"251299585","text":"class Sort:\n\n\tdef __init__(self,arr,flag):\n\t\tself.list_ = arr\n\t\tself.length = len(arr)\n\t\tif flag==\"merge\":\n\t\t\tself.mergeSort_(self.list_)\n\t\telif flag==\"insert\":\n\t\t\tself.insertionSort()\n\n\tdef insertionSort(self):\n\t\tfor i in range(1, self.length): \n\t\t\tkey = self.list_[i] \n\t\t\tj = i-1\n\t\t\twhile j >=0 and key < self.list_[j] : \n\t\t\t\tself.list_[j+1] = self.list_[j] \n\t\t\t\tj -= 1\n\t\t\tself.list_[j+1] = key\t\t\n\n\tdef mergeSort_(self,arr):\n\t\tif len(arr) >1: \n\t\t\tmid = len(arr)//2\n\t\t\tleft = arr[:mid]\n\t\t\tright = arr[mid:]\n\n\t\t\tself.mergeSort_(left)\n\t\t\tself.mergeSort_(right)\n\t \n\t\t\ti = j = k = 0\n\n\t\t\twhile i < len(left) and j < len(right): \n\t\t\t\tif left[i] < right[j]: \n\t\t\t\t\tarr[k] = left[i] \n\t\t\t\t\ti+=1\n\t\t\t\telse: \n\t\t\t\t\tarr[k] = right[j] \n\t\t\t\t\tj+=1\n\t\t\t\tk+=1\n\t \n\t\t\twhile i < len(left): \n\t\t\t\tarr[k] = left[i] \n\t\t\t\ti+=1\n\t\t\t\tk+=1\n\n\t\t\twhile j < len(right): \n\t\t\t\tarr[k] = right[j] \n\t\t\t\tj+=1\n\t\t\t\tk+=1\n\n\tdef printlist(self):\n\t\tprint(*self.list_)\n\n_sort = Sort([13,17,8,19,2,5],\"insert\")\n_sort.printlist()\n\n_sort = Sort([12,7,3,5,17,15],\"merge\")\n_sort.printlist() ","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"73092506","text":"from blastaligner import *\r\nfrom csvtogw import *\r\nfrom gdv import *\r\nfrom seq_source_file import *\r\nimport sys\r\nimport os\r\n\r\nspecies1=str(input(\"Enter the filename of edgelist for Species 1 (do not include .csv): \"))\r\nspecies2=str(input(\"Enter the filename of edgelist for Species 2 (do not include .csv): \"))\r\n\r\npathname=os.path.dirname(sys.argv[0])\r\nnewdir1=os.path.join(pathname,species1)\r\nif not os.path.exists(newdir1):\r\n    os.makedirs(newdir1)\r\n\r\nnewdir2=os.path.join(pathname,species2)\r\nif not os.path.exists(newdir2):\r\n    os.makedirs(newdir2)\r\n\r\nListA=cleanlist(species1)\r\nListB=cleanlist(species2)\r\n\r\n\r\nwhile True:\r\n    try:\r\n        print(\"1\\tConvert .csv file to .gml and .gw files\\n2\\tConvert .csv file to .fasta files\\n3\\tObtain similarity score via orthologs and blast (requires .fasta files)\\n4\\tCreate orca input and output files (obtain graphlet count matrix in .out file)\\n5\\tGraphlet degree similarity signature (requires orca output files)\\n0\\tExit\")\r\n        option=(int(input(\"Enter an option: \")))\r\n\r\n        if option==0:\r\n            sys.exit()\r\n        elif option==1:\r\n            csvtogw(species1, newdir1)\r\n            csvtogw(species2, newdir2)\r\n        elif option==2:\r\n            print(\"Generating fasta files for \" + species1 + \"...\")\r\n            fasta(species1,ListA,newdir1)\r\n            print(\"Generating fasta files for \" + species2 + \"...\")\r\n            fasta(species2,ListB,newdir2)\r\n        elif option==3:\r\n            databasename=str(input(\"Enter the name of the database from https://omabrowser.org/oma/genomePW/ (species must be in correct order!) (do not include .txt): \"))\r\n            print(\"Calculating similarity score...\")\r\n            seq_score(species1,species2,ListA,ListB,databasename,newdir1,newdir2)\r\n        elif option==4:\r\n            orca_input(species1,ListA, newdir1)\r\n            orca_input(species2,ListB,newdir2)\r\n        elif option==5:\r\n            protein1=str(input(\"Please enter 1 protein from Species1: \"))\r\n            protein2=str(input(\"Please enter 1 protein from Species2: \"))\r\n            similarity=signature_score(protein1,protein2, ListA,ListB,newdir1,newdir2,species1,species2)\r\n            print('Signature similarity= ',similarity)\r\n\r\n    except ValueError:\r\n        print(\"Please enter a valid option!\")\r\n","sub_path":"Benchmarking alignments/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"236309434","text":"import numpy as np\nfrom back.backend import FilterSpace, FilterType, ApproxType, plot_template\nimport matplotlib.pyplot as plt\n\nFS = FilterSpace()\n#FS.addFilter(FilterType.LP, ApproxType.BW, 1000 * (2 * np.pi), 5000 * (2 * np.pi), 0.5, 30, 100, 1, n=9, rp=1, GD=1, nmin=1, nmax=20, Qmax=150)\n#FS.addFilter(FilterType.LP, ApproxType.C, 1000 * (2 * np.pi), 3000 * (2 * np.pi), 3, 30, 0, rp=1, GD=1, nmin=1, nmax=20, Qmax=150)\n#FS.addFilter(FilterType.LP, ApproxType.CH1, 1000 * (2 * np.pi), 3000 * (2 * np.pi), 3, 30, 0, rp=1, GD=1, nmin=1, nmax=15, Qmax=150)\n#FS.addFilter(FilterType.HP, ApproxType.BW, 4000 * (2 * np.pi), 1000 * (2 * np.pi), 3, 30, 0, 1, rp=1, GD=1, nmin=1, nmax=20, Qmax=150)\n#FS.addFilter(FilterType.BP, ApproxType.BW, [2E3 * (2 * np.pi), 3E3 * (2 * np.pi)], [1E3 * (2 * np.pi), 4E3 * (2 * np.pi)], 3, 30, 100, 1, nmin=1, nmax=15, Qmax=150)\n#FS.addFilter(FilterType.BP, ApproxType.LG, [2 * (2 * np.pi), 4 * (2 * np.pi)], [1 * (2 * np.pi), 5 * (2 * np.pi)], 3, 20, 0, nmin=1, nmax=15, Qmax=150)\n#FS.addFilter(FilterType.BR, ApproxType.CH2, [1 * (2 * np.pi), 5 * (2 * np.pi)], [2 * (2 * np.pi), 4 * (2 * np.pi)], 0.5, 20, 0, 10, rp=1, nmin=1, nmax=15, Qmax=150)\n#FS.addFilter(FilterType.BR, ApproxType.C, [1, 5], [2, 4], 3, 20, 100, rp=1, nmin=1, nmax=15, Qmax=150)\nFS.addFilter(FilterType.GD, ApproxType.B, 10 * (2 * np.pi), 15 * (2 * np.pi), 3, 30, 0, tol=10, GD=1E-2, nmin=1, nmax=15, Qmax=150)\n#FS.addFilter(FilterType.GD, ApproxType.G, 10 * (2 * np.pi), 15 * (2 * np.pi), 3, 30, 0, tol=10, GD=1E-2, nmin=1, nmax=15, Qmax=150)\nfil = FS.filters[0]\n#fil.print_self()\n\nprint(\"Pole pairs: \", FS.filters[0].get_pole_pairs())\nprint(\"Zero pairs: \", FS.filters[0].get_zero_pairs())\n#FS.filters[0].add_stage(FS.filters[0].zeros, FS.filters[0].poles)\nFS.filters[0].get_stages()\n\nfor i in range(len(FS.filters[0].stages)):\n    ns = \"\"\n    num = FS.filters[0].stages[i][0]\n    if len(num) == 3:\n        ns = str(num[0]) + \"s^2 + \" + str(num[1]) + \"s + \" + str(num[2])\n    elif len(num) == 2:\n        ns = str(num[0]) + \"s + \" + str(num[1])\n    elif len(num) == 1:\n        ns = str(num[0])\n    print(\" \\t \\t \" + ns)\n    print(FS.filters[0].stage_names[i] + \": \" + \"--------------------------------\" + \" Order: \" + str(FS.filters[0].get_stage_n(i)) + \" Q: \" + str(FS.filters[0].get_stage_Q(i)))\n    ds = \"\"\n    den = FS.filters[0].stages[i][1]\n    if len(den) == 3:\n        ds = str(den[0]) + \"s^2 + \" + str(den[1]) + \"s + \" + str(den[2])\n    elif len(den) == 2:\n        ds = str(den[0]) + \"s + \" + str(den[1])\n    elif len(den) == 1:\n        ds = str(den[0])\n    print(\" \\t \\t \" + ds)\n    print(\"\\n\")\n\n# # BODE\n# fig, ax = plt.subplots(2, 1)\n# axmod, axph = ax\n# plot_template(axmod, fil.type, fil.data, False)\n# FS.plot_mod(axmod, A=False)\n# FS.plot_ph(axph)\n'''b, a = fil.num, fil.den\nwmin, wmax = fil.get_wminmax()\n#wmin = min(fil.data.wp, fil.data.wa)/10 if fil.type <= FilterType.HP elif fil.type <= FilterType.GD else min(fil.data.wp[0], fil.data.wa[0])/10\n#wmax = max(fil.data.wp, fil.data.wa)*10 if fil.type <= FilterType.HP else max(fil.data.wp[1], fil.data.wa[1])*10\nw = np.linspace(wmin, wmax, int(wmax/wmin * 10))\n#H = ss.TransferFunction(b, a)\nfil.plot_mod(axmod, w)\nfil.plot_ph(axph, w)\nfig.suptitle(\"Filter frequency response\")\naxmod.set_xlabel('Frequency [radians / second]')\naxmod.set_ylabel('Amplitude [dB]')\naxmod.grid()\naxph.set_xlabel('Frequency [radians / second]')\naxph.set_ylabel('Phase [°]')\naxph.grid()\n#plt.ylim(-60, 10)\n#plt.grid(which='both', axis='both')\n#plt.show()'''\n\n# BO2\nfig3, ax3 = plt.subplots(1, 1)\nFS.filters[0].plot_selected_stages(ax3, [1])\n\n\n# # POLOS Y CEROS\n# fig2, ax2 = plt.subplots(1, 1)\n# FS.plot_zp(ax2)\n'''fig2.suptitle(\"Poles and Zeros\")\nax2.scatter(fil.zeros.real, fil.zeros.imag, marker='o', edgecolors=\"red\", facecolors=\"None\")\nax2.scatter(fil.poles.real, fil.poles.imag, marker='x', color=\"blue\")\nax2.set_xlabel('Real')\nax2.set_ylabel('Imaginary')\nax2.grid()'''\n\n# # RETARDO DE GRUPO\n# figGD, axGD = plt.subplots(1, 1)\n# FS.plot_gd(axGD)\n'''wmin, wmax = fil.get_wminmax()\n#wmin = min(fil.data.wp, fil.data.wa)/10 if fil.type <= FilterType.HP elif fil.type <= FilterType.GD else min(fil.data.wp[0], fil.data.wa[0])/10\n#wmax = max(fil.data.wp, fil.data.wa)*10 if fil.type <= FilterType.HP else max(fil.data.wp[1], fil.data.wa[1])*10\nw = np.linspace(0, wmax*2/3, int(wmax/wmin * 10))\nfil.plot_gd(axGD, w)\n#axGD.set_ylim([0, 1])\nfigGD.suptitle(\"Filter group delay\")\naxGD.set_xlabel('Frequency [radians / second]')\naxGD.set_ylabel('Group Delay')\naxGD.grid()'''\n\nplt.show()\n\n\n\n\n","sub_path":"back/test_backend.py","file_name":"test_backend.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"66058607","text":"import pandas as pd\nimport numpy as np\nimport argparse\nimport dataio\nimport json\nimport glob\nimport os\n\n\nparser = argparse.ArgumentParser(description='Combine runs')\nparser.add_argument('--datasets', type=str, nargs='+')\noptions = parser.parse_args()\n\nfor DATASET_NAME in options.datasets:\n    CSV_FOLDER, CSV_TRAIN, CSV_TEST, CSV_VAL, CONFIG_FILE, Q_NPZ = dataio.build_paths(DATASET_NAME)\n\n    tracked_metrics = ['accuracy', 'll_mcmc_all']\n    experiments = next(os.walk(CSV_FOLDER))[1]  # List all folders\n    for experiment in experiments:\n        results = {}\n        df = {}\n        for run_id in range(5):\n            rlog = os.path.join(CSV_FOLDER, experiment, str(run_id), 'rlog.csv')\n            results_file = 
os.path.join(CSV_FOLDER, experiment, str(run_id), 'results.json')\n if os.path.isfile(rlog):\n df[run_id] = pd.read_csv(rlog)\n if os.path.isfile(results_file):\n with open(results_file) as f:\n results[run_id] = json.load(f)\n if df:\n mean_df = pd.DataFrame()\n for column in tracked_metrics:\n mean_df[column] = np.column_stack([df[i][column] for i in df]).mean(axis=1)\n mean_df.to_csv(os.path.join(CSV_FOLDER, experiment, 'rlog.csv'))\n\n if results:\n with open(os.path.join(CSV_FOLDER, experiment, 'results.json'), 'w') as f:\n f.write(json.dumps({\n 'args': results[0]['args'],\n 'legends': {\n 'short': results[0]['legends']['short'],\n 'full': results[0]['legends']['full'],\n 'latex': results[0]['legends']['latex']\n },\n 'metrics': {\n metric: np.mean([results[i]['metrics'][metric] for i in results])\n for metric in {'ACC', 'AUC', 'NLL'}\n }\n }, indent=4))\n","sub_path":"combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"321366225","text":"import cv2\r\nfrom time import sleep\r\nimport numpy\r\n\r\ncamera = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n\tstatus, image = camera.read()\r\n\tif(status):\r\n\t\tcv2.imwrite(\"video1.jpg\", image)\r\n\t\t\r\n\t\tgrayscaled = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\t\tretval, threshold = cv2.threshold(grayscaled, 60, 255, cv2.THRESH_BINARY)\r\n\t\t\r\n\t\ta = cv2.resize(image, (600, 500))\r\n\t\tcv2.imshow('original',a)\r\n\t\t\r\n\t\tif cv2.waitKey(5) == ord('o'):\r\n\t\t\tbreak\r\n\t\t\r\n\t\tb = cv2.resize(threshold, (600, 500))\r\n\t\tcv2.imshow('threshold', b)\r\n\t\t\r\n\t\tif cv2.waitKey(5) == ord('t'):\r\n\t\t\tbreak\r\n\tsleep(0.03)\r\n\r\n\r\n\r\n","sub_path":"My_programs/open_cv/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"586821381","text":"import music21\n\nimport sys\nimport copy\n\n#music21.environment.set('musicxmlPath', '/usr/bin/musescore')\nmusic21.environment.set('autoDownload', 'allow')\n\ndef convert(fin, lower=0, higher=0):\n s = music21.converter.parse(fin)\n noteOne = None\n for note in s.flat.notes:\n if isinstance(note, music21.chord.Chord):\n for pitch in note:\n if noteOne == None:\n noteOne = music21.note.Note()\n noteOne.pitch = pitch\n interval = music21.interval.Interval(noteStart = noteOne, noteEnd = pitch)\n rinterval = interval.reverse()\n #we have to do it twice, once would make everything noteOne\n pitch.transpose(rinterval, inPlace = True)\n pitch.transpose(rinterval, inPlace = True)\n for l in range(lower):\n pitch.transpose(music21.interval.GenericInterval('Octave').reverse(), inPlace = True)\n for h in range(higher):\n pitch.transpose(music21.interval.GenericInterval('Octave'), inPlace = True)\n else:\n if noteOne == None:\n noteOne = copy.deepcopy(note)\n interval = music21.interval.Interval(noteStart = noteOne, noteEnd = note)\n rinterval = interval.reverse()\n #we have to do it twice, once would make everything noteOne\n note.transpose(rinterval, inPlace = True)\n note.transpose(rinterval, inPlace = True)\n for l in range(lower):\n note.transpose(music21.interval.GenericInterval('Octave').reverse(), inPlace = True)\n for h in range(higher):\n note.transpose(music21.interval.GenericInterval('Octave'), inPlace = True)\n midiOut = 'inverse.mid'\n s.write('midi', fp = midiOut)\n return midiOut\n\ndef main():\n if len(sys.argv) < 2:\n 
print('You must provide the midi file to read')\n        print('\t{} <midi file>'.format(sys.argv[0]))\n        sys.exit(1)\n    f = sys.argv[1]\n    fout = convert(f)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"invert.py","file_name":"invert.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"582335154","text":"'''\nAuthor: alexc89@mit.edu\nSendMessageAchMatlab\nUsed to multicast Hubo's status on LCM. It will take the information received on ACH and convert it to LCM. This is needed for live visualization.\n\n'''\n\nimport lcm\nimport time\nimport ach\nimport hubo_ach as ha\nfrom ctypes import *\n\n#Import LCM Messages\nfrom lcmtypes import hubo_hubo2state\n\n\n\n\n\n#Message Conversion\ndef convertLCM_Matlab(x):\n    NUM_JOINT = 40\n    msg = hubo_hubo2state()\n    msg.timestamp = time.time()\n    msg.state = [0,0,0,0,0,0] #Basic Link Position\n    msg.state += [x.joint[i].pos for i in range(NUM_JOINT)] #Motor joint positions\n    msg.state += [0,0,0,0,0,0] #Basic Link Velocities\n    msg.state += [x.joint[i].vel for i in range(NUM_JOINT)] #Motor joint velocity\n    #Retroactively adding passive finger joints and their velocity\n    FINGER_JOINTPOS = [17,38] #Left and Right hand.\n    FINGER_JOINTPOS = [[FINGER_JOINTPOS[hand] +2*finger for finger in range(5) ] for hand in range(2)]#Populate 5 fingers\n    FINGER_JOINTPOS = FINGER_JOINTPOS[0] + FINGER_JOINTPOS[1]\n    FINGER_JOINTPOS += [FINGER_JOINTPOS[finger] +1 for finger in range(len(FINGER_JOINTPOS))]#Populate 2 knuckles for each finger\n    FINGER_JOINTPOS += [2*knuckles for knuckles in FINGER_JOINTPOS] #add velocity\n    FINGER_JOINTPOS.sort()\n    [msg.state.insert(knuckle, 0) for knuckle in FINGER_JOINTPOS] #Insert zero for the passive knuckles position + Velocity\n    return msg\n\n\nif __name__ == \"__main__\":\n    #Setup ACH LCM channels\n    lc = lcm.LCM(\"udpm://239.255.76.67:7667?ttl=2\")\n    #Setup ACH\n    c = ach.Channel(ha.HUBO_CHAN_STATE_NAME)#HuboState\n    c.flush()\n\n    while True: #constant Transmission \n        #Grab a frame from ACH\n        state = ha.HUBO_STATE()\n        [status, framesize] = c.get(state, wait=True, last=False)\n        if status == ach.ACH_OK or status == ach.ACH_MISSED_FRAME:\n            x =1#print \"ACH grab successful\" #Testing Probe 1\n        else:\n            raise ach.AchException( c.result_string(status))\n        \n        \n        #ACH to LCM conversion\n        msg = convertLCM_Matlab(state)\n#        msg.imu = [convert_imu(x) for x in state.imu]\n#        msg.ft = [convert_ft(x) for x in state.ft]\n#        msg.joint = [convert_joint_state(x) for x in state.joint]\n#        msg.status = [convert_joint_status(x) for x in state.status]\n#        msg.driver = [convert_jmc_state(x) for x in state.driver]\n#        msg.power = convert_power(state.power)\n#        msg.time = state.time\n#        msg.refWait = state.refWait\n        \n        #Pushout an LCM message\n        lc.publish(\"HuboState\", msg.encode())\n        #Loop Delay\n        time.sleep(0.01)\n    #ACH LCM terminate\n    c.close()\n","sub_path":"ach-lcm-util-simplified/sendMessageAchMatlab.py","file_name":"sendMessageAchMatlab.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"39208081","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\ndef find(node, parent, target):\n    if node is None:\n        return None, -1\n    if node.val == target:\n        return parent, 0\n    elif node.left is None and node.right is None:\n        return None, -1\n\n    lp, ld = find(node.left, node, target)\n    rp, rd = find(node.right, node, target)\n    if lp is None and rp is None:\n        return None, -1\n    elif lp is not None:\n        return lp, ld + 1\n    else:\n        return rp, rd + 1\n\n\nclass Solution:\n    def isCousins(self, root, x, y):\n        xp, xd = find(root, None, x)\n        yp, yd = find(root, None, y)\n        return xd == yd and xp != yp\n","sub_path":"src/leetcode/P3322.py","file_name":"P3322.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"38990100","text":"import csv\nimport openpyxl as xl\nimport webbrowser\n\nweb_address = input('Please enter the web address:')\nworking_dir = '~/Documents/HUD/Projects/Scripts/{}'\n\n\n# noinspection PyUnboundLocalVariable\ndef main():\n    \"\"\"\n    function to control the script, taking in the name of the file that will be used and\n    deciding whether to use a .csv function or .xlsx function\n    \"\"\"\n    while True:\n        # noinspection PyBroadException\n        try:\n\n            filename = input('Please enter the file you wish to use: ')\n            file = working_dir.format(filename)\n            print('Reading {}'.format(file))\n            if file.endswith('.csv'):\n                process_csv(import_csv(file))\n            elif file.endswith('.xlsx'):\n                process_xlsx(import_xlsx(file))\n            else:\n                print('{} is not a valid file.'.format(filename))\n        except Exception:\n            print('{} does not exist.'.format(filename))\n            pass\n\n\ndef import_csv(file):\n    \"\"\"\n    function to import the .csv file and convert it to a list\n    :param file: the file name that is opened\n    :return: returns data as a list of lists\n    \"\"\"\n    try:\n        with open(file) as working_file:\n            reader = csv.reader(working_file)\n            csv_data = list(reader)\n            return csv_data\n    except Exception:\n        raise\n\n\n\ndef import_xlsx(file):\n    \"\"\"\n    function to import the .xlsx file and convert it to a worksheet object\n    :param file: the file name that is opened\n    :return: returns the worksheet object\n    \"\"\"\n    try:\n        wb = xl.load_workbook(file)\n    except Exception:\n        raise\n    print('Choose a sheet from 
the workbook: ')\n print(wb.sheetnames)\n sheet = wb[input()]\n print('Reading {}{}'.format(sheet.title, '.'))\n return sheet\n\n\ndef process_csv(csv_list):\n \"\"\"\n function that iterates through the list and opens browser tabs when it\n finds seven digit number values\n :param csv_list: a list generated from a .csv file\n \"\"\"\n print('Opening browser tabs...')\n for elements in csv_list:\n for element in elements:\n if element.isdigit() and len(element) == 7:\n webbrowser.open(web_address + element)\n\n\ndef process_xlsx(sheet):\n \"\"\"\n function that iterates through the worksheet object and opens browser tabs when it\n finds seven digit number values\n :param sheet: a worksheet from an .xlsx file\n \"\"\"\n print('Opening browser tabs...')\n for row in sheet.iter_rows():\n for cell in row:\n if str(cell.value).isdigit() and len(str(cell.value)) == 7:\n webbrowser.open(web_address + str(cell.value))\n\n\nmain()\n","sub_path":"tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"606062055","text":"import sys\nimport numpy as np\n\n# Run:\n# python scripts/timeints-divide-and-partition.py \\\n#