\"\n\nimport argparse\nimport logging\n\nimport cPickle as pickle\nimport json\nimport os\nimport requests\nimport sys\nimport tarfile\nimport yaml\nfrom textwrap import wrap\nfrom time import time\n\nimport numpy as np\nimport pyfits\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nfrom emcee.utils import sample_ball\n\nimport sick\n\nCACHED_MODEL_GRID_URL = \\\n \"https://raw.githubusercontent.com/andycasey/sick/master/.cached-models.json\"\n\nlogger = logging.getLogger(\"sick\")\n\ndef download_file(url):\n \"\"\" Download a file to the current working directory. \"\"\"\n\n local_filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n with open(local_filename, 'wb') as f:\n progress, total = 0, int(r.headers.get(\"content-length\"))\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n progress += len(chunk)\n complete = int(50 * progress / total)\n sys.stdout.write(\"\\r[{0}{1}] {2:3.0f}%\".format('=' * complete,\n ' ' * (50-complete), 2*complete))\n sys.stdout.flush()\n f.write(chunk)\n f.flush()\n sys.stdout.flush()\n sys.stdout.write(\"\\nDownload of {0} complete.\\n\".format(local_filename))\n return local_filename\n\ndef resume(args):\n raise NotImplementedError\n\n\ndef cache(args):\n \"\"\" Cache a model \"\"\"\n\n if not os.path.exists(args.model):\n raise IOError(\"model filename {0} does not exist\".format(args.model))\n\n model = sick.models.Model(args.model)\n cached_configuration = model.cache(\n args.grid_points_filename, args.fluxes_filename, clobber=args.clobber)\n\n model.configuration = cached_configuration\n model.save(args.model, True)\n\n logging.info(\"Updated model filename {0} to include cached data.\".format(\n args.model))\n return (model, cached_configuration)\n \n\ndef download(args):\n \"\"\" Download requested files. \"\"\"\n\n # Get the current list of model grids\n cached_model_list = requests.get(CACHED_MODEL_GRID_URL).json()\n message = \\\n \"{0}: {1}\\n\"\\\n \"\\t{2}\\n\"\\\n \"\\tADS Reference: {3}\\n\\n\"\n\n def exit_without_download():\n sys.stdout.write(\"No model downloaded.\\n\")\n sys.exit(-1)\n\n if args.model_grid_name == \"list\":\n # Just print out all of the available ones. Follow pip-style.\n sys.stdout.write(\"Found {0} cached sick models online:\\n\\n\".format(\n len(cached_model_list)))\n for model in cached_model_list:\n sys.stdout.write(message.format(model[\"short_name\"],\n model[\"long_name\"], \"\\n\\t\".join(wrap(model[\"description\"])),\n model[\"ads_reference\"]))\n\n else:\n # Look for the specific model.\n cached_model_names = [model[\"short_name\"].lower() \\\n for model in cached_model_list]\n\n requested_model_name = args.model_grid_name.lower()\n if requested_model_name not in cached_model_names:\n sys.stdout.write(\"No cached model matching name '{0}' found. Use \" \\\n \"'sick get list' to retrieve the current list of cached models\"\\\n \" available online.\\n\".format(requested_model_name))\n sys.exit(-1)\n\n else:\n # Confirm the selection\n model = cached_model_list[cached_model_names.index(requested_model_name)]\n sys.stdout.write(\"Found {0} model:\\n\\n\".format(model[\"short_name\"]))\n sys.stdout.write(message.format(model[\"short_name\"],\n model[\"long_name\"], \"\\n\\t\".join(wrap(model[\"description\"])),\n model[\"ads_reference\"]))\n sys.stdout.write(\"Download {0} model? [y/N]\".format(model[\"short_name\"]))\n confirm = raw_input().lower().strip()\n if len(confirm) > 0 and confirm[0] == \"y\":\n\n # Check that we won't overwrite anything.\n filename = model[\"download_link\"].split(\"/\")[-1]\n if os.path.exists(filename):\n sys.stdout.write(\"Clobber existing file {0}? [y/N]\".format(\n filename))\n confirm = raw_input().lower().strip()\n if 1 > len(confirm) or confirm[0] != \"y\":\n exit_without_download()\n\n # Once downloaded, it could overwrite files in a directory:\n if os.path.exists(model[\"short_name\"]):\n sys.stdout.write(\"This may overwrite files in pre-existing\"\\\n \" folder {0}/ -- is that OK? [y/N]\".format(model[\"short_name\"]))\n confirm = raw_input().lower().strip()\n if 1 > len(confirm) or confirm[0] != \"y\":\n exit_without_download()\n\n # OK, download it.\n sys.stdout.write(\"Downloading {0} model...\\n\".format(\n model[\"short_name\"]))\n filename = download_file(model[\"download_link\"])\n\n # Now untar it to a new directory.\n with tarfile.open(filename, \"r\") as tarball:\n tarball.extractall(path=model[\"short_name\"])\n sys.stdout.write(\"Extracted files to {0}/\\n\".format(model[\"short_name\"]))\n\n # Remove the tarball\n os.remove(filename)\n\n else:\n exit_without_download()\n\n\ndef _check_analysis_args(args):\n \"\"\" Perform some analysis checks \"\"\"\n\n if not os.path.exists(args.model):\n raise IOError(\"model filename {0} does not exist.\".format(args.model))\n\n if args.plotting:\n fig = plt.figure()\n available = fig.canvas.get_supported_filetypes().keys()\n plt.close(fig)\n\n if args.plot_format.lower() not in available:\n raise ValueError(\"plotting format {0} not available: Options are: \"\\\n \"{1}\".format(args.plot_format.lower(), \", \".join(available)))\n return None\n\n\ndef _parse_and_load_spectra(args):\n \"\"\" Parse and load the spectra from the arguments provided. \"\"\"\n\n if args.read_from_filename:\n logger.debug(\"Reading sources from input filename {}\".format(\n args.spectrum_filenames[0]))\n\n with open(args.spectrum_filenames[0], \"r\") as fp:\n source_spectrum_filenames = map(str.strip, fp.readlines())\n\n all_spectra = []\n for row in source_spectrum_filenames:\n all_spectra.append(map(sick.specutils.Spectrum1D.load, row.split(\" \")))\n\n return all_spectra\n\n all_spectra = map(sick.specutils.Spectrum1D.load, args.spectrum_filenames)\n\n # Possibilities:\n # (1) Many spectra for single star [default behaviour]\n # (2) Single spectrum for many stars [indicated by --multi-sources]\n # (3) Many spectra for many stars [indicated by --multi-plexing]\n\n # Possibility (3): Are the input FITS files multiplexed spectra?\n if args.multiplexing:\n\n # This implies multiple sources.\n if len(set(map(len, all_spectra))) > 1:\n raise IOError(\"input filenames contain different number of spectra\")\n\n sources = []\n num_channels, num_sources = len(all_spectra), len(all_spectra[0])\n for i in xrange(num_sources):\n sources.append([all_spectra[j][i] for j in xrange(num_channels)])\n\n elif args.multiple_sources:\n # Possibility (2): Single spectrum for many stars. Each spectrum is a\n # different source.\n sources = [[each] for each in all_spectra]\n\n else:\n # Possibility (1): Many spectra for single star\n sources = [all_spectra]\n\n return sources\n\n\ndef estimate(args):\n \"\"\"\n Return a point estimate of the model parameters theta given the data.\n \"\"\"\n\n # Make some checks\n _check_analysis_args(args)\n\n # Load the model and the data\n model = sick.models.Model(args.model)\n all_spectra = _parse_and_load_spectra(args)\n\n # Display some information about the model\n logger.info(\"Model information: {0}\".format(model))\n logger.info(\"Configuration:\")\n map(logger.info, yaml.dump(model.configuration).split(\"\\n\"))\n\n # Define headers that we want in the results filename \n default_headers = (\"RA\", \"DEC\", \"COMMENT\", \"ELAPSED\", \"FIBRE_NUM\", \"LAT_OBS\",\n \"LONG_OBS\", \"MAGNITUDE\",\"NAME\", \"OBJECT\", \"RO_GAIN\", \"RO_NOISE\", \"UTEND\",\n \"UTDATE\", \"UTSTART\", )\n default_metadata = {\n \"model\": model.hash, \n \"input_filenames\": \", \".join(args.spectrum_filenames),\n \"sick_version\": sick.__version__,\n }\n\n if args.read_from_filename:\n with open(args.spectrum_filenames[0], \"r\") as fp:\n all_filenames = map(str.strip, fp.readlines())\n\n # For each source, solve\n for i, spectra in enumerate(all_spectra, start=1):\n\n # Force spectra as a list\n if not isinstance(spectra, (list, tuple)):\n spectra = [spectra]\n\n logger.info(\"Starting on object #{0} (RA {1}, DEC {2} -- {3})\".format(i,\n spectra[0].headers.get(\"RA\", \"None\"),\n spectra[0].headers.get(\"DEC\", \"None\"),\n spectra[0].headers.get(\"OBJECT\", \"Unknown\")))\n\n # Create metadata and put header information in\n if args.skip > i - 1:\n logger.info(\"Skipping object #{0}\".format(i))\n continue\n\n if args.number_to_solve != \"all\" and i > (int(args.number_to_solve) + args.skip):\n logger.info(\"We have analysed {0} spectra. Exiting..\".format(args.number_to_solve))\n break\n\n # If there are many spectra to analyse, include the run ID in the output filenames.\n # Update filename prefix if we are reading from a file\n if args.read_from_filename:\n filename_prefix = sick.utils.default_output_prefix(all_filenames[i].split())\n\n else:\n filename_prefix = args.filename_prefix\n\n if len(all_spectra) > 1 and not args.read_from_filename:\n output = lambda x: os.path.join(args.output_dir,\n \"-\".join([filename_prefix, str(i), x]))\n else:\n output = lambda x: os.path.join(args.output_dir,\n \"-\".join([filename_prefix, x]))\n\n # Does a solution already exist for this star? If so are we authorised to clobber it?\n if os.path.exists(output(\"estimate.json\")):\n if not args.clobber:\n logger.info(\"Skipping object #{0} as a results file already exists\"\\\n \" ({1}) and we have been asked not to clobber it\".format(i,\n output(\"estimate.json\")))\n continue\n else:\n logger.warn(\"Overwriting existing file {0}\".format(output(\"estimate.json\")))\n\n metadata = {}\n header_columns = []\n for header in default_headers:\n if header not in spectra[0].headers: continue\n header_columns.append(header)\n metadata[header] = spectra[0].headers[header]\n\n metadata.update({\"run_id\": i})\n metadata.update(default_metadata)\n \n # Determine an initial point\n try:\n initial_theta, initial_r_chi_sq = model.initial_theta(spectra)\n\n except:\n logger.exception(\"Failed to get initial point\")\n if args.debug: raise\n continue\n\n logger.info(\"Initial theta point with reduced chi_sq = {0:.2f} is {1}\"\\\n .format(initial_r_chi_sq, model._dictify_theta(initial_theta)))\n\n\n metadata.update(dict(zip(\n [\"mean_snr_{}\".format(c) for c in model.channels],\n [np.nanmean(s.flux/(s.variance**0.5)) for s in spectra])))\n\n metadata.update({\n \"reduced_chi_sq\": initial_r_chi_sq,\n \"model_configuration\": model.configuration\n })\n\n # Produce a plot projecting the initial value\n if args.plotting:\n projected_filename = output(\"projected-initial-theta.{}\".format(\n args.plot_format))\n\n # Show physical parameters in the title?\n title = \", \".join([\"{0} = {1:.2f}\".format(p, v)\n for p, v in zip(model.grid_points.dtype.names, initial_theta)])\n\n fig = sick.plot.projection(model, spectra, theta=initial_theta,\n title=title)\n fig.savefig(projected_filename)\n logger.info(\"Created figure {}\".format(projected_filename))\n plt.close(\"all\")\n\n # Write the point estimate to disk\n logger.info(\"Saving point estimate to {0}\".format(output(\"estimate.json\")))\n with open(output(\"estimate.json\"), \"wb+\") as fp:\n json.dump(metadata, fp, indent=2)\n\n logger.info(\"Finished with source {0}\".format(i))\n\n logger.info(\"Fin.\")\n\n\n\ndef solve(args):\n \"\"\" \n Calculate posterior distributions for model parameters given the data.\n \"\"\"\n\n # Make some checks\n _check_analysis_args(args)\n\n # Load the model and the data\n model = sick.models.Model(args.model)\n all_spectra = _parse_and_load_spectra(args)\n\n # Display some information about the model\n logger.info(\"Model information: {0}\".format(model))\n logger.info(\"Configuration:\")\n map(logger.info, yaml.dump(model.configuration).split(\"\\n\"))\n\n # Define headers that we want in the results filename \n default_headers = (\"RA\", \"DEC\", \"COMMENT\", \"ELAPSED\", \"FIBRE_NUM\", \"LAT_OBS\",\n \"LONG_OBS\", \"MAGNITUDE\",\"NAME\", \"OBJECT\", \"RO_GAIN\", \"RO_NOISE\", \"UTEND\",\n \"UTDATE\", \"UTSTART\", )\n default_metadata = {\n \"model\": model.hash, \n \"input_filenames\": \", \".join(args.spectrum_filenames),\n \"sick_version\": sick.__version__,\n }\n\n if args.read_from_filename:\n with open(args.spectrum_filenames[0], \"r\") as fp:\n all_filenames = map(str.strip, fp.readlines())\n\n # For each source, solve\n for i, spectra in enumerate(all_spectra, start=1):\n\n # Force spectra as a list\n if not isinstance(spectra, (list, tuple)):\n spectra = [spectra]\n\n logger.info(\"Starting on object #{0} (RA {1}, DEC {2} -- {3})\".format(i,\n spectra[0].headers.get(\"RA\", \"None\"),\n spectra[0].headers.get(\"DEC\", \"None\"),\n spectra[0].headers.get(\"OBJECT\", \"Unknown\")))\n\n # Create metadata and put header information in\n if args.skip > i - 1:\n logger.info(\"Skipping object #{0}\".format(i))\n continue\n\n if args.number_to_solve != \"all\" and i > (int(args.number_to_solve) + args.skip):\n logger.info(\"We have analysed {0} spectra. Exiting..\".format(args.number_to_solve))\n break\n\n # If there are many spectra to analyse, include the run ID in the output filenames.\n # Update filename prefix if we are reading from a file\n if args.read_from_filename:\n filename_prefix = sick.utils.default_output_prefix(all_filenames[i].split())\n\n else:\n filename_prefix = args.filename_prefix\n\n if len(all_spectra) > 1 and not args.read_from_filename:\n output = lambda x: os.path.join(args.output_dir,\n \"-\".join([filename_prefix, str(i), x]))\n else:\n output = lambda x: os.path.join(args.output_dir,\n \"-\".join([filename_prefix, x]))\n\n # Does a solution already exist for this star? If so are we authorised to clobber it?\n if os.path.exists(output(\"result.json\")):\n if not args.clobber:\n logger.info(\"Skipping object #{0} as a results file already exists\"\\\n \" ({1}) and we have been asked not to clobber it\".format(i,\n output(\"result.json\")))\n continue\n else:\n logger.warn(\"Overwriting existing file {0}\".format(output(\"result.json\")))\n\n metadata = {}\n header_columns = []\n for header in default_headers:\n if header not in spectra[0].headers: continue\n header_columns.append(header)\n metadata[header] = spectra[0].headers[header]\n\n metadata.update({\"run_id\": i})\n metadata.update(default_metadata)\n \n # Determine an initial point\n try:\n initial_theta, initial_r_chi_sq = model.initial_theta(spectra)\n\n except:\n logger.exception(\"Failed to get initial point\")\n if args.debug: raise\n continue\n\n\n logger.info(\"Initial theta point with reduced chi_sq = {0:.2f} is {1}\"\\\n .format(initial_r_chi_sq, model._dictify_theta(initial_theta)))\n\n # Save metadata about the initial point\n metadata[\"initial_theta\"] = model._dictify_theta(initial_theta)\n metadata[\"initial_r_chi_sq\"] = initial_r_chi_sq\n\n # Produce a plot projecting the initial value\n if args.plotting:\n projected_filename = output(\"projected-initial-theta.{}\".format(\n args.plot_format))\n\n fig = sick.plot.projection(model, spectra, theta=initial_theta)\n fig.savefig(projected_filename)\n logger.info(\"Created figure {}\".format(projected_filename))\n\n # Optimise the point\n if model.configuration[\"settings\"][\"optimise\"]:\n optimised_theta, optimised_r_chi_sq, info = model.optimise(\n spectra, initial_theta=initial_theta, \n fixed=[\"z\"] + [\"z.{}\".format(c) for c in model.channels])\n\n logger.info(\"Optimised theta is {0}\".format(model._dictify_theta(optimised_theta)))\n walker_theta = optimised_theta\n\n # Save metadata about the optimised value\n metadata[\"optimised_theta\"] = model._dictify_theta(optimised_theta)\n metadata[\"optimised_r_chi_sq\"] = optimised_r_chi_sq\n \n if args.plotting:\n projected_filename = output(\"projected-optimised-theta.{}\".format(\n args.plot_format))\n\n fig = sick.plot.projection(model, spectra, theta=optimised_theta)\n fig.savefig(projected_filename)\n logger.info(\"Created figure {}\".format(projected_filename))\n\n else:\n # MCMC initial point will be the initial point\n walker_theta = initial_theta\n\n try:\n posteriors, sampler, info = model.infer(spectra, theta=walker_theta)\n\n except:\n logger.exception(\"Failed to analyse source #{0}:\".format(i))\n if args.debug: raise\n\n else:\n \n # Update results with the posteriors\n logger.info(\"Posteriors:\")\n max_parameter_len = max(map(len, model.parameters))\n for parameter in model.parameters:\n posterior_value, pos_uncertainty, neg_uncertainty = posteriors[parameter]\n logger.info(\" {0}: {1:.2e} ({2:+.2e}, {3:+.2e})\".format(\n parameter.rjust(max_parameter_len), posterior_value, \n pos_uncertainty, neg_uncertainty))\n\n metadata.update({\n parameter: posterior_value,\n \"u_maxabs_{0}\".format(parameter): np.abs([\n neg_uncertainty,\n pos_uncertainty\n ]).max(),\n \"u_pos_{0}\".format(parameter): pos_uncertainty,\n \"u_neg_{0}\".format(parameter): neg_uncertainty,\n })\n\n # Save information related to the analysis\n metadata.update(dict(zip(\n [\"mean_snr_{}\".format(c) for c in model.channels],\n [np.nanmean(s.flux/(s.variance**0.5)) for s in spectra])))\n\n chain_filename = output(\"chain.fits\")\n metadata.update({\n \"reduced_chi_sq\": info[\"reduced_chi_sq\"],\n \"maximum_log_probability\": np.nanmax(info[\"lnprobability\"]),\n \"chain_filename\": chain_filename if args.save_chain_files else \"\",\n \"time_elapsed\": info[\"time_elapsed\"],\n \"final_mean_acceptance_fraction\": info[\"mean_acceptance_fractions\"][-1],\n \"model_configuration\": model.configuration\n })\n for channel, length in info[\"autocorrelation_times\"].iteritems():\n metadata[\"tau_{}\".format(channel)] = length\n\n walkers = model.configuration[\"settings\"][\"walkers\"]\n chain_length = info[\"chain\"].shape[0] * info[\"chain\"].shape[1]\n chain = np.core.records.fromarrays(\n np.vstack([\n np.arange(1, 1 + chain_length),\n np.arange(1, 1 + chain_length) % walkers,\n info[\"chain\"].reshape(-1, len(model.parameters)).T,\n info[\"lnprobability\"].reshape(-1, 1).T\n ]),\n names=[\"Iteration\", \"Sample\"] + model.parameters + [\"ln_probability\"],\n formats=[\"i4\", \"i4\"] + [\"f8\"] * (1 + len(model.parameters)))\n\n # Save the chain\n if args.save_chain_files:\n logger.info(\"Saving chains to {0}\".format(chain_filename))\n primary_hdu = pyfits.PrimaryHDU()\n table_hdu = pyfits.BinTableHDU(chain)\n hdulist = pyfits.HDUList([primary_hdu, table_hdu])\n hdulist.writeto(chain_filename, clobber=True)\n\n else:\n logger.warn(\"Chain not saved to disk.\")\n\n # Write the result to disk\n logger.info(\"Saving results to {0}\".format(output(\"result.json\")))\n with open(output(\"result.json\"), \"wb+\") as fp:\n json.dump(metadata, fp, indent=2)\n\n # Close sampler pool\n if model.configuration[\"settings\"].get(\"threads\", 1) > 1:\n sampler.pool.close()\n sampler.pool.join()\n\n # Save sampler state\n with open(output(\"model.state\"), \"wb+\") as fp:\n pickle.dump([\n sampler.chain[:, -1, :],\n sampler.lnprobability[:, -1],\n sampler.random_state\n ], fp, -1)\n\n # Plot results\n if args.plotting:\n \n # Plot the mean acceptance fractions\n acceptance_plot_filename = output(\"acceptance.{0}\".format(args.plot_format))\n fig = sick.plot.acceptance_fractions(info[\"mean_acceptance_fractions\"],\n burn_in=model.configuration[\"settings\"][\"burn\"])\n fig.savefig(acceptance_plot_filename)\n logger.info(\"Created figure {0}\".format(acceptance_plot_filename))\n\n # Plot the chains\n chain_plot_filename = output(\"chain.{0}\".format(args.plot_format))\n fig = sick.plot.chains(info[\"chain\"],\n labels=sick.utils.latexify(model.parameters), truth_color='r',\n burn_in=model.configuration[\"settings\"][\"burn\"],\n truths=[posteriors[p][0] for p in model.parameters])\n fig.savefig(chain_plot_filename)\n logger.info(\"Created figure {0}\".format(chain_plot_filename))\n\n # Make a corner plot with just the astrophysical parameters\n corner_plot_filename = output(\"corner.{0}\".format(args.plot_format))\n indices = np.arange(len(model.grid_points.dtype.names))\n fig = sick.plot.corner(sampler.chain.reshape(-1, len(model.parameters))[:, indices],\n labels=sick.utils.latexify(model.grid_points.dtype.names),\n truth_color='r', quantiles=[.16, .50, .84], verbose=False,\n truths=[posteriors[p][0] for p in model.grid_points.dtype.names])\n fig.savefig(corner_plot_filename)\n logger.info(\"Created figure {0}\".format(corner_plot_filename))\n\n # Plot the autocorrelation\n autocorrelation_filename = output(\"auto-correlation.{0}\".format(args.plot_format))\n fig = sick.plot.autocorrelation(sampler.chain)\n fig.savefig(autocorrelation_filename)\n logger.info(\"Created figure {0}\".format(autocorrelation_filename))\n\n # Plot some spectra\n pp_spectra_plot_filename = output(\"projected-spectra.{0}\".format(args.plot_format))\n fig = sick.plot.projection(model, spectra, chain=sampler.chain)\n fig.savefig(pp_spectra_plot_filename)\n logger.info(\"Created figure {0}\".format(pp_spectra_plot_filename))\n \n # Closing the figures isn't enough; matplotlib leaks memory\n plt.close(\"all\")\n\n # Delete some things\n del sampler, chain\n if args.save_chain_files:\n del primary_hdu, table_hdu, hdulist\n\n logger.info(\"Fin.\")\n return True\n\n\ndef aggregate(args):\n \"\"\" Aggregate JSON-formatted results into a single tabular FITS file. \"\"\"\n\n if os.path.exists(args.output_filename):\n if not args.clobber:\n raise IOError(\"output filename {0} already exists and we have been \"\\\n \"asked not to clobber it\".format(args.output_filename))\n else:\n logger.warn(\"Overwriting existing filename {0}\".format(args.output_filename))\n \n # Let's just assume it all aggregates from JSON to a FITS filename\n results = []\n for filename in args.result_filenames:\n with open(filename, \"r\") as fp:\n try:\n results.append(json.load(fp))\n except:\n logger.exception(\"Could not read results filename {0}\".format(filename))\n if args.debug: raise\n \n else:\n logging.debug(\"Successfully loaded results from {0}\".format(filename))\n\n # Get header order and sort them\n columns = results[0].keys()\n\n sorted_columns = []\n # Logic: RA, DEC then all other uppercase fields in alphabetical order\n # Then any other fields that have associated u_* headers in alphabetical order, as\n # well as their u_* columns\n # Then all the others in alphabetical order\n if \"RA\" in columns:\n sorted_columns.append(\"RA\")\n\n if \"DEC\" in columns:\n sorted_columns.append(\"DEC\")\n\n uppercase_columns = []\n parameteral_columns = []\n for column in columns:\n if column.isupper() and column not in sorted_columns: uppercase_columns.append(column)\n elif \"u_pos_{0}\".format(column) in columns: parameteral_columns.append(column)\n \n uppercase_columns, parameteral_columns = map(sorted, [uppercase_columns, parameteral_columns])\n all_parameteral_columns = []\n variants = (\"{0}\", \"u_pos_{0}\", \"u_neg_{0}\", \"u_maxabs_{0}\")\n for column in parameteral_columns:\n all_parameteral_columns.extend([variant.format(column) for variant in variants])\n\n sorted_columns.extend(uppercase_columns)\n sorted_columns.extend(all_parameteral_columns)\n\n other_columns = sorted(set(columns).difference(sorted_columns))\n ignore_columns = (\"model_configuration\", \"optimised_theta\", \"initial_theta\")\n sorted_columns.extend(list(set(other_columns).difference(ignore_columns)))\n\n # Create data types\n formats = [(\"f8\", \"|S256\")[isinstance(results[-1][each], (str, unicode))] \\\n for each in sorted_columns]\n\n # Create table\n data = [[result.get(each, [\"\", np.nan][formats[i] == \"f8\"]) \\\n for i, each in enumerate(sorted_columns)] for result in results]\n results_table = np.core.records.fromrecords(data, names=sorted_columns,\n formats=formats)\n\n # Write results to filename \n primary_hdu = pyfits.PrimaryHDU()\n table_hdu = pyfits.BinTableHDU(results_table)\n hdulist = pyfits.HDUList([primary_hdu, table_hdu])\n hdulist.writeto(args.output_filename, clobber=args.clobber)\n\n logger.info(\"Successfully written results from {0} sources with {1} fields\"\\\n \" to {2}\".format(len(results), len(results[0]), args.output_filename))\n\n\ndef parser(input_args=None):\n \"\"\" Create a parser. \"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"sick, the spectroscopic inference crank\",\n epilog=\"See 'sick COMMAND -h' for more information on a specific command.\"\\\n \" Documentation and examples available at https://github.com/andycasey/sick\")\n\n # Create subparsers\n subparsers = parser.add_subparsers(title=\"command\", dest=\"command\",\n description=\"Specify the action to perform.\")\n\n # Create a parent subparser\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\",\n action=\"store_true\", default=False, help=\"Vebose mode. Logger will print\"\\\n \" debugging messages.\")\n parent_parser.add_argument(\"--clobber\", dest=\"clobber\", action=\"store_true\",\n default=False, help=\"Overwrite existing files if they already exist.\")\n parent_parser.add_argument(\"--debug\", dest=\"debug\", action=\"store_true\", \n default=False, help=\"Enable debug mode. Any suppressed exception during \"\\\n \"runtime will be re-raised.\")\n\n # Create parser for the aggregate command\n aggregate_parser = subparsers.add_parser(\"aggregate\", parents=[parent_parser],\n help=\"Aggregate JSON results into a tabular format.\")\n aggregate_parser.add_argument(\"output_filename\", type=str,\n help=\"Output filename to aggregate results into.\")\n aggregate_parser.add_argument(\"result_filenames\", nargs=\"+\",\n help=\"The JSON result filenames to combine.\")\n aggregate_parser.set_defaults(func=aggregate)\n\n # Create parser for the download command\n download_parser = subparsers.add_parser(\"download\", parents=[parent_parser],\n help=\"Download a pre-cached model from an online repository.\")\n download_parser.add_argument(\"model_grid_name\", nargs=\"?\",\n help=\"The name of the pre-cached model grid to download, or 'list' (de\"\\\n \"fault) to see what pre-cached models are available.\", default=\"list\")\n download_parser.set_defaults(func=download)\n\n # Create parser for the estimate command\n estimate_parser = subparsers.add_parser(\"estimate\", parents=[parent_parser],\n help=\"Compute a point estimate of the model parameters, given the data.\")\n estimate_parser.add_argument(\"model\", type=str,\n help=\"The model filename in YAML- or JSON-style formatting.\")\n estimate_parser.add_argument(\"-r\", action=\"store_true\", dest=\"read_from_filename\",\n default=False, help=\"Read input spectra from a single filename.\")\n estimate_parser.add_argument(\"spectrum_filenames\", nargs=\"+\",\n help=\"Filenames of (observed) spectroscopic data.\")\n estimate_parser.add_argument(\"-o\", \"--output-dir\", dest=\"output_dir\", nargs=\"?\",\n type=str, default=os.getcwd(), help=\"Directory for output files.\")\n estimate_parser.add_argument(\"--filename-prefix\", \"-p\", dest=\"filename_prefix\",\n type=str, help=\"The filename prefix to use for the output files.\")\n estimate_parser.add_argument(\"--multi-sources\", dest=\"multiple_sources\",\n action=\"store_true\", default=False, help=\"Each spectrum is considered \"\\\n \"a different source.\")\n estimate_parser.add_argument(\"--multi-plexing\", dest=\"multiplexing\",\n action=\"store_true\", default=False, help=\"Specify that each FITS file \"\\\n \"contains a single channel of spectrum for many stars. Multiplexing \"\\\n \"implies --multi-sources to be true.\")\n estimate_parser.add_argument(\"-n\", \"--number-to-solve\", dest=\"number_to_solve\",\n default=\"all\", help=\"Specify the number of sources to solve. This is \"\\\n \"only applicable when --multi-sources or --multi-plexing is used. The \"\\\n \"default is to solve for %(default)s sources.\")\n estimate_parser.add_argument(\"-s\", \"--skip\", dest=\"skip\", action=\"store\", \n type=int, default=0, help=\"Number of sources to skip. This is only \"\\\n \"applicable when --multi-sources or --multi-plexing is used. Default: \"\\\n \"%(default)s)\")\n estimate_parser.add_argument(\"--no-plots\", dest=\"plotting\", action=\"store_false\",\n default=True, help=\"Disable plotting.\")\n estimate_parser.add_argument(\"--plot-format\", \"-pf\", dest=\"plot_format\", \n action=\"store\", type=str, default=\"pdf\", help=\"Format for output plots\"\\\n \" (default: %(default)s)\")\n estimate_parser.set_defaults(func=estimate)\n\n # Create parser for the solve command\n solve_parser = subparsers.add_parser(\"solve\", parents=[parent_parser],\n help=\"Compute posterior probability distributions for the model \"\\\n \"parameters, given the data.\")\n solve_parser.add_argument(\"model\", type=str,\n help=\"The model filename in YAML- or JSON-style formatting.\")\n solve_parser.add_argument(\"-r\", action=\"store_true\", dest=\"read_from_filename\",\n default=False, help=\"Read input spectra from a single filename.\")\n solve_parser.add_argument(\"spectrum_filenames\", nargs=\"+\",\n help=\"Filenames of (observed) spectroscopic data.\")\n solve_parser.add_argument(\"-o\", \"--output-dir\", dest=\"output_dir\", nargs=\"?\",\n type=str, default=os.getcwd(), help=\"Directory for output files.\")\n solve_parser.add_argument(\"--filename-prefix\", \"-p\", dest=\"filename_prefix\",\n type=str, help=\"The filename prefix to use for the output files.\")\n solve_parser.add_argument(\"--multi-sources\", dest=\"multiple_sources\",\n action=\"store_true\", default=False, help=\"Each spectrum is considered \"\\\n \"a different source.\")\n solve_parser.add_argument(\"--multi-plexing\", dest=\"multiplexing\",\n action=\"store_true\", default=False, help=\"Specify that each FITS file \"\\\n \"contains a single channel of spectrum for many stars. Multiplexing \"\\\n \"implies --multi-sources to be true.\")\n solve_parser.add_argument(\"-n\", \"--number-to-solve\", dest=\"number_to_solve\",\n default=\"all\", help=\"Specify the number of sources to solve. This is \"\\\n \"only applicable when --multi-sources or --multi-plexing is used. The \"\\\n \"default is to solve for %(default)s sources.\")\n solve_parser.add_argument(\"-s\", \"--skip\", dest=\"skip\", action=\"store\", \n type=int, default=0, help=\"Number of sources to skip. This is only \"\\\n \"applicable when --multi-sources or --multi-plexing is used. Default: \"\\\n \"%(default)s)\")\n solve_parser.add_argument(\"--no-chain-files\", dest=\"save_chain_files\",\n help=\"Do not save the chains to disk.\", action=\"store_false\", default=True)\n solve_parser.add_argument(\"--no-plots\", dest=\"plotting\", action=\"store_false\",\n default=True, help=\"Disable plotting.\")\n solve_parser.add_argument(\"--plot-format\", \"-pf\", dest=\"plot_format\", \n action=\"store\", type=str, default=\"pdf\", help=\"Format for output plots\"\\\n \" (default: %(default)s)\")\n solve_parser.set_defaults(func=solve)\n\n # Create parser for the resume command\n resume_parser = subparsers.add_parser(\"resume\", parents=[parent_parser],\n help=\"Resume MCMC simulation from a previously calculated state.\")\n resume_parser.add_argument(\"model\", type=str,\n help=\"The model filename in YAML- or JSON-style formatting.\")\n resume_parser.add_argument(\"state_filename\", type=str,\n help=\"The filename containing the pickled MCMC state.\")\n resume_parser.add_argument(\"burn\", type=int,\n help=\"The number of MCMC steps to burn.\")\n resume_parser.add_argument(\"sample\", type=int,\n help=\"The number of MCMC steps to sample after burn-in.\")\n resume_parser.add_argument(\"spectrum_filenames\", nargs=\"+\",\n help=\"Filenames of (observed) spectroscopic data.\")\n resume_parser.add_argument(\"-o\", \"--output-dir\", dest=\"output_dir\", nargs=\"?\",\n type=str, default=os.getcwd(),\n help=\"Directory where to save output files to.\")\n resume_parser.add_argument(\"--filename-prefix\", \"-p\", dest=\"filename_prefix\",\n type=str, help=\"The filename prefix to use for the output files.\")\n resume_parser.add_argument(\"--multi-channel\", \"-mc\", dest=\"multiple_channels\",\n action=\"store_true\", default=False,\n help=\"Use if each source has multiple spectral channels. Default is false\"\\\n \", implying that any additional spectra refers to a different source.\")\n resume_parser.add_argument(\"-s\", \"--skip\", dest=\"skip\", action=\"store\",\n type=int, default=0, help=\"Number of sources to skip (default: %(default)s)\")\n resume_parser.add_argument(\"--no-plots\", dest=\"plotting\", action=\"store_false\",\n default=True, help=\"Disable plotting.\")\n resume_parser.add_argument(\"--plot-format\", \"-pf\", dest=\"plot_format\", \n action=\"store\", type=str, default=\"pdf\", help=\"Format for output plots\"\\\n \" (default: %(default)s)\")\n resume_parser.set_defaults(func=resume)\n\n cache_parser = subparsers.add_parser(\"cache\", parents=[parent_parser],\n help=\"Cache the provided model for fast access at run-time.\")\n cache_parser.add_argument(\"model\", type=str,\n help=\"The (YAML- or JSON-formatted) model filename.\")\n cache_parser.add_argument(\"grid_points_filename\", type=str,\n help=\"The filename to cache the grid point information to.\")\n cache_parser.add_argument(\"fluxes_filename\", type=str,\n help=\"The filename to cache the fluxes into.\")\n cache_parser.set_defaults(func=cache)\n\n args = parser.parse_args(input_args)\n \n # Setup logging, bro.\n logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)\n\n # Create a default filename prefix based on the input filename arguments\n if args.command.lower() in (\"solve\", \"estimate\", \"optimise\", \"resume\") \\\n and args.filename_prefix is None:\n args.filename_prefix = sick.utils.default_output_prefix(args.spectrum_filenames)\n\n handler = logging.FileHandler(\"{}.log\".format(\n os.path.join(args.output_dir, args.filename_prefix)))\n formatter = logging.Formatter(\"%(asctime)s [%(levelname)s] %(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n \n return args\n\n \ndef main():\n \"\"\" Parse arguments and execute the correct sub-parser. \"\"\"\n\n args = parser()\n return args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sick/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":38310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"616290081","text":"from gensim.models import KeyedVectors\nimport numpy as np\nimport json\nimport torch\nimport pickle\nfrom transformers import DistilBertConfig, DistilBertModel, DistilBertTokenizer\nfrom whoosh.index import create_in\nfrom whoosh.fields import *\nfrom whoosh.qparser import QueryParser, MultifieldParser\nfrom whoosh import scoring\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nimport copy\n\nclass Index:\n def __init__(self, w2v_model, processed_json_file, bert_model=None, bert_tokenizer=None):\n self.bert_model = bert_model\n self.bert_tokenizer = bert_tokenizer\n self.model = KeyedVectors.load(w2v_model)\n index_keys = []\n index_values = []\n data = []\n cur_set = set([])\n with open(processed_json_file) as f:\n objs = json.load(f)[30:]\n for j, obj in enumerate(objs):\n snippet = obj[\"snippet\"]\n snippet = ' '.join(snippet.split())\n snippet = snippet.replace(\"\\n\", \" \")\n obj[\"snippet\"] = snippet\n sents = nltk.sent_tokenize(snippet)\n i = 0\n while i < len(sents):\n newsnippet = \" \".join(sents[i: i+2])\n if newsnippet not in cur_set:\n newobj = copy.deepcopy(obj)\n newobj[\"snippet\"] = newsnippet\n if i == len(sents) - 1 and j < len(objs) - 1 and \"| \" in objs[j+1][\"html\"]:\n newobj[\"html\"] = \" \" + newobj[\"html\"] + objs[j+1][\"html\"] + \" \"\n index_values.append(newobj)\n cur_set.add(newsnippet)\n i += 2\n\n # for line in f:\n # json_obj = json.loads(line)\n # json_obj[\"snippet\"] = json_obj[\"snippet\"].replace(u'\\xa0', u' ')\n # index_values.append(json_obj)\n self.create_whoosh_index(index_values)\n self.index_values = np.array(index_values)\n for value in index_values:\n index_keys.append(self.encode(value[\"snippet\"] + \" \" + value[\"header\"]))\n self.index_keys = np.stack(index_keys, axis=0)\n self.stop_words = set(stopwords.words('english'))\n\n def create_whoosh_index(self, json_objs):\n schema = Schema(\n id=NUMERIC(stored=True), filename=TEXT(stored=True),\n header=TEXT(stored=True), snippet=TEXT(stored=True))\n self.ix = create_in(\"indexdir\", schema)\n writer = self.ix.writer()\n for i, obj in enumerate(json_objs):\n writer.add_document(\n id=i, filename=obj[\"filename\"], header=obj[\"header\"],\n snippet=obj[\"snippet\"])\n writer.commit()\n\n def encode(self, text):\n if self.bert_model:\n return self.bert_encode(text)\n ave_vector = np.zeros((300,), dtype=float)\n words = text.split(\" \")\n for word in words:\n try:\n ave_vector += self.model[word.lower()]\n except KeyError:\n pass\n norm = np.linalg.norm(ave_vector)\n if norm > 0.:\n ave_vector /= norm\n return ave_vector\n\n def bert_encode(self, text):\n tokenized_ids = self.bert_tokenizer.encode(text)\n tokenized_ids = torch.tensor(tokenized_ids)[:512].unsqueeze(0)\n vectors = self.bert_model(tokenized_ids)[0].detach().numpy()\n vector = np.squeeze(np.mean(vectors, axis=1), axis=0)\n return vector# / np.linalg.norm(vector)\n\n def process_context(self, text):\n filtered_words = [\n word for word in word_list\n if word not in self.stop_words\n ]\n return \" \".join(word_list)\n\n\n def search(self, text, topn=5, context=\"\"):\n with self.ix.searcher(weighting=scoring.BM25F()) as searcher:\n querytext = text + \" \" + context\n query = MultifieldParser(\n [\"header\", \"snippet\"], self.ix.schema).parse(\" OR \".join(querytext.split(\" \")))\n results = searcher.search(query, limit=topn * 100)\n init_ids = np.array([res[\"id\"] for res in results])\n scores = np.array([r.score for r in results])\n mean_vector = self.encode(querytext)\n if init_ids.shape[0] > 0:\n sims = -np.dot(self.index_keys, mean_vector)\n scores = (scores - scores.min()) / (scores.max() - scores.min())\n sims[init_ids] -= scores\n top_idx = np.argsort(sims)[:topn*5]\n else:\n sims = -np.dot(self.index_keys, mean_vector)\n top_idx = np.argsort(sims)[:topn*5]\n top_vals = list(self.index_values[top_idx])\n cur_html_set = set([])\n ret_vals = []\n i = 0\n for i in range(len(top_vals)):\n if top_vals[i][\"html\"] in cur_html_set:\n continue\n ret_vals.append(top_vals[i])\n cur_html_set.add(top_vals[i][\"html\"])\n if len(ret_vals) >= topn:\n break\n return ret_vals\n\nif __name__ == \"__main__\":\n config_class, model_class, tokenizer_class = (DistilBertConfig, DistilBertModel, DistilBertTokenizer)\n config = config_class.from_pretrained(\"distilbert-base-uncased\")\n tokenizer = tokenizer_class.from_pretrained(\"distilbert-base-uncased\", do_lower_case=True)\n model = model_class.from_pretrained(\n \"../../finmodel.bin\", from_tf=False, config=config)\n idx = Index(\"../../finw2v.bin\", \"../../AAL.json\", None, None)#, model, tokenizer)\n with open(\"../../idx_dumpv4.pkl\", \"wb\") as f:\n pickle.dump(idx, f)\n # with open(\"../../idx_dumpv2.pkl\", \"rb\") as f:\n # idx = pickle.load(f)\n while True:\n x = input(\"query: \")\n y = input(\"context: \")\n print(idx.search(x, context=y))\n","sub_path":"backend/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"451008415","text":"#!/usr/bin/env python\n\n\"\"\" sonar2laser.py - Version 1.1 2013-12-20\n\n Translate the /sensor_msgs/Range to /sensor_msgs/LaserScan ,and publish topic.\n\n Created for the Pi Robot Project: http://www.pirobot.org\n Copyright (c) 2012 Patrick Goebel. All rights reserved.\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.5\n \n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details at:\n \n http://www.gnu.org/licenses/gpl.html\n \n\"\"\"\n\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import Range\n\nclass Sonar2Laser():\n def __init__(self):\n # Give the node a name\n rospy.init_node('sonar2laser', anonymous=False)\n\n self.sonar_topics = rospy.get_param('~sonar_topics',[])\n self.output_topic = rospy.get_param('~output_topic','sonar2laser')\n self.frame_id = rospy.get_param('~frame_id','sonar2laser')\n\n rospy.loginfo('sonar_topics:')\n rospy.loginfo(self.sonar_topics)\n rospy.loginfo('output_topic:')\n rospy.loginfo(self.output_topic)\n # Publisher of type nav_msgs/Odometry\n self.laserPub = rospy.Publisher(self.output_topic, LaserScan, queue_size=10)\n \n rospy.loginfo('wait for msg')\n # Wait for the topic to become available\n for topic in self.sonar_topics:\n rospy.wait_for_message(topic, Range)\n \n # Subscribe to the topic\n for topic in self.sonar_topics:\n rospy.Subscriber(topic, Range, self.pub_laser)\n \n rospy.loginfo(\"Translate Range msg to LaserScan msg\")\n \n def pub_laser(self, msg):\n laser = LaserScan()\n\n laser.header = msg.header\n laser.header.frame_id = self.frame_id\n laser.angle_min = 0\n laser.angle_max = 3.14\n laser.angle_increment = 0.01\n laser.time_increment = 0.01\n laser.scan_time = 0.1\n laser.range_min = 0.2\n laser.range_max = 4.5\n laser.ranges = [msg.range,msg.range]\n laser.intensities = [1,1]\n\n self.laserPub.publish(laser)\n \nif __name__ == '__main__':\n try:\n Sonar2Laser()\n rospy.spin()\n except:\n pass\n \n\n \n","sub_path":"youyou_robot/launch/sonar2laser.py","file_name":"sonar2laser.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"169034538","text":"#-*- coding: utf-8 -*-\nfrom django.conf.urls import include, url, patterns\nfrom rest_framework_nested import routers\nfrom ManicurToDay import settings\nfrom authentication.views import AccountViewSet, LoginView, LogoutView\nfrom ManicurToDay.views import IndexView\nfrom post.views import AccountClientViewSet, ClientViewSet\n\nrouter=routers.SimpleRouter()\nrouter.register(r'accounts', AccountViewSet)\nrouter.register(r'clients', ClientViewSet)\n\naccounts_router = routers.NestedSimpleRouter(\n router,r'accounts',lookup='account'\n)\naccounts_router.register(r'clients',AccountClientViewSet)\nurlpatterns =patterns(\n '',\n url(r'^api/v1/',include(router.urls)),\n url(r'^api/v1/',include(accounts_router.urls)),\n url(r'^api/v1/auth/login/$',LoginView.as_view(), name='login'),\n url(r'^api/v1/auth/logout/$',LogoutView.as_view(), name='logout'),\n url('^.*',IndexView.as_view(),name='index'),\n )\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += patterns('',\n url(r'^__debug__/', include(debug_toolbar.urls)),\n )\n \n \n","sub_path":"ManicurToDay/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"404096133","text":"from unidecode import unidecode\nimport re\nimport urllib2\nclass Util:\n\tdef __init__(self):\n\t\tself.URL_BASE = \"http://en.wikipedia.org/w/api.php\"\n\t\tself.blacklist = ['article', 'wikipedia', 'wiki', 'birth', 'people from', 'from', 'category', 'categories', 'pages', '.php', 'stubs', 'death', 'people', 'template', 'wiktio', 'en.', 'file', 'help', 'stub', 'list', 'disambiguation']\n\t\tself.CATEGORY = 'category'\n\t\tself.ARTICLE = 'topic'\n\t\tself.DISAMBIGUATION = 'disambiguation'\n\t\tself.SIBLING_REL = 'sibling'\n\t\tself.CATEGORY_REL = 'parent'\n\t\tself.SUBCAT_REL = 'subcat'\n\t\tself.DISAMB_REL = 'disambiguation'\n\t\tself.INF = 9999\n\n\tdef _contains(self, s, l):\n\t\tfor i in l:\n\t\t\tif s.lower().rfind(i.lower()) >= 0:\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef _clean(self, s):\n\t\ts = self._encode_str(s)\n\t\ts = urllib2.unquote(s)\n\t\ts = re.sub(r'/wiki/', '', s)\n\t\ts = re.sub(r' ', '_', s)\n\t\ts = re.sub(r'#.*', '', s)\n\t\treturn s\n\n\tdef _encode_str(self, s):\n\t\tif type(s) == unicode:\n\t\t\treturn unidecode(s)\n\t\telse:\n\t\t\treturn unidecode(s.decode(\"utf-8\", \"ignore\"))\n","sub_path":"tailor/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"573730702","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n # d = {}\n # for index, value in enumerate(nums):\n # if target - value in d:\n # return d[target - value], index\n # d[value] = index\n sorted_nums = sorted(nums)\n i = 0\n j = len(sorted_nums) - 1\n while i < j:\n sum = sorted_nums[i] + sorted_nums[j]\n if sum == target:\n return nums.index(sorted_nums[i]), nums.index(sorted_nums[j])\n elif sum < target:\n i += 1\n else:\n j -= 1\n\na = Solution()\nprint(a.twoSum([3,2,4], 6))\n\n\n\n","sub_path":"leetcode(1.TwoSum).py","file_name":"leetcode(1.TwoSum).py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"465063241","text":"#!/usr/bin/env python2\n#coding=utf-8\nimport sys\nimport os\n\"\"\"\n./transcode.py file1 [file2] [file3] ...\nThis program transfer file from utf-8 to gbk.\nTips: using \"./transcode.py `ls`\" to transfer all the file in this dirctory from utf-8 to gbk.\n\"\"\"\nif(len(sys.argv) < 2):\n print(\"No action specified.\")\n sys.exit()\nif(sys.argv[1].startswith('--')):\n option = sys.argv[1][2:]\n if(option == 'version'):\n print(\"Version 0.0.1\")\n elif(option == 'help'):\n print(\"\"\"\nThis program transfer file from utf-8 to gbk.\n\nusing formate: ./transcode.py file1 [file2] [file3] ...\nTips: using \"./transcode.py `ls`\" or \"./transcode.py *\" to transfer all the file in this dirctory from utf-8 to gbk.\n\n \"\"\")\n else:\n print(\"Unknown option!\")\n sys.exit()\nelif(sys.argv[1] == '*'):\n argument = os.system(\"ls\")\nelse:\n argument = sys.argv[1:]\n\nos.system(\"mkdir target\")\n\nselect = raw_input(\"Please input the choice:\\n\\t1. utf-8 -> gbk\\n\\t2. gbk -> utf-8\\n\")\nif select == '1':\n for file in argument:\n os.system(\"iconv -c -f utf-8 -t gbk \"+file+' > target/'+file)\n print (\"Completed!\\n\")\nelif select == '2':\n for file in argument:\n os.system(\"iconv -c -f gbk -t utf-8 \"+file+' > target/'+file)\n print (\"Completed!\\n\")\nelse:\n print (\"Invalid choice!\\n\")\n\n","sub_path":"transcode.py","file_name":"transcode.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"178878756","text":"class Solution:\n def reverseString(self, s: List[str]) -> None:\n index = 0\n while index < len(s)//2 and len(s) > 1:\n s[index], s[-1-index] = s[-1-index], s[index]\n index += 1\n return s\n\n#this doesn't work unless you increase recursion depth, but can't on Leetcode\nimport sys\nsys.setrecursionlimit(1500)\nclass Solution:\n left = 0\n right = -1\n \n def reverseString(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n if self.left >= len(s) + self.right or len(s) <= 1:\n return ''\n \n hold = s[self.left]\n s[self.left] = s[self.right]\n s[self.right] = hold\n self.right -= 1\n self.left += 1\n \n self.reverseString(s)\n","sub_path":"python/344_reverseString.py","file_name":"344_reverseString.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"599221125","text":"import os\r\nfrom flask import Flask,jsonify, request, redirect, url_for, send_from_directory\r\nimport json\r\nimport logging\r\n\r\nclass Task(object):\r\n def __init__(self, filename, result, point):\r\n self.filename = filename\r\n self.result = result\r\n self.point = point\r\n\r\n def obj_dict(self):\r\n return {'filename': self.filename, 'result': self.result, 'point': self.point}\r\n\r\n#UPLOAD_FOLDER = 'uploads'\r\nALLOWED_EXTENSIONS = set(['exe', 'cpl', 'reg', 'ini', 'bat', 'com', 'dll', 'pif', 'lnk', 'scr', 'vbs', 'ocx', 'drv', 'sys'])\r\n#logging.basicConfig(level=logging.DEBUG)\r\napp = Flask(__name__)\r\n#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\n\r\ndef allowed_file(filename):\r\n prop = filename.split('.')[-1]\r\n return prop in ALLOWED_EXTENSIONS\r\n\r\n@app.route('/upload-file', methods=['POST'])\r\ndef upload_file():\r\n file = request.files['file']\r\n if file and allowed_file(file.filename):\r\n # dua file vao cuckoo xu li https://cuckoo.readthedocs.io/en/latest/usage/api/\r\n print('----- found file: ', file.filename)\r\n #file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))\r\n\r\n # tra ve kq nhu vd duoi\r\n task = Task('1','2','3') \t\r\n return jsonify({'results' : task.obj_dict(), 'msg':'ok'})\r\n return jsonify({'msg': 'error'})\r\n\r\n@app.route('/upload-multifile', methods=['POST'])\r\ndef upload_files():\r\n #logging.info(request.files)\r\n if 'files[]' not in request.files:\r\n return jsonify({'msg': 'error'})\r\n files = request.files.getlist('files[]')\r\n for file in files:\r\n if file and allowed_file(file.filename):\r\n # Dua file vao cuckoo de xu li https://cuckoo.readthedocs.io/en/latest/usage/api/\r\n print('----- found file: ', file.filename)\r\n #file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))\r\n\r\n # tra ve kq nhu vd duoi \r\n task = []\r\n task.append(Task(1,2,3).obj_dict())\r\n task.append(Task(2,3,4).obj_dict())\r\n #results = [obj.obj_dict() for obj in task]\r\n return jsonify({'results' : task, 'msg' : 'ok'})\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(host=\"127.0.0.1\", port = 8123, debug=True)","sub_path":"Web_API.py","file_name":"Web_API.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"432567087","text":"from pycqed.measurement.randomized_benchmarking.two_qubit_clifford_group \\\n import SingleQubitClifford, TwoQubitClifford\nfrom os.path import join, dirname, abspath\nfrom os import mkdir\nimport numpy as np\nfrom zlib import crc32\n\noutput_dir = join(abspath(dirname(__file__)), 'clifford_hash_tables')\ntry:\n mkdir(output_dir)\nexcept FileExistsError:\n pass\n\ndef construct_clifford_lookuptable(generator, indices):\n \"\"\"\n \"\"\"\n lookuptable = []\n for idx in indices:\n clifford = generator(idx=idx)\n # important to use crc32 hashing as this is a non-random hash\n hash_val = crc32(clifford.pauli_transfer_matrix.round().astype(int))\n lookuptable.append(hash_val)\n return lookuptable\n\ndef generate_hash_tables():\n print(\"Generating Clifford hash tables.\")\n single_qubit_hash_lut = construct_clifford_lookuptable(\n SingleQubitClifford, np.arange(24))\n with open(join(output_dir, 'single_qubit_hash_lut.txt'), 'w') as f:\n for h in single_qubit_hash_lut:\n f.write(str(h)+'\\n')\n\n two_qubit_hash_lut = construct_clifford_lookuptable(\n TwoQubitClifford, np.arange(11520))\n with open(join(output_dir, 'two_qubit_hash_lut.txt'), 'w') as f:\n for h in two_qubit_hash_lut:\n f.write(str(h)+'\\n')\n print(\"Successfully generated Clifford hash tables.\")\n\nif __name__ == '__main__':\n generate_hash_tables()\n","sub_path":"pycqed/measurement/randomized_benchmarking/generate_clifford_hash_tables.py","file_name":"generate_clifford_hash_tables.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"567356373","text":"\n#imports libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time as systime\nimport datetime as dtime\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nimport gc\n\n\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n# Make mapping for month\nmonth_enum={1:'jan',2:'feb',3:'mar',4:'apr',5:'may',6:'jun',7:'jul',8:'aug',9:'sep',10:'oct',11:'nov',12:'dec'}\n\n# Load input files\nTRAIN_FILENAME = 'train.csv'\nTEST_FILENAME = 'test.csv'\n\ntrain_raw = pd.read_csv('../input/'+TRAIN_FILENAME, parse_dates=['Dates'], index_col=False)\ntest_raw = pd.read_csv('../input/'+TEST_FILENAME, parse_dates=['Dates'], index_col=False)\n\n# Binarize Days, Month, District, Time\ndef feature_engineering(data):\n \n days = pd.get_dummies(data.DayOfWeek)\n district = pd.get_dummies(data.PdDistrict)\n month = pd.get_dummies(data.Dates.dt.month.map(month_enum))\n hour = data.Dates.dt.hour\n hour = pd.get_dummies(hour) \n \n #Append newly created dummy variables to dataframe\n new_data = pd.concat([hour, month, days, district], axis=1)\n \n return new_data\n\n# Prepare the data\ntrain = feature_engineering(train_raw)\n#test = pd.concat([test_raw['Id'],feature_engineering(test_raw)], axis=1)\ntest = feature_engineering(test_raw)\n\n# Encode distinct Categories into dummy variables\ncat_enc = LabelEncoder()\ncat_enc.fit(train_raw['Category'])\ntrain['CategoryEncoded'] = cat_enc.transform(train_raw['Category'])\n\n# Select the Predictors\nx_cols = list(train.columns[0:53].values)\n\n# Fit Logit model and estimate the class probability\n#clf = xgb.XGBClassifier(n_estimators=5)\n\nclf = AdaBoostClassifier(DecisionTreeClassifier(max_depth = 8),\n n_estimators = 40,\n learning_rate = 0.5, \n random_state = 1)\n\nclf.fit(train[x_cols], train['CategoryEncoded'])\npredicted = clf.predict_proba(test[x_cols])\n\n\n# Make the output data frame by mapping the probability estimates to categories\ncrime = cat_enc.fit_transform(train_raw.Category)\nresult=pd.DataFrame(predicted, columns=cat_enc.classes_)\n\n# I noticed that predicted estimates were having 10 decimal digits or even more.\n# Which was giving me memory insufficient error while trying to save it as .csv\n# For eg. I tried saving half of the output(442131 records) and .csv file generated\n# was of size 370mb. So I rounded of the digits to 4-5 decimal points and output file\n# size got reduced.\nresult=result.round(5)\n\n# Appending the Index column\nresult= pd.concat([test_raw['Id'], result], axis=1)\n\ndel train\ndel test\ndel train_raw\ndel test_raw\n\ngc.collect()\n\nresult.to_csv('submit.csv', index = False)\n\n\n\n","sub_path":"San_Francisco_Crime_AdaBoosting.py","file_name":"San_Francisco_Crime_AdaBoosting.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"533118361","text":"import boto3\nimport json\nimport os\n\nfrom boto3.dynamodb.conditions import Attr\n\ntabla_ordenes = os.environ[\"TABLA_ORDENES\"]\n\ndynamodb = boto3.resource(\"dynamodb\")\ntabla = dynamodb.Table(tabla_ordenes)\n\ndef lambda_handler(event, context):\n print(event)\n data = {}\n data[\"id\"] = event[\"id\"]\n data[\"articulo\"] = event[\"articulo\"]\n data[\"categoria_articulo\"] = event[\"categoria_articulo\"]\n data[\"metodo_pago\"] = event[\"metodo_pago\"]\n data[\"id_comprador\"] = event[\"id_comprador\"]\n data[\"nombre_comprador\"] = event[\"nombre_comprador\"]\n data[\"afiliado\"] = event[\"afiliado\"]\n data[\"estado\"] = \"Completada\"\n respuesta_put_item = tabla.put_item(\n Item=data \n )\n print(respuesta_put_item)\n","sub_path":"actualizar-orden.py","file_name":"actualizar-orden.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"11199139","text":"from fastapi import FastAPI\nimport pika\n\napp = FastAPI()\n\n\n# ヘルスチェック用\n@app.get(\"/\")\ndef read_root():\n return {\"Status\": \"OK\"}\n\n\n# RabbitMQ用\n@app.get(\"/add-job/{message}\")\ndef add_job(message: str):\n # RabbitMQサーバと接続(ホスト名にはコンテナ名を指定しているが,Dockerを使ってない場合はIPアドレスを指定)\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=\"rabbitmq\"))\n # チャンネルの確立\n channel = connection.channel()\n # メッセージを格納するためのキュー(task_queue)を作成\n channel.queue_declare(queue=\"task_queue\", durable=True)\n # メッセージをキュー(task_queue)に格納\n channel.basic_publish(\n exchange=\"\",\n routing_key=\"task_queue\",\n body=message,\n properties=pika.BasicProperties(\n delivery_mode=2, # メッセージの永続化\n ))\n # 接続のクローズ及びメッセージが配信されたことを確認\n connection.close()\n\n return {\"send\": message}\n","sub_path":"docker/fastapi/producer/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"219081838","text":"from defs import *\r\nfrom data_processing import best_model\r\n\r\n\r\nif sanity==1:\r\n\td[\"mainType\"] = \"Optimize_Model\"\r\n\td[\"_dataFile\"] = data_file\r\n\td[\"_maxChrNum\"] = -10\r\n\td[\"_minChrNum\"] = -1\r\nelse:\r\n\td[\"mainType\"] = \"mainSimulate\"\r\n\td[\"_freqFile\"] = freq_file\r\n\td[\"_simulationsIter\"] = 100\r\n\td[\"_simulationsJumpsStats\"] = \"expStats.txt\"\r\n\td[\"_maxChrNumForSimulations\"] = max(counts) * 10\r\n\td[\"_simulationsTreeLength\"] = 4\r\n\r\n\r\nd[\"_outDir\"] = outDir\r\nd[\"_treeFile\"] = tree_file\r\nd[\"_logFile\"] = \"log.txt\"\r\nd[\"_logValue\"] = 6\r\nd[\"_maxChrNum\"] = -10\r\nd[\"_minChrNum\"] = -1\r\nd[\"_maxOptimizationIterations\"] = 5\r\nd[\"_epsilonLLimprovement\"] = 0.01\r\nd[\"_optimizeIterNum\"] = \"0,1,3\"\r\nd[\"_optimizePointsNum\"] = \"5,2,1\"\r\nd[\"_branchMul\"] = 1\r\n\r\n\r\ndef initialize_defaults():\r\n\t'''\r\n\t\tinitialize parameters default values\r\n\t:return: parameters dictionary with fixed parameters\r\n\t'''\r\n\td = {}\r\n\r\n\td[\"_maxChrNum\"] = -10\r\n\td[\"_minChrNum\"] = -1\r\n\td[\"_branchMul\"] = 999\r\n\td[\"_simulationsNum\"] = 1000\r\n\td[\"_logFile\"] = \"log.txt\"\r\n\td[\"_logValue\"] = 6\r\n\td[\"_maxOptimizationIterations\"] = 5\r\n\td[\"_epsilonLLimprovement\"] = 0.01\r\n\td[\"_optimizeIterNum\"] = \"0,1,3\"\r\n\td[\"_optimizePointsNum\"] = \"5,2,1\"\r\n\td[\"_simulationsIter\"] = 100\r\n\td[\"_simulationsTreeLength\"] = 4\r\n\r\n\treturn d\r\n\r\ndef create_user_param_dict(filename):\r\n\td = {}\r\n\twith open(filename, \"r\") as params_file:\r\n\t\tfor line in params_file:\r\n\t\t\tline = line.strip()\r\n\t\t\tname = re.search(\"(.*)\\s(.*)\",line).group(1)\r\n\t\t\tval = re.search(\"(.*)\\s(.*)\", line).group(2)\r\n\t\t\td[name] = val\r\n\treturn d\r\n\r\ndef create_params_dict(outDir, dataFile, treeFile, params_from_user):\r\n\r\n\td = initialize_defaults()\r\n\r\n\td[\"_mainType\"] = \"mainSimulate\"\r\n\td[\"_outDir\"] = outDir\r\n\tif os.path.isfile(dataFile):\r\n\t\td[\"_dataFile\"] = dataFile\r\n\td[\"treeFile\"] = treeFile\r\n\r\n\tif os.path.isfile(params_from_user):\r\n\t\ttmp_d = create_user_param_dict(params_from_user)\r\n\telse:\r\n\t\t#########################>>>>>>>>>>>>>>>>>>>> need model for path names, but user don't necessarily specify model >>>>>>>>>>>>>>>######################\r\n\t\ttmp_d = best_model.get_params(main_res_dir + model + CE_res_filename,main_res_dir + model + root_freq_filename,max(counts) * 10) # parse from existing CE results file\r\n\t\tbntpv_vec = create_bntpv(main_res_dir + model_name + expectation_file, main_res_dir + model_name + mlAncTree,d[\"_baseNumber\"])\r\n\t\td[\"_baseTransitionProbs\"] = bntpv_vec\r\n\t\td[\"_maxChrNumForSimulations\"] = max(counts) * 10\r\n\t# d = {**d, **tmp_d}\r\n\treturn d\r\n\r\n","sub_path":"data_processing/params_processing.py","file_name":"params_processing.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"465457115","text":"import tkinter as tk\nfrom tkinter import ttk\nimport abc\n\n\nfrom frames.scrollable_frame import ScrollableFrame\n\n\nclass BaseSummary(ttk.Frame, abc.ABC):\n\n def __init__(self, container, controller, **kwargs):\n super().__init__(container, **kwargs)\n self.controller = controller\n\n # container for title\n self.title_container = tk.Frame(self, bd=10, relief='groove')\n self.title_container.pack(side='top', fill='both', expand=True)\n # container for entries\n self.container = ScrollableFrame(self, bd=10, relief='groove')\n self.container.pack(side='top', fill='both', expand=True)\n # container for buttons\n self.button_container = tk.Frame(self, bd=10, relief='groove')\n self.button_container.pack(side='top', fill='x', expand=True)\n\n def title_place(self, text):\n title = ttk.Label(\n self.title_container,\n text=text,\n font=40\n )\n title.config(anchor='center')\n title.pack(side='top', fill='x', expand=True)\n\n def container_place(self, text):\n self.container.frame.columnconfigure(0, weight=1)\n label = ttk.Label(\n self.container.frame,\n text=text\n )\n label.grid(row=0, column=0, sticky='nsew')\n\n def create_buttons(self, place):\n self.button_container.columnconfigure(0, weight=1)\n button1 = ttk.Button(\n self.button_container,\n text='Back'\n )\n button1.grid(row=0, column=0, sticky='ew')\n button1['command'] = lambda: self.controller.show_frame(place)","sub_path":"Lab5.5/frames/base_summary.py","file_name":"base_summary.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"355936483","text":"#!/usr/bin/python\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom git_admin.models import Repo, RepoType, MasterRepo, CommittedFile, Commit, FileExtension\nfrom optparse import make_option\nimport subprocess\nimport os\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nSCRIPTS_PATH = getattr(settings, \"PATH_TO_SCMFORGE_SCRIPTS\", None)\n\n\nclass Command(BaseCommand):\n \"\"\" Manipulates statistics data \"\"\"\n option_list = BaseCommand.option_list + (\n make_option('--clean', action='store_true', dest='clean', default=False,\n help='Delete all Git repositories from database'),) + (\n make_option('--import', action='store_true', dest='import', default=False,\n help='Update all Git repositories in database basing on what is in the filesystem'), )+(\n make_option('--import-commits', action='store_true', dest='report', default=False, help='Generate stats'),)\n\n def handle(self, **options):\n if options['clean']:\n self.stdout.write(\"Number of commits before cleaning: %s\" % str(CommittedFile.objects.all().count()))\n CommittedFile.objects.all().delete()\n FileExtension.objects.all().delete()\n self.stdout.write(\"Number of commits after cleaning: %s\" % str(CommittedFile.objects.all().count()))\n elif options['import']:\n for repo in Repo.objects.all():\n repo.path = repo.path\n print(\"\\n\\n\\n### starting import for REPO: %s\" % repo.path)\n proc = subprocess.Popen([SCRIPTS_PATH + \"/git_commits_size.sh\", repo.path], stdout=subprocess.PIPE)\n commit_list = proc.stdout.readlines()\n for commit in commit_list:\n commit_split = commit.split(\" \", 2)\n new_commit = CommittedFile()\n new_extension.sha1 = commit_split[0]\n new_commit.size = int(commit_split[1])\n new_commit.filename = commit_split[2].replace(\"\\n\", \" \")\n new_commit.repo = repo\n if \".\" in os.path.basename(new_commit.filename):\n ext = os.path.basename(new_commit.filename).split(\".\")[-1]\n try:\n new_commit.extension = FileExtension.objects.get(extension=ext)\n except ObjectDoesNotExist:\n print(\"adding new extension to database: %s\" % ext)\n new_extension = FileExtension()\n new_extension.extension = ext\n new_extension.save()\n new_commit.extension = new_extension\n new_commit.save()\n print(\"added commit: %s %s %s \" % (new_commit.sha1, new_commit.size, new_commit.filename))\n print(\"..finished\")\n elif options['import-commits']:\n for repo in Repo.objects.all():\n print(repo.path)\n proc = subprocess.Popen([SCRIPTS_PATH + \"/git_full_log.sh\", repo.path], stdout=subprocess.PIPE)\n commit_list = proc.stdout.readlines()\n for commit in commit_list:\n print(commit)\n print([SCRIPTS_PATH + \"/git_commits_size.sh\", repo.path])\n","sub_path":"scmforge/git_admin/management/commands/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"176754699","text":"\"\"\"\nMY20 RYI Home Page\n\"\"\"\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.select import Select\nimport openpyxl\nfrom bs4 import BeautifulSoup\nfrom requests.adapters import HTTPAdapter\nfrom selenium import webdriver\nimport requests\nfrom page.page import Page\nfrom proj21_escape_everyday.data.urls import current_url\nfrom proj21_escape_everyday.element.elements_define import ElementsDefine\n\nclass HomePage(Page):\n \"\"\"\n MY20 RYI Home Page\n \"\"\"\n def __init__(self, driver):\n \"\"\"\n init function\n :param driver:\n \"\"\"\n self.url = \"/{}/home\"\n super(HomePage, self).__init__(driver, current_url())\n self.element = ElementsDefine()\n\n def get_social_links(self):\n \"\"\"\n Get HomePage Social Link\n :return:\n \"\"\"\n social_links = self.find_elements(self.element.homepage_social_link)\n return [a.get_attribute(\"href\") for a in social_links if a.get_attribute(\"href\")]\n\n def check_response_code(self, url):\n headers = {\n # \"Connection\": \"close\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36\"\n }\n s = requests.Session()\n s.mount('http://', HTTPAdapter(max_retries=5)) # 失败之后重试\n s.mount('https://', HTTPAdapter(max_retries=5))\n r = s.get(url, headers=headers, timeout=20)\n return r.status_code\n\n def get_footer_links(self, locale):\n \"\"\"\n check footer links\n \"\"\"\n headers = {\n # \"Connection\": \"close\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36\"\n }\n url = f\"https://hdguest:MLP@2017@change-your-ride.proferochina.com/{locale}/home\"\n s = requests.Session()\n s.mount('http://', HTTPAdapter(max_retries=5)) # 失败之后重试\n s.mount('https://', HTTPAdapter(max_retries=5))\n r = s.get(url, headers=headers, timeout=20)\n bs = BeautifulSoup(r.text, \"html.parser\")\n footer_links = bs.find(name=\"div\", class_=\"footer_links-container\").find_all(\"a\")\n footer_li = []\n for item in footer_links:\n footer_li_child = []\n if len(item.get('href')):\n footer_li_child.append(item.get('href'))\n footer_li_child.append(item.get('target'))\n footer_li.append(footer_li_child)\n\n return footer_li\n\n","sub_path":"proj21_escape_everyday/page/home_page.py","file_name":"home_page.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"355469820","text":"import socket\nimport time\n\nclass SubscriberSocket(object):\n def __init__(self,ip,port):\n self.ip = ip\n self.port = port\n self.socket = None\n\n def establish_connection(self):\n self.socket = socket.socket() \n host = self.ip \n port = self.port \n self.socket.connect(('0.0.0.0', port))\n\n def receive(self):\n i = 0\n while 1:\n self.socket.send(bytes(str(i),'UTF-8'))\n data = self.socket.recv(1024).decode(\"utf-8\")\n if 'no more' in data:\n print(\"all publisher data received , sleeping for 10 seconds\")\n time.sleep(10)\n else:\n i += 1\n print(\"received data from publisher - \",data)\n time.sleep(1)\n\n def close(self):\n self.socket.close()\n","sub_path":"language_programs/pubsub/subscriber2/subscriber_socket.py","file_name":"subscriber_socket.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"265408657","text":"from nose import tools\nimport minIRC_Client\nfrom minIRC_Client import log\nfrom minIRC_Client.client import Client\nimport asyncio\nimport sys\n\nlogger = log.setup_custom_logger('root.tests', level=5)\n\nHOST = '127.0.0.1'\nPORT = 10101\n\nchannels = set()\nusers = {}\n\n\ndef setup():\n loop = asyncio.get_event_loop()\n client = Client(loop, name='Admin')\n coro = loop.create_connection(lambda: client, HOST, PORT)\n\n try:\n logger.debug(f'Trying to establish a connection to minIRC server on host: {HOST} port: {PORT}.')\n loop.run_until_complete(coro)\n except ConnectionRefusedError:\n logger.debug(f'Connection refused. host: {HOST} port: {PORT}')\n loop.close()\n sys.exit(1)\n\ndef teardown():\n print(\"TEAR DOWN!\")\n\ndef test_basic():\n print(\"I RAN!\")\n","sub_path":"tests/minIRC_Client_tests.py","file_name":"minIRC_Client_tests.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"23600957","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import Counter\nimport logging\nimport pandas as pd\nimport re\nimport numpy as np\n\n# logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',\n# level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\ndef test1():\n # [seq_len, batch_size, hidden_size]\n h = torch.rand(10, 3, 5)\n print(h.shape)\n x = torch.tanh(h)\n print(x.shape)\n s = h.sum(dim=0)\n print(s.shape)\n z = s * x\n print(z)\n\n S = 0\n for time in range(h.size(0)):\n S += h[time] * x\n print(S == z)\n\n\ndef test2():\n x = torch.arange(24).reshape(2, 4, 3)\n print(x)\n print(x.sum(dim=1))\n print(x.sum(dim=0))\n\n\ndef test3():\n x = torch.tensor([[1, 2, 3],\n [0, 1, 4],\n [1, 1, 1]], dtype=torch.float)\n y = torch.ones(3, 3)\n z = torch.arange(9).reshape(3, 3).float()\n print(x*z + y*z)\n print((x+y)*z)\n print((x*z+y*z) == (x+y)*z)\n\n# def test4():\n# logging.info(\"ok\")\n\ndef test5():\n df = pd.read_csv('data/1.txt', header=None, sep='|||')\n # print(df.count())\n print(df.head())\n\ndef test6():\n with open(\"data/char_corpus.txt\", encoding='utf-8', errors='ignore') as fin:\n with open('data/char_tokens.txt', 'a', encoding='utf-8') as fout:\n for line in fin:\n line = re.sub(r'\\s+', '', line)\n fout.write(' '.join(list(line)))\n fout.write('\\n')\n\ndef test7():\n logger.info(\"hello world\")\n\ndef test8():\n x = np.random.randint(1, 50, 10)\n y = list(range(10))\n print(x, '\\n', y)\n z = list(zip(x, y)) # 打包\n z.sort(key=lambda t: t[0], reverse=True) # 指定取待排序元素的哪一项进行排序\n print(z)\n a, b = zip(*z) # 与zip相反,zip*可理解为解压\n print(a, '\\n', b)\n\n nums = ['flower', 'flow', 'flight']\n for i in zip(*nums):\n print(i)\n\nclass Instance(object):\n def __init__(self, wds, tag):\n self.wds = wds\n self.tag = tag\n def __str__(self):\n return ''.join(self.wds) + '|' + str(self.tag)\n\ndef test9():\n insts = [\n Instance(['我', '是', '天大人'], 1),\n Instance(['可恶'], 0),\n Instance(['它', '走了'], 0),\n Instance(['我', '爱', '我', '的', '祖国'], 1)\n ]\n\n # sorted(insts, key=lambda s: s.tag)\n insts.sort(key=lambda s: len(s.wds), reverse=True)\n\n for i in insts:\n print(i.wds, i.tag)\n\n\ndef test10():\n inputs = torch.randn(3, 64, 8)\n print(inputs.shape) # [3, 64, 8]\n inputs.transpose_(1, 2)\n print(inputs.shape)\n apool = nn.AdaptiveMaxPool1d(output_size=1)\n output = apool(inputs)\n print(output.shape) # [3, 64, 1]\n\n\ndef test11():\n # x = torch.arange(24).float().reshape((2, 3, 4))\n # print(x)\n # print(F.softmax(x[0][0] * x[0]))\n\n # for b in range(2):\n # for s in range(3):\n # t = x[b][s] * x[b]\n # print(t, F.softmax(t, dim=1))\n\n y = torch.arange(12).float().reshape(3, 4)\n print(y)\n print(F.softmax(y[0] * y))\n\n\ndef test12():\n x = torch.arange(24).float().reshape(2, 4, 3)\n print(x)\n print(x[-1], x[-2])\n print(torch.cat((x[-1], x[-2]), dim=1))\n\n\ndef test13():\n x = torch.arange(12).float().reshape(3, 4)\n print(x)\n print(F.softmax(x, dim=0))\n print(F.softmax(x, dim=1))\n","sub_path":"SentimentAnalyzeII/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"364248437","text":"import random\nfrom difflib import SequenceMatcher\n\nfrom app.Crawler.crawlImage import *\nfrom app.Crawler.crawlWord import *\nfrom app.Dictionary.dictionary import *\nfrom app.Model.Exercise import *\nfrom app.misc.etendu import etendu\n\n# A tester!!!\nclass ExerciseGenerator:\n @staticmethod\n def proposeChoices(word):\n freq = findLemmeFreq(word)\n if freq > 500:\n k = 300\n elif freq > 100:\n k = 100\n else:\n k = 5\n with connectBDD() as session:\n wordModel = session.query(Word).filter(Word.ortho == word)\n words = []\n for item in wordModel:\n words.append(item)\n if (len(words) > 0):\n maxFreq = words[0].freqlemfilms\n maxWord = words[0]\n for item in words:\n if item.freqlemfilms > maxFreq:\n maxFreq = item.freqlemfilms\n maxWord = item\n nature = maxWord.cgram\n proposition = session.query(Word).filter(Word.cgram == nature).filter(Word.freqlemfilms < (freq + k)).filter(\n Word.freqlemfilms > (freq - k))\n lemmes = []\n for item in proposition:\n lemmes.append(item.lemme)\n random.shuffle(lemmes)\n propositions = [lemmes[0], lemmes[1], lemmes[2], lemmes[3]]\n return propositions\n else:\n propositions = ['undefined', 'undefined', 'undefined', 'undefined']\n return propositions\n\n def generateExercise(self, word):\n pass\n\n\nclass BlankFillingExerciseGenerator(ExerciseGenerator):\n @staticmethod\n def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n def generateExercise(self, word):\n phrase = crawlPhrase(word)\n if phrase:\n wordList = phrase.split(' ')\n similarList = []\n for item in wordList:\n similarList.append(self.similar(item, word))\n valeurMax = max(similarList)\n position = similarList.index(valeurMax)\n wordList[position] = '_____'\n topic = ' '.join(wordList)\n choices = self.proposeChoices(word)\n ran = int(random.random() * 100) % 4\n choices[ran] = word\n return Exercise(topic, choices, ran)\n\n\nclass SynonymeExerciseGenerator(ExerciseGenerator):\n def generateExercise(self, word):\n synonyme = crawlSynonyme(word)\n if synonyme:\n topic = \"Choisissez le mot qui a le même sens que '\" + word + \"'\"\n choices = self.proposeChoices(word)\n ran = int(random.random() * 100) % 4\n choices[ran] = synonyme\n return Exercise(topic, choices, ran)\n\n\nclass AntonymeExerciseGenerator(ExerciseGenerator):\n def generateExercise(self, word):\n antonyme = crawlAntonyme(word)\n if antonyme:\n topic = \"Choisissez le mot qui est le contraire de '\" + word + \"'\"\n choices = self.proposeChoices(word)\n ran = int(random.random() * 100) % 4\n choices[ran] = antonyme\n return Exercise(topic, choices, ran)\n\n\nclass ImageExerciseGenerator(ExerciseGenerator):\n def generateExercise(self, word):\n topic = \"Choisissez l'image qui correspond à '\" + word + \"'\"\n choices = self.proposeChoices(word)\n ran = int(random.random() * 100) % 4\n choices[ran] = word\n images = []\n for choice in choices:\n choiceImage = crawlImage(choice)\n images.append(choiceImage)\n return Exercise(topic, images, ran)\n\n\n\nclass RandomExerciseGenerator(ExerciseGenerator):\n types = ['BlankFillingExerciseGenerator',\n 'SynonymeExerciseGenerator',\n 'AntonymeExerciseGenerator',\n 'ImageExerciseGenerator']\n\n def generateExercise(self, word):\n while True:\n ran = int(random.random() * 100) % len(self.types)\n generator = globals()[self.types[ran]]()\n exercise = generator.generateExercise(word)\n if exercise:\n return exercise\n else:\n continue\n\n\n#faut ecrire une classe d'adapteur!!!!!\nclass TotalRandomExerciseGenerator(RandomExerciseGenerator):\n def choisirMot(self):\n length = len(etendu)\n ran = int(random.random() * 1000) % (length-2)\n upper = etendu[ran]\n lower = etendu[ran + 1]\n with connectBDD() as session:\n words = session.query(Word).filter(Word.freqlemfilms >= lower) \\\n .filter(Word.freqlemfilms <= upper)\n lemmes = []\n for word in words:\n lemmes.append(word.lemme)\n return random.choice(lemmes)\n\n def generateRandomExercise(self):\n word = self.choisirMot()\n return self.generateExercise(word)\n\n\n\n#a = RandomExerciseGenerator()\n\n#ERROR!!!!!!!!!\nif __name__ == '__main__':\n\n b = RandomExerciseGenerator()\n exercise = b.generateExercise('content')\n print(exercise)\n #print(a.generateExercise('content'))\n\n\n","sub_path":"app/Exercise/ExerciseGenerator.py","file_name":"ExerciseGenerator.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"268224995","text":"# -*- mode: python ; coding: utf-8 -*-\nfrom kivy_deps import sdl2, glew\n\nblock_cipher = None\n\n\na = Analysis(['main.py'],\n pathex=['/home/gef/Documents/Hobbes-many/hobbes_debug/hobbes_python'],\n binaries=[],\n datas=[('media', './media'), ('hobbes.kv', '.')],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[\n\n \"ffpyplayer\",\n \"xmlrpc\",\n \"defusedxml\",\n \"PIL\",\n \"docutils\",\n \"ssl\",\n \"crypto\",\n \"buildozer\",\n \"cairocffi\",\n \"CairoSVG\",\n \"m2r\",\n \"Pygments\",\n \"pyperclip\",\n \"urllib3\",\n \"zipp\",\n \"lib2to3\",\n \"virtualenv\",\n \"webencodings\",\n\n 'pygame.cdrom',\n 'pygame.cursors',\n 'pygame.display',\n 'pygame.draw',\n 'pygame.event',\n 'pygame.examples',\n 'pygame.font',\n 'pygame.freetype',\n 'pygame.gfxdraw',\n 'pygame.image',\n 'pygame.joystick',\n 'pygame.key',\n 'pygame.locals',\n 'pygame.mouse',\n 'pygame.mixer.music',\n 'pygame.overlay',\n 'pygame.pixelarray',\n 'pygame.scrap',\n 'pygame.sndarray',\n 'pygame.sprite',\n 'pygame.surface',\n 'pygame.surfarray',\n 'pygame.tests',\n 'pygame.time',\n 'pygame.transform',\n\n 'kivy.core.audio',\n\n 'kivy.core.audio.audio_avplayer',\n 'kivy.core.audio.audio_ffpyplayer',\n 'kivy.core.audio.audio_gstplayer',\n 'kivy.core.audio.audio_pygame',\n 'kivy.core.audio.audio_sdl2',\n\n 'kivy.core.camera',\n\n 'kivy.core.camera.camera_android',\n 'kivy.core.camera.camera_gi',\n 'kivy.core.camera.camera_opencv',\n 'kivy.core.camera.camera_picamera',\n\n 'kivy.core.clipboard.clipboard_android',\n 'kivy.core.clipboard.clipboard_dbusklipper',\n 'kivy.core.clipboard.clipboard_dummy',\n 'kivy.core.clipboard.clipboard_gtk3',\n 'kivy.core.clipboard.clipboard_nspaste',\n 'kivy.core.clipboard.clipboard_pygame',\n 'kivy.core.clipboard.clipboard_winctypes',\n\n 'kivy.core.spelling',\n\n 'kivy.core.spelling.spelling_enchant',\n 'kivy.core.spelling.spelling_osxappkit',\n\n 'kivy.core.text._text_pango',\n 'kivy.core.text.text_pango',\n 'kivy.core.text.text_pil',\n 'kivy.core.text.text_pygame',\n\n 'kivy.core.image.img_dds',\n 'kivy.core.image.img_ffpyplayer',\n 'kivy.core.image.img_gif',\n 'kivy.core.image.img_pil',\n 'kivy.core.image.img_pygame',\n 'kivy.core.image.img_tex',\n\n 'kivy.graphics.cgl_backend.cgl_debug',\n 'kivy.graphics.cgl_backend.cgl_mock',\n\n 'kivy.graphics.svg',\n 'kivy.graphics.tesselator',\n\n 'xml.etree.cElementTree'\n ],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n [],\n exclude_binaries=True,\n name='hobbes',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n console=True )\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n\t\t\t *[Tree(p) for p in (sdl2.dep_bins + glew.dep_bins)],\n strip=False,\n upx=True,\n upx_exclude=[],\n name='main')\n","sub_path":"main_windows.spec","file_name":"main_windows.spec","file_ext":"spec","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"650929244","text":"r\"\"\"Example of using mlbench : CIFAR10 + Resnet20 + MPI\"\"\"\n\nimport argparse\nimport torch.distributed as dist\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.nn.modules.loss import CrossEntropyLoss\n\nfrom mlbench_core.utils.pytorch import initialize_backends\nfrom mlbench_core.evaluation.pytorch.metrics import TopKAccuracy\nfrom mlbench_core.models.pytorch.resnet import ResNetCIFAR\nfrom mlbench_core.lr_scheduler.pytorch.lr import MultiStepLR\nfrom mlbench_core.controlflow.pytorch import TrainValidation\nfrom mlbench_core.utils.pytorch.checkpoint import Checkpointer\nfrom mlbench_core.dataset.imagerecognition.pytorch import CIFAR10V1, partition_dataset_by_rank\n\n\ndef main(run_id):\n r\"\"\"Main logic.\"\"\"\n num_parallel_workers = 2\n dataset_root = '/datasets/torch/cifar10'\n use_cuda = True\n batch_size = 128\n\n initialize_backends(comm_backend='mpi',\n logging_level='INFO',\n logging_file='/mlbench.log',\n use_cuda=use_cuda,\n seed=42,\n cudnn_deterministic=False,\n ckpt_run_dir='/checkpoints',\n delete_existing_ckpts=False)\n\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n\n model = ResNetCIFAR(resnet_size=20,\n bottleneck=False,\n num_classes=10,\n version=1)\n\n optimizer = optim.SGD(model.parameters(),\n lr=0.1,\n momentum=0.9,\n weight_decay=1e-4,\n nesterov=True)\n\n # Create a learning rate scheduler for an optimizer\n scheduler = MultiStepLR(optimizer,\n milestones=[82, 109],\n gamma=0.1)\n\n # A loss_function for computing the loss\n loss_function = CrossEntropyLoss()\n\n if use_cuda:\n model = model.cuda()\n loss_function = loss_function.cuda()\n\n # Metrics like Top 1/5 Accuracy\n metrics = [TopKAccuracy(topk=1), TopKAccuracy(topk=5)]\n\n train_set = CIFAR10V1(dataset_root, train=True, download=True)\n val_set = CIFAR10V1(dataset_root, train=False, download=True)\n\n train_set = partition_dataset_by_rank(train_set, rank, world_size)\n\n train_loader = DataLoader(\n train_set, batch_size=batch_size, shuffle=True,\n num_workers=num_parallel_workers,\n pin_memory=use_cuda, drop_last=False)\n\n val_loader = DataLoader(\n val_set, batch_size=batch_size, shuffle=False,\n num_workers=num_parallel_workers,\n pin_memory=use_cuda, drop_last=False)\n\n checkpointer = Checkpointer(\n ckpt_run_dir='/checkpoints',\n rank=rank,\n checkpoint_all=True)\n\n controlflow = TrainValidation(\n model=model,\n optimizer=optimizer,\n loss_function=loss_function,\n metrics=metrics,\n scheduler=scheduler,\n batch_size=batch_size,\n train_epochs=164,\n rank=rank,\n world_size=world_size,\n run_id=run_id,\n dtype='fp32',\n validate=True,\n schedule_per='epoch',\n checkpoint=checkpointer,\n transform_target_type=None,\n average_models=True,\n use_cuda=True,\n max_batch_per_epoch=None)\n\n controlflow.run(\n dataloader_train=train_loader,\n dataloader_val=val_loader,\n dataloader_train_fn=None,\n dataloader_val_fn=None,\n resume=False,\n repartition_per_epoch=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Process run parameters')\n parser.add_argument('--run_id', type=str, help='The id of the run')\n args = parser.parse_args()\n main(args.run_id)\n","sub_path":"examples/resnet_cifar10_mpi.py","file_name":"resnet_cifar10_mpi.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"469143481","text":"import json\nimport time\nimport sys\n\nfrom urllib.request import urlopen as uReq\n\n\ndef fetchPrice():\n gbpjpy_url = \"http://finance.google.com/finance/info?client=ig&q=CURRENCY:GBPJPY\"\n\n uClient = uReq(gbpjpy_url)\n page = uClient.read()\n uClient.close()\n data = json.loads(page[6:283].decode())\n price = data[\"l\"]\n time = data[\"lt_dts\"]\n\n return (time,price)\n\np0 = 0\nwhile True:\n t, p = fetchPrice() # time and price\n if(p!=p0):\n #sys.stdout.write(\"Time: {}, Price: {}\".format(t, p))\n print(\"Time: {}, Price: {}\".format(t, p))\n\n time.sleep(60)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"614912297","text":"# t = open('telecode','w+')\n# with open('station_name','r') as f:\n# content = f.read()\n# list1 = content.split('@')\n# for l in list1:\n# print(l)\n# list2 = l.split('|')\n# #print(list2[1],list2[2])\n# string = list2[5]+\":\"+list2[1]+\":\"+list2[2]+\":\"+ list2[0]+\":\"+ list2[3]+\":\"+ list2[4]+\"\\n\"\n# t.writelines(string)\n#\n# f.close()\n\ndef get_telecode(station):\n with open('train/util/telecode','r') as t:\n list = t.readlines()\n for l in list:\n info = l.split(':')\n if info[1] == station:\n\n return info[2]\n","sub_path":"chep/train/util/get_telecode.py","file_name":"get_telecode.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"172849806","text":"import os\nimport secrets\nfrom PIL import Image\nfrom flask import render_template, url_for, flash, redirect, request\nfrom gymmate import app, db, bcrypt\nfrom gymmate.forms import RegistrationForm, LoginForm, UpdateAccountForm\nfrom gymmate.models import User, Post\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom subprocess import call\nfrom werkzeug.utils import secure_filename\nimport cv2\nimport numpy as np\nimport socket\nimport sys\nimport pickle\nimport struct\nimport time\n\n\n\n#------------------------------------------File Upload Code-------------------------------------------\n\n\nALLOWED_EXTENSIONS = set(['mp4'])\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n#@app.route('/uploader')\n#def upload_form():\n #return render_template('upload.html')\n\n\n@app.route('/uploader', methods=['POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No file selected for uploading')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n flash('File successfully uploaded')\n return redirect('/')\n else:\n flash('Allowed file types is mp4')\n return redirect(request.url)\n\n#--------------------------------------------------------------------------------------------------------------\n\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\n@login_required\ndef home():\n return render_template('home.html',title='Home')\n\n\n \n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n flash('Login Unsuccessful. Please check email and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\n@app.route(\"/video\")\ndef video():\n\treturn render_template('video.html',title='Videos')\n\n\n@app.route(\"/sending\")\ndef sending():\n return render_template('sending.html',title='Sending')\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash('Your account has been created! You are now able to log in', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n\n\ndef save_picture(form_picture):\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(form_picture.filename)\n picture_fn = random_hex + f_ext\n picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)\n\n output_size = (125, 125)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n i.save(picture_path)\n\n return picture_fn\n\n\n@app.route(\"/account\", methods=['GET', 'POST'])\n@login_required\ndef account():\n form = UpdateAccountForm()\n if form.validate_on_submit():\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n current_user.image_file = picture_file\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('Your account has been updated!', 'success')\n return redirect(url_for('account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n image_file = url_for('static', filename='profile_pics/' + current_user.image_file)\n return render_template('account.html', title='Account',\n image_file=image_file, form=form)\n\n\n@app.route(\"/done\")\n@login_required\n\ndef display():\n return redirect('static/output.mp4')\n\n\n#-----------------------Python Code-------------------------------------------# \n\n\n\n@app.route(\"/user\",methods=['GET','POST'])\n@login_required\ndef returnuser(): \n #print(current_user.username)\n #print(current_user.email)\n call([\"python\",\"gymmate/userid.py\"])\n return redirect(\"http://127.0.0.1:5000/\", code=302)\n\n\n@app.route(\"/client\")\n@login_required\ndef client():\n \n try:\n cap = cv2.VideoCapture(1)\n clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n clientsocket.connect(('127.0.0.1', 8083))\n print(current_user.username)\n start=time.time()\n while (time.time()-start)<=120:\n ret,frame = cap.read()\n cv2.imshow('Recording frame',frame)\n cv2.waitKey(1)\n data = pickle.dumps(frame)\n clientsocket.sendall(struct.pack(\"L\", len(data)) + data)\n cap.release()\n\n except:\n print(\"Exception \")\n \n return redirect(\"http://127.0.0.1:5000/sending\", code=302)\n\n\n\n@app.route(\"/Server\")\n@login_required\ndef server():\n call(['python','gymmate/server-video.py'])\n print('Server Running')\n try:\n path1=r\"C:\\Users\\ragha\\Desktop\\Full deployement Code\\Gym Mate\\output.mp4\"\n path2=r\"C:\\Users\\ragha\\Desktop\\Full deployement Code\\Gym Mate\\gymmate\\static\\output.mp4\"\n os.rename(path1,path2)\n shutil.move(path1,path2)\n os.replace(path1,path2)\n except:\n print(\"An exception occured\")\n\n return redirect(\"http://127.0.0.1:5000/\", code=302)","sub_path":"gymmate/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"356712544","text":"from django.http import HttpResponse\nfrom django.shortcuts import render_to_response, redirect\nfrom django.template.context_processors import csrf\nfrom django.utils import translation\nfrom gallery.models import *\n\nfrom .models import *\nfrom contacts.models import Contacts\nfrom feedbacks.models import Feedback\nfrom products.models import ProductBrand, Product\nfrom services.models import Service, Plus\nfrom vacancies.models import Vacancy\nfrom psychologist.models import Psychologist\nfrom project50.models import Pluses, Project\nfrom vacancies.models import Applicant\n# Create your views here.\n\n\ndef main_view(request, context={}):\n context.update(csrf(request))\n context['all_services'] = Service.objects.all()\n context['socials'] = Social.objects.all()\n context['sliders'] = Service.objects.filter(is_slider=True)\n context['sponsors'] = Sponsor.objects.all()\n context['pluses'] = Plus.objects.all()\n try:\n contacts = Contacts.objects.get()\n except Contacts.MultipleObjectsReturned:\n contacts = Contacts.objects.all().last()\n context['contacts'] = contacts\n context['feedbacks'] = Feedback.objects.filter(is_active=True)\n context['products'] = Product.objects.all()\n context['services'] = Service.objects.filter(is_slider=False)\n context['vacancies'] = Vacancy.objects.all()\n context['brand'] = ProductBrand.objects.all()\n context['psychologist'] = Psychologist.objects.all()\n try:\n project = Project.objects.get()\n except Project.MultipleObjectsReturned:\n project = Project.objects.all().last()\n context['project'] = project\n context['project_plus'] = Pluses.objects.all()\n context['applicant'] = Applicant.objects.order_by('-pub_date')[0:3]\n try:\n context['service_buro'] = ServiceBuro.objects.get()\n except ServiceBuro.MultipleObjectsReturned:\n context['service_buro'] = ServiceBuro.objects.last()\n except ServiceBuro.DoesNotExist:\n pass\n try:\n context['psycho_text'] = PsychoText.objects.get()\n except PsychoText.MultipleObjectsReturned:\n context['psycho_text'] = PsychoText.objects.last()\n except PsychoText.DoesNotExist:\n pass\n try:\n context['feedback_text'] = FeedbackText.objects.get()\n except FeedbackText.MultipleObjectsReturned:\n context['feedback_text'] = FeedbackText.objects.last()\n except FeedbackText.DoesNotExist:\n pass\n return render_to_response('index.html', context)\n\n\ndef gallery(request, context={}):\n context.update(csrf(request))\n context['all_services'] = Service.objects.all()\n context['socials'] = Social.objects.all()\n context['sliders'] = Service.objects.filter(is_slider=True)\n context['sponsors'] = Sponsor.objects.all()\n context['pluses'] = Plus.objects.all()\n try:\n contacts = Contacts.objects.get()\n except Contacts.MultipleObjectsReturned:\n contacts = Contacts.objects.all().last()\n context['contacts'] = contacts\n context['event'] = Event.objects.all()\n context['images'] = Images.objects.all()\n return render_to_response('gallery.html', context)\n\n\ndef friends(request, context={}):\n context.update(csrf(request))\n context['all_services'] = Service.objects.all()\n context['socials'] = Social.objects.all()\n context['sponsors'] = Sponsor.objects.all()\n try:\n contacts = Contacts.objects.get()\n except Contacts.MultipleObjectsReturned:\n contacts = Contacts.objects.all().last()\n context['contacts'] = contacts\n return render_to_response('friends.html', context)","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"481358286","text":"from dataSource import Dataset,LEM,CampoVerde,DataSource,SARSource\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cycler import cycler\nclass DatasetStats():\n def __init__(self,dataset):\n self.dataset=dataset\n \n def calcAverageTimeseries(self,ims,mask):\n time_delta=self.dataset.getTimeDelta()\n print(time_delta)\n for channel in range(self.dataset.getBandN()):\n averageTimeseries=[]\n for t_step in range(0,self.dataset.t_len):\n im=ims[t_step,:,:,channel]\n #mask_t=mask[t_step]\n \n #print(\"im shape: {}, mask shape: {}\".format(im.shape,mask.shape))\n im=im.flatten()\n mask_t=mask.flatten()\n #print(\"im shape: {}, mask shape: {}\".format(im.shape,mask.shape))\n\n im=im[mask_t==1] # only train and test pixels (1 and 2)\n averageTimeseries.append(np.average(im))\n averageTimeseries=np.asarray(averageTimeseries)\n plt.figure(channel)\n fig, ax = plt.subplots()\n ax.plot(time_delta,averageTimeseries,marker=\".\")\n ax.set(xlabel='time ID', ylabel='band',title='Image average over time')\n plt.grid()\n print('averageTimeseries',averageTimeseries)\n plt.show()\n def calcAverageTimeseriesPerClass(self,ims,mask,label):\n print(\"Label shape\",label.shape)\n time_delta=self.dataset.getTimeDelta()\n print(time_delta)\n for channel in range(self.dataset.getBandN()):\n averageTimeseries=[]\n plt.figure(channel)\n fig, ax = plt.subplots()\n ax.set_prop_cycle(cycler('color', ['c', 'm', 'y', 'k','b', 'g', 'r']))\n\n# for clss,clss_name in zip([1,2,3,9],['soybean','maize','cotton','soil']):\n for clss,clss_name in zip([1,2,3,7,13],['soybean','maize','cotton','millet','soil']):\n# for clss,clss_name in zip(range(self.dataset.getClassN()),self.dataset.getClassList()):\n averageTimeseries=[]\n for t_step in range(0,self.dataset.t_len):\n # check available classes\n \n im=ims[t_step,:,:,channel]\n label_t=label[t_step]# label is (t,h,w,channel)\n label_t_unique=np.unique(label_t)\n if not (clss in label_t_unique):\n averageTimeseries.append(np.nan)\n continue\n #print(\"Label t shape\",label_t.shape)\n #mask_t=mask[t_step]\n \n #print(\"im shape: {}, mask shape: {}\".format(im.shape,mask.shape))\n im=im.flatten()\n mask=mask.flatten()\n label_t=label_t.flatten()\n #print(\"im shape: {}, mask shape: {}\".format(im.shape,mask.shape))\n \n # only train\n im=im[mask==1]\n label_t=label_t[mask==1]\n\n\n im=im[label_t==clss] # only train and test pixels (1 and 2) from clss\n averageTimeseries.append(np.average(im))\n averageTimeseries=np.asarray(averageTimeseries)\n ax.plot(time_delta,averageTimeseries,marker=\".\",label=clss_name)\n ax.legend()\n print('averageTimeseries',averageTimeseries)\n ax.set(xlabel='time ID', ylabel='band',title='Image average over time')\n plt.grid()\n #\n plt.show()\n \n\n\n\n","sub_path":"dataset/dataset/patches_extract_script/dataset_stats.py","file_name":"dataset_stats.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"461858106","text":"# non-resonant leptogenesis with two decaying sterile neutrino using the Boltzmann equations. Note these kinetic equations do not include off diagonal flavour oscillations. Equations from 1112.4528\nimport ulysses\nimport numpy as np\nfrom odeintw import odeintw\n\nfrom ulysses.numba import jit\n\n@jit\ndef fast_RHS(y0,eps1tt,eps1mm,eps1ee,eps1tm,eps1te,eps1me,eps2tt,eps2mm,eps2ee,eps2tm,eps2te,eps2me,d1,d2,w1,w2,n1eq,n2eq,C):\n N1, N2, Ntt, Nbb = y0\n c1t,c1m,c1e,c2t,c2m,c2e = C\n c1tc = np.conjugate(c1t)\n c1mc = np.conjugate(c1m)\n c1ec = np.conjugate(c1e)\n\n c2tc = np.conjugate(c2t)\n c2mc = np.conjugate(c2m)\n c2ec = np.conjugate(c2e)\n\n #define the different RHSs for each equation\n rhs1 = -d1*(N1-n1eq)\n rhs2 = -d2*(N2-n2eq)\n rhs3 = eps1tt*d1*(N1-n1eq)+eps2tt*d2*(N2-n2eq)-0.5*w1*(2*c1t*c1tc*Ntt) -0.5*w2*(2*c2t*c2tc*Ntt)\n rhs4 = eps1mm*d1*(N1-n1eq)+eps2mm*d2*(N2-n2eq)-0.5*w1*(2*(c1m*c1mc+c1e*c1ec)*Nbb) -0.5*w2*(2*(c2m*c2mc+c2e*c2ec)*Nbb)\n\n RHStemp = [rhs1, rhs2, rhs3, rhs4]\n return RHStemp\n\nclass EtaB_2BE2F(ulysses.ULSBase):\n \"\"\"\n Two-flavoured Boltzmann equation (BE) with two decaying steriles. See arxiv:1112.4528.\n Note these kinetic equations do not include off diagonal flavour\n oscillations.\n \"\"\"\n\n def shortname(self): return \"2BE2F\"\n def flavourindices(self): return [2, 3]\n def flavourlabels(self): return [\"$N_{\\\\tau\\\\tau}$\", \"$N_{\\\\tau\\perp\\\\tau\\perp}$\"]\n\n def RHS(self, y0, z, ETA, _C, K):\n eps1tt,eps1mm,eps1ee,eps1tm,eps1te,eps1me,eps2tt,eps2mm,eps2ee,eps2tm,eps2te,eps2me = ETA\n k1term,k2term = K\n\n if z != self._currz or z == self.zmin:\n self._d1 = np.real(self.D1(k1term, z))\n self._w1 = np.real(self.W1(k1term, z))\n self._d2 = np.real(self.D2(k2term, z))\n self._w2 = np.real(self.W2(k2term, z))\n self._n1eq = self.N1Eq(z)\n self._n2eq = self.N2Eq(z)\n self._currz=z\n\n from ulysses.numba import List\n C=List()\n [C.append(c) for c in _C]\n\n return fast_RHS(y0,eps1tt,eps1mm,eps1ee,eps1tm,eps1te,eps1me,eps2tt,eps2mm,eps2ee,eps2tm,eps2te,eps2me,self._d1,self._d2,self._w1,self._w2,self._n1eq,self._n2eq, C)\n\n\n\n @property\n def EtaB(self):\n #Define fixed quantities for BEs\n _ETA = [\n np.real(self.epsilon(0,1,2,2)),\n np.real(self.epsilon(0,1,2,1)),\n np.real(self.epsilon(0,1,2,0)),\n np.real(self.epsilon(1,0,2,2)),\n np.real(self.epsilon(1,0,2,1)),\n np.real(self.epsilon(1,0,2,0))\n ]\n\n _HT = [\n np.real(self.hterm(2,0)),\n np.real(self.hterm(1,0)),\n np.real(self.hterm(0,0)),\n np.real(self.hterm(2,1)),\n np.real(self.hterm(1,1)),\n np.real(self.hterm(0,1))\n ]\n\n _K = [np.real(self.k1), np.real(self.k2)]\n y0 = np.array([0+0j,0+0j,0+0j,0+0j], dtype=np.complex128)\n\n _ETA = [\n np.real(self.epsilon1ab(2,2)),\n np.real(self.epsilon1ab(1,1)),\n np.real(self.epsilon1ab(0,0)),\n self.epsilon1ab(2,1) ,\n self.epsilon1ab(2,0) ,\n self.epsilon1ab(1,0),\n np.real(self.epsilon2ab(2,2)),\n np.real(self.epsilon2ab(1,1)),\n np.real(self.epsilon2ab(0,0)),\n self.epsilon2ab(2,1) ,\n self.epsilon2ab(2,0) ,\n self.epsilon2ab(1,0),\n ]\n _C = [ self.c1a(2), self.c1a(1), self.c1a(0),\n self.c2a(2), self.c2a(1), self.c2a(0)]\n _K = [np.real(self.k1), np.real(self.k2)]\n\n ys = odeintw(self.RHS, y0, self.zs, args = tuple([_ETA, _C, _K]))\n self.setEvolData(ys)\n return self.ys[-1][-1]\n","sub_path":"ulysses/etab2BE2F.py","file_name":"etab2BE2F.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"117790703","text":"#! python3\n# carSim.py - Traffic light simulation to show where to add assertion to debug program quickly.\n\n# ns = north south, ew = east west\nmarket_2nd = {'ns': 'green', 'ew': 'red'}\nmission_16th = {'ns': 'red', 'ew': 'green'}\n\ndef switchLights(stoplight):\n for key in stoplight.keys():\n if stoplight[key] == 'green':\n stoplight[key] = 'yellow'\n elif stoplight[key] == 'yellow':\n stoplight[key] = 'red'\n elif stoplight[key] == 'red':\n stoplight[key] = 'green'\n\n # added to debug the virtual cars crashing issue....\n assert 'red' in stoplight.values(), 'Neither light is red! ' + str(stoplight)\nswitchLights(market_2nd)\n","sub_path":"carSim.py","file_name":"carSim.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"538247593","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport pickle\nfrom math import sqrt\n\nimport datetime\nimport time\nimport collections\nimport operator\n\nimport edtime\nimport edrconfig\nimport edrlog\nimport lrucache\nimport edsmserver\nfrom edentities import EDBounty\nfrom edri18n import _, _c, _edr\n\nEDRLOG = edrlog.EDRLog()\n\nclass EDRSystems(object):\n EDR_SYSTEMS_CACHE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), 'cache/systems.v2.p')\n EDSM_SYSTEMS_CACHE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), 'cache/edsm_systems.v2.p')\n EDR_NOTAMS_CACHE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), 'cache/notams.v2.p')\n EDR_SITREPS_CACHE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), 'cache/sitreps.v2.p')\n EDR_TRAFFIC_CACHE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), 'cache/traffic.v2.p')\n EDR_CRIMES_CACHE = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), 'cache/crimes.v2.p')\n\n def __init__(self, server):\n edr_config = edrconfig.EDRConfig()\n\n try:\n with open(self.EDR_SYSTEMS_CACHE, 'rb') as handle:\n self.systems_cache = pickle.load(handle)\n except:\n self.systems_cache = lrucache.LRUCache(edr_config.lru_max_size(),\n edr_config.systems_max_age())\n\n try:\n with open(self.EDR_NOTAMS_CACHE, 'rb') as handle:\n self.notams_cache = pickle.load(handle)\n except:\n self.notams_cache = lrucache.LRUCache(edr_config.lru_max_size(),\n edr_config.notams_max_age())\n\n try:\n with open(self.EDR_SITREPS_CACHE, 'rb') as handle:\n self.sitreps_cache = pickle.load(handle)\n except:\n self.sitreps_cache = lrucache.LRUCache(edr_config.lru_max_size(),\n edr_config.sitreps_max_age())\n\n try:\n with open(self.EDR_CRIMES_CACHE, 'rb') as handle:\n self.crimes_cache = pickle.load(handle)\n except:\n self.crimes_cache = lrucache.LRUCache(edr_config.lru_max_size(),\n edr_config.crimes_max_age())\n\n try:\n with open(self.EDR_TRAFFIC_CACHE, 'rb') as handle:\n self.traffic_cache = pickle.load(handle)\n except:\n self.traffic_cache = lrucache.LRUCache(edr_config.lru_max_size(),\n edr_config.traffic_max_age())\n\n try:\n with open(self.EDSM_SYSTEMS_CACHE, 'rb') as handle:\n self.edsm_systems_cache = pickle.load(handle)\n except:\n self.edsm_systems_cache = lrucache.LRUCache(edr_config.lru_max_size(),\n edr_config.edsm_systems_max_age())\n\n self.reports_check_interval = edr_config.reports_check_interval()\n self.notams_check_interval = edr_config.notams_check_interval()\n self.timespan = edr_config.sitreps_timespan()\n self.server = server\n self.edsm_server = edsmserver.EDSMServer()\n\n def system_id(self, star_system, may_create=False):\n if not star_system:\n return None\n sid = self.systems_cache.get(star_system.lower())\n if sid:\n EDRLOG.log(u\"System {} is in the cache with id={}\".format(star_system, sid), \"DEBUG\")\n return sid\n\n sid = self.server.system_id(star_system, may_create)\n if sid:\n self.systems_cache.set(star_system.lower(), sid)\n EDRLOG.log(u\"Cached {}'s id={}\".format(star_system, sid), \"DEBUG\")\n return sid\n\n return None\n\n def persist(self):\n with open(self.EDR_SYSTEMS_CACHE, 'wb') as handle:\n pickle.dump(self.systems_cache, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(self.EDR_NOTAMS_CACHE, 'wb') as handle:\n pickle.dump(self.notams_cache, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n with open(self.EDR_SITREPS_CACHE, 'wb') as handle:\n pickle.dump(self.sitreps_cache, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n with open(self.EDR_TRAFFIC_CACHE, 'wb') as handle:\n pickle.dump(self.traffic_cache, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(self.EDR_CRIMES_CACHE, 'wb') as handle:\n pickle.dump(self.crimes_cache, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n with open(self.EDSM_SYSTEMS_CACHE, 'wb') as handle:\n pickle.dump(self.edsm_systems_cache, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def distance(self, source_system, destination_system):\n if source_system == destination_system:\n return 0\n source = self.edsm_systems_cache.get(source_system.lower())\n destination = self.edsm_systems_cache.get(destination_system.lower())\n if not source:\n source = self.edsm_server.system(source_system)\n if source:\n self.edsm_systems_cache.set(source_system.lower(), source)\n if not destination:\n destination = self.edsm_server.system(destination_system)\n if destination:\n self.edsm_systems_cache.set(destination_system.lower(), destination)\n if source and destination:\n source_coords = source[0][\"coords\"]\n dest_coords = destination[0][\"coords\"] \n return sqrt((dest_coords[\"x\"] - source_coords[\"x\"])^2 + (dest_coords[\"y\"] - source_coords[\"y\"])^2 + (dest_coords[\"z\"] - source_coords[\"z\"])^2)\n raise ValueError('Unknown system')\n\n def timespan_s(self):\n return edtime.EDTime.pretty_print_timespan(self.timespan, short=True, verbose=True)\n\n def crimes_t_minus(self, star_system):\n if self.has_sitrep(star_system):\n system_reports = self.sitreps_cache.get(self.system_id(star_system))\n if \"latestCrime\" in system_reports:\n return edtime.EDTime.t_minus(system_reports[\"latestCrime\"])\n return None\n\n def traffic_t_minus(self, star_system):\n if self.has_sitrep(star_system):\n system_reports = self.sitreps_cache.get(self.system_id(star_system))\n if \"latestTraffic\" in system_reports:\n return edtime.EDTime.t_minus(system_reports[\"latestTraffic\"])\n return None\n \n def has_sitrep(self, star_system):\n if not star_system:\n return False\n self.__update_if_stale()\n sid = self.system_id(star_system)\n return self.sitreps_cache.has_key(sid)\n\n def has_notams(self, star_system):\n self.__update_if_stale()\n sid = self.system_id(star_system)\n return self.notams_cache.has_key(sid)\n\n def __has_active_notams(self, system_id):\n self.__update_if_stale()\n if not self.notams_cache.has_key(system_id):\n return False\n return len(self.__active_notams_for_sid(system_id)) > 0\n\n def active_notams(self, star_system):\n if self.has_notams(star_system):\n return self.__active_notams_for_sid(self.system_id(star_system))\n return None\n\n def __active_notams_for_sid(self, system_id):\n active_notams = []\n entry = self.notams_cache.get(system_id)\n all_notams = entry.get(\"NOTAMs\", None)\n js_epoch_now = edtime.EDTime.js_epoch_now()\n for notam in all_notams:\n active = True\n if \"from\" in notam:\n active &= notam[\"from\"] <= js_epoch_now\n if \"until\" in notam:\n active &= js_epoch_now <= notam[\"until\"]\n if active and \"text\" in notam:\n EDRLOG.log(u\"Active NOTAM: {}\".format(notam[\"text\"]), \"DEBUG\")\n active_notams.append(_edr(notam[\"text\"]))\n elif active and \"l10n\" in notam:\n EDRLOG.log(u\"Active NOTAM: {}\".format(notam[\"l10n\"][\"default\"]), \"DEBUG\")\n active_notams.append(_edr(notam[\"l10n\"]))\n return active_notams\n\n def systems_with_active_notams(self):\n summary = []\n self.__update_if_stale()\n systems_ids = self.notams_cache.keys()\n for sid in systems_ids:\n entry = self.notams_cache.get(sid)\n if not entry:\n continue \n star_system = entry.get(\"name\", None)\n if star_system and self.__has_active_notams(sid):\n summary.append(star_system)\n\n return summary\n\n def has_recent_activity(self, system_name):\n return self.has_recent_traffic(system_name) or self.has_recent_crimes(system_name) or self.has_recent_outlaws(system_name)\n\n def systems_with_recent_activity(self):\n systems_with_recent_crimes = {}\n systems_with_recent_traffic = {}\n systems_with_recent_outlaws = {}\n self.__update_if_stale()\n systems_ids = self.sitreps_cache.keys()\n for sid in systems_ids:\n sitrep = self.sitreps_cache.get(sid)\n star_system = sitrep.get(\"name\", None) if sitrep else None\n if self.has_recent_outlaws(star_system):\n systems_with_recent_outlaws[star_system] = sitrep[\"latestOutlaw\"]\n elif self.has_recent_crimes(star_system):\n systems_with_recent_crimes[star_system] = sitrep[\"latestCrime\"]\n elif self.has_recent_traffic(star_system):\n systems_with_recent_traffic[star_system] = sitrep[\"latestTraffic\"]\n\n summary = {}\n summary_outlaws = []\n systems_with_recent_outlaws = sorted(systems_with_recent_outlaws.items(), key=lambda t: t[1], reverse=True)\n for system in systems_with_recent_outlaws:\n summary_outlaws.append(u\"{} {}\".format(system[0], edtime.EDTime.t_minus(system[1], short=True)))\n if summary_outlaws:\n # Translators: this is for the sitreps feature; it's the title of a section to show systems with sighted outlaws \n summary[_c(u\"sitreps section|Outlaws\")] = summary_outlaws\n\n summary_crimes = []\n systems_with_recent_crimes = sorted(systems_with_recent_crimes.items(), key=lambda t: t[1], reverse=True)\n for system in systems_with_recent_crimes:\n summary_crimes.append(u\"{} {}\".format(system[0], edtime.EDTime.t_minus(system[1], short=True)))\n if summary_crimes:\n # Translators: this is for the sitreps feature; it's the title of a section to show systems with reported crimes\n summary[_c(u\"sitreps section|Crimes\")] = summary_crimes\n\n summary_traffic = []\n systems_with_recent_traffic = sorted(systems_with_recent_traffic.items(), key=lambda t: t[1], reverse=True)\n for system in systems_with_recent_traffic:\n summary_traffic.append(u\"{} {}\".format(system[0], edtime.EDTime.t_minus(system[1], short=True)))\n if summary_traffic:\n # Translators: this is for the sitreps feature; it's the title of a section to show systems with traffic\n summary[_c(u\"sitreps section|Traffic\")] = summary_traffic\n\n return summary\n\n def has_recent_crimes(self, star_system):\n if self.has_sitrep(star_system):\n system_reports = self.sitreps_cache.get(self.system_id(star_system))\n if system_reports is None or \"latestCrime\" not in system_reports:\n return False\n\n edr_config = edrconfig.EDRConfig()\n return self.is_recent(system_reports[\"latestCrime\"],\n edr_config.crimes_recent_threshold())\n return False\n\n def has_recent_outlaws(self, star_system):\n if self.has_sitrep(star_system):\n system_reports = self.sitreps_cache.get(self.system_id(star_system))\n if system_reports is None or \"latestOutlaw\" not in system_reports:\n return False\n\n edr_config = edrconfig.EDRConfig()\n return self.is_recent(system_reports[\"latestOutlaw\"],\n edr_config.outlaws_recent_threshold())\n return False\n\n def recent_crimes(self, star_system):\n sid = self.system_id(star_system)\n if not sid:\n return None\n recent_crimes = None\n if self.has_recent_crimes(star_system):\n if not self.crimes_cache.has_key(sid) or (self.crimes_cache.has_key(sid) and self.crimes_cache.is_stale(sid)):\n recent_crimes = self.server.recent_crimes(sid, self.timespan)\n if recent_crimes:\n self.crimes_cache.set(sid, recent_crimes)\n else:\n recent_crimes = self.crimes_cache.get(sid)\n return recent_crimes\n\n def has_recent_traffic(self, star_system):\n if self.has_sitrep(star_system):\n system_reports = self.sitreps_cache.get(self.system_id(star_system))\n if system_reports is None or \"latestTraffic\" not in system_reports:\n return False\n\n edr_config = edrconfig.EDRConfig()\n return self.is_recent(system_reports[\"latestTraffic\"],\n edr_config.traffic_recent_threshold())\n return False\n\n def recent_traffic(self, star_system):\n sid = self.system_id(star_system)\n if not sid:\n return None\n recent_traffic = None\n if self.has_recent_traffic(star_system):\n if not self.traffic_cache.has_key(sid) or (self.traffic_cache.has_key(sid) and self.traffic_cache.is_stale(sid)):\n recent_traffic = self.server.recent_traffic(sid, self.timespan)\n if recent_traffic:\n self.traffic_cache.set(sid, recent_traffic)\n else:\n recent_traffic = self.traffic_cache.get(sid)\n return recent_traffic\n\n def summarize_recent_activity(self, star_system):\n #TODO refactor/simplify this mess ;)\n summary = {}\n wanted_cmdrs = {}\n if self.has_recent_traffic(star_system):\n summary_sighted = []\n recent_traffic = self.recent_traffic(star_system)\n if recent_traffic is not None: # Should always be true... simplify. TODO\n summary_traffic = collections.OrderedDict()\n for traffic in recent_traffic:\n previous_timestamp = summary_traffic.get(traffic[\"cmdr\"], None)\n if traffic[\"timestamp\"] < previous_timestamp:\n continue\n karma = traffic.get(\"karma\", 0)\n bounty = EDBounty(traffic.get(\"bounty\", 0))\n if karma < 0 or bounty.is_significant():\n wanted_cmdrs[traffic[\"cmdr\"]] = [ traffic[\"timestamp\"], karma ]\n else:\n summary_traffic[traffic[\"cmdr\"]] = traffic[\"timestamp\"]\n for cmdr in summary_traffic:\n summary_sighted.append(u\"{} {}\".format(cmdr, edtime.EDTime.t_minus(summary_traffic[cmdr], short=True)))\n if summary_sighted:\n # Translators: this is for the sitrep feature; it's a section to show sighted cmdrs in the system of interest\n summary[_c(u\"sitrep section|Sighted\")] = summary_sighted\n \n if self.has_recent_crimes(star_system):\n summary_interdictors = []\n summary_destroyers = []\n recent_crimes = self.recent_crimes(star_system)\n if recent_crimes is not None: # Should always be true... simplify. TODO\n summary_crimes = collections.OrderedDict()\n for crime in recent_crimes:\n lead_name = crime[\"criminals\"][0][\"name\"]\n if lead_name not in summary_crimes or crime[\"timestamp\"] > summary_crimes[lead_name][0]: \n summary_crimes[lead_name] = [crime[\"timestamp\"], crime[\"offence\"]]\n for criminal in crime[\"criminals\"]:\n previous_timestamp = wanted_cmdrs[criminal[\"name\"]][0] if criminal[\"name\"] in wanted_cmdrs else None\n if previous_timestamp > crime[\"timestamp\"]:\n continue\n karma = criminal.get(\"karma\", 0)\n bounty = EDBounty(traffic.get(\"bounty\", 0))\n if karma < 0 or bounty.is_significant():\n wanted_cmdrs[criminal[\"name\"]] = [ crime[\"timestamp\"], karma]\n for criminal in summary_crimes:\n if summary_crimes[criminal][1] == \"Murder\":\n summary_destroyers.append(u\"{} {}\".format(criminal, edtime.EDTime.t_minus(summary_crimes[criminal][0], short=True)))\n elif summary_crimes[criminal][1] in [\"Interdicted\", \"Interdiction\"]:\n summary_interdictors.append(u\"{} {}\".format(criminal, edtime.EDTime.t_minus(summary_crimes[criminal][0], short=True)))\n if summary_interdictors:\n # Translators: this is for the sitrep feature; it's a section to show cmdrs who have been reported as interdicting another cmdr in the system of interest\n summary[_c(u\"sitrep section|Interdictors\")] = summary_interdictors\n if summary_destroyers:\n # Translators: this is for the sitrep feature; it's a section to show cmdrs who have been reported as responsible for destroying the ship of another cmdr in the system of interest; use a judgement-neutral term\n summary[_c(u\"sitreps section|Destroyers\")] = summary_destroyers\n \n wanted_cmdrs = sorted(wanted_cmdrs.items(), key=operator.itemgetter(1), reverse=True)\n if wanted_cmdrs:\n summary_wanted = []\n for wanted in wanted_cmdrs:\n summary_wanted.append(u\"{} {}\".format(wanted[0], edtime.EDTime.t_minus(wanted[1][0], short=True)))\n if summary_wanted:\n # Translators: this is for the sitrep feature; it's a section to show wanted cmdrs who have been sighted in the system of interest\n summary[_c(u\"sitreps section|Outlaws\")] = summary_wanted\n\n return summary\n\n def is_recent(self, timestamp, max_age):\n if timestamp is None:\n return False\n return (edtime.EDTime.js_epoch_now() - timestamp) / 1000 <= max_age\n\n def evict(self, star_system):\n try:\n del self.systems_cache[star_system]\n except KeyError:\n pass\n\n\n def __are_reports_stale(self):\n return self.__is_stale(self.sitreps_cache.last_updated, self.reports_check_interval)\n\n def __are_notams_stale(self):\n return self.__is_stale(self.notams_cache.last_updated, self.notams_check_interval)\n\n def __is_stale(self, updated_at, max_age):\n if updated_at is None:\n return True\n now = datetime.datetime.now()\n epoch_now = time.mktime(now.timetuple())\n epoch_updated = time.mktime(updated_at.timetuple())\n\n return (epoch_now - epoch_updated) > max_age\n\n def __update_if_stale(self):\n updated = False\n if self.__are_reports_stale():\n missing_seconds = self.timespan\n now = datetime.datetime.now()\n if self.sitreps_cache.last_updated:\n missing_seconds = min(self.timespan, (now - self.sitreps_cache.last_updated).total_seconds())\n sitreps = self.server.sitreps(missing_seconds)\n if sitreps:\n for system_id in sitreps:\n self.sitreps_cache.set(system_id, sitreps[system_id])\n self.sitreps_cache.last_updated = now\n updated = True\n\n if self.__are_notams_stale():\n missing_seconds = self.timespan\n now = datetime.datetime.now()\n if self.notams_cache.last_updated:\n missing_seconds = min(self.timespan, (now - self.notams_cache.last_updated).total_seconds())\n\n notams = self.server.notams(missing_seconds)\n if notams:\n for system_id in notams:\n self.notams_cache.set(system_id, notams[system_id])\n self.notams_cache.last_updated = now\n updated = True\n\n return updated","sub_path":"edr/edrsystems.py","file_name":"edrsystems.py","file_ext":"py","file_size_in_byte":20339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"638107654","text":"from haven import haven_chk as hc\nfrom haven import haven_results as hr\nfrom haven import haven_utils as hu\nimport torch\nimport torchvision\nimport tqdm\nimport pandas as pd\nimport pprint\nimport itertools\nimport os\nimport pylab as plt\nimport exp_configs\nimport time\nimport numpy as np\n\nfrom src import models\nfrom src import datasets\nfrom src import utils as ut\nfrom torchsummary import summary\n\nimport argparse\n\nfrom torch.utils.data import sampler\nfrom torch.utils.data.sampler import RandomSampler\nfrom torch.backends import cudnn\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\nimport copy, shutil\ncudnn.benchmark = True\n\n\n\ndef test(exp_dict, savedir_base, datadir, num_workers=0, scan_id=None,savedir=''):\n # bookkeepting stuff\n # ==================\n model_path = os.path.join(savedir, 'model_best.pth')\n # Dataset\n # ==================\n # val set\n test_set = datasets.get_dataset(dataset_dict=exp_dict[\"dataset\"],\n split=\"val\",\n datadir=datadir,\n exp_dict=exp_dict,\n dataset_size=exp_dict['dataset_size'])\n if str(scan_id) != 'None':\n test_set.active_data = test_set.get_scan(scan_id)\n test_sampler = torch.utils.data.SequentialSampler(test_set)\n test_loader = DataLoader(test_set,\n sampler=test_sampler,\n batch_size=1,\n collate_fn=ut.collate_fn,\n num_workers=num_workers)\n\n # Model\n # ==================\n # chk = torch.load('best_model.ckpt')\n if torch.cuda.is_available():\n model = models.get_model(model_dict=exp_dict['model'],\n exp_dict=exp_dict,\n train_set=test_set).cuda()\n else:\n model = models.get_model(model_dict=exp_dict['model'],\n exp_dict=exp_dict,\n train_set=test_set).cpu()\n epoch = -1\n\n\n if str(model_path) != 'None':\n model_path = model_path\n model.load_state_dict(hu.torch_load(model_path))\n else:\n try:\n exp_dict_train = copy.deepcopy(exp_dict)\n del exp_dict_train['test_mode']\n savedir_train = os.path.join(savedir_base, hu.hash_dict(exp_dict_train))\n model_path = os.path.join(savedir_train, \"model_best.pth\")\n score_list = hu.load_pkl(os.path.join(savedir_train, 'score_list_best.pkl'))\n epoch = score_list[-1]['epoch']\n print('Loaded model at epoch %d with score %.3f' % epoch)\n model.load_state_dict(hu.torch_load(model_path))\n except:\n pass\n\n # print(model)\n pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(pytorch_total_params)\n with open(os.path.join(savedir, \"params.txt\"), \"w\") as f:\n f.write(str(pytorch_total_params))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-e', '--exp_group_list', default='open_source_pspnet',nargs=\"+\")\n parser.add_argument('-sb', '--savedir_base', default='CovidSeg/save')\n parser.add_argument('-d', '--datadir', default='CovidSeg/dataset')\n parser.add_argument(\"-r\", \"--reset\", default=1, type=int)\n parser.add_argument(\"-ei\", \"--exp_id\", default='unetplus_timm-resnest26d')\n parser.add_argument(\"-j\", \"--run_jobs\", default=0, type=int)\n parser.add_argument(\"-nw\", \"--num_workers\", type=int, default=0)\n parser.add_argument(\"-ec\", \"--encoder\", default='') # timm-efficientnet-b0\n parser.add_argument(\"-si\", \"--scan_id\", type=str, default=None)\n\n args = parser.parse_args()\n\n # Collect experiments\n # -------------------\n if args.exp_id is not None:\n # select one experiment\n savedir = os.path.join(args.savedir_base, args.exp_id)\n exp_dict = hu.load_json(os.path.join(savedir, 'exp_dict.json'))\n\n exp_list = [exp_dict]\n\n else:\n # select exp group\n exp_list = []\n for exp_group_name in [args.exp_group_list]:\n exp_list += exp_configs.EXP_GROUPS[exp_group_name]\n\n # format them for test\n for exp_dict in exp_list:\n exp_dict['test_mode'] = 1\n\n # Run experiments or View them\n # ----------------------------\n for exp_dict in exp_list:\n # do trainval\n test(exp_dict=exp_dict,\n savedir_base=args.savedir_base,\n datadir=args.datadir,\n num_workers=args.num_workers,\n scan_id=args.scan_id,\n savedir=savedir)\n","sub_path":"CovidSeg/count_params.py","file_name":"count_params.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"16639249","text":"# N개의 수로 이뤄진 수열 A1,A2, ... An\n# 수와 수 사이에 끼워넣을 수 있는 N-1개의 연산자 + - * /\n# 숫자의 순서는 바꿀 수 없다.\n\n# 식의 계산은 연산 우선순위를 무시하고 앞에서부터 계산한다.\n# 나숫셈은 몫으로 결과\n# 음수를 양수로 나눌 때는 양수로 바꾼 뒤 몫을 음수로 바꾼다.\n\n# 최대, 최소\n\nimport sys\n\ninput = sys.stdin.readline\nN = int(input())\nnum = list(map(int, input().split()))\nop = list(map(int, input().split()))\n\nmaximum = -1e9\nminimum = 1e9\n\ndef dfs(depth, total, plus, minus, multiply, divide):\n global maximum, minimum\n if depth==N:\n maximum = max(total, maximum)\n minimum = min(total, minimum)\n return\n if plus:\n dfs(depth+1, total+num[depth], plus-1, minus, multiply, divide)\n if minus:\n dfs(depth + 1, total - num[depth], plus, minus-1, multiply, divide)\n if multiply:\n dfs(depth + 1, total * num[depth], plus, minus, multiply -1, divide)\n if divide:\n dfs(depth + 1, int(total / num[depth]), plus, minus, multiply, divide-1)\n\ndfs(1, num[0], op[0], op[1], op[2], op[3])\n\n\n\n\n\"\"\"\nimport sys\nfrom itertools import permutations\n\ninput = sys.stdin.readline\nN = int(input())\nnum = list(map(int, input().split()))\nop_num = list(map(int, input().split()))\nop_list = ['+', '-', '*', '/']\nop = []\n\nfor k in range(len(op_num)):\n for i in range(op_num[k]):\n op.append(op_list[k])\n\nmaximum = -1e9\nminimum = 1e9\n\ndef solve():\n global maximum, minimum\n for case in permutations(op, N-1):\n total = num[0]\n for r in range(1, N):\n if case[r-1] == '+':\n total += num[r]\n elif case[r-1] == '-':\n total -= num[r]\n elif case[r-1] == '*':\n total *= num[r]\n else:\n total = int(total/num[r])\n if total > maximum:\n maximum = total\n if total < minimum:\n minimum = total\n\nsolve()\nprint(maximum)\nprint(minimum)\n\"\"\"\n\n\n\n# from itertools import permutations, combinations\n#\n# N = int(input())\n# data = list(map(int, input().split()))\n# calculate_count = list(map(int, input().split()))\n# cheat = {\n# 0: '+',\n# 1: '-',\n# 2: '*',\n# 3: '/',\n# }\n# calculate = []\n# i = 0\n# for count in calculate_count:\n# if count == 0:\n# i += 1\n# continue\n# calculate.append(cheat[i] * count)\n# i += 1\n# print(calculate)\n#\n# value_result=[]\n# result = data[0]\n# i = 1\n# for case in permutations(calculate, sum(calculate_count)):\n# for calu in case:\n# if calu == \"+\":\n# result += data[i]\n# elif calu == \"-\":\n# result -= data[i]\n# elif calu == \"*\":\n# result *= data[i]\n# else:\n# if result >= 0:\n# result = result // data[i]\n# else:\n# result = -(-result // data[i])\n# value_result.append(result)\n#\n# print(value_result)","sub_path":"약점체크/연산자 끼워넣기.py","file_name":"연산자 끼워넣기.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"503368232","text":"# Author: Martin McBride\n# Created: 2019-06-04\n# Copyright (C) 2018, Martin McBride\n# License: MIT\n\nimport colorsys\nimport itertools\n\ncssColors = {\n\"indianred\":(205,92,92),\n\"lightcoral\":(240,128,128),\n\"salmon\":(250,128,114),\n\"darksalmon\":(233,150,122),\n\"lightsalmon\":(255,160,122),\n\"crimson\":(220,20,60),\n\"red\":(255,0,0),\n\"firebrick\":(178,34,34),\n\"darkred\":(139,0,0),\n\"pink\":(255,192,203),\n\"lightpink\":(255,182,193),\n\"hotpink\":(255,105,180),\n\"deeppink\":(255,20,147),\n\"mediumvioletred\":(199,21,133),\n\"palevioletred\":(219,112,147),\n\"coral\":(255,127,80),\n\"tomato\":(255,99,71),\n\"orangered\":(255,69,0),\n\"darkorange\":(255,140,0),\n\"orange\":(255,165,0),\n\"gold\":(255,215,0),\n\"yellow\":(255,255,0),\n\"lightyellow\":(255,255,224),\n\"lemonchiffon\":(255,250,205),\n\"lightgoldenrodyellow\":(250,250,210),\n\"papayawhip\":(255,239,213),\n\"moccasin\":(255,228,181),\n\"peachpuff\":(255,218,185),\n\"palegoldenrod\":(238,232,170),\n\"khaki\":(240,230,140),\n\"darkkhaki\":(189,183,107),\n\"lavender\":(230,230,250),\n\"thistle\":(216,191,216),\n\"plum\":(221,160,221),\n\"violet\":(238,130,238),\n\"orchid\":(218,112,214),\n\"fuchsia\":(255,0,255),\n\"magenta\":(255,0,255),\n\"mediumorchid\":(186,85,211),\n\"mediumpurple\":(147,112,219),\n\"blueviolet\":(138,43,226),\n\"darkviolet\":(148,0,211),\n\"darkorchid\":(153,50,204),\n\"darkmagenta\":(139,0,139),\n\"purple\":(128,0,128),\n\"rebeccapurple\":(102,51,153),\n\"indigo\":(75,0,130),\n\"mediumslateblue\":(123,104,238),\n\"slateblue\":(106,90,205),\n\"darkslateblue\":(72,61,139),\n\"greenyellow\":(173,255,47),\n\"chartreuse\":(127,255,0),\n\"lawngreen\":(124,252,0),\n\"lime\":(0,255,0),\n\"limegreen\":(50,205,50),\n\"palegreen\":(152,251,152),\n\"lightgreen\":(144,238,144),\n\"mediumspringgreen\":(0,250,154),\n\"springgreen\":(0,255,127),\n\"mediumseagreen\":(60,179,113),\n\"seagreen\":(46,139,87),\n\"forestgreen\":(34,139,34),\n\"green\":(0,128,0),\n\"darkgreen\":(0,100,0),\n\"yellowgreen\":(154,205,50),\n\"olivedrab\":(107,142,35),\n\"olive\":(128,128,0),\n\"darkolivegreen\":(85,107,47),\n\"mediumaquamarine\":(102,205,170),\n\"darkseagreen\":(143,188,143),\n\"lightseagreen\":(32,178,170),\n\"darkcyan\":(0,139,139),\n\"teal\":(0,128,128),\n\"aqua\":(0,255,255),\n\"cyan\":(0,255,255),\n\"lightcyan\":(224,255,255),\n\"paleturquoise\":(175,238,238),\n\"aquamarine\":(127,255,212),\n\"turquoise\":(64,224,208),\n\"mediumturquoise\":(72,209,204),\n\"darkturquoise\":(0,206,209),\n\"cadetblue\":(95,158,160),\n\"steelblue\":(70,130,180),\n\"lightsteelblue\":(176,196,222),\n\"powderblue\":(176,224,230),\n\"lightblue\":(173,216,230),\n\"skyblue\":(135,206,235),\n\"lightskyblue\":(135,206,250),\n\"deepskyblue\":(0,191,255),\n\"dodgerblue\":(30,144,255),\n\"cornflowerblue\":(100,149,237),\n\"royalblue\":(65,105,225),\n\"blue\":(0,0,255),\n\"mediumblue\":(0,0,205),\n\"darkblue\":(0,0,139),\n\"navy\":(0,0,128),\n\"midnightblue\":(25,25,112),\n\"cornsilk\":(255,248,220),\n\"blanchedalmond\":(255,235,205),\n\"bisque\":(255,228,196),\n\"navajowhite\":(255,222,173),\n\"wheat\":(245,222,179),\n\"burlywood\":(222,184,135),\n\"tan\":(210,180,140),\n\"rosybrown\":(188,143,143),\n\"sandybrown\":(244,164,96),\n\"goldenrod\":(218,165,32),\n\"darkgoldenrod\":(184,134,11),\n\"peru\":(205,133,63),\n\"chocolate\":(210,105,30),\n\"saddlebrown\":(139,69,19),\n\"sienna\":(160,82,45),\n\"brown\":(165,42,42),\n\"maroon\":(128,0,0),\n\"white\":(255,255,255),\n\"snow\":(255,250,250),\n\"honeydew\":(240,255,240),\n\"mintcream\":(245,255,250),\n\"azure\":(240,255,255),\n\"aliceblue\":(240,248,255),\n\"ghostwhite\":(248,248,255),\n\"whitesmoke\":(245,245,245),\n\"seashell\":(255,245,238),\n\"beige\":(245,245,220),\n\"oldlace\":(253,245,230),\n\"floralwhite\":(255,250,240),\n\"ivory\":(255,255,240),\n\"antiquewhite\":(250,235,215),\n\"linen\":(250,240,230),\n\"lavenderblush\":(255,240,245),\n\"mistyrose\":(255,228,225),\n\"gainsboro\":(220,220,220),\n\"lightgray\":(211,211,211),\n\"lightgrey\":(211,211,211),\n\"silver\":(192,192,192),\n\"darkgray\":(169,169,169),\n\"darkgrey\":(169,169,169),\n\"gray\":(128,128,128),\n\"grey\":(128,128,128),\n\"dimgray\":(105,105,105),\n\"dimgrey\":(105,105,105),\n\"lightslategray\":(119,136,153),\n\"lightslategrey\":(119,136,153),\n\"slategray\":(112,128,144),\n\"slategrey\":(112,128,144),\n\"darkslategray\":(47,79,79),\n\"darkslategrey\":(47,79,79),\n\"black\":(0,0,0),\n}\n\nclass Color():\n '''\n Holds a color value.\n\n Color is stored as a tuple (r, g, b, a), where each channel has a value between 0 and 1.\n\n Colour can be initialised with:\n - a grey value\n - a grey value + an alpha\n - r, g and b values (alpha defaults to 1)\n - r, g, b and a values\n - a CSS color name as a string (alpha defaults to 1)\n - a CSS color name as a string plus an alpha value (0 to 1)\n\n Color objects are immutable.\n\n get_rgb, get_rgba gets the colour values as a 3- or 4-tuple\n\n of_hsl, of_hsla creates a new Color from hsl values (color values are stored as RGB)\n\n get_r gets the red value (similar for b, g, a, h, s, l). h, s and l values are obtained by converting from rgb\n\n with_r creates a new Color from an existing colour by setting the r value (similar for b, g, a, h, s, l). For\n h, s, l values, the color is converted to hsl, modified, then converted back to rgb.\n\n with_r_factor creates a new Color from an existing colour by multiplying the r value by a factor. It is\n equivalent to with_r(get_r()*factor). Similar for b, g, a, h, s, l\n\n '''\n\n def __init__(self, *args):\n if len(args) == 1:\n if args[0] in cssColors:\n self.color = tuple([x/255 for x in cssColors[args[0]]]) + (1,)\n else:\n g = Color.clamp(args[0])\n self.color = (g,)*3 + (1,)\n elif len(args) == 2:\n if args[0] in cssColors:\n self.color = tuple([x/255 for x in cssColors[args[0]]]) + (args[1],)\n else:\n g = Color.clamp(args[0])\n a = Color.clamp(args[1])\n self.color = (g,) * 3 + (a,)\n elif len(args) == 3:\n self.color = tuple([Color.clamp(x) for x in args]) + (1,)\n elif len(args) == 4:\n self.color = tuple([Color.clamp(x) for x in args])\n else:\n raise ValueError(\"Color takes 1, 2, 3 or 4 arguments\")\n\n @staticmethod\n def of_hsl(h, s, l):\n h = Color.clamp(h)\n s = Color.clamp(s)\n l = Color.clamp(l)\n r, g, b = colorsys.hls_to_rgb(h, l, s)\n return Color(r, g, b)\n\n @staticmethod\n def of_hsla(h, s, l, a):\n h = Color.clamp(h)\n s = Color.clamp(s)\n l = Color.clamp(l)\n a = Color.clamp(a)\n r, g, b = colorsys.hls_to_rgb(h, l, s)\n return Color(r, g, b, a)\n\n @property\n def rgb(self):\n return tuple(self.color[:3])\n\n @property\n def rgba(self):\n return tuple(self.color)\n\n @property\n def r(self):\n return self.color[0]\n\n def with_r(self, newval):\n newval = Color.clamp(newval)\n return Color(newval, self.color[1], self.color[2], self.color[3])\n\n def with_r_factor(self, factor):\n return Color(self.color[0]*factor, self.color[1], self.color[2], self.color[3])\n\n @property\n def g(self):\n return self.color[1]\n\n def with_g(self, newval):\n newval = Color.clamp(newval)\n return Color(self.color[0], newval, self.color[2], self.color[3])\n\n def with_g_factor(self, factor):\n return Color(self.color[0], self.color[1]*factor, self.color[2], self.color[3])\n\n @property\n def b(self):\n return self.color[2]\n\n def with_b(self, newval):\n newval = Color.clamp(newval)\n return Color(self.color[0], self.color[1], newval, self.color[3])\n\n def with_b_factor(self, factor):\n return Color(self.color[0], self.color[1], self.color[2]*factor, self.color[3])\n\n @property\n def a(self):\n return self.color[3]\n\n def with_a(self, newval):\n newval = Color.clamp(newval)\n return Color(self.color[0], self.color[1], self.color[2], newval)\n\n def with_a_factor(self, factor):\n return Color(self.color[0], self.color[1], self.color[2], self.color[3]*factor)\n\n @property\n def h(self):\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n return h\n\n def with_h(self, newval):\n newval = Color.clamp(newval)\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n r, g, b = colorsys.hls_to_rgb(newval, l, s)\n return Color(r, g, b, self.color[3])\n\n def with_h_factor(self, factor):\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n r, g, b = colorsys.hls_to_rgb(Color.clamp(h*factor), l, s)\n return Color(r, g, b, self.color[3])\n\n @property\n def s(self):\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n return s\n\n def with_s(self, newval):\n newval = Color.clamp(newval)\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n r, g, b = colorsys.hls_to_rgb(h, l, newval)\n return Color(r, g, b, self.color[3])\n\n def with_s_factor(self, factor):\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n r, g, b = colorsys.hls_to_rgb(h, l, Color.clamp(s*factor))\n return Color(r, g, b, self.color[3])\n\n @property\n def l(self):\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n return l\n\n def with_l(self, newval):\n newval = Color.clamp(newval)\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n r, g, b = colorsys.hls_to_rgb(h, newval, s)\n return Color(r, g, b, self.color[3])\n\n def with_l_factor(self, factor):\n h, l, s = colorsys.rgb_to_hls(self.color[0], self.color[1], self.color[2])\n r, g, b = colorsys.hls_to_rgb(h, Color.clamp(l*factor), s)\n return Color(r, g, b, self.color[3])\n\n def lerp(self, other, factor):\n factor = Color.clamp(factor)\n col1 = self.rgba\n col2 = other.rgba\n col = [x*(1-factor) + y*factor for x, y in zip(col1, col2)]\n return Color(*col)\n\n def as_rgbstr(self):\n return 'rgb({}, {}, {})'.format(int(self.color[0] * 255),\n int(self.color[1] * 255),\n int(self.color[2] * 255))\n\n def as_rgb_bytes(self):\n return (int(self.color[0] * 255),\n int(self.color[1] * 255),\n int(self.color[2] * 255))\n\n def as_rgba_bytes(self):\n return (int(self.color[0] * 255),\n int(self.color[1] * 255),\n int(self.color[2] * 255),\n int(self.color[3] * 255))\n\n @staticmethod\n def clamp(v):\n try:\n v = min(1, max(0, v)) #Clamp v between 0 and 1\n except e:\n raise ValueError('Numerical value required') from e\n return v\n\n def __str__(self):\n return 'rgba' + str(self.color)\n\n def __getitem__(self, i):\n if i < 4:\n return self.color[i]\n else:\n raise IndexError()\n\n\ndef make_colormap(length, colors, bands=None):\n '''\n Create a colormap, a list of varying colors.\n :param length: Total size of list\n :param colors: List of colors, must be at least 2 long.\n :param bands: Relative size of each band. bands[i] gives the size of the band between color[i] and color[i+1].\n len(bands) must be exactly 1 less than len(colors). If bands is None, equal bands will be used.\n :return: a list of Color objects\n '''\n\n color_count = len(colors)\n\n # Check parameters\n if length <= 0:\n raise ValueError('length must be > 0')\n if color_count < 2:\n raise ValueError('colors list must have at least 2 elements')\n if not bands:\n bands = [1]*(color_count - 1)\n if color_count != len(bands) + 1:\n raise ValueError('colors list must be exactly 1 longer than bands list')\n\n\n band_total = sum(bands)\n band_breakpoints = [int(x*length/band_total) for x in itertools.accumulate(bands)]\n\n current_colour = 0\n band_index = 0\n colormap = [None]*length\n band_size = []\n for i in range(length):\n while band_breakpoints[current_colour] <= i:\n band_size.append(band_index)\n current_colour += 1\n band_index = 0\n colormap[i] = (current_colour, band_index)\n band_index += 1\n band_size.append(band_index)\n\n colormap = [colors[col].lerp(colors[col+1], band/(band_size[col]-1))\n for col, band in colormap ]\n\n return colormap\n","sub_path":"generativepy/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":12386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"639981821","text":"import pdb\nimport csv\nimport random\nfrom connection import connection\nfrom bigquery import client\n\nresults = client.query(\"\"\"\n SELECT * FROM `harvard-599-trendsetters.Genie.final_table_output_to_ui`\n\"\"\")\n\nwith connection:\n with connection.cursor() as cur:\n cur.execute(\"DELETE FROM relationships;\")\n connection.commit()\n\n count = 0\n for row in results:\n count += 1\n if count % 5000 == 0:\n print(count)\n connection.commit()\n\n change_recent = None\n if row[6] == \"N\":\n change_recent = False\n elif row[6] == \"Y\":\n change_recent = True\n cur.execute(\n \"INSERT INTO relationships VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (id) DO NOTHING;\",\n (row[0], row[1][:-1], row[2], row[3], row[4] or \"\", row[5] or \"\", change_recent, row[7] and row[7][:-1] or 0, row[8] and row[8][:-1] or 0, row[9], row[10] or 0)\n )\n","sub_path":"front-end/loader/fetch_relationships.py","file_name":"fetch_relationships.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"328081417","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom urllib import urlencode\nimport base64\nfrom io import BytesIO\nfrom PIL import Image\nfrom splash.tests.test_proxy import BaseHtmlProxyTest\nfrom .test_execute import BaseLuaRenderTest\n\n\nclass OnRequestTest(BaseLuaRenderTest, BaseHtmlProxyTest):\n def test_request_log(self):\n resp = self.request_lua(\"\"\"\n function main(splash)\n local urls = {}\n local requests = {}\n splash:on_request(function(request)\n requests[#requests+1] = request.info\n urls[#urls+1] = request.url\n end)\n splash:go(splash.args.url)\n return requests, urls\n end\n \"\"\", {'url': self.mockurl(\"show-image\")})\n self.assertStatusCode(resp, 200)\n requests, urls = resp.json()\n\n # FIXME: it should return lists, and indices should be integer\n self.assertIn(\"show-image\", urls['1'])\n self.assertIn(\"slow.gif\", urls['2'])\n\n self.assertEqual(requests['1']['method'], 'GET')\n self.assertEqual(requests['1']['url'], urls['1'])\n\n def test_abort_request(self):\n resp = self.request_lua(\"\"\"\n function main(splash)\n splash:on_request(function(request)\n if string.find(request.url, \"gif\") ~= nil then\n request:abort()\n end\n end)\n splash:go(splash.args.url)\n return {har=splash:har(), png=splash:png()}\n end\n \"\"\", {'url': self.mockurl(\"show-image\")})\n self.assertStatusCode(resp, 200)\n data = resp.json()\n\n # the rendered image is not black (gif is not rendered)\n img = Image.open(BytesIO(base64.b64decode(data['png'])))\n self.assertEqual((255, 255, 255, 255), img.getpixel((10, 10)))\n\n # gif file is not in HAR log\n urls = [e['request']['url'] for e in data['har']['log']['entries']]\n self.assertTrue(any('show-image' in url for url in urls), urls)\n self.assertFalse(any('.gif' in url for url in urls), urls)\n\n def test_set_url(self):\n url = self.mockurl(\"http-redirect?code=302\")\n new_url = self.mockurl(\"jsrender\")\n resp = self.request_lua(\"\"\"\n function main(splash)\n splash:on_request(function(request)\n if request.url == splash.args.url then\n request:set_url(splash.args.new_url)\n end\n end)\n splash:go(splash.args.url)\n return splash:html()\n end\n \"\"\", {'url': url, 'new_url': new_url})\n self.assertStatusCode(resp, 200)\n self.assertIn('After', resp.content)\n\n def test_set_proxy(self):\n proxy_port = self.ts.mock_proxy_port\n resp = self.request_lua(\"\"\"\n function main(splash)\n assert(splash:go(splash.args.url))\n local html_1 = splash:html()\n\n splash:on_request(function(request)\n request:set_proxy{\n host=\"0.0.0.0\",\n port=splash.args.proxy_port\n }\n end)\n\n assert(splash:go(splash.args.url))\n local html_2 = splash:html()\n return html_1, html_2\n end\n \"\"\", {'url': self.mockurl(\"jsrender\"), 'proxy_port': proxy_port})\n self.assertStatusCode(resp, 200)\n html_1, html_2 = resp.json()\n self.assertNotProxied(html_1)\n self.assertProxied(html_2)\n\n def test_request_outside_callback(self):\n resp = self.request_lua(\"\"\"\n function main(splash)\n local req = nil\n splash:on_request(function(request)\n req = request\n end)\n assert(splash:go(splash.args.url))\n req:abort()\n return \"ok\"\n end\n \"\"\", {'url': self.mockurl(\"jsrender\")})\n self.assertStatusCode(resp, 400)\n self.assertErrorLineNumber(resp, 8)\n self.assertIn(\"request is used outside a callback\", resp.content)\n\n def test_set_header(self):\n resp = self.request_lua(\"\"\"\n function main(splash)\n splash:on_request(function(request)\n request:set_header(\"User-Agent\", \"Fooozilla\")\n request:set_header{name=\"Custom-header\", value=\"some-val\"}\n end)\n splash:go(splash.args.url)\n return splash:html()\n end\n \"\"\", {'url': self.mockurl(\"getrequest\")})\n self.assertStatusCode(resp, 200)\n\n self.assertIn(\"'custom-header': 'some-val'\", resp.text)\n self.assertIn(\"'user-agent': 'Fooozilla'\", resp.text)\n\n\nclass OnResponseHeadersTest(BaseLuaRenderTest, BaseHtmlProxyTest):\n def test_get_header(self):\n resp = self.request_lua(\"\"\"\n function main(splash)\n local header_value = nil\n splash:on_response_headers(function(response)\n header_value = response.headers['Content-Type']\n end)\n res = splash:http_get(splash.args.url)\n return header_value\n end\n \"\"\", {'url': self.mockurl(\"jsrender\")})\n\n self.assertStatusCode(resp, 200)\n self.assertEqual(resp.text, \"text/html\")\n\n def test_abort_on_response_headers(self):\n resp = self.request_lua(\"\"\"\n function main(splash)\n splash:on_response_headers(function(response)\n if response.headers['Content-Type'] == 'text/html' then\n response:abort()\n end\n end)\n res = splash:http_get(splash.args.url)\n return res\n end\n \"\"\", {'url': self.mockurl(\"jsrender\")})\n self.assertStatusCode(resp, 200)\n self.assertFalse(resp.json().get(\"content\").get(\"text\"))\n\n def test_response_used_outside_callback(self):\n resp = self.request_lua(\"\"\"\n function main(splash)\n local res = nil\n splash:on_response_headers(function(response)\n res = response\n end)\n splash:http_get(splash.args.url)\n res:abort()\n return \"ok\"\n end\n \"\"\", {'url': self.mockurl(\"jsrender\")})\n self.assertStatusCode(resp, 400)\n self.assertIn(\"response is used outside callback\", resp.text)\n\n def test_get_headers(self):\n headers = {\n \"Foo\": \"bar\",\n \"X-Proxy-Something\": \"1234\",\n \"X-Content-Type-Options\": \"nosniff\"\n }\n mocked_url = self.mockurl(\"set-header?\" + urlencode(headers))\n resp = self.request_lua(\"\"\"\n function main(splash)\n local headers = nil\n splash:on_response_headers(function(response)\n headers = response.headers\n response.abort()\n end)\n splash:http_get(splash.args.url)\n return headers\n end\"\"\", {\"url\": mocked_url})\n\n result = resp.json()\n\n self.assertStatusCode(resp, 200)\n\n for k, v in headers.iteritems():\n self.assertIn(k, result)\n self.assertEqual(result[k], headers[k])\n\n def test_other_response_attr(self):\n headers = {\n \"Foo\": \"bar\",\n }\n mocked_url = self.mockurl(\"set-header?\" + urlencode(headers))\n some_attrs = {\n \"url\": (unicode, mocked_url),\n \"status\": (int, 200),\n \"info\": (dict, {}),\n \"ok\": (bool, True),\n }\n\n resp = self.request_lua(\"\"\"\n function main(splash)\n local all_attrs = {}\n local attr_names = {\"url\", \"status\", \"info\", \"ok\", \"request\"}\n splash:on_response_headers(function(response)\n for key, value in pairs(attr_names) do\n all_attrs[value] = response[value]\n end\n end)\n splash:http_get(splash.args.url)\n return all_attrs\n end\"\"\", {\"url\": mocked_url})\n self.assertStatusCode(resp, 200)\n result = resp.json()\n\n for k, v in some_attrs.iteritems():\n self.assertIn(k, result)\n self.assertIsInstance(result[k], v[0])\n if v[1]:\n self.assertEqual(result[k], v[1], \"{} should equal {}\".format(k, v[1]))\n\n def test_request_in_callback(self):\n mocked_url = self.mockurl(\"set-header?\" + urlencode({\"alfa\": \"beta\"}))\n resp = self.request_lua(\"\"\"\n function main(splash)\n splash:on_response_headers(function(response)\n req_info = {}\n for key, value in pairs(response.request) do\n req_info[key] = response.request[key]\n end\n end)\n splash:on_request(function(request)\n request:set_header(\"hello\", \"world\")\n end)\n splash:http_get(splash.args.url)\n return req_info\n end\"\"\", {\"url\": mocked_url})\n self.assertStatusCode(resp, 200)\n resp = resp.json()\n for elem in [\"method\", \"url\", \"headers\"]:\n self.assertIn(elem, resp)\n self.assertEqual(resp[\"url\"], mocked_url)\n self.assertEqual(resp[\"method\"], \"GET\")\n self.assertEqual(resp[\"headers\"], {\"hello\": \"world\"})\n","sub_path":"splash/tests/test_execute_callbacks.py","file_name":"test_execute_callbacks.py","file_ext":"py","file_size_in_byte":9210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"395351884","text":"import random, numpy as np\r\n\r\nimport sys\r\nfrom sklearn import linear_model, metrics\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import SGDClassifier\r\n\r\nimport os,re\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.feature_selection import SelectFromModel\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.metrics import metrics\r\nfrom sklearn.pipeline import Pipeline\r\nfrom nltk.stem import *\r\nfrom nltk.stem.porter import *\r\nfrom sklearn.svm import LinearSVC\r\nimport nltk.stem\r\nimport matplotlib.pyplot as plt\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) != 3:\r\n print (\"Illegal use of Arguments: Best_configuration.py \")\r\n exit(1)\r\n train = sys.argv[1]\r\n test = sys.argv[2]\r\n ''' Extracting the training samples '''\r\n header_list = []\r\n labels = []\r\n i=0\r\n for root, dirs, files in os.walk('C:/Users/sthatipally/Downloads/Training'):\r\n for name in files:\r\n fo = open(root +\"/\"+name, \"r\")\r\n content = fo.read().replace('\\n', ' ')\r\n body = re.sub(r'^(.*) Lines: (\\d)+ ', \"\", content)\r\n header_list.append(unicode(body,errors='ignore'))\r\n labels.append(i)\r\n i=i+1\r\n\r\n ''' Extracting the testing samples '''\r\n header_test = []\r\n test_labels = []\r\n i = 0\r\n for root, dirs, files in os.walk('C:/Users/sthatipally/Downloads/Test'):\r\n for name in files:\r\n fo = open(root +\"/\"+name, \"r\")\r\n content = fo.read().replace('\\n', ' ')\r\n body = re.sub(r'^(.*) Lines: (\\d)+ ', \"\", content)\r\n header_test.append(unicode(body,errors='ignore'))\r\n test_labels.append(i)\r\n i=i+1\r\n\r\n def shuffle(train, test, size):\r\n shuffled_train = []\r\n shuffled_train_labels = []\r\n index =[]\r\n for i in range(len(labels)):\r\n index.append(i)\r\n random.shuffle(index)\r\n for i in index:\r\n shuffled_train.append(header_list[i])\r\n shuffled_train_labels.append((labels[i]))\r\n return shuffled_train[:size],shuffled_train_labels[:size]\r\n subsets = [(i+1)*100 for i in range(20)]\r\n\r\n from sklearn.naive_bayes import MultinomialNB\r\n def find_scores(estimator):\r\n sizes = []\r\n scores = []\r\n for i in subsets:\r\n text_clf = Pipeline([('vect', CountVectorizer()),('tfidf', TfidfTransformer()),('clf', estimator),])\r\n train, labels_sub = shuffle(header_list, labels, i)\r\n text_clf = text_clf.fit(train, labels_sub)\r\n predicted = text_clf.predict(header_test)\r\n sizes.append(i)\r\n scores.append(metrics.f1_score(test_labels, predicted, average='macro'))\r\n return (sizes,scores)\r\n\r\n sizes_NB,scores_NB = find_scores(MultinomialNB())\r\n print(sizes_NB,scores_NB)\r\n sizes_svm,scores_svm = find_scores(SGDClassifier(loss='hinge', penalty='l2',\r\n ))\r\n print(sizes_svm,scores_svm)\r\n sizes_log,scores_log = find_scores(linear_model.LogisticRegression())\r\n print(sizes_log,scores_log)\r\n sizes_RF,scores_RF = find_scores(RandomForestClassifier(n_estimators=100))\r\n print(sizes_RF,scores_RF)\r\n import numpy as np\r\n\r\n\r\n plt.figure()\r\n plt.title(\"learning curves\")\r\n plt.xlabel(\"Training examples\")\r\n plt.ylabel(\"Score\")\r\n plt.grid()\r\n plt.plot(sizes_NB, scores_NB, 'o-', color=\"r\",\r\n label=\"Bayes score\")\r\n plt.plot(sizes_log, scores_log, 'o-', color=\"g\",\r\n label=\"Logistic Score\")\r\n plt.plot(sizes_svm, scores_svm, 'o-', color=\"y\",\r\n label=\"SVM score\")\r\n plt.plot(sizes_RF, scores_RF, 'o-', color=\"b\",\r\n label=\"Random Forest score\")\r\n plt.legend(loc=\"best\")\r\n plt.show()","sub_path":"Learning_curves.py","file_name":"Learning_curves.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"308835060","text":"#!/usr/bin/python3\n#from sys import winver\nimport paramiko\nimport yaml\nimport os\nimport shutil\nimport ruamel.yaml\nfrom yaml.loader import Loader\n\n\n \nclass ConfigFileOtarieS1():\n def __init__(self, info_config, info_deployment, elastic_config):\n self.info_config = info_config\n self.elastic = elastic_config\n self.sourcename = 'source-otarie'\n self.new_source = info_config['name']\n self.deployment_info = info_deployment\n self.ressource_dir = info_config['ressource_dir']\n print(self.sourcename)\n print(self.deployment_info)\n \n def get_path_source(self):\n listsource = os.listdir(self.ressource_dir)\n if self.sourcename in listsource:\n return os.path.join(self.ressource_dir, self.sourcename)\n else:\n raise Warning(\"Source not found\")\n exit(-1)\n\n\n def get_path_script(self):\n return os.path.join(self.get_path_source(), 'scripts')\n \n def get_deployment_path(self):\n return self.deployment_info['deploymentPath']\n\n def get_path_new_source_from_deployment_server(self):\n return os.path.join(self.get_deployment_path(), self.new_source)\n\n def get_path_script_from_deployment_server(self):\n src = self.get_path_new_source_from_deployment_server()\n return os.path.join(src, 'scripts')\n\n def update_collecte_config_file(self):\n path_script = self.get_path_script()\n path_collect = os.path.join(path_script, 'collecte')\n path_collect_file = os.path.join(path_collect, 'config.yml')\n dirPath = os.path.join(self.get_path_new_source_from_deployment_server(), 'data')\n parse_dir = os.path.join(self.get_path_new_source_from_deployment_server(), 'parseDir')\n stream = open(path_collect_file, 'r')\n yaml = ruamel.yaml.YAML()\n yaml.indent(mapping=4, sequence=4, offset=2)\n yaml.preserve_quotes = True\n data = yaml.load(stream)\n data['source']['connection_type'] = self.info_config['accessMode']\n data['source']['host'] = self.info_config['ipAddress']\n data['source']['username'] = self.info_config['username']\n data['source']['password'] = self.info_config['password']\n data['source']['directory'] = self.info_config['path']\n data['source']['pattern_file'] = self.info_config['pattern']\n if self.info_config['date'] != '':\n data['source']['date'] = self.info_config['date']\n data['source']['format'] = self.info_config['format']\n data['destination']['directory'] = dirPath\n data['destination']['parse_dir'] = parse_dir\n\n with open(path_collect_file, 'w') as file:\n yaml.dump(data, file)\n \n def update_parsing_config_file(self):\n path_script = self.get_path_script()\n path_config = os.path.join(path_script, 'config')\n archdir = os.path.join(self.get_path_new_source_from_deployment_server(), 'archive')\n parsedir = os.path.join(self.get_path_new_source_from_deployment_server(), 'parseDir')\n path_parse_file = os.path.join(path_config, 'files_config.yml')\n stream = open(path_parse_file, 'r')\n yaml = ruamel.yaml.YAML()\n yaml.indent(mapping=4, sequence=4, offset=2)\n yaml.preserve_quotes = True\n data = yaml.load(stream)\n data['csv']['directory']= parsedir\n data['csv']['separateur']= self.info_config['separateur']\n data['csv']['column_to_convert'] = self.info_config['dateField']\n data['csv']['archive_directory'] = archdir\n\n with open(path_parse_file, 'w') as file:\n yaml.dump(data, file)\n\n\n def update_elastic_config_file(self):\n src_script = self.get_path_script_from_deployment_server()\n path_script = self.get_path_script()\n path_config = os.path.join(path_script, 'config')\n elastic_file = os.path.join(path_config, 'elastic_config.yml')\n config_dir_server = os.path.join(src_script, 'config')\n stream = open(elastic_file, 'r')\n yaml = ruamel.yaml.YAML()\n yaml.indent(mapping=4, sequence=4, offset=2)\n yaml.preserve_quotes = True\n data = yaml.load(stream)\n data['elastic']['host'] = self.elastic['hosts']\n data['elastic']['INDEX'] = self.info_config['indexName']\n data['elastic']['mapping'] = os.path.join(config_dir_server, 'mapping.json')\n \n with open(elastic_file, 'w') as file:\n yaml.dump(data, file)\n\n def create_new_source(self):\n src_dir = self.get_deployment_path()\n source_dir = self.get_path_source()\n ssh = paramiko.SSHClient()\n know_host = paramiko.AutoAddPolicy()\n ssh.set_missing_host_key_policy(know_host)\n ssh.connect(hostname=self.deployment_info['ipAddress'], port=22, username=self.deployment_info['username']\\\n ,password=self.deployment_info['password'])\n #print(src_dir)\n #zip_file = os.path.join(self.script_dir, 'script')\n name = 'source' \n shutil.make_archive(name, 'zip', source_dir)\n zip_file = name+'.zip'\n print('-----------------------------')\n print(zip_file)\n print(src_dir)\n ftp_client = ssh.open_sftp()\n des_dir = os.path.join(src_dir, 'source.zip')\n ftp_client.put(zip_file, des_dir)\n os.remove(zip_file) \n ftp_client.close()\n new_path = os.path.join(src_dir, self.new_source)\n cmd = 'unzip '+des_dir+' -d '+new_path+' && rm '+des_dir\n stdin, stdout, stderr = ssh.exec_command(cmd)\n print(stdout.read().decode())\n ssh.close()\n ","sub_path":"backend/src/traitement/createSourceOtarie.py","file_name":"createSourceOtarie.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"190385608","text":"import pandas as pd\nimport plotly\nimport plotly.graph_objs as go\nimport sklearn.model_selection\nimport sklearn.decomposition\nfrom sklearn import metrics\nfrom sklearn.linear_model import LinearRegression\nimport numpy as np\n# 数据预处理\ngupiao = pd.read_csv('data_akbilgic.csv')\nco = gupiao[0:1].values[0]\nco = co[1:]\nco[0] += '_TL_BASED'\nco[1] += '_USD_BASED'\ngupiao = pd.DataFrame(gupiao.iloc[1:, 1:].values, columns=co).astype(float)\n\n\n# 数据降维\ndef pca(df, n):\n data = df.values\n data = data-data.mean(axis=0)\n cov = np.cov(data, rowvar=False)\n values, factors = np.linalg.eig(cov)\n order = np.argsort(values)\n order = order[:-(n+1):-1]\n factors1 = factors[:, order]\n low_data = np.dot(data, factors1)\n return low_data\nx = gupiao.iloc[:, 1:]\nx = pca(x, 2)\nx = pd.DataFrame(x)\ny = gupiao.iloc[:, 0:1]\nprint(y)\nprint(x[0])\nprint(x[1])\n\n# 画出第一项关于降维后其他项的图\nscatter=[]\ny_1 = list(float(u) for u in y.values)\nfor i in range(0,2):\n x_1 = list(float(u) for u in x[i].values)\n scatter.append(go.Scatter(\n x=y_1,\n y=x_1,\n name=str(i),\n mode='markers',\n marker=dict(\n size=3,\n )\n ))\nplotly.offline.plot(scatter, filename='原始数据2.html')\n\n\n# 线性回归训练\n\nx_train, x_test, y_train, y_test = \\\n sklearn.model_selection.train_test_split(x, y, test_size=0.2, random_state=888)\n# print(x_train.shape)\n# print(y_train.shape)\n# print(x_test.shape)\n# print(y_test.shape)\nlinreg = LinearRegression() # 建立线性模型类实例linreg\nlinreg.fit(x_train, y_train)\n# print(linreg.intercept_)\n# print(linreg.coef_)\n\n\n# 画出测试集预测值与真值的对应图\n# 重写一下.predict()方法的实现\n# def get_y(data):\n# y = linreg.intercept_[0]\n# for i2 in range(0, len(linreg.coef_[0])):\n# y += float(data.values[0][i2])*float(linreg.coef_[0][i2])\n# return y\n# y_test_prime = []\n# for i in x_test.index:\n# y_test_prime.append(get_y(x[i:i+1]))\n# print(y_test_prime)\ny_test = [float(i) for i in y_test.values]\ny_test_prime = linreg.predict(x_test)\ny_test_prime = [y_test_prime[i][0] for i in range(0,len(y_test_prime))]\nplot1 = go.Scatter(\n x=np.linspace(0, 1, num=len(y_test)),\n y=y_test,\n mode='markers+lines',\n name='test'\n)\nplot2 = go.Scatter(\n x=np.linspace(0, 1, num=len(y_test)),\n y=y_test_prime,\n mode='markers+lines',\n name='test_prime'\n)\nplotly.offline.plot([plot1, plot2], filename='降维.html')\n\n# 检查误差\np = (metrics.mean_squared_error(y_test, y_test_prime))**0.5\nprint(p)\n\n# 交叉验证\nfrom sklearn.model_selection import cross_val_predict\nlinreg = LinearRegression()\npredicted = cross_val_predict(linreg, x, y, cv=10)\np = (metrics.mean_squared_error(y, predicted))**0.5\nprint(p)\n","sub_path":"人工智能程序设计/class/5.9assignment/第一题/降维.py","file_name":"降维.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"186357059","text":"\"\"\"\nFile: demographic_eigen_centrality.py\n-------------------\n@author jmwebb\n@date 2016-05-19\n\nExperiment to determine which demographic groups have the\nhighest average/median eigenvector centrality.\n\"\"\"\n\nimport networkx as nx\nimport math\n\n\ndef _map_print(key, value):\n \"\"\"\n Pretty print feature for key-value pairs\n \"\"\"\n print(\n '\\t{0:11.11}:\\t{1}\\t'.format(\n key, value))\n\n\ndef _print_average_demo(eigenvec_centrality_totals):\n \"\"\"\n Calculates and prints the average eigenvector centrality\n for a given demographic group.\n\n @param eigenvec_centrality_totals: a dict mapping a demographic to\n a list of the eigenvector centrality for all nodes in that demographic\n \"\"\"\n averages = {}\n print('-' * 60)\n for attribute in eigenvec_centrality_totals:\n averages[attribute] = sum(eigenvec_centrality_totals[\n attribute]) / len(eigenvec_centrality_totals[attribute])\n\n for attribute, average in sorted(\n averages.items(), key=lambda x: -x[1]):\n _map_print(attribute, average)\n\n\ndef _print_median_demo(eigenvec_centrality_totals):\n \"\"\"\n Calculates and prints the average eigenvector centrality\n for a given demographic group.\n\n @param eigenvec_centrality_totals: a dict mapping a demographic to\n a list of the eigenvector centrality for all nodes in that demographic\n \"\"\"\n medians = {}\n print('-' * 60)\n for attribute in eigenvec_centrality_totals:\n sort = sorted(eigenvec_centrality_totals[attribute])\n medians[attribute] = sort[math.floor(len(sort) / 2)]\n\n for attribute, median in sorted(medians.items(), key=lambda x: -x[1]):\n _map_print(attribute, median)\n\n\ndef _calc_centrality_totals(graph):\n \"\"\"\n Calculates the eigenvector centrality for every node in\n a graph, then assigns those centralities to different\n demographic groups.\n\n @param graph: the graph to calculate centrality for\n @return a dict mapping gender string to list of centralities\n @return a dict mapping major name to list of centralities\n @return a dict mapping activity name to list of centralities\n \"\"\"\n eigen_centralities = nx.eigenvector_centrality(graph)\n gender_eigen_totals = {}\n major_eigen_totals = {}\n ec_eigen_totals = {}\n for node in graph.nodes(data=True):\n gender = node[1]['gender']\n major = node[1]['area_of_study']\n extra_currics = node[1]['extra_curricular']\n if gender in gender_eigen_totals:\n gender_eigen_totals[gender].append(eigen_centralities[node[0]])\n else:\n gender_eigen_totals[gender] = []\n if major in major_eigen_totals:\n major_eigen_totals[major].append(eigen_centralities[node[0]])\n else:\n major_eigen_totals[major] = []\n for ec in extra_currics:\n if ec in ec_eigen_totals:\n ec_eigen_totals[ec].append(eigen_centralities[node[0]])\n else:\n ec_eigen_totals[ec] = []\n\n return gender_eigen_totals, major_eigen_totals, ec_eigen_totals\n\n\ndef average_centralities(graph):\n \"\"\"\n Finds the average eigenvector centrality across all demographic groups\n in the provided graph.\n \"\"\"\n gender_eigen_totals, major_eigen_totals, ec_eigen_totals = _calc_centrality_totals(\n graph)\n\n print('Average Eigenvector Centrality by Demographic:')\n print('-' * 60)\n print('Gender stats:')\n _print_average_demo(gender_eigen_totals)\n print('Major stats:')\n _print_average_demo(major_eigen_totals)\n print('Extracurricular stats:')\n _print_average_demo(ec_eigen_totals)\n\n\ndef median_centralities(graph):\n \"\"\"\n Finds the median eigenvector centrality across all demographic groups\n in the provided graph.\n \"\"\"\n gender_eigen_totals, major_eigen_totals, ec_eigen_totals = _calc_centrality_totals(\n graph)\n\n print('Median Eigenvector Centrality by Demographic:')\n print('-' * 60)\n print('Gender stats:')\n _print_median_demo(gender_eigen_totals)\n print('Major stats:')\n _print_median_demo(major_eigen_totals)\n print('Extracurricular stats:')\n _print_median_demo(ec_eigen_totals)\n","sub_path":"demographic_eigen_centrality.py","file_name":"demographic_eigen_centrality.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"643187645","text":"from setuptools import setup\nimport os\nimport re\n\ndef find_version():\n with open('pionUploader.py', 'r') as version_file:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\nif os.name == \"nt\":\n scripts = None\n entry_points = {\n {\n 'console_scripts': ['tasmotizer=tasmotizer:main'],\n }\n }\nelse:\n scripts = ['pionUploader.py']\n entry_points = None\n\nsetup(\n name='PionUpploader',\n version=find_version(),\n url='https://github.com/pion-labs/pion-kits-firmware-uploader',\n py_modules=['pionUploader', 'gui', 'pionUploader_esptool', 'banner', 'utils'],\n license='GPLv3',\n author='Pion Labs',\n author_email='liftoff@pionlabs.com.br',\n description='Firmware uploader for PION Educational kits!',\n long_description=\"Dedicated flashing tool for the default firmware for PION Educational Satellite Kits\",\n python_requires='>=3.6',\n install_requires=[\n \"pyserial>=3.0\",\n \"PyQt5>=5.10\"\n ],\n entry_points=entry_points,\n scripts=scripts,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n ],\n project_urls={\n \"Issue Tracker\": \"https://github.com/pion-labs/pion-kits-firmware-uploader/issues\",\n \"Documentation\": \"https://github.com/pion-labs/pion-kits-firmware-uploader/wiki\",\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"515051924","text":"class Employee:\n\n raise_amount = 1.04\n def __init__(self,first,last,pay):\n self.first = first\n self.last = last\n self.pay = pay\n self.email = first+\".\"+last+\"@company.com\"\n \n def fullname(self): \n return '{} {}'.format(self.first,self.last) \n \n \n def raise_amount(self):\n self.pay = int(self.pay*raise_amount) \n \nemp1 = Employee(\"fahad\",\"mushahid\",230000)\nemp2 = Employee(\"rahul\",\"tenda\",121331)\n \nprint(emp1.email)\nprint(emp2.email)\n\nprint(emp1.fullname())\nprint(emp1.pay)\nemp1.raise_amount()\nprint(emp1.pay)\n\n","sub_path":"Employee.py","file_name":"Employee.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"577181923","text":"import os\nos.environ['NUMPY_EXPERIMENTAL_ARRAY_FUNCTION'] = '0'\n\nfrom naca4 import cluster_both, cluster_right, ellipse, trans_finite, converge, plot\nfrom pyHype.mesh.airfoil import NACA4\nimport numpy as np\n\nfarfield = 2\nny = 41\nnx = 31\nf = 0.0\n\nsource = {'ap': 0.5,\n 'cp': 1.0,\n 'aq': 100.0,\n 'cq': 10.0,\n 'p': [0, 1],\n 'q': [1]\n }\n\nairfoil = NACA4(airfoil='9410',\n angle_start=0,\n angle_end=180,\n aoa=5,\n npt=int(np.floor(ny/2)+1))\n\n# -------------------------------------------------------------\n# Mesh\n\nX = np.zeros((ny, nx))\nY = np.zeros((ny, nx))\n\ntheta = cluster_both(3 * np.pi / 2, np.pi / 2, ny, factor=1.5)\n\nr = ellipse(farfield + 1, farfield + 1, theta)\n\nX[:, 0] = r * np.cos(theta) + 1\nX[:, -1] = np.concatenate((np.flip(airfoil.x_lower), airfoil.x_upper[1:]))\nX[-1, :] = np.linspace(airfoil.x_upper[-1], airfoil.x_upper[-1], nx)\nX[0, :] = np.linspace(airfoil.x_upper[-1], airfoil.x_upper[-1], nx)\n\nY[:, 0] = r * np.sin(theta)\nY[:, -1] = np.concatenate((np.flip(airfoil.y_lower)+f, airfoil.y_upper[1:]+f))\nY[-1, :] = cluster_right(r[-1], airfoil.y_upper[-1] + f, nx, factor=3, flip=True)\nY[0, :] = cluster_right(-r[1], airfoil.y_lower[-1] + f, nx, factor=3, flip=True)\n\nX, Y = trans_finite(X, Y)\nXt, Yt = X.copy(), Y.copy()\n\n# -------------------------------------------------------------\n# Calculate mesh\n\n_eta = np.linspace(0, 1, nx - 2)\n_xi = np.linspace(0, 1, ny - 2)\neta, xi = np.meshgrid(_eta, _xi)\n\nX, Y = converge(X, Y, eta, xi, source)\n\n# Plot\nplot(X, Y, Xt, Yt)\nplot(X, Y, Xt, Yt, xlim=(-0.25, 1), ylim=(-0.5, 0.5), vert=True)\n","sub_path":"new_functions_testing/naca4_test.py","file_name":"naca4_test.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"592443192","text":"\n# Given a string, find the first non-repeating character in it and return it's index. If it doesn't exist, return -1.\n\nimport collections\n\nclass Solution(object):\n def firstUniqChar(self, s):\n if s is None:\n return -1\n\n freq = collections.Counter(s)\n for i in range(len(s)):\n if freq.get(s[i])==1:\n return i\n return -1\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.firstUniqChar(\"loveleetcode\"))","sub_path":"First Unique Character in a String.py","file_name":"First Unique Character in a String.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"586306261","text":"import torch.nn as nn\n\n\nclass DoubleUpconvUNet(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(DoubleUpconvUNet, self).__init__()\n self.l1 = nn.Upsample(scale_factor=2, mode='nearest')\n self.l2 = nn.ReflectionPad2d(1)\n self.l3 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels//2, kernel_size=3, stride=1, padding=0,\n bias=True)\n self.l4 = nn.Conv2d(in_channels=in_channels//2, out_channels=out_channels, kernel_size=3, stride=1, padding=1,\n bias=True)\n\n self.l5 = nn.BatchNorm2d(out_channels)\n\n def forward(self, x):\n x = self.l1(x)\n x = self.l2(x)\n x = self.l3(x)\n x = self.l4(x)\n x = self.l5(x)\n return x\n","sub_path":"_fred-v1/model/models/DoubleUpconvUNet.py","file_name":"DoubleUpconvUNet.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"90818134","text":"from django.conf.urls import url\nfrom page import views\nfrom page.twviews import *\n\nurlpatterns = [\n # url(r'^categories/$', views.categories, name='categories'),\n # url(r'^(?:(?P\\d+)/)?$', views.index, name='index'),\n # url(r'^good/(?P[0-9]+)/$', views.good, name='good')\n url(r'^(?:(?P\\d+)/)?$', GoodListView.as_view(), name='index'),\n url(r'^good/(?P\\d+)/$', GoodDetailView.as_view(), name='good'),\n url(r'^(?P\\d+)/add/$', GoodCreate.as_view(), name='good_add'),\n url(r'^good/(?P\\d+)/edit/$', GoodUpdate.as_view(), name='good_edit'),\n url(r'^good/(?P\\d+)/delete/$', GoodDelete.as_view(), name='good_delete')\n]\n","sub_path":"page/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"337573907","text":"# Import modules\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport streamlit as st\r\n\r\n@st.cache()\r\ndef load_data():\r\n\t# Load the Adult Income dataset into DataFrame.\r\n\r\n\tdf = pd.read_csv('https://student-datasets-bucket.s3.ap-south-1.amazonaws.com/whitehat-ds-datasets/adult.csv', header=None)\r\n\tdf.head()\r\n\r\n\t# Rename the column names in the DataFrame using the list given above. \r\n\r\n\t# Create the list\r\n\tcolumn_name =['age', 'workclass', 'fnlwgt', 'education', 'education-years', 'marital-status', 'occupation', 'relationship', 'race','gender','capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']\r\n\r\n\t# Rename the columns using 'rename()'\r\n\tfor i in range(df.shape[1]):\r\n\t df.rename(columns={i:column_name[i]},inplace=True)\r\n\r\n\t# Print the first five rows of the DataFrame\r\n\tdf.head()\r\n\r\n\t# Replace the invalid values ' ?' with 'np.nan'.\r\n\r\n\tdf['native-country'] = df['native-country'].replace(' ?',np.nan)\r\n\tdf['workclass'] = df['workclass'].replace(' ?',np.nan)\r\n\tdf['occupation'] = df['occupation'].replace(' ?',np.nan)\r\n\r\n\t# Delete the rows with invalid values and the column not required \r\n\r\n\t# Delete the rows with the 'dropna()' function\r\n\tdf.dropna(inplace=True)\r\n\r\n\t# Delete the column with the 'drop()' function\r\n\tdf.drop(columns='fnlwgt',axis=1,inplace=True)\r\n\r\n\treturn df\r\n\r\ncensus_df = load_data()\r\n\r\n# Write your code to filter streamlit warnings \r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\n\r\n# Write the code to design the web app\r\nst.title(\"Census Visualisation App\")\r\n \r\n# Add title on the main page and in the sidebar.\r\nst.sidebar.title(\"Menu\")\r\n# Using the 'if' statement, display raw data on the click of the checkbox.\r\nif st.sidebar.checkbox(\"Show Raw Data\"):\r\n st.subheader(\"Census Data Frame\")\r\n st.dataframe(census_df)\r\n st.write(f\"Number of rows are:{census_df.shape[0]} , Number of Columns are:{census_df.shape[1]}\") \r\n# Add a multiselect widget to allow the user to select multiple visualisations.\r\n# Add a subheader in the sidebar with the label \"Visualisation Selector\"\r\nst.sidebar.subheader(\"Visualisation Selector\")\r\n\r\n# Add a multiselect in the sidebar with label 'Select the Charts/Plots:'\r\n# Store the current value of this widget in a variable 'plot_list'.\r\nplt_typ = st.sidebar.multiselect(\"Select the Plot\" , ('Box Plot', 'Count Plot', 'Pie Chart'))\r\n\r\n# Display pie plot using matplotlib module and 'st.pyplot()'\r\nif 'Pie Chart' in plt_typ:\r\n st.subheader(\"Pie Chart\") \r\n plt_pie = st.sidebar.multiselect(\"Select the column for Pie chart\" , (\"income\" , \"gender\"))\r\n for i in plt_pie:\r\n data_pie = census_df[i].value_counts()\r\n plt.figure(figsize = (15 , 10))\r\n plt.title(f\"Pie Chart for {i}\")\r\n plt.pie(data_pie , labels = data_pie.index , autopct = \"%.2f%%\" , explode = np.linspace(0.05 , 0.15 , len(data_pie)))\r\n st.pyplot()\r\n\r\n# Display box plot using matplotlib module and 'st.pyplot()'\r\nif 'Box Plot' in plt_typ:\r\n st.subheader(\"Box Plot\")\r\n cols = st.sidebar.multiselect(\"Select the columns to create its Box Plot\" , ('income' , 'gender')) \r\n for i in cols:\r\n plt.figure(figsize = (15 , 10))\r\n plt.title(f\"Box Plot for {i}\")\r\n sns.boxplot(census_df[\"hours-per-week\"] , census_df[i])\r\n st.pyplot()\r\n\r\n# Display count plot using seaborn module and 'st.pyplot()' \r\nif 'Count Plot' in plt_typ:\r\n st.subheader(\"Count Plot\") \r\n plt.figure(figsize = (15 , 10))\r\n plt.title(f\"Count Plot for Workclass\")\r\n sns.countplot(census_df['workclass'] , hue = census_df[\"income\"])\r\n st.pyplot()\r\n","sub_path":"census.py","file_name":"census.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"121726109","text":"'''Create a generator that yields \"n\" random numbers between a low and high number (that are inputs).\nNote: Use the random library. For example:'''\n\nimport random\n\nrandom.randint(1,10)\ndef rand_num(low,high,n):\n for i in range(n):\n yield random.randint(low,high)\n\nfor ran_num in rand_num(1,15,10):\n print(ran_num)","sub_path":"generator_exercise_2.py","file_name":"generator_exercise_2.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"214012253","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-03-19\n# @Author : Joe\n\nimport numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\n\n# Feature Matching + Homography to find Objects\n\n# Goal\n\t# 在本节中,我们将在一副较复杂的图像中混合使用特征匹配和\n\t# calib3d模块中的findHomography来查找已知的物体。\n\n# Basics\n\t# 所以我们上节中做了什么呢?我们使用了一个未知图像,然后\n\t# 在它里面找到了一些特征点,我们再使用另外一副训练图像,\n\t# 也在图像中找到一些特征点,然后在这些特征点当中找到他们\n\t# 相互匹配的特征点。简单来说,我们在另外一副图像中找到了\n\t# 一些物体的某些部位的位置。这些信息足够让我们在训练图像\n\t# 中找到精确的物体。\n\n\t# 为了实现这个目标,我们使用了在 calib3d 模块中的函数,cv.findHomography()\n\t# 如果我们给这个函数传入了在两幅图像中都存在的一些点,这\n\t# 个函数将会找到这个物体的透视转换信息。然后我们就可以使\n\t# 用cv2.perspectiveTransform()来找到这个物体。它需要最\n\t# 少4个正确的点来找到这个变换。\n\n\t# 我们可以看到这里将会存在在匹配的时候会出现一些可能的错\n\t# 误信息将会影响到结果。解决这个问题,算法使用RANSAC或者\n\t# LEAST_MEDIAN(可以通过标记判断)。所以好的匹配提供了正\n\t# 确的预测被称��inliers,剩余的被称为outliers。\n\n# Code\nMIN_MATCH_COUNT = 10\nimg1 = cv.imread('box.png',0) # queryImage\nimg2 = cv.imread('box_in_scene.png',0) # trainImage\n# Initiate SIFT detector\nsift = cv.xfeatures2d.SIFT_create()\n# find the keypoints and descriptors with SIFT\nkp1, des1 = sift.detectAndCompute(img1,None)\nkp2, des2 = sift.detectAndCompute(img2,None)\nFLANN_INDEX_KDTREE = 1\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks = 50)\nflann = cv.FlannBasedMatcher(index_params, search_params)\nmatches = flann.knnMatch(des1,des2,k=2)\n# store all the good matches as per Lowe's ratio test.\ngood = []\nfor m,n in matches:\n if m.distance < 0.7*n.distance:\n good.append(m)\n\nif len(good)>MIN_MATCH_COUNT:\n\tsrc_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n\tdst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n\tM, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,5.0)\n\tmatchesMask = mask.ravel().tolist()\n\th,w = img1.shape\n\tpts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n\tdst = cv.perspectiveTransform(pts,M)\n\timg2 = cv.polylines(img2,[np.int32(dst)],True,255,3, cv.LINE_AA)\nelse:\n\tprint( \"Not enough matches are found - {}/{}\".format(len(good), MIN_MATCH_COUNT) )\n\tmatchesMask = None\t\n\ndraw_params = dict(matchColor = (0,255,0), # draw matches in green color\n singlePointColor = None,\n matchesMask = matchesMask, # draw only inliers\n flags = 2)\nimg3 = cv.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)\nplt.imshow(img3, 'gray'),plt.show()","sub_path":"feature_detection_and_description/10_feature_matching_homography_to_find_objects.py","file_name":"10_feature_matching_homography_to_find_objects.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"519241716","text":"import config\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\n\ndef train_epoch(network, loader, optimizer):\n cumu_loss = 0\n cumu_acc = 0\n total = 0\n\n criterion = nn.CrossEntropyLoss()\n for _, (data, target) in enumerate(loader):\n data, target = data.to(config.DEVICE), target.to(config.DEVICE)\n optimizer.zero_grad()\n\n loss = criterion(network(data), target)\n cumu_loss += loss.item()\n _, predicted = torch.max(network(data).data, 1)\n total += target.size(0)\n cumu_acc += (predicted == target).sum().item()\n\n loss.backward()\n optimizer.step()\n network.eval() \n return cumu_loss / len(loader), 100 * cumu_acc / total","sub_path":"day_3/Mnist_train_2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"426638686","text":"from scipy.integrate import ode\nimport matplotlib.pyplot as plt\n\nfrom models import *\nfrom parameters import *\n\n\nrho_x = 0\nrho_y = 0\n\nrho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b = 0, 5, 5, 0, 5, 0, 5, 0\n\nparams = (delta_L, gamma_L_X, n_y, theta_L_X, eta_x, omega_x, m_x, delta_x, delta_y, rho_x, rho_y, gamma_x, theta_x, r_X, r_Y, \n rho_I0_a, rho_I0_b, rho_I1_a, rho_I1_b, rho_I2_a, rho_I2_b, rho_I3_a, rho_I3_b)\n\n\nY0 = np.zeros(59)\n\n\n# number of cells: toggle switches\nN_I0 = np.array([1,1])\nN_I1 = np.array([1,1])\nN_I2 = np.array([1,1])\nN_I3 = np.array([1,1])\n\nY0[4:6] = N_I0\nY0[10:12] = N_I1\nY0[16:18] = N_I2\nY0[22:24] = N_I3\n\n# number of cells: mux\n#Y0[22-4+24:38-4+24] = 1 # number of cells\nY0[42:58] = 1 # number of cells\n\n# S0, S1\nS = np.array([0, 0])\nY0[24:26] = S\n\n\"\"\"\nsimulations\n\"\"\"\n\n# simulation parameters\nt_end = 1500\nN = t_end\n\n# initialization\n\nT = np.linspace(0, t_end, N)\n\nt1 = t_end\ndt = t_end/N\nT = np.arange(0,t1+dt,dt)\nY = np.zeros([1+N,59])\nY[0,:] = Y0\n\n\n# simulation\nr = ode(CLB_model_ODE).set_integrator('zvode', method='bdf')\nr.set_initial_value(Y0, T[0]).set_f_params(params)\n\ni = 1\nwhile r.successful() and r.t < t1:\n Y[i,:] = r.integrate(r.t+dt)\n i += 1\n\nout = Y[:,-1]\n\nS0, S1 = Y[:,24], Y[:,25]\n\nI0_a, I0_b = Y[:,2], Y[:,3]\nI1_a, I1_b = Y[:,8], Y[:,9]\nI2_a, I2_b = Y[:,14], Y[:,15]\nI3_a, I3_b = Y[:,20], Y[:,21]\n\n\n# plot\n\"\"\"\nax1 = plt.subplot(241)\nax1.plot(T, I0_a)\nax1.plot(T, I0_b)\nax1.legend([\"I0_a = I0\", \"I0_b\"])\nax1.set_title('I0 toggle')\n\nax2 = plt.subplot(242)\nax2.plot(T, I1_a)\nax2.plot(T, I1_b)\nax2.legend([\"I1_a = I1\", \"I1_b\"])\nax2.set_title('I1 toggle')\n\nax3 = plt.subplot(243)\nax3.plot(T, I2_a)\nax3.plot(T, I2_b)\nax3.legend([\"I2_a = I2\", \"I2_b\"])\nax3.set_title('I2 toggle')\n\nax4 = plt.subplot(244)\nax4.plot(T, I3_a)\nax4.plot(T, I3_b)\nax4.legend([\"I3_a = I3\", \"I3_b\"])\nax4.set_title('I3 toggle')\n\nax5 = plt.subplot(212)\nax5.plot(T,out)\nax5.set_title('out')\n\nplt.suptitle(f\"S = [{S[1]},{S[0]}]\")\nplt.show()\n\"\"\"\nax1 = plt.subplot(341)\nax1.plot(T, I0_a, color=\"#800000ff\", alpha=0.75)\nax1.plot(T, I0_b, color=\"#999999ff\", alpha=0.75)\nax1.legend([\"$I_0$\", \"$\\\\overline{I_0}$\"])\n#ax1.set_title('$I_0$ toggle')\nax1.set_xlabel(\"Time [min]\")\nax1.set_ylabel(\"Concentrations [nM]\")\n\n\nax2 = plt.subplot(342)\nax2.plot(T, I1_a, color = \"#00ff00ff\", alpha=0.75)\nax2.plot(T, I1_b, color = \"#666666ff\")#, alpha=0.75)\nax2.legend([\"$I_1$\", \"$\\\\overline{I_1}$\"])\n#ax2.set_title('$I_1$ toggle')\nax2.set_xlabel(\"Time [min]\")\nax2.set_ylabel(\"Concentrations [nM]\")\n\n\nax3 = plt.subplot(343)\nax3.plot(T, I2_a, color = \"#0000ffff\", alpha=0.75)\nax3.plot(T, I2_b, color = \"#ecececfe\")#, alpha=0.75)\nax3.legend([\"$I_2$\", \"$\\\\overline{I_2}$\"])\n#ax3.set_title('$I_2$ toggle')\nax3.set_xlabel(\"Time [min]\")\nax3.set_ylabel(\"Concentrations [nM]\")\n\n\nax4 = plt.subplot(344)\nax4.plot(T, I3_a, color = \"#800080ff\", alpha=0.75)\nax4.plot(T, I3_b, color = \"#999999fc\")#, alpha=0.75)\nax4.legend([\"$I_3$\", \"$\\\\overline{I_3}$\"])\n#ax4.set_title('$I_3$ toggle')\nax4.set_xlabel(\"Time [min]\")\nax4.set_ylabel(\"Concentrations [nM]\")\n\n\nax5 = plt.subplot(312)\nax5.plot(T,S0, color = \"#ff6600ff\", alpha=0.75)\nax5.plot(T,S1, color = \"#ffff00ff\")#, alpha=0.75)\nax5.legend([\"$S_0$\", \"$S_1$\"])\n#ax5.set_title('Select inputs')\nax5.set_xlabel(\"Time [min]\")\nax5.set_ylabel(\"Concentrations [nM]\")\n\n\nax6 = plt.subplot(313)\nax6.plot(T,out, color = \"#8080805a\", alpha=0.75)\n#ax6.set_title('out')\nax6.legend('out')\nax6.set_xlabel(\"Time [min]\")\nax6.set_ylabel(\"Concentrations [nM]\")\n\nplt.gcf().set_size_inches(15,10)\nplt.show()","sub_path":"cblb/_run_ode_model_clb.py","file_name":"_run_ode_model_clb.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"391911058","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Evaluation functions\n\"\"\"\n\n# external imports\n# ---\n\nimport logging\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering, KMeans\nfrom six import iteritems\nimport six\nimport importlib\nimport os\n\n# internal imports\n# ---\n\nimport web.datasets.synonymy\n\nfrom web.datasets.similarity import *\n\nfrom web.datasets.categorization import *\n\n# import of analogy datasets fetchers\n# and many other things (ex: itertools.product)\n# are accomplished within analogy_solver\n# ---\nfrom web.analogy_solver import *\n\nfrom web.embedding import Embedding\nfrom web.embeddings import load_toy_embedding\nfrom web.embeddings import load_embedding\n\n\ndef evaluate_on_all_datasets(w, wordrep_max_pairs=None):\n \"\"\"\n Evaluate Embedding w on all benchmarks\n\n Input\n -----\n\n w:\n\n Word embedding to evaluate\n (Embedding object or dict)\n\n wordrep_max_pairs:\n\n maximum number of pairs to be considered for the WordRep dataset\n (integer, ex: 50; the default used by the original authors was 1000)\n\n Output\n ------\n\n dfs:\n\n Table with results\n (pandas.DataFrame)\n\n \"\"\"\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n # Synonyms tasks\n # ---\n\n synonymy_datasets = []\n\n # synonymy_datasets.append(\"TOEFL\") # *new*\n # synonymy_datasets.append(\"ESL\") # *new*\n\n # Similarity tasks\n # ---\n\n similarity_datasets = []\n\n # similarity_datasets.append(\"MEN\")\n # similarity_datasets.append(\"WS353\")\n # similarity_datasets.append(\"WS353S\")\n # similarity_datasets.append(\"WS353R\")\n # similarity_datasets.append(\"SimLex999\")\n # similarity_datasets.append(\"RW\")\n # similarity_datasets.append(\"RG65\")\n # similarity_datasets.append(\"MTurk\")\n # similarity_datasets.append(\"TR9856\")\n # similarity_datasets.append(\"SimVerb3500\") # *new*\n\n # Analogy tasks\n # ---\n\n analogy_datasets = []\n\n analogy_datasets.append(\"Google\")\n analogy_datasets.append(\"MSR\")\n analogy_datasets.append(\"SemEval\")\n analogy_datasets.append(\"WordRep\")\n analogy_datasets.append(\"SAT\") # *new*\n analogy_datasets.append(\"BATS\") # *new*\n\n # Categorization tasks\n # ---\n\n categorization_datasets = []\n\n # categorization_datasets.append(\"AP\")\n # categorization_datasets.append(\"BLESS\")\n # categorization_datasets.append(\"battig\")\n # categorization_datasets.append(\"battig2010\") # *new*\n # categorization_datasets.append(\"ESSLLI_1a\")\n # categorization_datasets.append(\"ESSLLI_2b\")\n # categorization_datasets.append(\"ESSLLI_2c\")\n\n # Calculate results on synonymy\n # ---\n\n logger.info(\"\\nCalculating synonymy benchmarks\")\n\n results = {}\n\n for dataset in synonymy_datasets:\n\n df = evaluate_synonymy(w, dataset)\n\n msg = \"\\nResults for {}\\n---\\n{}\".format(dataset, df)\n\n logger.info(msg)\n\n df['task'] = 'synonymy'\n df['dataset'] = dataset\n\n results[dataset] = df\n\n # Calculate results on similarity\n # ---\n\n logger.info(\"\\nCalculating similarity benchmarks\")\n\n for dataset in similarity_datasets:\n\n if dataset == 'WS353R':\n\n mydataset = 'WS353'\n\n kwargs = {'which': 'relatedness'}\n\n elif dataset == 'WS353S':\n\n mydataset = 'WS353'\n\n kwargs = {'which': 'similarity'}\n\n else:\n mydataset = dataset\n\n kwargs = {}\n\n fetch_function_name = \"fetch_\" + mydataset\n module = importlib.import_module(\"web.datasets.similarity\")\n data = getattr(module, fetch_function_name)(**kwargs)\n\n df = evaluate_similarity(w, data.X, data.y)\n\n msg = \"\\nResults for {}\\n---\\n{}\".format(dataset, df)\n\n logger.info(msg)\n\n df['dataset'] = dataset\n df['task'] = 'similarity'\n\n results[dataset] = df\n\n # Calculate results on analogy\n # ---\n\n logger.info(\"\\nCalculating analogy benchmarks\")\n\n for dataset in analogy_datasets:\n\n if dataset == \"Google\":\n\n data = fetch_google_analogy()\n df = evaluate_analogy(w, data.X, data.y, category=data.category)\n\n df['dataset'] = dataset\n\n elif dataset == \"MSR\":\n\n data = fetch_msr_analogy()\n df = evaluate_analogy(w, data.X, data.y, category=data.category)\n\n df['dataset'] = dataset\n\n elif dataset == \"SemEval\":\n\n df = evaluate_on_semeval_2012_2(w)\n\n elif dataset == \"SAT\":\n\n df = evaluate_on_SAT(w)\n\n elif dataset == 'BATS':\n\n df = evaluate_on_BATS(w)\n\n elif dataset == \"WordRep\":\n\n df = evaluate_on_WordRep(w, max_pairs=wordrep_max_pairs)\n\n else:\n\n continue\n\n # msg = \"\\nResults for {}\\n---\\n{}\".format(dataset, df)\n msg = \"\\n\\nResults for {}\\n---\\n\".format(dataset)\n\n logger.info(msg)\n\n # print first four columns\n # ---\n\n print(\"\")\n print(df.iloc[:, 0:2])\n print(\"\")\n print(df.iloc[:, 2:4])\n print(\"\")\n\n results[dataset] = df\n\n # Calculate results on categorization\n # ---\n\n logger.info(\"\\nCalculating categorization benchmarks\")\n\n for dataset in categorization_datasets:\n\n fetch_function_name = \"fetch_\" + dataset\n\n module = importlib.import_module(\"web.datasets.categorization\")\n\n data = getattr(module, fetch_function_name)()\n\n result = evaluate_categorization(w, data.X, data.y)\n\n result['dataset'] = dataset\n\n msg = \"\\nResults for {}\\n---\\n{}\".format(dataset, result)\n\n logger.info(msg)\n\n results[dataset] = result\n\n # Construct pandas table\n # ---\n\n dfs = None\n\n for dataset, df in results.items():\n\n # print(dataset)\n # print(\"---\")\n # df.reset_index(inplace=True)\n # print(df)\n # print(df.shape)\n\n if dfs is None:\n\n dfs = df\n\n else:\n\n # non-concatenation axis is not aligned\n # (i.e., columns are not aligned and have to be)\n # ---\n\n # sort = True -> sort the columns\n # sort = False -> do not sort the columns\n # ---\n\n # dfs = pd.concat([dfs, df], axis=0, ignore_index=True, sort=True)\n dfs = pd.concat([dfs, df], axis=0, ignore_index=True, sort=False)\n\n columns = ['dataset', 'task', 'category',\n 'nb_items', 'nb_items_covered', 'nb_missing_words',\n 'performance', 'performance_type',\n 'performance2', 'performance_type2']\n\n dfs = dfs.reindex(columns=columns)\n\n return dfs\n\n\ndef evaluate_on_all_fast(w):\n \"\"\"\n Evaluate Embedding w on all fast-running benchmarks\n\n Parameters\n ----------\n w: Embedding or dict\n Embedding to evaluate.\n\n Returns\n -------\n results: pandas.DataFrame\n DataFrame with results, one per column.\n \"\"\"\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n # Calculate results on similarity\n # ---\n\n logger.info(\"Calculating similarity benchmarks\")\n\n similarity_tasks = {\n \"MEN\": fetch_MEN(),\n \"WS353\": fetch_WS353(),\n \"WS353R\": fetch_WS353(which=\"relatedness\"),\n \"WS353S\": fetch_WS353(which=\"similarity\"),\n \"SimLex999\": fetch_SimLex999(),\n \"RW\": fetch_RW(),\n \"RG65\": fetch_RG65(),\n \"MTurk\": fetch_MTurk(),\n \"TR9856\": fetch_TR9856(),\n \"SimVerb3500\": fetch_SimVerb3500(),\n }\n\n similarity_results = {}\n\n for name, data in iteritems(similarity_tasks):\n\n # compute Spearkan correlation\n # ---\n similarity_results[name] = evaluate_similarity(w, data.X, data.y)\n\n logger.info(\"Spearman correlation of scores on {} {}\".format(name, similarity_results[name]))\n\n # Calculate results on analogy\n # ---\n\n logger.info(\"Calculating analogy benchmarks\")\n\n analogy_tasks = {\n \"Google\": fetch_google_analogy(),\n \"MSR\": fetch_msr_analogy()\n }\n\n analogy_results = {}\n\n for name, data in iteritems(analogy_tasks):\n\n analogy_results[name] = evaluate_analogy(w, data.X, data.y)\n\n logger.info(\"Analogy prediction accuracy on {} {}\".format(name, analogy_results[name]))\n\n SemEval = evaluate_on_semeval_2012_2(w)\n\n for k in SemEval:\n\n analogy_results[k] = SemEval[k]\n\n logger.info(\"Analogy prediction accuracy on {} {}\".format(\"SemEval2012\", analogy_results[\"SemEval2012_2\"]))\n\n # Calculate results on categorization\n\n logger.info(\"Calculating categorization benchmarks\")\n\n categorization_tasks = {\n \"AP\": fetch_AP(),\n \"BLESS\": fetch_BLESS(),\n \"Battig\": fetch_battig(),\n \"ESSLLI_2c\": fetch_ESSLLI_2c(),\n \"ESSLLI_2b\": fetch_ESSLLI_2b(),\n \"ESSLLI_1a\": fetch_ESSLLI_1a()\n }\n\n categorization_results = {}\n\n # Calculate results using helper function\n\n for name, data in iteritems(categorization_tasks):\n\n categorization_results[name] = evaluate_categorization(w, data.X, data.y)\n\n logger.info(\"Cluster purity on {} {}\".format(name, categorization_results[name]))\n\n # Construct pandas table\n\n cat = pd.DataFrame([categorization_results])\n\n analogy = pd.DataFrame([analogy_results])\n\n sim = pd.DataFrame([similarity_results])\n\n results = cat.join(sim).join(analogy)\n\n return results\n\n\ndef evaluate_similarity(w, X, y):\n \"\"\"\n Calculate Spearman correlation\n between cosine similarity of the model\n and human rated similarity of word pairs\n\n Parameters\n ----------\n w : Embedding or dict\n Embedding or dict instance.\n\n X: array, shape: (n_samples, 2)\n Word pairs\n\n y: vector, shape: (n_samples,)\n Human ratings\n\n Returns\n -------\n cor: float\n Spearman correlation\n \"\"\"\n\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n missing_words = 0\n\n words = w.vocabulary.word_id\n\n nb_items_covered = 0\n\n for query in X:\n\n item_fully_covered = True\n\n for query_word in query:\n\n if query_word not in words:\n\n missing_words += 1\n\n item_fully_covered = False\n\n if item_fully_covered:\n\n nb_items_covered += 1\n\n if missing_words > 0:\n\n logger.warning(\"Missing {} words. Will replace them with mean vector\".format(missing_words))\n\n mean_vector = np.mean(w.vectors, axis=0, keepdims=True)\n\n A = np.vstack(w.get(word, mean_vector) for word in X[:, 0])\n\n B = np.vstack(w.get(word, mean_vector) for word in X[:, 1])\n\n scores = np.array([v1.dot(v2.T) / (np.linalg.norm(v1) * np.linalg.norm(v2)) for v1, v2 in zip(A, B)])\n\n correlation = scipy.stats.spearmanr(scores, y).correlation\n\n nb_items = len(y)\n\n data = [pd.Series(correlation, name=\"performance\"),\n pd.Series(nb_items, name=\"nb_items\"),\n pd.Series(nb_items_covered, name=\"nb_items_covered\"),\n pd.Series(missing_words, name=\"nb_missing_words\")]\n\n results = pd.concat(data, axis=1)\n\n results['performance_type'] = 'spearman correlation'\n\n return results\n\n\ndef calculate_purity(y_true, y_pred):\n \"\"\"\n Calculate purity for given true and predicted cluster labels.\n\n Parameters\n ----------\n y_true: array, shape: (n_samples, 1)\n True cluster labels\n\n y_pred: array, shape: (n_samples, 1)\n Cluster assignment.\n\n Returns\n -------\n purity: float\n Calculated purity.\n\n\n See:\n https://stats.stackexchange.com/questions/95731/how-to-calculate-purity\n \"\"\"\n assert len(y_true) == len(y_pred)\n\n nb_items = len(y_true)\n\n nb_clusters = len(set(true))\n\n true_clusters = np.zeros(shape=(nb_clusters, nb_items))\n\n pred_clusters = np.zeros_like(true_clusters)\n\n # convert the clustering labels to binary format\n # ---\n\n for id, cl in enumerate(set(y_true)):\n\n true_clusters[id] = (y_true == cl).astype(\"int\")\n\n for id, cl in enumerate(set(y_pred)):\n\n pred_clusters[id] = (y_pred == cl).astype(\"int\")\n\n M = pred_clusters.dot(true_clusters.T)\n\n purity = 1. / nb_items * np.sum(np.max(M, axis=1))\n\n\n results = {'purity': purity,\n 'nb_items': nb_items}\n\n return results\n\n\ndef evaluate_categorization(w, X, y, method=\"all\", seed=None):\n \"\"\"\n Evaluate embeddings on categorization task.\n\n Parameters\n ----------\n w: Embedding or dict\n Embedding to test.\n\n X: vector, shape: (n_samples, )\n Vector of words.\n\n y: vector, shape: (n_samples, )\n Vector of cluster assignments.\n\n method: string, default: \"all\"\n What method to use. Possible values are \"agglomerative\", \"kmeans\", \"all.\n If \"agglomerative\" is passed, method will fit AgglomerativeClustering\n (with very crude hyperparameter tuning to avoid overfitting).\n If \"kmeans\" is passed, method will fit KMeans.\n In both cases number of clusters is preset to the correct value.\n\n seed: int, default: None\n Seed passed to KMeans.\n\n Returns\n -------\n purity: float\n Purity of the best obtained clustering.\n\n Notes\n -----\n KMedoids method was excluded as empirically didn't improve over KMeans (for categorization\n tasks available in the package).\n \"\"\"\n\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n assert method in [\"all\", \"kmeans\", \"agglomerative\"], \"Uncrecognized method\"\n\n '''\n NaN happens when there are only 0s,\n which might happen for very rare words or\n very insufficient word vocabulary\n\n In order to prevent problems from happening\n further in the calculation, we could use nanmean()\n instead of mean()\n '''\n\n mean_vector = np.mean(w.vectors, axis=0, keepdims=True)\n # mean_vector = np.nanmean(w.vectors, axis=0, keepdims=True)\n\n words = np.vstack(w.get(word, mean_vector) for word in X.flatten())\n\n missing_words = sum([1 for word in X.flatten() if word not in w])\n # print(\"missing words:\", missing_words)\n\n ids = np.random.RandomState(seed).choice(range(len(X)), len(X), replace=False)\n\n # Evaluate clustering on several hyperparameters of AgglomerativeClustering and\n # KMeans\n best_purity = 0\n\n if method == \"all\" or method == \"agglomerative\":\n\n results = calculate_purity(y[ids], AgglomerativeClustering(n_clusters=len(set(y)),\n affinity=\"euclidean\",\n linkage=\"ward\").fit_predict(words[ids]))\n\n best_purity = results['purity']\n\n logger.debug(\"Purity={:.3f} using affinity={} linkage={}\".format(best_purity, 'euclidean', 'ward'))\n\n for affinity in [\"cosine\", \"euclidean\"]:\n\n for linkage in [\"average\", \"complete\"]:\n\n results = calculate_purity(y[ids], AgglomerativeClustering(n_clusters=len(set(y)),\n affinity=affinity,\n linkage=linkage).fit_predict(words[ids]))\n purity = results['purity']\n\n logger.debug(\"Purity={:.3f} using affinity={} linkage={}\".format(purity, affinity, linkage))\n\n best_purity = max(best_purity, purity)\n\n if method == \"all\" or method == \"kmeans\":\n\n results = calculate_purity(y[ids], KMeans(random_state=seed, n_init=10, n_clusters=len(set(y))).\n fit_predict(words[ids]))\n\n purity = results['purity']\n\n logger.debug(\"Purity={:.3f} using KMeans\".format(purity))\n\n best_purity = max(purity, best_purity)\n\n nb_items = len(y)\n\n nb_items_covered = nb_items - missing_words\n\n data = [pd.Series(best_purity, name=\"performance\"),\n pd.Series(nb_items, name=\"nb_items\"),\n pd.Series(nb_items_covered, name=\"nb_items_covered\"),\n pd.Series(missing_words, name=\"nb_missing_words\")]\n\n df = pd.concat(data, axis=1)\n\n df['performance_type'] = 'purity'\n\n df['task'] = 'categorization'\n\n return df\n\n\ndef evaluate_analogy(w, X, y, method=\"add\", k=None, category=None, batch_size=100):\n \"\"\"\n Simple method to score embedding using SimpleAnalogySolver\n\n used with MSR and GOOGLE datasets\n\n Other datasets use other evaluation methods\n\n Parameters\n ----------\n w : Embedding or dict\n Embedding or dict instance.\n\n method : {\"add\", \"mul\"}\n Method to use when finding analogy answer, see \"Improving Distributional Similarity\n with Lessons Learned from Word Embeddings\"\n\n X : array-like, shape (n_samples, 3)\n Analogy questions.\n\n y : array-like, shape (n_samples, )\n Analogy answers.\n\n k : int, default: None\n If not None will select k top most frequent words from embedding\n\n batch_size : int, default: 100\n Increase to increase memory consumption and decrease running time\n\n category : list, default: None\n Category of each example, if passed function returns accuracy per category\n in addition to the overall performance.\n Analogy datasets have \"category\" field that can be supplied here.\n\n Returns\n -------\n result: dict\n Results, where each key is for given category and special empty key \"\" stores\n summarized accuracy across categories\n \"\"\"\n\n print(\"\\nGoogle or MSR analogy task\\n---\\n\")\n\n print(\"Note: * indicates a missing word (i.e., a word not in the semantic space)\")\n\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n assert category is None or len(category) == y.shape[0], \"Passed incorrect category list\"\n\n solver = SimpleAnalogySolver(w=w, method=method, batch_size=batch_size, k=k)\n\n predictions = solver.predict(X)\n\n y_pred = predictions['predictions']\n\n nic = predictions['nb_items_covered']\n\n nmw = predictions['nb_missing_words']\n\n # informative message\n # ---\n\n for i, (my_X, my_pred, my_y) in enumerate(zip(X, y_pred, y)):\n\n msg = \" - Item \" + str(i) + \" : \"\n\n msg += \" - \".join([x if x in w else x + \"*\" for x in my_X])\n\n msg += \" : ... \" + my_pred + \" ?\"\n\n if my_pred == my_y:\n\n msg += \" CORRECT \"\n\n else:\n\n if my_y in w:\n\n my_y_str = my_y\n\n else:\n\n my_y_str = my_y + \"*\"\n\n msg += \" incorrect (Good answer: \" + my_y_str + \")\"\n\n print(msg)\n\n # calculate performance\n # ---\n\n accuracy = OrderedDict({\"all\": np.mean(y_pred == y)})\n\n count = OrderedDict({\"all\": len(y_pred)})\n\n correct = OrderedDict({\"all\": np.sum(y_pred == y)})\n\n nb_items_covered = OrderedDict({\"all\": np.sum(nic)})\n\n nb_missing_words = OrderedDict({\"all\": np.sum(nmw)})\n\n if category is not None:\n\n for cat in set(category):\n\n accuracy[cat] = np.mean(y_pred[category == cat] == y[category == cat])\n\n count[cat] = np.sum(category == cat)\n\n correct[cat] = np.sum(y_pred[category == cat] == y[category == cat])\n\n nb_items_covered[cat] = np.sum(nic[category == cat])\n\n nb_missing_words[cat] = np.sum(nmw[category == cat])\n\n df = pd.concat([pd.Series(accuracy, name=\"performance2\"),\n pd.Series(correct, name=\"performance\"),\n pd.Series(count, name=\"nb_items\"),\n pd.Series(nb_items_covered, name=\"nb_items_covered\"),\n pd.Series(nb_missing_words, name=\"nb_missing_words\"),\n ],\n axis=1)\n\n df['category'] = df.index\n\n df['performance_type'] = 'nb_items_correct'\n df['performance_type2'] = 'accuracy = nb_items_correct / nb_items'\n\n df['task'] = 'analogy'\n\n return df\n\n\ndef evaluate_on_semeval_2012_2(w):\n \"\"\"\n Simple method to score embedding\n\n Note:\n it is NOT using SimpleAnalogySolver\n but another method\n\n Parameters\n ----------\n w : Embedding or dict\n Embedding or dict instance.\n\n Returns\n -------\n result: pandas.DataFrame\n\n Results with spearman correlation\n per broad category\n with special key \"all\" for summary\n\n \"\"\"\n\n print(\"\\nSemEval 2012 analogy task\\n---\\n\")\n\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n data = fetch_semeval_2012_2()\n\n '''\n NaN happens when there are only 0s,\n which might happen for very rare words or\n very insufficient word vocabulary\n\n In order to prevent problems from happening\n further in the calculation, we could use nanmean()\n instead of mean()\n '''\n\n mean_vector = np.mean(w.vectors, axis=0, keepdims=True)\n # mean_vector = np.nanmean(w.vectors, axis=0, keepdims=True)\n\n categories = data.y.keys()\n\n results = defaultdict(list)\n nb_items = defaultdict(list)\n nb_missing_words = defaultdict(list)\n nb_items_covered = defaultdict(list)\n\n j = 0\n\n for c in categories:\n\n c_name = data.categories_names[c].split(\"_\")[0]\n\n j += 1\n\n print(\"\\n{}) Category {} : {}\\n---\".format(j, c, c_name))\n\n prototypes = data.X_prot[c]\n\n nmw = 0\n\n for words in prototypes:\n\n for word in words:\n\n if word not in w:\n\n nmw += 1\n\n if nmw > 0:\n\n nic = 0\n\n else:\n\n nic = 1\n\n nb_missing_words[c_name].append(nmw)\n nb_items[c_name].append(1)\n nb_items_covered[c_name].append(nic)\n\n # Get mean of left and right vector\n # ---\n\n prot_left = np.mean(np.vstack(w.get(word, mean_vector) for word in prototypes[:, 0]), axis=0)\n\n prot_right = np.mean(np.vstack(w.get(word, mean_vector) for word in prototypes[:, 1]), axis=0)\n\n questions = data.X[c]\n\n question_left = np.vstack(w.get(word, mean_vector) for word in questions[:, 0])\n\n question_right = np.vstack(w.get(word, mean_vector) for word in questions[:, 1])\n\n scores = np.dot(prot_left - prot_right, (question_left - question_right).T)\n\n # print(scores)\n # print(data.y[c])\n\n try:\n\n cor = scipy.stats.spearmanr(scores, data.y[c]).correlation\n\n except Exception as e:\n\n print(\"\\n\\n\\nERROR\")\n print()\n print(e)\n print()\n print(\"\\n\\nCategory:\\n\", c)\n print(\"\\n\\nscores:\\n\", scores)\n print(\"\\n\\nprot_left:\\n\", prot_left)\n print(\"\\n\\nprot_right:\\n\", prot_right)\n print(\"\\n\\nquestion_left:\\n\", question_left)\n print(\"\\n\\nquestion_right:\\n\", question_right)\n\n print(\"\\n\\ndata.y[c]:\\n\", data.y[c])\n\n print(\"\\n\\nMean vector:\\n\", mean_vector)\n print()\n\n cor = np.nan\n\n results[c_name].append(0 if np.isnan(cor) else cor)\n\n for i, (pl, pr) in enumerate(zip(prototypes[:, 0], prototypes[:, 1])):\n\n if pl not in w:\n pl = pl + \"*\"\n\n if pr not in w:\n pr = pr + \"*\"\n\n print(\"- Prototypes {} : ('{}' - '{}') \".format(i, pl, pr))\n\n for i, (ql, qr, s, d) in enumerate(zip(questions[:, 0], questions[:, 1], scores, data.y[c])):\n\n if ql not in w:\n ql = ql + \"*\"\n\n if qr not in w:\n qr = qr + \"*\"\n\n print(\"- Item {0} : ('{1}' - '{2}') = {3:.2f} --- {4}\".format(i, ql, qr, s, d))\n\n if cor is not np.nan:\n\n cor = round(cor, 3)\n\n print(\"---\\nSpearman correlation = \", cor)\n\n final_results = OrderedDict()\n final_nb_items = OrderedDict()\n final_nb_missing_words = OrderedDict()\n final_nb_items_covered = OrderedDict()\n\n # average correlation\n # ---\n final_results['all'] = sum(sum(v) for v in results.values()) / len(categories)\n\n final_nb_items['all'] = sum(sum(v) for v in nb_items.values())\n final_nb_missing_words['all'] = sum(sum(v) for v in nb_missing_words.values())\n final_nb_items_covered['all'] = sum(sum(v) for v in nb_items_covered.values())\n\n for k in results:\n\n # average correlation\n # ---\n final_results[k] = sum(results[k]) / len(results[k])\n\n final_nb_items[k] = sum(nb_items[k])\n final_nb_missing_words[k] = sum(nb_missing_words[k])\n final_nb_items_covered[k] = sum(nb_items_covered[k])\n\n df = pd.concat([pd.Series(final_results, name=\"performance\"),\n pd.Series(final_nb_items, name=\"nb_items\"),\n pd.Series(final_nb_items_covered, name=\"nb_items_covered\"),\n pd.Series(final_nb_missing_words, name=\"nb_missing_words\"),\n ],\n axis=1)\n\n # series = pd.Series(final_results)\n\n # df = series.to_frame(name='performance')\n\n df['category'] = df.index\n\n df['performance_type'] = 'average_correlation'\n\n df['dataset'] = 'SemEval'\n\n df['task'] = 'analogy'\n\n return df\n\n\ndef evaluate_on_WordRep(w, max_pairs=None, solver_kwargs={}):\n \"\"\"\n Evaluate on WordRep dataset\n\n Parameters\n ----------\n w : Embedding or dict\n Embedding or dict instance.\n\n max_pairs: int, default: None\n Each category will be constrained to maximum of max_pairs pairs\n (which results in max_pair * (max_pairs - 1) examples)\n\n solver_kwargs: dict, default: {}\n Arguments passed to SimpleAnalogySolver. It is suggested to limit number of words\n in the dictionary.\n\n References\n ----------\n Bin Gao, Jiang Bian, Tie-Yan Liu (2015)\n \"WordRep: A Benchmark for Research on Learning Word Representations\"\n \"\"\"\n\n print(\"\\nEvaluation of WordRep analogy with max_pairs = \", max_pairs, \"\\n---\")\n\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n data = fetch_wordrep()\n\n categories = set(data.category)\n\n accuracy = {}\n\n correct = {}\n\n count = {}\n\n missing = {}\n\n items_covered = {}\n\n for category in categories:\n\n print(\"Category : \", category, \"\\n---\")\n\n X_cat = data.X[data.category == category]\n\n if max_pairs:\n\n # further limit the number of pairs to consider\n # ---\n X_cat = X_cat[0:max_pairs]\n\n nb_pairs = X_cat.shape[0]\n\n nb_questions = nb_pairs * (nb_pairs - 1)\n\n logger.info(\"Processing {} with {} pairs, {} questions\".\n format(category, nb_pairs, nb_questions))\n\n # For each category construct question-answer pairs\n # ---\n\n X = np.zeros(shape=(nb_questions, 3), dtype=\"object\")\n\n y = np.zeros(shape=(nb_questions,), dtype=\"object\")\n\n id = 0\n\n # to find all permutations\n # iterate through the Cartesian product\n # ---\n\n for left, right in product(X_cat, X_cat):\n\n if not np.array_equal(left, right):\n\n # we exclude the cases when left = right\n # ---\n\n X[id, 0:2] = left\n\n X[id, 2] = right[0]\n\n y[id] = right[1]\n\n id += 1\n\n # Run solver\n # ---\n\n solver = SimpleAnalogySolver(w=w, **solver_kwargs)\n\n results = solver.predict(X)\n\n y_pred = results['predictions']\n\n nb_correct = float(np.sum(y_pred == y))\n\n for i, (x1,x2,x3,y_obs,y_true) in enumerate(zip(X[:,0],X[:,1],X[:,2], y_pred, y)):\n\n if x1 not in w:\n x1 = x1 + \"*\"\n\n if x2 not in w:\n x2 = x2 + \"*\"\n\n if x3 not in w:\n x3 = x3 + \"*\"\n\n if y_true not in w:\n y_true = y_true + \"*\"\n\n if y_obs == y_true:\n\n cor_msg = \"OK\"\n\n else:\n\n cor_msg = \"---> wrong ! Correct : \" + y_true\n\n print(\"- Item {} : {} - {}, {} - ... {} ? {}\".format(i, x1,x2,x3,y_obs,cor_msg))\n\n correct[category] = nb_correct\n\n count[category] = nb_questions\n\n accuracy[category] = nb_correct / nb_questions\n\n missing[category] = sum(results['nb_missing_words'])\n\n items_covered[category] = sum(results['nb_items_covered'])\n\n # Add summary results\n # ---\n\n for summary in ('all', 'wikipedia', 'wordnet'):\n\n missing[summary] = 0\n correct[summary] = 0\n count[summary] = 0\n items_covered[summary] = 0\n\n for c in categories:\n\n missing['all'] += missing[c]\n items_covered['all'] += items_covered[c]\n correct['all'] += correct[c]\n count['all'] += count[c]\n\n if c in data.wikipedia_categories:\n\n missing['wikipedia'] += missing[c]\n items_covered['wikipedia'] += items_covered[c]\n correct['wikipedia'] += correct[c]\n count['wikipedia'] += count[c]\n\n if c in data.wordnet_categories:\n\n missing['wordnet'] += missing[c]\n items_covered['wordnet'] += items_covered[c]\n correct['wordnet'] += correct[c]\n count['wordnet'] += count[c]\n\n accuracy['all'] = correct['all'] / count['all']\n accuracy['wikipedia'] = correct['wikipedia'] / count['wikipedia']\n accuracy['wordnet'] = correct['wordnet'] / count['wordnet']\n\n data = [pd.Series(accuracy, name=\"performance2\"),\n pd.Series(correct, name=\"performance\"),\n pd.Series(count, name=\"nb_items\"),\n pd.Series(items_covered, name=\"nb_items_covered\"),\n pd.Series(missing, name=\"nb_missing_words\")]\n\n df = pd.concat(data, axis=1)\n\n df['performance_type'] = 'nb_items_correct'\n df['performance_type2'] = 'accuracy = nb_items_correct / nb_items'\n\n df['category'] = df.index\n\n df['dataset'] = 'WordRep'\n\n df['task'] = 'analogy'\n\n return df\n\n\ndef evaluate_on_BATS(w, solver_kwargs={}):\n \"\"\"\n Evaluate on the BATS dataset\n\n Parameters\n ----------\n w : Embedding or dict\n Embedding or dict instance.\n\n solver_kwargs: dict, default: {}\n Arguments passed to SimpleAnalogySolver.\n Note: It is suggested to limit number of words in the dictionary.\n\n References\n ----------\n Gladkova, A., Drozd, A., & Matsuoka, S. (2016). Analogy-Based Detection of Morphological and Semantic Relations with Word Embeddings: What Works and What Doesn’t. In Proceedings of the NAACL-HLT SRW (pp. 47–54). San Diego, California, June 12-17, 2016: ACL. https://doi.org/10.18653/v1/N16-2002\n\n \"\"\"\n\n print(\"\\nEvaluation of BATS analogy\\n---\")\n\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n data = fetch_BATS()\n\n categories = set(data.category)\n\n # just used two categories --- for testing purposes\n # categories = list(categories)[0:2]\n\n accuracy = {}\n\n correct = {}\n\n items = {}\n\n items_covered = {}\n\n missing = {}\n\n for category in categories:\n\n print(\"\\nCategory : \" + category)\n\n pairs = data.X[data.category == category]\n\n # convert numpy array to list of lists\n # ---\n pairs = pairs.tolist()\n\n # we want to keep only the pairs covered\n # ---\n\n # filter 1\n # ---\n pairs = [(target_word, candidate) for target_word, candidate in pairs if target_word in w]\n\n # filter 2\n # ---\n final_pairs = []\n\n for target_word, candidate in pairs:\n\n found_word = False\n\n if \"/\" not in candidate:\n\n if candidate in w:\n\n found_word = True\n\n else:\n\n words = candidate.split(\"/\")\n\n for word in words:\n\n if word in w:\n\n # keep as a candidate the first word\n # found in the vocabulary\n # ---\n\n found_word = True\n\n candidate = word\n\n break\n\n if found_word:\n\n word_pair = (target_word, candidate)\n\n final_pairs.append(word_pair)\n\n nb_pairs = len(final_pairs)\n\n if nb_pairs == 0:\n\n continue\n\n nb_questions = nb_pairs * (nb_pairs - 1)\n\n logger.info(\"Processing {} with {} pairs, {} questions\".\n format(category, nb_pairs, nb_questions))\n\n # For each category construct question-answer pairs\n # ---\n\n X = np.zeros(shape=(nb_questions, 3), dtype=\"object\")\n\n y = np.zeros(shape=(nb_questions,), dtype=\"object\")\n\n id = 0\n\n # to find all permutations\n # iterate through the Cartesian product\n # ---\n\n for left, right in product(final_pairs, final_pairs):\n\n if not np.array_equal(left, right):\n\n # we exclude the cases when left = right\n # ---\n\n X[id, 0:2] = left\n\n X[id, 2] = right[0]\n\n y[id] = right[1]\n\n id += 1\n\n # Run solver\n # ---\n\n solver = SimpleAnalogySolver(w=w, **solver_kwargs)\n\n results = solver.predict(X)\n\n\n for i, (x1,x2,x3,y_obs,y_true) in enumerate(zip(X[:,0],X[:,1],X[:,2], results['predictions'], y)):\n\n if x1 not in w:\n x1 = x1 + \"*\"\n\n if x2 not in w:\n x2 = x2 + \"*\"\n\n if x3 not in w:\n x3 = x3 + \"*\"\n\n if y_true not in w:\n y_true = y_true + \"*\"\n\n if y_obs == y_true:\n\n cor_msg = \"OK\"\n\n else:\n\n cor_msg = \"---> wrong ! Correct : \" + y_true\n\n print(\"- Item {} : {} - {}, {} - ... {} ? {}\".format(i, x1,x2,x3,y_obs,cor_msg))\n\n\n nb_correct = float(np.sum(results['predictions'] == y))\n\n correct[category] = nb_correct\n\n items[category] = 2450\n\n items_covered[category] = nb_questions\n\n # missing[category] = np.NaN\n\n missing[category] = np.sum(results['nb_missing_words'])\n\n accuracy[category] = nb_correct / nb_questions\n\n # Add summary results\n # ---\n\n correct['all'] = sum(v for v in correct.values())\n items['all'] = sum(v for v in items.values())\n items_covered['all'] = sum(v for v in items_covered.values())\n missing['all'] = sum(v for v in missing.values())\n accuracy['all'] = correct['all'] / items_covered['all']\n\n data = [pd.Series(accuracy, name=\"performance2\"),\n pd.Series(correct, name=\"performance\"),\n pd.Series(missing, name=\"nb_missing_words\"),\n pd.Series(items, name=\"nb_items\"),\n pd.Series(items_covered, name=\"nb_items_covered\")]\n\n df = pd.concat(data, axis=1)\n\n df['category'] = df.index\n\n df['dataset'] = 'BATS'\n\n df['task'] = 'analogy'\n\n df['performance_type'] = 'nb_items_correct'\n df['performance_type2'] = 'accuracy = nb_items_correct / nb_items'\n\n return df\n\n\ndef cosine_similarity_dense(vector1, vector2):\n '''\n\n Takes a input dense vectors.\n\n Returns the angular difference between two vectors.\n\n It is calculated as the ratio of\n the dot product and\n the product of the magnitudes (norms) of the vectors.\n\n cos = v1 - v2 / |v1| * |v2|\n\n Note:\n\n it is useless to divide by the product of the norms if\n the vectors are already normalized.\n\n In the case where vectors are already normalized, the dot product suffices.\n '''\n\n dot_product = np.dot(vector1, vector2)\n\n vector1_mag = np.linalg.norm(vector1)\n\n vector2_mag = np.linalg.norm(vector2)\n\n result = dot_product / (vector1_mag * vector2_mag)\n\n return result\n\n\ndef answer_SAT_analogy_question(question, answers, w, solver):\n '''\n\n '''\n\n nb_rows = len(answers)\n\n # init\n # ---\n X = np.zeros(shape=(nb_rows, 3), dtype=\"object\")\n\n y = np.zeros(shape=(nb_rows,), dtype=\"object\")\n\n # for i in range(nb_rows):\n\n # print(\"triple\", i + 1, \":\", X[i, ], \"candidate:\", y[i])\n\n # filling\n # ---\n\n for i, answer in enumerate(answers):\n\n X[i, 0] = question[0]\n X[i, 1] = question[1]\n X[i, 2] = answer[0]\n y[i] = answer[1]\n\n # for i in range(nb_rows):\n\n # print(\"- triple\", i + 1, \":\", X[i, ], \"candidate:\", y[i])\n\n # prediction through the analogy solver\n # ---\n\n results = solver.predict(X)\n\n y_pred = results['predictions']\n missing_words = sum(results['nb_missing_words'])\n items_covered = sum(results['nb_items_covered'])\n\n selected_answer = None\n selected_cosine = None\n\n for i in range(nb_rows):\n\n # prediction\n # ---\n\n predicted_word = y_pred[i]\n\n predicted_vector = w[predicted_word]\n\n # candidate\n # ---\n\n candidate_word = y[i]\n\n cosine = None\n\n selected = \"\"\n\n if candidate_word in w:\n\n candidate_vector = w[candidate_word]\n\n cosine = cosine_similarity_dense(predicted_vector, candidate_vector)\n\n if selected_answer is None or cosine >= selected_cosine:\n\n selected_answer = i\n selected_cosine = cosine\n\n selected = \"SELECTED\"\n\n else:\n\n # print(\"The candidate word is not in the vocabulary. This item is ignored.\")\n\n candidate_word += \"*\"\n\n\n myX = [x if x in w else x+\"*\" for x in X[i, ]]\n\n myX = myX[0] + \" - \" + myX[1] + \", \" + myX[2]\n\n if cosine is not None:\n\n cosine = round(cosine, 3)\n\n else:\n\n cosine = \"?\"\n\n print(\"- triple\", i + 1, \":\", myX, \" ... \", candidate_word, \"? ---> prediction:\", predicted_word, \", cosine:\", cosine, selected)\n\n # i = selected_answer\n\n # print(\"\\nSelected answer: triple\", i + 1, \":\", X[i, ], \", candidate:\", y[i])\n\n if selected_answer == 0:\n\n print(\" OK \")\n\n else:\n\n print(\" --- incorrect --- \")\n\n results = {'selected_answer': selected_answer,\n 'nb_missing_words': missing_words,\n 'nb_items_covered': items_covered}\n\n return results\n\n\ndef evaluate_on_SAT(w, solver_kwargs={}):\n \"\"\"\n Evaluate on the SAT analogy dataset\n\n Parameters\n ----------\n w : Embedding or dict\n Embedding or dict instance.\n\n solver_kwargs: dict, default: {}\n Arguments passed to SimpleAnalogySolver. It is suggested to limit number of words\n in the dictionary.\n\n References\n ----------\n Turney, P. D., Littman, M. L., Bigham, J., & Shnayder, V. (2003). Combining independent modules to solve multiple-choice synonym and analogy problems. In Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP-03).\n\n \"\"\"\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n solver = SimpleAnalogySolver(w=w, **solver_kwargs)\n # solver = SimpleAnalogySolver(w=w)\n\n data = fetch_SAT()\n\n nb_items = len(data.X)\n\n nb_items_correct = 0\n\n missing_words = 0\n\n items_covered = 0\n\n for i in range(nb_items):\n\n print(\"\\nSAT analogy question {}\\n---\".format(i))\n\n question = data.X[i].split(\"_\")\n\n answers = data.y[i, :]\n\n '''\n We split while testing for string to avoid attempting to split\n nan values, which occurs when there are 4 alternatives instead of 5.\n '''\n\n answers = [answer.split(\"_\") for answer in answers\n if isinstance(answer, six.string_types)]\n\n # print(question, answers)\n\n response = answer_SAT_analogy_question(question, answers, w, solver)\n\n nmw = response['nb_missing_words']\n\n missing_words += nmw\n\n if nmw == 0:\n\n items_covered += 1\n\n i = response['selected_answer']\n\n # print(i)\n\n if i == 0:\n # this is the good answer\n nb_items_correct += 1\n # print(\"\\n*** Yes! ***\")\n\n # print(\"\\nNumber of items:\", nb_items, \"Number of correct answers:\", nb_items_correct)\n\n accuracy = nb_items_correct / nb_items\n\n data = [pd.Series(accuracy, name=\"performance2\"),\n pd.Series(nb_items_correct, name=\"performance\"),\n pd.Series(nb_items, name=\"nb_items\"),\n pd.Series(items_covered, name=\"nb_items_covered\"),\n pd.Series(missing_words, name=\"nb_missing_words\")]\n\n df = pd.concat(data, axis=1)\n\n df['dataset'] = 'SAT'\n\n df['task'] = 'analogy'\n\n df['performance_type'] = 'nb_items_correct'\n df['performance_type2'] = 'accuracy = nb_items_correct / nb_items'\n\n return df\n\n\ndef answer_synonymy_question(question, answers, w):\n '''\n\n '''\n\n print(\"\\nWhat is the synonym of: \", question, \" ?\\n---\\n\")\n\n if question in w:\n\n question_vector = w[question]\n\n else:\n '''\n If we do not have a vector for the question,\n we cannot answer it.\n We do not try responding at random.\n The selected answer is therefore 'None'.\n '''\n\n response = {'selected_answer': None, 'selected_cosine': None}\n\n print(\"We don't know, because it is NOT in the word embedding.\", \"\\n\")\n\n return response\n\n selected_answer = None\n\n selected_cosine = None\n\n nb_answers = len(answers)\n\n # choose the answer which has the highest cosine\n # ---\n\n for i in range(nb_answers):\n\n answer = answers[i]\n\n msg = \"- answer \" + str(i) + \": \" + answer\n\n if answer in w:\n\n answer_vector = w[answer]\n\n cosine = cosine_similarity_dense(question_vector, answer_vector)\n\n msg += \" (cosine : \" + str(round(cosine, 3)) + \") \"\n\n if selected_answer is None or cosine >= selected_cosine:\n\n '''\n We keep the first answer found\n or the one that has the highest cosine.\n '''\n\n selected_answer = i\n\n selected_cosine = cosine\n\n msg += \" SELECTED \"\n\n else:\n\n msg += \" NOT in the word embedding! \"\n\n print(msg)\n\n response = {'selected_answer': selected_answer, 'selected_cosine': selected_cosine}\n\n return response\n\n\ndef evaluate_synonymy(w, dataset_name):\n '''\n\n Evaluate the words embedding on a synonymy dataset\n\n '''\n\n if isinstance(w, dict):\n\n w = Embedding.from_dict(w)\n\n # set the fetch function name\n # ---\n fetch_function_name = \"fetch_\" + dataset_name\n\n # retrieve the dataset\n # ---\n data = getattr(web.datasets.synonymy, fetch_function_name)()\n\n # the question\n # ---\n X = data.X\n\n # the answers\n # ---\n y = data.y\n\n nb_items = data.X.shape[0]\n\n # TEMP ---\n nb_questions = len(X)\n nb_answers = data.y.shape[1]\n print(nb_items, \"items, i.e., \", nb_questions, \"questions:\", X, \"\\n\")\n print(nb_answers, \"answers per items:\", y, \"\\n\")\n # --- TEMP\n\n nb_items_correct = 0\n\n nb_items_covered = 0\n\n nb_missing_words = 0\n\n for i in range(nb_items):\n\n question = X[i]\n\n answers = y[i]\n\n # print(question, answers)\n\n response = answer_synonymy_question(question, answers, w)\n\n selected_answer = response['selected_answer']\n selected_cosine = response['selected_cosine']\n\n # print(i)\n\n if selected_answer is not None:\n\n nb_items_covered += 1\n\n if selected_answer == 0:\n\n # this is the good answer\n # ---\n nb_items_correct += 1\n\n print(\"\\nGOOD answer.\\n\")\n\n else:\n\n print(\"\\nIncorrect answer!\\n\")\n\n else:\n\n # the word is not in the vocabulary\n # ---\n\n nb_missing_words += 1\n\n accuracy = nb_items_correct / nb_items_covered\n\n print(\"\\nPerformance to the synonym task\\n---\\n\")\n print(\"Number of good answers = \", str(nb_items_correct), \"\\n\")\n print(\"Number of items = \", str(nb_items), \"\\n\")\n print(\"Number of items covered = \", str(nb_items_covered), \"\\n\")\n print(\"Accuracy = nb items correct / nb items covered = \", str(accuracy), \"\\n\")\n\n data = [pd.Series(accuracy, name=\"performance2\"),\n pd.Series(nb_items_correct, name=\"performance\"),\n pd.Series(nb_items, name=\"nb_items\"),\n pd.Series(nb_items_covered, name=\"nb_items_covered\"),\n pd.Series(nb_missing_words, name=\"nb_missing_words\")]\n\n df = pd.concat(data, axis=1)\n\n df['performance_type'] = 'nb_items_correct'\n df['performance_type2'] = 'accuracy = nb_items_correct / nb_items_covered'\n\n return df\n\n\ndef test_toy_synonymy():\n '''\n\n '''\n print(\"\\n\\nTest toy words embeddings on synonymy\")\n print(\"---\")\n\n # logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%I:%M:%S')\n\n logger = logging.getLogger(__name__)\n\n w = load_toy_embedding()\n\n print(w)\n\n results = evaluate_synonymy(w, \"ESL\")\n\n output_path = os.path.expanduser(\"~/Downloads/results.csv\")\n results.to_csv(output_path)\n\n print(results)\n\n print(\"---THE END---\")\n\n\ndef test_toy_all():\n '''\n\n '''\n print(\"\\n\\nTest toy words embeddings on all datasets\")\n print(\"---\")\n\n # logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%I:%M:%S')\n\n logger = logging.getLogger(__name__)\n\n w = load_toy_embedding()\n\n print(w)\n\n results = evaluate_on_all_datasets(w, wordrep_max_pairs=50)\n\n output_path = os.path.expanduser(\"~/Downloads/results.csv\")\n results.to_csv(output_path)\n\n print(results)\n\n print(\"---THE END---\")\n\n\ndef test_ri_all():\n '''\n\n\n\n '''\n\n print(\"\\n\\nTests RI words embedding on all datasets\")\n print(\"---\")\n\n logger = logging.getLogger(__name__)\n\n input_file = '/media/patrick/my_data/DSM/07_models/RI/2_300_window/text-1.distvecs.decoded'\n\n w = load_embedding(fname=input_file,\n format=\"word2vec\",\n normalize=False,\n # normalize=True\n lower=False,\n clean_words=False)\n\n print(w)\n\n results = evaluate_on_all_datasets(w, wordrep_max_pairs=50)\n\n output_path = os.path.expanduser(\"~/Downloads/results2.csv\")\n results.to_csv(output_path)\n\n print(results)\n\n print(\"---THE END---\")\n\n\nif __name__ == \"__main__\":\n\n test_toy_synonymy()\n\n # test_ri_all()\n\n # test_toy_all()\n","sub_path":"web/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":46085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"638171311","text":"import configparser\r\nimport datetime\r\n\r\n\r\n# ===========Function: Write to log file==============\r\n# Arguments: [1]\r\n# Arguments: [Log Content]\r\ndef write_log(str1):\r\n config = configparser.ConfigParser()\r\n config.sections()\r\n config.read(\"config.ini\")\r\n config.sections()\r\n\r\n # Log config\r\n is_log = config[\"LOG\"][\"is_log\"]\r\n log_file_path = config[\"LOG\"][\"full_path\"] + str(datetime.date.today()) + \".txt\"\r\n # Open file and write log\r\n if is_log:\r\n log_file = open(log_file_path, \"w+\")\r\n log_file.writelines(str(datetime.datetime.now()) + \": \" + str1)\r\n log_file.close()\r\n# ====================================================\r\n\r\n","sub_path":"log_writer.py","file_name":"log_writer.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"139224644","text":"#!/usr/bin/python\n\nimport rospy\nfrom rospydemo.msg import *\n\nrospy.init_node('talker',anonymous=0)\npub=rospy.Publisher('chatter',Mes,queue_size=10)\nrate=rospy.Rate(1)\nwhile not rospy.is_shutdown():\n pub.publish('i am talker')\n rate.sleep()\n\n\n","sub_path":"src/talker.py","file_name":"talker.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"418496015","text":"from plyfile import PlyData\nimport os\nimport sys\n\nreconstruction_dir = sys.argv[1]\nreadpath = os.path.join(reconstruction_dir, \"PMVS\", \"models\", \"option-0000.ply\")\nfname = os.path.basename(os.path.dirname(os.path.dirname(reconstruction_dir)))\nwritepath = os.path.join(reconstruction_dir, \"PMVS\", \"models\", fname + \".ply\")\nplydata = PlyData.read(str(readpath))\nplydata = PlyData([plydata['vertex']], text=False, byte_order='<')\nplydata.write(str(writepath))","sub_path":"scripts/ply2bin.py","file_name":"ply2bin.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"35415311","text":"# -*- coding: utf-8 -*-\n\"\"\"\n8ball plugin\n\"\"\"\nimport re\nimport random\nfrom time import time\nfrom pydle.plugins import Plugin\n\np = Plugin(__name__)\n\noutcomes = [\n # positive\n \"✔️ It is certain\",\n \"🥂 It is decidedly so\",\n \"💯Without a doubt\",\n \"🔥Yes definitely\",\n \"✌️ You may rely on it\",\n \"🔮As I see it, yes\",\n \"↗️ Most likely\",\n \"🉐Outlook good\",\n \"👌Yes\",\n \"🎉Signs point to yes\",\n # neutral\n \"😵 Reply hazy try again\",\n \"🤷 Ask again later\",\n \"🙄 Better not tell you now\",\n \"😞 Cannot predict now\",\n \"🤔 Concentrate and ask again\",\n # negative\n \"😞 Don't count on it\",\n \"⛔ My reply is no\",\n \"🎱 My sources say no\",\n \"❌ Outlook not so good\",\n \"👎 Very doubtful\",\n]\n\n\n@p.cmd('8ball')\ndef make_prediction(event):\n \"\"\"Your classic 8ball\"\"\"\n # seed random generator\n question = re.sub(r'[^0-9a-z ]', '', event.arg.lower()).strip()\n\n if question:\n # ensure same answer for same question, 1hr interval\n s1 = sum(map(ord, question)) % 3600\n s2 = divmod(int(time()), 3600)[0] * 3600\n random.seed(s1+s2)\n\n prediction = random.choice(outcomes)\n\n if question:\n random.seed() # reseed PRGN\n\n event.reply_to_user(prediction)\n","sub_path":"pydle-8ball.py","file_name":"pydle-8ball.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"173703793","text":"import cv2\nimport sys\n\ndef mouse_event(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONUP:\n cv2.circle(img, (x, y), 50, (0,0,255))\n print(\"x=\"+str(x)+\",y=\"+str(y))\n\nimg = cv2.imread(\"piece_3.jpg\")\nheight = img.shape[0]\nwidth = img.shape[1]\nprint(\"height=\"+str(height)+\",width=\"+str(width))\nimg = cv2.resize(img, (int(width/2),int(height/2)))\ncv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\ncv2.setMouseCallback(\"img\", mouse_event)\ncv2.imshow(\"img\", img)\ninput1 = sys.stdin.readline()\nprint(\"input1=\"+input1)\nwhile(True):\n cv2.imshow(\"img\", img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\ncv2.destroyAllWindows()\n","sub_path":"gui1.py","file_name":"gui1.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"634944831","text":"import os\nimport sys\nimport threading\nimport time\nimport requests\nimport xlwt\nimport configparser\nfrom terminaltables import SingleTable\nfrom bs4 import BeautifulSoup\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\nwebsites = [] # Parsable websites.\nproducts = []\n\n# Class declaration.\nclass website:\n def __init__(self, name, initials):\n self.name = name\n self.initials = initials\n self.products = []\n\n def add_product(self, product):\n self.products.append(product)\n\nclass product:\n def __init__(self, name, price):\n self.name = name\n self.price = price\n\ncls = lambda: os.system('cls') # Clears screen.\n\n# Displays the current status.\ndef print_status():\n loading_symbols = [\" --\", \" \\\\\", \" |\", \" /\", \" --\"]\n t = threading.currentThread()\n\n while getattr(t, \"do_run\", True):\n for i in range(0, 5):\n cls()\n print(\"\\n \" + message + loading_symbols[i])\n time.sleep(0.1)\n cls()\n\nt = threading.Thread(target=print_status)\nt.start()\n\n# Creates website objects and fills array.\nfile = open(\"source/websites.txt\", \"r\")\nlines = file.read().split(\"\\n\")\nfile.close()\n\nfor i, token in enumerate(lines):\n if token == \"\":\n continue\n name = token[:token.find(\";\")]\n initials = token[token.find(\";\") + 1:]\n websites.append(website(name, initials))\n\ndef website_exists(website_name):\n for i, token in enumerate(websites):\n if website_name in token.name:\n return i\n\ndef extract_price_from_product(url, website_name):\n data = requests.get(url).text\n soup = BeautifulSoup(data, \"html.parser\")\n\n price = \"\"\n\n try:\n if website_name == \"bike-discount\" or website_name == \"sportsonline\":\n price = soup.find(\"span\", {\"class\": \"price\"}).text\n\n elif website_name == \"fitnessdigital\":\n price = soup.find(\"strong\", {\"class\": \"price center-block\"}).text\n\n elif website_name == \"deporvillage\":\n price = soup.find(\"span\", {\"class\": \"price\"}).text\n\n elif website_name == \"runnerinn\":\n price = soup.find(\"p\", {\"class\": \"valPrice\"}).text\n\n elif website_name == \"bike24\":\n price = soup.find(\"span\", {\"class\": \"text-value js-price-value\"}).text\n\n elif website_name == \"decathlon\":\n price = soup.find(\"span\", {\"id\": \"real_price_value\"}).text\n\n elif website_name == \"retto\":\n price = soup.find(\"span\", {\"class\": \"priceact\"}).text\n\n except AttributeError:\n return\n\n price = price.replace(\" \", \"\")\n price = price.replace(\" \", \"\")\n price = price.replace(\"€\", \"\")\n price = price.replace(\",\", \".\")\n\n return float(price)\n\n# Analyse the URL file.\nfile = open(\"source/url.txt\", \"r\")\nlines = file.read().split(\"\\n\")\nfile.close()\n\nlink_count = 0\n\ncurrent_product = \"\"\nfor i, token in enumerate(lines):\n\n link_count += 1\n message = \"Parsing URLs (\" + str(link_count) + \"/\" + str(len(lines)) + \")...\"\n\n if \"#\" in token: # Finds products.\n current_product = token[2:]\n products.append(current_product)\n elif token == \"\":\n continue\n else:\n temp = token[token.find(\".\") + 1:]\n temp = temp[:-len(temp) + temp.find(\".\")]\n\n web_index = website_exists(temp)\n\n price = extract_price_from_product(token, temp)\n if price == None:\n continue\n\n prod = product(current_product, price)\n #print(str(prod.name) + \" \" + str(prod.price))\n websites[web_index].add_product(prod)\n #print(len(websites[web_index].products))\n\ndef construct_table():\n table_data = []\n\n # Adds headers (website initials).\n header_row = [\"\"]\n for i, token in enumerate(websites):\n header_row.append(websites[i].initials)\n table_data.append(header_row)\n\n normal_row = []\n for i, name in enumerate(products):\n normal_row.append(name)\n\n for j, prods in enumerate(websites):\n found = False\n\n for k, prod in enumerate(websites[j].products):\n if name == prod.name:\n normal_row.append(str(prod.price))\n found = True\n break\n\n if not found:\n normal_row.append(\"-\")\n\n table_data.append(list(normal_row))\n normal_row.clear()\n\n return table_data\n\n# Creates .xls sheet with parsed information.\ndef construct_sheet(sheet, style):\n\n for i, token in enumerate(websites): # Fills header row.\n sheet.write(2, i+2, websites[i].initials, style)\n\n for i, name in enumerate(products):\n sheet.write(i+3, 1, name, style)\n\n for j, prods in enumerate(websites):\n found = False\n\n for k, prod in enumerate(websites[j].products):\n if name == prod.name:\n sheet.write(i+3, j+2, prod.price)\n found = True\n break\n\n# Avoiding aditional dependencies...\ndef str_2_bool(string):\n if string.upper() == \"TRUE\":\n return True\n else:\n return False\n\nif str_2_bool(config.get(\"output\", \"console_table\")):\n table_data = construct_table()\n table = SingleTable(table_data)\n print(table.table)\n\nif str_2_bool(config.get(\"output\", \"spreadsheet_table\")):\n message = \"Creating output.xls...\"\n book = xlwt.Workbook(encoding=\"utf-8\")\n sheet = book.add_sheet(\"Sheet 1\", cell_overwrite_ok=True)\n h_style = xlwt.easyxf(\"font: bold on\")\n construct_sheet(sheet, h_style)\n book.save(\"output.xls\")\n time.sleep(1)\n\n# Disables the output threading.\nt.do_run = False\nt.join()\n","sub_path":"comparator.py","file_name":"comparator.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"285585037","text":"import logging\nimport logging.handlers\nimport os\n\nlogger = logging.getLogger(\"LanguageClient\")\nlogpath = os.path.join(os.getenv(\"TMP\", \"/tmp\"), \"LanguageClient.log\")\nlogpath_server = os.path.join(os.getenv(\"TMP\", \"/tmp\"), \"LanguageServer.log\")\nfileHandler = logging.handlers.RotatingFileHandler(\n logpath, maxBytes=20 * 1024 * 1024, backupCount=2)\nfileHandler.setFormatter(\n logging.Formatter(\n \"%(asctime)s %(levelname)-7s [%(threadName)-10s] %(message)s\",\n \"%H:%M:%S\"))\nlogger.addHandler(fileHandler)\nlogger.setLevel(logging.WARN)\n\n\ndef setLoggingLevel(level) -> None:\n \"\"\"\n Set logging level.\n \"\"\"\n logger.setLevel({\n \"ERROR\": 40,\n \"WARNING\": 30,\n \"INFO\": 20,\n \"DEBUG\": 10,\n }[level])\n","sub_path":"rplugin/python3/LanguageClient/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"422532184","text":"from .origin_service import OriginService\nfrom .origin_channels import OriginChannels\nfrom .origin_epg import OriginEPG\n\nimport fHDHR.exceptions\n\n\nclass OriginEPG_StandIN():\n def __init__(self):\n pass\n\n def update_epg(self, channels):\n return {}\n\n\nclass OriginChannels_StandIN():\n def __init__(self):\n pass\n\n def get_channels(self):\n return []\n\n def get_channel_stream(self, chandict, allchandict):\n return [{\"number\": chandict[\"number\"], \"stream_url\": None}], False\n\n\nclass OriginServiceWrapper():\n\n def __init__(self, fhdhr):\n self.fhdhr = fhdhr\n\n self.servicename = fhdhr.config.dict[\"main\"][\"servicename\"]\n\n self.setup_success = None\n self.setup()\n\n def setup(self):\n\n try:\n self.origin = OriginService(self.fhdhr)\n self.setup_success = True\n self.fhdhr.logger.info(\"%s Setup Success\" % self.servicename)\n except fHDHR.exceptions.OriginSetupError as e:\n self.fhdhr.logger.error(e)\n self.setup_success = False\n\n if self.setup_success:\n self.channels = OriginChannels(self.fhdhr, self.origin)\n self.epg = OriginEPG(self.fhdhr)\n else:\n self.channels = OriginChannels_StandIN()\n self.epg = OriginEPG_StandIN()\n\n def get_channels(self):\n return self.channels.get_channels()\n\n def get_channel_stream(self, chandict, allchandict):\n return self.channels.get_channel_stream(chandict, allchandict)\n\n def update_epg(self, channels):\n return self.epg.update_epg(channels)\n\n def get_status_dict(self):\n\n if self.setup_success:\n status_dict = {\n \"Setup\": \"Success\",\n }\n\n try:\n full_status_dict = self.origin.get_status_dict()\n for status_key in list(full_status_dict.keys()):\n status_dict[status_key] = full_status_dict[status_key]\n return status_dict\n except AttributeError:\n return status_dict\n else:\n return {\n \"Setup\": \"Failed\",\n }\n\n def __getattr__(self, name):\n ''' will only get called for undefined attributes '''\n if hasattr(self.fhdhr, name):\n return eval(\"self.fhdhr.\" + name)\n if hasattr(self.origin, name):\n return eval(\"self.origin.\" + name)\n elif hasattr(self.channels, name):\n return eval(\"self.channels.\" + name)\n elif hasattr(self.epg, name):\n return eval(\"self.epg.\" + name)\n else:\n raise AttributeError(name)\n","sub_path":"fHDHR/origin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"213244455","text":"class Solution(object):\n def cherryPickup(self, grid):\n def bestpath(grid):\n N = len(grid)\n NINF = float('-inf')\n dp = [[NINF] * N for _ in range(N)]\n dp[-1][-1] = grid[-1][-1]\n for i in range(N-1, -1, -1):\n for j in range(N-1, -1, -1):\n if grid[i][j] >= 0 and (i != N-1 or j != N-1):\n new_val_of_col = dp[i+1][j] if i+1 < N else NINF\n new_val_of_row = dp[i][j+1] if j+1 < N else NINF\n\n dp[i][j] = max(new_val_of_col, new_val_of_row)\n dp[i][j] += grid[i][j]\n\n print('aaa')\n for row in dp:\n for cell in row:\n print(\"{:0>2}\".format(cell), end=\" \")\n print()\n\n if dp[0][0] < 0:\n return None\n\n ans = [(0, 0)]\n i = j = 0\n while i != N-1 or j != N-1:\n if j+1 == N or i+1 < N and dp[i+1][j] >= dp[i][j+1]:\n i += 1\n else:\n j += 1\n ans.append((i, j))\n return ans\n\n ans = 0\n path = bestpath(grid)\n\n for i, j in path:\n ans += grid[i][j]\n grid[i][j] = 0\n\n for i, j in bestpath(grid):\n ans += grid[i][j]\n\n return ans\n\n\nif __name__ == \"__main__\":\n s = Solution()\n test_case_1 = [\n [0,1,-1],\n [1,0,-1],\n [1,1,1]]\n test_case_2 = [\n [1,1,1,1,0,0,0],\n [0,0,0,1,0,0,0],\n [0,0,0,1,0,0,1],\n [1,0,0,1,0,0,0],\n [0,0,0,1,0,0,0],\n [0,0,0,1,0,0,0],\n [0,0,0,1,1,1,1]]\n\n print(s.cherryPickup(test_case_1))\n","sub_path":"leetcode/p0741_cherry_pick/solution_greedy.py","file_name":"solution_greedy.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"352921492","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/8/8 下午3:14\n# @Author : ShaHeTop-Almighty-ares\n# @Email : yang6333yyx@126.com\n# @File : views.py\n# @Software: PyCharm\n\nfrom flask import views, request\nfrom .service import MatchService\nfrom .repository import MatchModel\nfrom utils.apiResult import api_result\nfrom app.cms.views import bp\nfrom app.cms.userLogin.cms_decorators import login_required\n\n\n@bp.route('/xxx')\ndef xxx():\n from app.cms.userLogin.models import Article, CMSUser\n x = CMSUser.query.filter_by(username='yyx').first()\n print(x)\n print(x.articles[0].title, type(x.articles), type(x.articles[0]))\n a = Article.query.filter_by(id=1).first()\n print(a)\n print(a.user.id)\n print(a.user.username)\n print(a.user.articles, type(a.user.articles[0]))\n return 'text import bp'\n\n\nclass MatchView(views.MethodView):\n # decorators = [login_required]\n\n def get(self):\n data = request.args.to_dict()\n return api_result(code=200, message='', data=MatchService().read(data))\n\n @login_required\n def post(self):\n data = request.get_json()\n match = MatchService().add_match(data)\n if MatchModel(match).add():\n return api_result(code=201, message='', data=data)\n\n @login_required\n def put(self):\n data = request.get_json()\n m = MatchService().up_match(data)\n match = MatchModel.up(m)\n if match:\n return api_result(code=200, message='update ok', data=[])\n\n @login_required\n def delete(self):\n data = request.get_json()\n m = MatchService().del_match(data)\n match = MatchModel.delete(m)\n if match:\n return api_result(code=204, message='delete ok', data=[])\n","sub_path":"Flask_Projects/HuntingBall/app/cms/match/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"281112379","text":"\nfrom aws_ir.libs import connection\nfrom aws_ir.libs import compromised\n\nfrom aws_ir.plugins import disableaccess_key\nfrom aws_ir.plugins import revokests_key\n\n\"\"\"Compromise class for Key Compromise Procedure\"\"\"\nclass Compromise(object):\n\n def __init__(self,\n examiner_cidr_range='0.0.0.0/0',\n compromised_access_key_id=None,\n region='us-west-2',\n case=None,\n logger=None\n ):\n\n if compromised_access_key_id==None:\n raise ValueError(\n 'Must specifiy an access_key_id for the compromised key.'\n )\n\n self.case_type = 'Key'\n self.compromised_access_key_id = compromised_access_key_id\n self.region = region\n self.case = case\n self.logger = logger\n\n\n def mitigate(self):\n \"\"\"Any steps that run as part of key compromises.\"\"\"\n access_key = self.compromised_access_key_id\n compromised_resource = compromised.CompromisedMetadata(\n compromised_object_inventory = {\n 'access_key_id': access_key,\n 'region': self.region\n },\n case_number=self.case.case_number,\n type_of_compromise='key_compromise'\n ).data()\n\n client = connection.Connection(\n type='client',\n service='iam',\n region=compromised_resource['region']\n ).connect()\n\n self.logger.event_to_logs(\n \"Attempting key disable.\"\n )\n\n\n # step 1 - disable access key\n disableaccess_key.Disableaccess(\n client=client,\n compromised_resource = compromised_resource,\n dry_run=False\n )\n\n\n # step 2 - revoke and STS tokens issued prior to now\n revokests_key.RevokeSTS(\n client=client,\n compromised_resource = compromised_resource,\n dry_run=False\n )\n\n self.logger.event_to_logs(\n \"STS Tokens revoked issued prior to NOW.\"\n )\n\n self.logger.event_to_logs(\n \"Disable complete. Uploading results.\"\n )\n\n self.case.teardown(\n region=self.region,\n resource_id=self.compromised_access_key_id\n )\n","sub_path":"aws_ir/plans/key.py","file_name":"key.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"67633741","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAll spiders should yield data shaped according to the Open Civic Data\nspecification (http://docs.opencivicdata.org/en/latest/data/event.html).\n\"\"\"\nimport datetime\nimport json\nfrom urllib.parse import parse_qs, urljoin\n\nimport dateutil.parser\nimport pytz\nimport scrapy\n\nfrom city_scrapers.constants import CITY_COUNCIL\nfrom city_scrapers.spider import Spider\n\n\nclass ChiCityCouncilSpider(Spider):\n name = 'chi_citycouncil'\n agency_name = 'Chicago City Council'\n timezone = 'America/Chicago'\n allowed_domains = ['ocd.datamade.us']\n\n endpoint = \"https://ocd.datamade.us/events/\"\n query = {\n \"start_date__gt\": str(datetime.date.today()),\n \"sort\": \"start_date\",\n \"jurisdiction\": \"ocd-jurisdiction/country:us/state:il/place:chicago/government\",\n }\n # the response doesn't include the address for city hall\n address = '121 N LaSalle Dr, Chicago, IL'\n\n def start_requests(self):\n yield scrapy.FormRequest(\n url=self.endpoint, method='GET', formdata=self.query, callback=self.parse\n )\n\n def parse(self, response):\n \"\"\"\n This is not a traditional spider, rather, this is a wrapper\n around the Open Civic Data API to which the Chicago City Clerk\n Legistar site info has already been scraped.\n We will attempt to return all events that have been uploaded in the\n future, i.e. past today's date.\n \"\"\"\n data = json.loads(response.text)\n for url in self._gen_requests(data):\n yield scrapy.Request(url, callback=self._parse_item)\n\n if self._addtl_pages(data):\n params = parse_qs(response.url)\n params['page'] = self._next_page(data)\n yield scrapy.FormRequest(\n url=self.endpoint, method='GET', formdata=params, callback=self.parse\n )\n\n def _gen_requests(self, data):\n for result in data['results']:\n event_url = urljoin(self.endpoint, '../' + result['id'] + '/')\n yield event_url\n\n @staticmethod\n def _addtl_pages(data):\n max_page = data['meta']['max_page']\n page = data['meta']['page']\n return max_page > page\n\n @staticmethod\n def _next_page(data):\n current_page = data['meta']['page']\n return current_page + 1\n\n def _parse_item(self, response):\n data = json.loads(response.text)\n start = self._parse_time(data.get('start_date', ''))\n end = self._parse_time(data.get('end_date', ''))\n documents = self._parse_documents(data['documents'])\n location = self._parse_location(data)\n item = {\n '_type': 'event',\n 'name': data['name'],\n 'location': location,\n 'id': data['id'],\n 'event_description': data['description'],\n 'classification': CITY_COUNCIL,\n 'start': start,\n 'end': end,\n 'all_day': data['all_day'],\n 'documents': documents,\n 'sources': data['sources'],\n 'status': data['status']\n }\n end_date = item['end']['date']\n state_date = item['start']['date']\n item['end']['date'] = state_date if end_date is None else end_date\n item['id'] = self._generate_id(item)\n return item\n\n def _parse_location(self, data):\n return {\n 'address': self.address,\n 'name': data['location']['name'].strip(),\n }\n\n def _parse_time(self, timestamp):\n if len(timestamp) <= 0:\n return {'date': None, 'time': None, 'note': ''}\n\n tz = pytz.timezone(self.timezone)\n dt = dateutil.parser.parse(timestamp).astimezone(tz)\n return {\n 'date': dt.date(),\n 'time': dt.time(),\n 'note': '',\n }\n\n @staticmethod\n def _parse_documents(documents):\n parsed_documents = []\n for document in documents:\n for link in document['links']:\n parsed_document = {\"url\": link['url'], 'note': document['note']}\n parsed_documents.append(parsed_document)\n return parsed_documents\n","sub_path":"city_scrapers/spiders/chi_citycouncil.py","file_name":"chi_citycouncil.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"23557038","text":"import json\n\nfrom Minerals.serializers import MineralSerializer\n\njson_data = open('minerals.json').read()\n\ndata = json.loads(json_data)\n\nfor item in data:\n print(item)\n ms = MineralSerializer(data=item)\n if ms.is_valid():\n ms.save()\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"163811284","text":"from sklearn.model_selection import train_test_split # train_test_split - Splitting Training Set\nimport pandas as pd # pandas - Data Science Utilities\n\nimport SelectionValidation as sv\nimport CSVUtil as csvUtil\nimport DataPrep.LabelUtil as labelUtil\nimport DataPrep.MethodDemo as md\nimport DataPrep.AugConfig as config\n\n# Hyper parameters\nSAMPLE_SIZE = 25000\nTEST_RATIO = 0.2\nBATCH_SIZE = 2\nDATASET_DIR = \"E:\\\\Developing\\\\TrainingSet\\\\\"\nDATASET_CSV = \"TrainingSetAnalysis.csv\"\n\n# CLI messages\nSCALE_MENU = \"\\nAvailable scale of dataset:\\nA Sampled 50k images.\\nB Whole DataSet\"\nSCALE_DICT = {\"A\": \"Sampled_50k\", \"B\": \"WholeDataSet\"}\nMODEL_MENU = \"\\nAvailable model to train:\\nCNNProto\\nResNet50\\nVGG16\\nVGG19\"\nMODEL_DICT = {\"CNNProto\": 256, \"ResNet50\": 224, \"VGG16\": 224, \"VGG19\": 224}\nWELCOME_MESG = \"Please select or input:\"\n\n# Read csv file using pandas\nwith csvUtil.get_dataframe(DATASET_DIR, DATASET_CSV) as csv_file:\n print(\"File opening...\")\n raw_data = csv_file\n print(\"File open success!\")\n# CLI menus\ninput_validation = sv.SelectionValidation(selection_dict=SCALE_DICT,\n menu_message=SCALE_MENU,\n welcome_message=WELCOME_MESG)\nscale_select = input_validation.validation()\n\ninput_validation = sv.SelectionValidation(selection_dict=MODEL_DICT,\n menu_message=MODEL_MENU,\n welcome_message=WELCOME_MESG)\nmodel_select = input_validation.validation()\n\n# Mode confirm\nprint(\"Mode \" + SCALE_DICT[scale_select] + \" have been selected.\\n\")\n\nif scale_select == \"A\":\n print(\"Since 25,000 original images will be used, data augmentation will be turned ON.\")\n # Sample SAMPLE_SIZE images\n raw_data = raw_data.sample(n=SAMPLE_SIZE, replace=False, weights=None, random_state=1)\n\nelse:\n print(\"Since all of the images will be used, data augmentation will be turned OFF.\")\n\n# Append labels for DataFrame items\nraw_data['HasShip'] = labelUtil.label_gen(raw_data['EncodedPixels'].values)\n# Visualize results\nlabelUtil.label_stat_bar(input_tuple=labelUtil.label_num_calc(raw_data['HasShip'].values),\n save_dir=DATASET_DIR)\n# Spilt up images\ntrain_data, test_data = train_test_split(raw_data, random_state=1, test_size=TEST_RATIO)\n\n# Visualizing image augmentation effect\nsample_df = pd.read_csv(DATASET_DIR + 'Sample.csv')\nsample_df['Directory'] = DATASET_DIR + sample_df['ImageId']\nsample_df['HasShip'] = labelUtil.label_gen(sample_df['EncodedPixels'].values)\n\nif scale_select == \"A\":\n selected_config = config.sample_train_config\nelse:\n selected_config = config.preprocess_config\n\n# DataFrameIterator: Generates unlimited number of images according to the configuration\n# A.k.a Source of the pre-processed images\nprep_train = config.prep_exec(config=selected_config,\n input_df=train_data,\n batch_size=BATCH_SIZE,\n side_length=MODEL_DICT[model_select])\nprep_test = config.prep_exec(config=selected_config,\n input_df=test_data,\n batch_size=BATCH_SIZE,\n side_length=MODEL_DICT[model_select])\nsample = md.MethodDemo(train_config=config.sample_train_config,\n test_config=config.preprocess_config,\n input_df=sample_df,\n side_length=MODEL_DICT[model_select],\n save_dir=DATASET_DIR + \"Sample\")\nsample.output_aug_sample()\n","sub_path":"ShipDetection/DataPrep/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"107635334","text":"build_frame = [[1, 0, 0, 1], [1, 1, 1, 1], [2, 1, 0, 1], [2, 2, 1, 1], [5, 0, 0, 1], [5, 1, 0, 1], [4, 2, 1, 1], [3, 2, 1, 1]]\nn = 5\nbuild_frame2 = [[0, 0, 0, 1], [2, 0, 0, 1], [4, 0, 0, 1], [0, 1, 1, 1], [1, 1, 1, 1], [2, 1, 1, 1], [3, 1, 1, 1], [2, 0, 0, 0], [1, 1, 1, 0], [2, 2, 0, 1]]\ndef check(x, y, a, result):\n if a == 0:\n if [x, y-1, 0] in result or [x-1, y, 1] in result or [x,y,1] in result or y == 0:\n return True\n return False\n elif a == 1:\n if [x, y-1, 0] in result or [x+1, y-1, 0] in result or ([x-1, y, 1] in result and [x+1, y, 1] in result):\n return True\n return False\n\n\ndef solution(n, build_frame):\n result = []\n for data in bulid_frame:\n x, y, a, b = data\n if b == 1:\n if check(x, y, a, result):\n result.append([x, y, a])\n else:\n result.remove([x, y, a])\n for val in result:\n nx, ny, na = val\n if check(nx, ny, na, result) == False:\n result.append([x, y, a])\n break\n result.sort(key = lambda x : (x[0], x[1], x[2]))\n return result\n\nprint(solution(n, build_frame2))\n","sub_path":"Q12_기둥과 보 설치.py","file_name":"Q12_기둥과 보 설치.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"132213967","text":"## Script (Python) \"iol_onOpenDocument\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=\n##title=\n##\n\"\"\"\nStandardizzazione dele operazioni da svolgere all'apertura di una richiesta\n\"\"\"\n\ndb = context.getParentDatabase()\n#tipo_domanda = genera_tipo_domanda(plominoDocument.getFormName())\n# all'apertura di nuovo documento plominoDocument è un plominoForm\nif context.isNewDocument():\n \n if 'rinnovo' == context.naming_manager('tipo_richiesta'):\n parentDocument = db.getDocument(context.REQUEST.get('parentDocument'))\n if parentDocument.naming_manager('tipo_richiesta') != 'periodica':\n return \"ATTENZIONE! Non è possibile rinnovare una richiesta NON periodica.\"\n \n if parentDocument.getItem('numero_rinnovi') == 2:\n return \"ATTENZIONE! Non è possibile rinnovare ulteriormente la pratica selezionata!\"\n\nreturn ''\n","sub_path":"src/gisweb/iol/skins/iol_templates/iol_old/iol_onOpenDocument.py","file_name":"iol_onOpenDocument.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"526048673","text":"\"\"\"Pirograph image processing.\n\nPygame implementation, based heavily on earlier Technology Wishing Well\ncode and Mike Cook's Kaleido Cam from the Mag Pi, December 2017.\n\nNote that this code will only work on V4L2 systems: ie. Linux and Raspberry Pi.\nSorry, Mac and Windows users.\n\n...so for testing reasons, this instrumented version just loads an image from disk.\n\"\"\"\n\nimport pygame\n# import pygame.camera\nimport os, time\nfrom tkinter import Tk\nfrom tkinter.filedialog import asksaveasfilename\nfrom PIL import Image, ImageStat, ImageOps, ImageDraw\n\n# os.system(\"sydo modprobe bcm2835-v4l2\") # needed for Pi camera\nTk().withdraw()\npygame.init()\n# pygame.camera.init()\nos.environ['SDL_VIDEO_WINDOW_POS'] = 'center'\npygame.display.set_caption(\"Pirograph\")\npygame.event.set_allowed(None)\npygame.event.set_allowed([pygame.KEYDOWN, pygame.QUIT])\n\nimagesize = 800 # basic image size.\nscreen = pygame.display.set_mode([imagesize, imagesize], 0, 32)\n\n# find, open and start camera\n# cam_list = pygame.camera.list_cameras()\n# print(cam_list)\n# webcam = pygame.camera.Camera(cam_list[0], (1920, 1080))\n# webcam.start()\n\npreRot = 0.0\nautoRotate = False\nsavePath = \"\"\nframeNumber = 0\nsaveSource = False\n\n# Config variables (can adapt at runtime)\nfull_screen = 0\nvideo_framerate = 0\nthreshold_low = 40\nthreshold_high = 230\nframe_count = 1\n\n\ndef main():\n x = 0\n while x in range(10):\n checkForEvent()\n showScreen()\n print(x)\n x += 1\n # print(\"\")\n\ndef showScreen():\n global camFrame, preRot, frame_count\n # camFrame = webcam.get_image()\n camFrame = pygame.image.load('test_image.jpeg')\n frame_count += 1\n if autoRotate:\n preRot += 0.5\n if preRot > 360:\n preRot -= 360\n rotFrame = pygame.transform.scale(camFrame, (imagesize, imagesize)) # ensure square\n rotFrame.set_alpha(greyscale(rotFrame))\n rotFrame = rot_center(rotFrame, preRot) # Rotate\n sqFrame = pygame.Surface((imagesize, imagesize))\n sqFrame.blit(rotFrame, (0, 0))\n else:\n thisFrame = pygame.transform.scale(camFrame, (imagesize, imagesize))\n thisFrame.set_alpha(greyscale(thisFrame))\n sqFrame = pygame.Surface((imagesize, imagesize))\n sqFrame.blit(thisFrame, (0, 0))\n screen.blit(sqFrame, (0, 0))\n pygame.display.update()\n\n\ndef rot_center(image, angle):\n # rotate an image while keeping its center and size\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image\n\n\ndef terminate():\n # webcam.stop()\n pygame.quit()\n os._exit(1)\n\n\ndef greyscale(self, img):\n \"\"\"See https://stackoverflow.com/questions/10261440/how-can-i-make-a-greyscale-copy-of-a-surface-in-pygame/10693616#10693616.\"\"\"\n arr = pygame.surfarray.pixels3d(img)\n avgs = [[(r*0.298+ g*0.587 + b*0.114) for (r, g, b) in col] for col in arr]\n arr = arr.dot([0.298, 0.587, 0.114])[:,:,None].repeat(3, axis=2)\n return pygame.surfarray.make_surface(arr)\n\n\ndef checkForEvent():\n global savePath, autoRotate, saveSource, preRot\n event = pygame.event.poll()\n if event.type == pygame.QUIT:\n terminate()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n terminate()\n if event.key == pygame.K_r:\n autoRotate = not autoRotate\n print(\"Autorotate: \", autoRotate)\n if autoRotate:\n preRot = 0\n\n\nif __name__ == '__main__':\n main()","sub_path":"experiments/pirograph__partial_instrumented.py","file_name":"pirograph__partial_instrumented.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"653687543","text":"#!/usr/bin/env python\n# coding: utf-8\nimport sys\nfrom operator import add\nfrom pyspark import SparkConf, SparkContext, SQLContext \nfrom pyspark.sql import SparkSession\n\n#infile = \"hdfs://128.104.222.61:9000/CS744/inputdata/enwiki-pages-articles/link-enwiki-20180601-pages-articles14.xml-p7697599p7744799\"\ninfile = \"hdfs://128.104.222.61:9000/CS744/inputdata/enwiki-pages-articles/link-enwiki-20180601-pages-articles*\"\nfileOut = 'hdfs://128.104.222.61:9000/CS744/outputdata/outputranks_bigsample.txt'\n\ndef flaten(tup):\n key,value = tup # (URL, (List of links, rank))\n links,rank = value\n n = len(links)\n out = [(dest,rank/n) for dest in links]\n #out.append((key, 0))# append the src also to this list, with no rank from this scenario\n return out\n\nconf = SparkConf().setMaster(\"spark://128.104.222.61:7077\").setAppName(\"TableData\")\nsc = SparkContext(conf = conf)\nnoofpartitions = 120\n\ndef partitioner(key):\n\treturn hash(key)\n\ndata = sc.textFile(infile)\n\nlinks = data.map(lambda x: x.split('\\t'))#break line\nlinks = links.filter(lambda l: ':' not in l[1] or l[1][0:9] == 'Category:')#ignore some\nlinks = links.map(lambda l: [k.lower() for k in l])#to lowercase\nlinks = links.groupBy(lambda l: l[0])#group by key as from value\nlinks = links.map(lambda x : (x[0], [l[1] for l in list(x[1])]))#convert iterator to list of values\nprint(\"No of partitions of links RDD before custom partition: {}\").format(links.getNumPartitions())\n#links = links.partitionBy(noofpartitions,partitioner) \nprint(\"No of partitions of links RDD after custom partition: {}\").format(links.getNumPartitions())\n\nlinks.cache()#saving as in-memory objects\n\nr0 = 1.0\niterations = 10\n\nranks = links.keys().map(lambda x: (x,r0))\n\nfor i in range(iterations):\n contribs = links.join(ranks).flatMap(flaten)\n ranks = contribs.reduceByKey(add).mapValues(lambda x: 0.15 + (0.85)*x)\n\nranks.saveAsNewAPIHadoopFile(fileOut, \"org.apache.hadoop.mapreduce.lib.output.TextOutputFormat\",\"org.apache.hadoop.io.Text\",\"org.apache.hadoop.io.Text\")\n\n","sub_path":"scripts/PageRank_part3Task4.py","file_name":"PageRank_part3Task4.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"2299809","text":"#hopscotch.py\r\n#\r\n#function that draws a hopscotch court with the parameters for a turtle, edge length, and pen width\r\n#usage: ???\r\n# % python hopscotch.py t, edgeLen, penWidth\r\n#\r\n#Helen Gao, 7-9-2018 by 11am\r\n\r\nimport turtle #import the turtle library\r\nbob = turtle.Turtle() #create a turtle named bob\r\ndef square(t, edgeLen): #this function takes the parameters t for turtle and edgeLen for the edge length of the square\r\n for i in range(4): #since it is a square, drawing one side can be repeated four times\r\n t.fd(edgeLen) #the turtle moves forward the designated edge length\r\n t.rt(90) #the turtle turns 90 degrees in preparation for drawing the next edge length\r\ndef twosquares(t, edgeLen): #this function draws two square side by side using the above square function and uses the same parameters\r\n square(t, edgeLen) #drawing a square\r\n t.fd(edgeLen) #moves to the top right corner of the first square so the turtle can start drawing the next square\r\n square(t, edgeLen) #draws the second square\r\ndef threesquares(t, edgeLen): #this function draws a square on top of two squares using the square function and the twosquares function\r\n square(t, edgeLen) #draws the top square\r\n t.rt(90) #turns the turtle so it faces downwards\r\n t.fd(edgeLen) #moves down the length of the square so the turtle is at the bottom left corner of the first square\r\n t.lt(90) #turtle turns left 90 degrees so it faces to the right\r\n t.bk(edgeLen/2) #turtle moves back 1/2 the length of the side length so that the twosquares function will be centered properly\r\n twosquares(t, edgeLen) #turtle draws two squares\r\n t.rt(90) #turtle turns so that it's facing downwards\r\n t.fd(edgeLen) #turtle moves down the center of the two triangles\r\n t.lt(90) #turtle turns left so that it faces to the right\r\n t.bk(edgeLen/2) #turtle moves back so that it will be centered when it draws the single square\r\ndef hopscotch_court(t, edgeLen, penWidth): #this function creates the hopscotch court using the parameters t for turtle, edgeLen for edge length, and penWidth for pen size\r\n t.penup() #the turtle's pen goes up so it doesn't draw when repositioning\r\n t.setpos(-edgeLen/2, edgeLen*3) #the turtle's position is set so that the starting point is half the length of the edge to the left (because the first square is a full edge length long, this will center it) and since the hopscotch court is six edge lengths tall, placing it at a y-value of three edge lengths will also center the court vertically\r\n t.pendown() #the turtle's pen goes down to start drawing\r\n t.pensize(penWidth) #sets the pen size to the given pen width\r\n for i in range(2): #since the hopscotch court has two sets of three squares, the threesquares function can be run twice\r\n threesquares(t, edgeLen) #runs threesquares with the parameters t and edgeLen\r\n square(t, edgeLen) #draws the seventh square after finishing the first 2*3=6 squares\r\n t.rt(90) #the turtle turns right so that it faces down\r\n t.fd(edgeLen) #the turtle moves forward so that it is at the bottom left corner of the seventh square\r\n t.lt(90) #the turtle turns left so that it faces right in preparation for drawing the next square\r\n square(t, edgeLen) #the turtle draws the last square\r\n t.ht() #hiding the turtle now. bye turtle. we still love him though\r\nhopscotch_court(bob, 75, 6) #draws the hopscotch court with the turtle bob, each square having edges of length 75 pixels, and the width of the lines being 6 pixels\r\nturtle.mainloop() #the window stays open for our viewing pleasure\r\n\r\n#note: got the ideas for repeated square functions and centering the drawing at joe's thursday section","sub_path":"hopscotch.py","file_name":"hopscotch.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"632009941","text":"import uuid\nfrom functools import wraps\nfrom utils import log\n\nfrom flask import (\n session,\n request,\n abort,\n redirect,\n url_for,\n )\n\nfrom models.user import User\n\n\ndef current_user():\n uid = session.get('user_id', -1)\n\n u = User.one(id=uid)\n log('current', u.username)\n return u\n\n\ncsrf_tokens = dict()\n\n\ndef csrf_required(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n token = request.args['token']\n u = current_user()\n if token in csrf_tokens and csrf_tokens[token] == u.id:\n csrf_tokens.pop(token)\n return f(*args, **kwargs)\n else:\n abort(401)\n\n return wrapper\n\n\ndef new_csrf_token():\n u = current_user()\n token = str(uuid.uuid4())\n csrf_tokens[token] = u.id\n return token\n\n\ndef login_required(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n u = current_user()\n if u.is_guest():\n return redirect(url_for('index.index'))\n else:\n return f(*args, **kwargs)\n\n return wrapper\n\n\ndef same_user_required(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n u = current_user()\n id = int(request.args['id'])\n\n if u.is_admin() or id == u.id:\n return f(*args, **kwargs)\n else:\n return redirect(url_for('topic.index'))\n\n return wrapper\n\n\ndef admin_required(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n u = current_user()\n\n if u.is_admin():\n return f(*args, **kwargs)\n else:\n return redirect(url_for('topic.index'))\n\n return wrapper\n\n","sub_path":"routes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"157088859","text":"\"\"\"\n把两个向量想象成空间的两条线段,都是从原点出发([0, 0, ...]),指向不同的方向。\n两条线段之间形成一个夹角,如果夹角为0°,则方向相同、线段重合;\n如果夹角为90°,则形成直角,方向完全不相似;\n如果夹角为180°,则方向正好相反。\n夹角越小,就代表越相似。\n\"\"\"\nimport numpy as np\n\n\ndef cos_similar(vector_a, vector_b):\n \"\"\"\n 计算两个向量之间的余弦相似度\n :param vector_a: 向量a\n :param vector_b: 向量b\n :return: sim\n \"\"\"\n vector_a = np.mat(vector_a)\n vector_b = np.mat(vector_b)\n num = float(vector_a * vector_b.T) # * 表示数量积,dot表示矢量乘法\n\n # np.linalg.norm() 求范数,默认是二范数(ord = 2)\n denum = np.linalg.norm(vector_a) * np.linalg.norm(vector_b)\n \n cos = num / denum\n sim = 0.5 + 0.5 * cos # 归一化\n return sim\n # return cos\n\n\na = [1, 2, 3]\nb = [1, 2, 4]\n\nprint(cos_similar(a, b))\n","sub_path":"65+韩润华+南昌/week2/demo_cos.py","file_name":"demo_cos.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"422998020","text":"import torch.nn as nn\n\n\nclass PNet(nn.Module):\n def __init__(self):\n super(PNet, self).__init__()\n self.backbone = nn.Sequential(\n nn.Conv2d(3, 8, kernel_size=3),\n nn.BatchNorm2d(8),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n nn.Conv2d(8, 16, kernel_size=3),\n nn.BatchNorm2d(16),\n nn.ReLU(inplace=True),\n nn.Conv2d(16, 32, kernel_size=3),\n # nn.BatchNorm2d(32),\n nn.ReLU(inplace=True)\n )\n self.roi_cls_head = nn.Conv2d(32, 2, kernel_size=1)\n self.roi_reg_head = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.backbone(x)\n roi_cls = self.roi_cls_head(x)\n roi_reg = self.roi_reg_head(x)\n return roi_cls, roi_reg\n","sub_path":"mtcnn/modeling/pnet.py","file_name":"pnet.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"348270085","text":"# 4837. [파이썬 S/W 문제해결 기본] 2일차 - 부분집합의 합\n\nT = int(input())\narr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\nn = len(arr)\n\nlst = []\nfor i in range(1< redirect to settings\n def test_oob_roll(self):\n result = self.client.post('/stories/new/roll', data={'dice_number': 9, 'dice_img_set': 'standard'})\n self.assertRedirects(result, '/stories/new/settings')\n\n # Redirect from session (abc fails, throws ValueError, gets 8 from session, out of range -> redirect)\n def test_oob_roll_sess(self):\n with self.client.session_transaction() as sess:\n sess['dice_number'] = 8\n result = self.client.post('/stories/new/roll', data={'dice_number': 'abc', 'dice_img_set': 'standard'})\n self.assertRedirects(result, '/stories/new/settings')\n\n # Correct execution's flow of roll\n def test_roll(self):\n with self.client.session_transaction() as sess:\n sess['dice_number'] = 2\n rnd.seed(2) # File die0.txt\n self.client.post('/stories/new/roll', data={'dice_number': 4, 'dice_img_set': 'animal'})\n self.assert_template_used('roll_dice.html')\n","sub_path":"monolith/views/tests/test_dice.py","file_name":"test_dice.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"392591714","text":"\"\"\"\r\n\r\n Collaborative Filtering: Modeling Methods\r\n\r\n\"\"\"\r\n\r\nimport math\r\nimport random\r\n\r\nimport algebric_operations\r\nimport data_treatment\r\nimport model\r\nimport utils\r\n\r\ndef _update_p_matrix(p_matrix, q_matrix, user_index, item_index, error, lambda_value=0.1, gamma_value=0.01):\r\n\r\n for row in range(len(p_matrix)):\r\n\r\n p_matrix[row][user_index] += gamma_value * (error * q_matrix[row][item_index] - lambda_value * p_matrix[row][user_index])\r\n\r\n return p_matrix\r\n\r\ndef _update_q_matrix(q_matrix, p_matrix, user_index, item_index, user, amount_items, ratings, error, lambda_value=0.05, gamma_value=0.01):\r\n\r\n for row in range(len(q_matrix)):\r\n\r\n q_matrix[row][item_index] += gamma_value * (error * (p_matrix[row][user_index] + 1/math.sqrt(amount_items) * ratings[user][row])) - lambda_value * q_matrix[row][item_index]\r\n\r\n return q_matrix\r\n\r\ndef _update_y_matrix(y_matrix, q_matrix, users_items, user, items, amount_items, error, lambda_value=0.1, gamma_value=0.01):\r\n\r\n for row, item in enumerate(users_items[user]):\r\n\r\n item_index = items[item]\r\n\r\n for column in range(len(y_matrix[item_index])):\r\n\r\n y_matrix[item_index][column] += gamma_value * (error * 1/math.sqrt(amount_items) * q_matrix[column][item_index] - lambda_value * y_matrix[item_index][column])\r\n\r\n return y_matrix\r\n\r\ndef _update_residual_items(residual_items, item_index, error, gamma_value=0.01, lambda_value=0.05):\r\n\r\n\r\n residual_items[item_index] += gamma_value * (error - lambda_value * residual_items[item_index])\r\n\r\n return residual_items\r\n\r\ndef _update_residual_users(residual_users, user_index, error, gamma_value=0.01, lambda_value=0.05):\r\n\r\n\r\n residual_users[user_index] += gamma_value * (error - lambda_value * residual_users[user_index])\r\n\r\n return residual_users\r\n\r\ndef calculate_first_estimation(users, users_items, latent_factors_size, y_matrix, items):\r\n\r\n # first estimation of the ratings\r\n estimation = {}\r\n\r\n for user in users.keys():\r\n # array of zeros\r\n zero_array = [0] * latent_factors_size\r\n\r\n for item in users_items[user]: # items consumed by the user\r\n\r\n zero_array = algebric_operations.sum_two_arrays(zero_array, y_matrix[items[item]])\r\n\r\n estimation[user] = zero_array\r\n\r\n return estimation\r\n\r\ndef retrieve_column(matrix, column):\r\n\r\n column_array = []\r\n\r\n for value in range(len(matrix)):\r\n\r\n column_array.append(matrix[value][column])\r\n\r\n return column_array\r\n\r\ndef svd_prediction(p_matrix, q_matrix):\r\n\r\n return sum(list(map(lambda x, y: x*y, p_matrix, q_matrix)))\r\n\r\ndef svd_rmse(historic_rating_matrix, matrix_users_items, users, items, ratings_mean, residual_users, residual_items, ratings, q_matrix, y_matrix, users_items, latent_factors_size):\r\n\r\n total_error = 0\r\n\r\n for row in matrix_users_items:\r\n\r\n user, item = row[0], row[1] \r\n\r\n user_index, item_index = users[user], items[item]\r\n\r\n ratings[user] = [0] * latent_factors_size\r\n\r\n for item in users_items[user]:\r\n\r\n ratings[user] = algebric_operations.sum_two_arrays(ratings[user], y_matrix[item_index])\r\n\r\n prediction = (ratings_mean + residual_users[user_index] + residual_items[item_index] + svd_prediction(ratings[user], retrieve_column(q_matrix, item_index)))\r\n\r\n total_error += (historic_rating_matrix[user_index][item_index] - prediction) ** 2\r\n\r\n return math.sqrt(total_error/len(matrix_users_items))\r\n\r\n\r\ndef make_prediction(historic_data, prediction_data, ratings, ratings_mean, users, items, q_matrix, residual_users, residual_items, users_items, y_matrix, latent_factors_size):\r\n\r\n predictions = []\r\n\r\n #items_mean = utils.measure_column_mean(historic_data)\r\n\r\n for row in prediction_data:\r\n\r\n user, item = row[0], row[1]\r\n\r\n '''if user in users.keys() and item not in items.keys():\r\n\r\n user_index = users[user]\r\n\r\n prediction = users_mean[user]\r\n\r\n elif item in items.keys() and user not in users.keys():\r\n\r\n item_index = items[item]\r\n\r\n prediction = items_mean[item]'''\r\n\r\n if user not in users.keys() or item not in items.keys():\r\n\r\n prediction = ratings_mean\r\n\r\n else:\r\n\r\n user_index, item_index = users[user], items[item]\r\n\r\n ratings[user] = [0] * latent_factors_size\r\n\r\n for item in users_items[user]:\r\n\r\n ratings[user] = algebric_operations.sum_two_arrays(ratings[user], y_matrix[item_index])\r\n\r\n\r\n prediction = (ratings_mean + residual_users[user_index] + residual_items[item_index] + svd_prediction(ratings[user], retrieve_column(q_matrix, item_index)))\r\n\r\n predictions.append(prediction) \r\n\r\n return predictions\r\n\r\n\r\ndef singular_value_decomposition_pp(data, latent_factors_size, epochs):\r\n \"\"\"\r\n\r\n Based on the code available in:\r\n\r\n https://github.com/cheungdaven/recommendation\r\n\r\n Based on the paper of:\r\n\r\n https://people.engr.tamu.edu/huangrh/Spring16/papers_course/matrix_factorization.pdf\r\n\r\n\r\n \"\"\"\r\n random.seed()\r\n\r\n users_items, users, items = data_treatment.retrieve_guide_features(data['Historic Data'])\r\n\r\n matrix_users_items = data_treatment.mount_matrix_user_item(users_items)\r\n\r\n ratings_mean = utils.measure_average_rating(data['Historic Data'])\r\n\r\n # a matrix users x items\r\n historic_rating_matrix = model.generate_historic_data_matrix(data['Historic Data'], 'users', users, items, ratings_mean)\r\n\r\n #users_mean = utils.measure_row_mean(historic_rating_matrix)\r\n\r\n #historic_rating_matrix = utils.subtraction_matrix_row_mean(historic_rating_matrix, users_mean)\r\n\r\n # users latent matrix\r\n p_matrix = algebric_operations.generate_random_matrix(latent_factors_size, len(users))\r\n\r\n # itens latent matrix\r\n q_matrix = algebric_operations.generate_random_matrix(latent_factors_size, len(items))\r\n\r\n # prediction matrix\r\n y_matrix = algebric_operations.generate_random_matrix(len(items), latent_factors_size)\r\n\r\n ratings = calculate_first_estimation(users, users_items, latent_factors_size, y_matrix, items)\r\n\r\n residual_items = [random.uniform(0, 1) for item in range(0, len(items))]\r\n\r\n residual_users = [random.uniform(0, 1) for user in range(0, len(users))]\r\n\r\n for epoch in range(epochs):\r\n\r\n for row in matrix_users_items:\r\n\r\n user, item = row[0], row[1]\r\n\r\n user_index, item_index = users[user], items[item]\r\n\r\n amount_items = len(users_items[user])\r\n\r\n # diving all the values of a a array by the sqrt of the users amount of items\r\n ratings[user] = list(map(lambda value: value/math.sqrt(amount_items), ratings[user]))\r\n\r\n # retriving all the values of a specific column\r\n column_array = retrieve_column(p_matrix, users[user])\r\n\r\n ratings[user] = algebric_operations.sum_two_arrays(ratings[user], column_array)\r\n\r\n predicted_rating = ratings_mean + residual_items[item_index] + residual_users[user_index] + svd_prediction(ratings[user], retrieve_column(q_matrix, item_index))\r\n\r\n measured_error = historic_rating_matrix[user_index][item_index] - predicted_rating # error_metric(historic_rating_matrix[users[user]][item_index], predicted_rating)\r\n\r\n # cost O(n)\r\n p_matrix = _update_p_matrix(p_matrix, q_matrix, user_index, item_index, measured_error)\r\n\r\n # cost O(n)\r\n q_matrix = _update_q_matrix(q_matrix, p_matrix, user_index, item_index, user, amount_items, ratings, measured_error)\r\n\r\n # reconstruction matrix - this will be the closest to the original matrix - cost O(n**2)\r\n y_matrix = _update_y_matrix(y_matrix, q_matrix, users_items, user, items, amount_items, measured_error)\r\n\r\n # cost O(1)\r\n residual_items = _update_residual_items(residual_items, item_index, measured_error)\r\n\r\n # cost O(1)\r\n residual_users = _update_residual_users(residual_users, user_index, measured_error)\r\n\r\n print(svd_rmse(historic_rating_matrix, matrix_users_items, users, items, ratings_mean, residual_users, residual_items, ratings, q_matrix, y_matrix, users_items, latent_factors_size))\r\n\r\n predictions = make_prediction(historic_rating_matrix, data['Prediction Data'], ratings, ratings_mean, users, items, q_matrix, residual_users, residual_items, users_items, y_matrix, latent_factors_size)\r\n\r\n for index, prediction in enumerate(predictions):\r\n\r\n data['Prediction Data'][index].append(str(prediction))\r\n\r\n data['Prediction Data'].insert(0, ['UserId', 'ItemId', 'Prediction'])\r\n\r\n utils.write_table(data['Prediction Data'], \"Outputs/predictions.txt\")\r\n\r\n","sub_path":"model_based.py","file_name":"model_based.py","file_ext":"py","file_size_in_byte":8781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"346923929","text":"# Module for geocoding matched land registry postcodes\nimport pandas as pd\nimport numpy as np\nfrom numpy import nan\nimport itertools\nimport csv\nimport logging\nimport time\n\ndef get_landreg_for_geocoding(land_registry_df, codex, name_col, geo_cols):\n\t# Select elements within land_registry_df that have been matched\n\tlandreg_geo = land_registry_df.loc[land_registry_df[name_col].isin(codex[name_col])]\n\tlandreg_geo = landreg_geo.reindex(columns = geo_cols)\n\treturn landreg_geo\n\n# Given location of missing post code entry, searches nearby entries for same addresses to fill postcode from\ndef suggest_entry_for_missing_val(df, column_to_fill, columns_matched_on):\n\told_indices = df.index\n\tdf.set_index(np.arange(df.shape[0]), inplace=True)\n\tdf['SUGGESTED_'+column_to_fill]=df[column_to_fill]\n\tfor row_index,row in df.iterrows():\n\t# Selects the elements of DataFrame row with missing entry in 'column_to_fill' to be matched against other rows\n\t\tif pd.isnull(row[column_to_fill]):\n\t\t\telements_to_compare = row.reindex(columns_matched_on)\n\t\t\t# Selects elements of neighbouring entries for comparison. Break stops loop once match is found.\n\t\t\tfor i in range(max(0,row_index-2),row_index) + range(min(len(df),row_index+1), min(len(df),row_index+3)):\t\n\t\t\t\tcomparator = (df.ix[i]).reindex(columns_matched_on)\n\t\t\t\tif elements_to_compare.equals(comparator):\n\t\t\t\t\tdf.ix[row_index,'SUGGESTED_'+column_to_fill]= df.ix[i,column_to_fill]\n\t\t\t\t\tbreak\n\tdf.set_index(old_indices, inplace=True)\n\treturn df\n\n#Not currently used\ndef suggest_missing_entry_values(df, column_to_fill, columns_matched_on):\n\told_indices = df.index\n\tdf.set_index(np.arange(df.shape[0]), inplace=True)\n\tdf['SUGGESTED_'+column_to_fill]=df[column_to_fill]\n\tsub_df = df.loc[df[column_to_fill].isnull()] # This line does not work. Works whens used in separate function (see below) but position in this func is incorrect\n\tfor row_index, row in sub_df.iterrows():\n\t\telements_to_compare = row.reindex(columns_matched_on)\n\t\t# Selects elements of neighbouring entries for comparison. Break stops loop once match is found.\n\t\tfor i in range(max(0,row_index-2),row_index) + range(min(len(df),row_index+1), min(len(df),row_index+3)):\t\n\t\t\tcomparator = (df.ix[i]).reindex(columns_matched_on)\n\t\t\tif elements_to_compare.equals(comparator):\n\t\t\t\tdf.ix[row_index,'SUGGESTED_'+column_to_fill]= df.ix[i,column_to_fill]\n\t\t\t\tbreak\n\tdf.set_index(old_indices, inplace=True)\n\ndef get_sub_df(df, col):\n\tsub_df = df.loc[df[col].isnull()]\n\treturn sub_df\n\t\ndef pcd_clean(string):\n\ttry:\n\t\tstring=str(string)\n\t\tstring=string.upper()\n\t\tstring=string.replace(' ' ,'')\n\t\treturn string\n\texcept AttributeError:\n\t\tprint('Attribute Error when cleaning the following input to pcd_clean: ' + string)\n\t\treturn string\n\texcept (UnicodeDecodeError, UnicodeEncodeError):\n\t\tprint('Unicode Error when cleaning the input to pcd_clean')\n\t\treturn string\n\texcept:\n\t\tprint('Unexpected error when cleaning string', sys.exc_info()[0])\n\t\treturn string\n\t\traise","sub_path":"lr_geocode.py","file_name":"lr_geocode.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"124276239","text":"from django import template\nfrom django.utils import safestring\nfrom django.utils import html\n\nregister = template.Library()\n\n\n@register.filter()\ndef bootstrap_tags(tags):\n bootstrap_alerts = ['debug', 'info', 'success', 'warning']\n output = \"\"\n for tag in tags.split():\n if tag in bootstrap_alerts:\n tag_output = ' alert-{0}'.format(tag)\n elif tag == 'error': # django error == bootstrap danger\n tag_output = ' alert-danger'\n else:\n tag_output = ' {0}'.format(html.conditional_escape(tag))\n\n output += tag_output\n\n return safestring.mark_safe(output)\n","sub_path":"utils/templatetags/bootstrap_tags.py","file_name":"bootstrap_tags.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"596647625","text":"def remove_domain():\n file_name = input('Enter reverse or forward filename: ')\n with open(file_name, 'r') as file:\n lines = file.readlines()\n list_domains = []\n for index, line in enumerate(lines):\n check_line = line.split()\n if check_line and 'IN' in check_line and '@' not in check_line:\n list_domains.append(dict(index=index, ip=check_line[-1], domain=check_line[0]))\n if list_domains:\n for index, domain in enumerate(list_domains):\n print('{}.- {} -------- {} '.format(index + 1, domain['domain'], domain['ip']))\n number = int(input('Enter number of domain to delete: '))\n with open(file_name, 'w') as file:\n for index, line in enumerate(lines):\n if list_domains[number - 1]['index'] != index:\n file.write(line)\n","sub_path":"remove_domain.py","file_name":"remove_domain.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"123811090","text":"'''\n@author: Jeremy Bayley\n@email : TheEnvironmentGuy@gmail.com\n@websight : TheEnvironmentGuy.com\n\nFriday March 25 2016\nBlender 2.77\n\n-*- coding: utf-8 -*-\n'''\n\nimport bpy\nimport ImperialPrimitives as ip\n\ninch = 0.0254\nfoot = 0.3048\nstud_length = foot*8\n\nceiling_height = foot*8\nwall_length = foot*6\ndrywall_size = [foot*4, inch/2, foot*8]\nwoodstock_size = [inch*2.5, foot*8, inch*1.5]\nstud_distance = inch*16\n\n\ndef Main():\n bpy.ops.mesh.primitive_cube_add()\n ip.SetupObject(size=[11,2,3])\n\n\n\nif __name__ == '__main__':\n Main()\n","sub_path":"blenderWallBuilder/WallBuilder.py","file_name":"WallBuilder.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"314857335","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 12 08:16:45 2017\n\n@author: George\n\"\"\"\n\n#import math\nimport numpy as np\n#import h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom resnets_utils import *\nfrom PIL import Image\nfrom scipy import ndimage\nimport copy\nfrom keras.models import Model, load_model\nfrom tqdm import tqdm\nimport glob, os\n\n\n#load model\n#fileName = \"kerasModel_20171201-001742\"\n#fileName = \"kerasModel_20171204-062622\"\n#fileName = \"kerasModel_20180221-025551\" #40,000 images in training\n#fileName = \"kerasModel_20180302-024740\" #80,000 images in training\n#fileName = \"kerasModel_20180311-103037\" #100,000 images in training + 4,100 notBuilding Idaho examples\n#fileName = \"kerasModel_20180312-083230\" #120,000 images in training + 10,600 notBuilding Idaho examples\nfileName = \"kerasModel_20180317-114850\" #120,000 images in training + 10,600 notBuilding Idaho examples + 1648 building Idaho examples\n\nmodel_filePath = r\"C:\\\\Google Drive\\\\code\\\\python_code\\\\tensorFlow\\\\buildingIdentification\\\\results\\\\\" + fileName\n\nmodel = load_model(model_filePath)\nresultsPath = r\"C:\\\\Google Drive\\\\code\\\\python_code\\\\tensorFlow\\\\buildingIdentification\\\\results\\\\kerasModel_results\\\\\"\nsavePath = r\"C:\\\\Google Drive\\\\code\\\\python_code\\\\tensorFlow\\\\buildingIdentification\\\\results\\\\kerasModel_results\\\\\" + fileName\n\n\n#load map\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\tyrol-e10.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\innsbruck20.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\sfo3.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\bellingham19.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\sfo25.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\tyrol-e18.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\tyrol-e28.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\bloomington5.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\bellingham21.tif\"\n#mapPath = r\"J:\\AerialImageDataset\\AerialImageDataset\\test\\images\\bloomington23.tif\"\nmapPath = r\"C:\\Users\\George\\Desktop\\testImages\\test4.jpg\"\n#mapPath = r\"C:\\Users\\George\\Desktop\\buildingClassifier\\finalClips\\NAIP20150.jpg\"\n\n#load mapArray from image file\nmapArray = np.array(ndimage.imread(mapPath, flatten=False))\n\n\n# =============================================================================\n# ##load image from lat/long coordinates - using imageCapture in resnets_utils\n# centerPoint = (-114.318634, 43.522553) #(long,lat)\n# zoom = 19 \n# NW_lat_long, SE_lat_long = getNWSECorners(centerPoint,zoom)\n# result = get_maps_image(NW_lat_long, SE_lat_long, zoom=zoom)\n# saveImagePath = r\"C:\\Users\\George\\Desktop\\testImages\\test_lat_long.jpg\"\n# result.save(saveImagePath,\"JPEG\")\n# mapArray = np.array(result)\n# =============================================================================\n\n\n\n#get list of arrays\ndef getDataList(mapArray, startX, endX, startY, endY):\n mapArray = mapArray[startX:endX,startY:endY]\n imgwidth, imgheight , colours = mapArray.shape\n \n #split map into sections\n dataList = []\n num_px = 100\n #imageVectorSize = num_px * num_px * 3\n \n #square maps only\n def crop(mapArray,height,width):\n imgwidth, imgheight , colours = mapArray.shape\n for i in range(imgheight-height):\n for j in range(imgwidth-width):\n yield mapArray[i:i+height, j:j+width]\n \n height=num_px\n width=num_px\n start_num=0\n for k,piece in enumerate(crop(mapArray,height,width),start_num):\n dataList.append(piece)\n return dataList\n\n\ndef getPredictionArray(dataList, savePath, x, y, i):\n \n imgwidth = 200\n imgheight = 200\n num_px = 100\n \n # Loading the data (cat/non-cat)\n X_test_orig = np.array(dataList)\n Y_test_orig = np.zeros(len(dataList), dtype=\"int\")\n Y_test_orig = Y_test_orig.reshape(1,Y_test_orig.shape[0])\n classes = np.array(('notBuilding','building'), dtype=\"str\")\n \n num_labels = np.size(classes)\n \n # Normalize image vectors\n X_test = X_test_orig/255.\n # Convert training and test labels to one hot matrices\n Y_test = convert_to_one_hot(Y_test_orig, num_labels)\n \n \n print (\"number of images to process = \" + str(X_test.shape[0]))\n print (\"X_test shape: \" + str(X_test.shape))\n print (\"Y_test shape: \" + str(Y_test.shape))\n \n \n image_predictions_original = model.predict(X_test, verbose=1)\n \n predict_image = []\n\n##majority vote allocation \n for result in image_predictions_original :\n if result[0] > result[1]:\n predict_image.append(0)\n else:\n predict_image.append(1)\n\n\n### for result in image_predictions_original :\n# if result[1] > 0.75:\n# predict_image.append(1)\n# else:\n# predict_image.append(0)\n \n \n image_predictions = np.array(predict_image)\n \n array = image_predictions.reshape((imgwidth - num_px, imgheight- num_px))\n #array = np.flipud(array)\n array = np.uint8(array*255)\n \n saveFile = savePath + \"_array_\" + str(i) + \"_\" + str(x) + \"-\" + str(y) + \".txt\"\n np.savetxt(saveFile, array, delimiter=',')\n\n return\n\n\ndef displayResults(folderPath):\n os.chdir(folderPath)\n \n fileList = []\n for file in glob.glob(\"*.txt\"):\n fileList.append(file)\n\n\n array0 = np.genfromtxt(fileList[0],delimiter=\",\")\n for i in range(1,5):\n array = np.genfromtxt(fileList[i],delimiter=\",\")\n array0 = np.hstack((array0,array))\n\n array1 = np.genfromtxt(fileList[5],delimiter=\",\")\n for i in range(6,10):\n array = np.genfromtxt(fileList[i],delimiter=\",\")\n array1 = np.hstack((array1,array))\n\n array2 = np.genfromtxt(fileList[10],delimiter=\",\")\n for i in range(11,15):\n array = np.genfromtxt(fileList[i],delimiter=\",\")\n array2 = np.hstack((array2,array))\n\n array3 = np.genfromtxt(fileList[15],delimiter=\",\")\n for i in range(16,20):\n array = np.genfromtxt(fileList[i],delimiter=\",\")\n array3 = np.hstack((array3,array))\n\n array4 = np.genfromtxt(fileList[20],delimiter=\",\")\n for i in range(21,25):\n array = np.genfromtxt(fileList[i],delimiter=\",\")\n array4 = np.hstack((array4,array))\n\n imageArray = np.vstack((array0,array1,array2,array3,array4))\n \n img = Image.fromarray(imageArray)\n# plt.figure()\n# plt.imshow(img)\n return img\n\n\n\n#reduce map size for large map\nstartXloc = 200 #actually Y!\n#endX = startX + 200\nstartYloc = 200 #actually X!\n#endY = startY + 200\n\nfor i in range(3):\n \n for x in tqdm(range(5)):\n print(\"Run :\" + str(x))\n print(\"----------------\")\n startX = startXloc + (x * 100)\n endX = startX + 200\n \n for y in range(5):\n startY = startYloc + (y *100)\n endY = startY + 200\n \n dataList = getDataList(mapArray, startX, endX, startY, endY)\n getPredictionArray(dataList, savePath, startX, startY, i)\n print(\"-------------\")\n startYloc = startYloc + 500\n \n\n\n### Uncomment to display results\n#piece = displayResults(resultsPath)\n#\n#startX = startXloc\n#endX = startX + (6 * 100)\n#startY = startYloc\n#endY = startY + (6 * 100)\n#\n#num_px = 100\n#\n#\n##img = Image.open(mapPath)\n#mapArray = np.array(ndimage.imread(mapPath, flatten=False))\n#mapArray = mapArray[startX:endX,startY:endY]\n#img = Image.fromarray(mapArray)\n#\n#img2 = copy.deepcopy(img)\n#\n#img2.paste(piece,(int(num_px/2),int(num_px/2)))\n#\n#\n#fig1 = plt.figure(1)\n#plt.title(\"Input image\")\n#plt.imshow(img)\n#fig1.show()\n#\n#fig2 = plt.figure(2)\n#plt.title(\"Neural Net Output\")\n#plt.imshow(img2)\n#fig2.show()\n#\n#\n##get pixels that are white\n#data = piece.getdata()\n#height = 500\n#width = 500\n#pixelList = []\n#\n#for i in range(height):\n# for j in range(width):\n# stride = (width*i) + j\n# pixelList.append((j, i, data[stride]))\n#\n#whites = []\n#for pixel in pixelList:\n# if pixel[2] > 0:\n# whites.append(pixel[0:2])\n#\n#\n#whites = [list(elem) for elem in whites]\n#\n#x_values = [x[0] for x in whites]\n#y_values = [y[1] for y in whites]\n#\n#plt.scatter(x_values,y_values)\n#\n##np.array of white pixels\n#X = np.array(whites)\n\n\n\n# =============================================================================\n# #filter out green-ish from original image\n# mapArrayGreen = mapArray[:,:,2]#>155\n# mapArrayGreen = mapArrayGreen.astype(float)\n# mapArrayGreen = mapArrayGreen[50:200+50,50:200+50]#*255\n# \n# meanArray = np.mean(np.array([mapArrayGreen,array]),axis=0)\n# \n# meanArray = meanArray[:,:]>80\n# \n# img3 = copy.deepcopy(img)\n# meanArray = meanArray.astype(float)*255\n# piece2 = Image.fromarray(meanArray)\n# \n# img3.paste(piece2,(int(num_px/2),int(num_px/2)))\n# \n# \n# fig3 = plt.figure(3)\n# plt.title(\"Neural Net averaged with Blue channel Output\")\n# plt.imshow(img3)\n# fig3.show()\n# =============================================================================\n","sub_path":"tensorFlow/buildingIdentification/ResNet_analyzeImages_loadWholeImage_iterateOverLargeImage.py","file_name":"ResNet_analyzeImages_loadWholeImage_iterateOverLargeImage.py","file_ext":"py","file_size_in_byte":9053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"173334343","text":"from utils.pretrained_wordembed import *\nimport pandas as pd\nimport pickle\n\nclass Data:\n\n def __init__(self):\n self.char_to_idx = {}\n self.tag_to_idx = {}\n self.word_to_idx = {}\n self.reverse_tag_to_idx = {}\n self.training_path = None\n self.validation_path = None\n self.evaluation_path = None\n self.char_embedding_dim = None\n self.word_embedding_dim = None\n self.char_hidden_dim = None\n self.word_hidden_dim = None\n self.param_name = None\n self.learning_rate = None\n self.pretrained_word_embed_path = None\n self.pretrain_word_emb = None\n self.char_model = None\n self.word_model = None\n self.batch_size = None\n self.GPU = None\n self.epoch = None\n self.optimizer = None\n self.model_save_path = None\n self.data_save_path = None\n self.infer_path = None\n self.mode = None\n self.NER = None\n self.result_save_path = None\n\n def readConfig(self,file_path):\n lines = open(file_path, 'r').readlines()\n for line in lines:\n if len(line) == 1 or line[0] == '#':\n continue\n param_name, value = line.replace(' ', '').split('=')[0], line.replace(' ', '').split('=')[1].replace('\\n','')\n\n if str(param_name) == \"training_path\":\n self.training_path = value\n\n elif param_name == 'validation_path':\n self.validation_path = value\n\n elif param_name == 'evaluation_path':\n self.evaluation_path = value\n\n elif param_name == 'char_embedding_dim':\n self.char_embedding_dim = int(value)\n\n elif param_name == 'word_embedding_dim':\n self.word_embedding_dim = int(value)\n\n elif param_name == 'char_hidden_dim':\n self.char_hidden_dim = int(value)\n\n elif param_name == 'word_hidden_dim':\n self.word_hidden_dim = int(value)\n\n elif param_name == 'epoch':\n self.epoch = int(value)\n\n elif param_name == 'learning_rate':\n self.learning_rate = float(value)\n\n elif param_name == 'pretained_word_embed_path':\n self.pretrained_word_embed_path = value\n\n elif param_name == 'char_model':\n self.char_model = value\n\n elif param_name == 'word_model':\n self.word_model = value\n\n elif param_name == 'batch_size':\n self.batch_size = int(value)\n\n elif param_name == 'GPU':\n self.GPU = value\n\n elif param_name == 'optimizer':\n self.optimizer = value\n\n elif param_name == 'model_save_path':\n self.model_save_path = value\n\n elif param_name == 'data_save_path':\n self.data_save_path = value\n\n elif param_name == 'infer_path':\n self.infer_path = value\n\n elif param_name == 'mode':\n self.mode = value\n\n elif param_name == 'NER':\n self.NER = bool(value)\n\n elif param_name == 'result_save_path':\n self.result_save_path = value\n\n\n \n def buildDictionary(self):\n files = [self.training_path, self.evaluation_path, self.validation_path]\n frames = [pd.read_csv(file,header=None,low_memory=False,encoding='utf-8') for file in files]\n data = pd.concat(frames)\n column0, column1, column2 = data[data.columns[0]],data[data.columns[1]],data[data.columns[2]]\n preIdx = '0'\n preToken = ''\n STRINGS = []\n TAGS = []\n string_tmp = []\n tag_tmp = []\n for idx, token, tag in zip(column0, column1, column2):\n idx = str(idx)\n token = str(token)\n tag = str(tag)\n if preIdx != idx:\n STRINGS.append(string_tmp)\n TAGS.append(tag_tmp)\n string_tmp = []\n tag_tmp = []\n\n string_tmp.append(token)\n tag_tmp.append(tag)\n preIdx = idx\n\n\n for item in zip(STRINGS, TAGS):\n sentence = item[0]\n tags = item[1]\n for tag in tags:\n if tag not in self.tag_to_idx:\n self.tag_to_idx[tag] = len(self.tag_to_idx) + 1\n self.reverse_tag_to_idx[len(self.reverse_tag_to_idx)+1] = tag\n\n sen_list = sentence\n for word in sen_list:\n word = word.lower()\n if word not in self.word_to_idx:\n self.word_to_idx[word] = len(self.word_to_idx)+1\n for char in word:\n if char not in self.char_to_idx:\n self.char_to_idx[char] = len(self.char_to_idx)+1\n\n self.word_to_idx[\"WORD_PAD\"] = 0\n self.char_to_idx[\"CHAR_PAD\"] = 0\n\n\n def getPretrainedEmbedding(self):\n self.pretrain_word_emb = build_pretrain_embedding(embedding_path=self.pretrained_word_embed_path,\\\n word_alphabet = list(self.word_to_idx.keys()))\n\n def saveData(self):\n f = open(self.data_save_path, 'wb')\n pickle.dump(self.__dict__, f, 2)\n f.close()\n\n def load(self,path):\n f = open(path, 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n self.__dict__.update(tmp_dict)\n\n\n\n","sub_path":"model/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"575745056","text":"\"\"\"Fakegram middleware catalog.\"\"\"\n\n# Django \nfrom django.shortcuts import redirect\nfrom django.urls import reverse\n\nclass PofileCompletionMiddleware:\n\t\"\"\"Profile completition middleware.\n\n\tEnsure every user that is interacting with the plattform\n\thave their profile picture and biography.\t\n\t\"\"\"\n\t\n\tdef __init__(self, get_response):\n\t\t\"\"\"Middleware initialization.\"\"\"\n\t\tself.get_response = get_response\n\n\tdef __call__(self, request):\n\t\t\"\"\"Code to be executed for each request before the view is called.\"\"\"\n\t\tif not request.user.is_anonymous:\n\t\t\tif not request.user.is_staff:\n\t\t\t\tprofile = request.user.profile\n\t\t\t\tif not profile.picture or not profile.biography:\n\t\t\t\t\tif request.path not in [reverse('users:update_profile'), reverse('users:logout')]: # nos da la url a partir de su nombre\n\t\t\t\t\t\treturn redirect('users:update_profile')\n\n\t\tresponse = self.get_response(request)\n\t\treturn response\n\n# lo instalamos en settins.py, llamándolo por su ruta después del resto de middlewares","sub_path":"fakegram/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"310863607","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\n\ndef main():\n print('cuda device count: ', torch.cuda.device_count())\n net = torchvision.models.mobilenet_v3_small(pretrained=True)\n #net.fc = nn.Linear(512, 2)\n net = net.eval()\n net = net.to('cuda:0')\n print(net)\n tmp = torch.ones(1, 3, 224, 224).to('cuda:0')\n out = net(tmp)\n print('mobilenet out:', out.shape)\n torch.save(net, \"mobilenetv3.pth\")\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"mobilenet/mobilenetv3.py","file_name":"mobilenetv3.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"82239998","text":"import pandas as pd\nfrom amply import Amply\n\nfrom otoole.preprocess.datafile_to_datapackage import (\n convert_amply_data_to_list,\n convert_amply_to_dataframe,\n load_parameter_definitions,\n)\n\n\ndef test_amply():\n\n Amply(\n \"\"\"set REGION;\n # set REGION := SIMPLICITY;\n set TECHNOLOGY;\n set TECHNOLOGY := ETHPLANT GAS_EXTRACTION;\n set MODE_OF_OPERATION;\n set MODE_OF_OPERATION := 1 2;\n set YEAR;\n set YEAR := 2014;\n end;\"\"\"\n )\n\n\ndef test_convert_amply_to_dataframe():\n\n config = {\n \"VariableCost\": {\n \"type\": \"param\",\n \"indices\": [\"REGION\", \"TECHNOLOGY\", \"MODE_OF_OPERATION\", \"YEAR\"],\n \"dtype\": \"float\",\n \"default\": 0,\n },\n \"REGION\": {\"type\": \"set\", \"dtype\": \"str\"},\n \"YEAR\": {\"dtype\": \"int\", \"type\": \"set\"},\n \"MODE_OF_OPERATION\": {\"dtype\": \"int\", \"type\": \"set\"},\n \"TECHNOLOGY\": {\"dtype\": \"str\", \"type\": \"set\"},\n }\n\n amply = Amply(\n \"\"\"set REGION;\n set REGION := SIMPLICITY;\n set TECHNOLOGY;\n set TECHNOLOGY := ETHPLANT GAS_EXTRACTION;\n set MODE_OF_OPERATION;\n set MODE_OF_OPERATION := 1 2;\n set YEAR;\n set YEAR := 2014;\"\"\"\n )\n amply.load_string(\"param VariableCost {REGION,TECHNOLOGY,MODE_OF_OPERATION,YEAR};\")\n # amply.load_string(\"\"\"param default 0 : VariableCost :=\n # SIMPLICITY ETHPLANT 1 2014 2.89\n # SIMPLICITY ETHPLANT 2 2014 999999.0\n # SIMPLICITY GAS_EXTRACTION 1 2014 7.5\n # SIMPLICITY GAS_EXTRACTION 2 2014 999999.0\"\"\")\n amply.load_string(\n \"\"\"\nparam VariableCost default 0.0001 :=\n[SIMPLICITY,ETHPLANT,*,*]:\n2014 :=\n1 2.89\n2 999999.0\n[SIMPLICITY,GAS_EXTRACTION,*,*]:\n2014 :=\n1 7.5\n2 999999.0;\"\"\"\n )\n actual = convert_amply_to_dataframe(amply, config)\n expected = pd.DataFrame(\n data=[\n [\"SIMPLICITY\", \"ETHPLANT\", 1, 2014, 2.89],\n [\"SIMPLICITY\", \"ETHPLANT\", 2, 2014, 999999.0],\n [\"SIMPLICITY\", \"GAS_EXTRACTION\", 1, 2014, 7.5],\n [\"SIMPLICITY\", \"GAS_EXTRACTION\", 2, 2014, 999999.0],\n ],\n columns=[\"REGION\", \"TECHNOLOGY\", \"MODE_OF_OPERATION\", \"YEAR\", \"VALUE\"],\n )\n\n pd.testing.assert_frame_equal(actual[\"VariableCost\"], expected)\n\n\ndef test_convert_amply_data_to_list_of_lists():\n\n data = {\n \"SIMPLICITY\": {\n \"ETHPLANT\": {1.0: {2014.0: 2.89}, 2.0: {2014.0: 999999.0}},\n \"GAS_EXTRACTION\": {1.0: {2014.0: 7.5}, 2.0: {2014.0: 999999.0}},\n }\n }\n expected = [\n [\"SIMPLICITY\", \"ETHPLANT\", 1.0, 2014.0, 2.89],\n [\"SIMPLICITY\", \"ETHPLANT\", 2.0, 2014.0, 999999.0],\n [\"SIMPLICITY\", \"GAS_EXTRACTION\", 1.0, 2014.0, 7.5],\n [\"SIMPLICITY\", \"GAS_EXTRACTION\", 2.0, 2014.0, 999999.0],\n ]\n actual = convert_amply_data_to_list(data)\n assert actual == expected\n\n\ndef test_load_parameters():\n\n config = {\"TestParameter\": {\"type\": \"param\", \"indices\": [\"index1\", \"index2\"]}}\n\n actual = load_parameter_definitions(config)\n expected = \"param TestParameter {index1,index2};\\n\"\n assert actual == expected\n\n\ndef test_load_sets():\n\n config = {\"TestSet\": {\"type\": \"set\"}}\n\n actual = load_parameter_definitions(config)\n expected = \"set TestSet;\\n\"\n assert actual == expected\n","sub_path":"tests/preprocess/test_file2package.py","file_name":"test_file2package.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"571543061","text":"import logging\n\n_logger = logging.getLogger(__name__)\n\n__all__ = [\n \"camera_efficiency_log_file_name\",\n \"camera_efficiency_results_file_name\",\n \"camera_efficiency_simtel_file_name\",\n \"convert_telescope_model_name_to_yaml\",\n \"get_site_from_telescope_name\",\n \"get_telescope_type\",\n \"is_valid_name\",\n \"layout_telescope_list_file_name\",\n \"ray_tracing_file_name\",\n \"ray_tracing_plot_file_name\",\n \"ray_tracing_results_file_name\",\n \"simtel_array_config_file_name\",\n \"simtel_telescope_config_file_name\",\n \"simtools_instrument_name\",\n \"split_telescope_model_name\",\n \"validate_camera_name\",\n \"validate_layout_array_name\",\n \"validate_model_version_name\",\n \"validate_name\",\n \"validate_simtel_mode_name\",\n \"validate_site_name\",\n \"validate_sub_system_name\",\n \"validate_telescope_id_name\",\n \"validate_telescope_model_name\",\n \"validate_telescope_name_db\",\n]\n\nlst = \"LST\"\nmst = \"MST\"\nsct = \"SCT\"\nsst = \"SST\"\n\nall_telescope_class_names = {\n lst: [\"lst\"],\n mst: [\"mst\"],\n sct: [\"sct\"],\n sst: [\"sst\"],\n}\n\nall_camera_names = {\n \"SST\": [\"sst\"],\n \"ASTRI\": [\"astri\"],\n \"GCT\": [\"gct\", \"gct-s\"],\n \"1M\": [\"1m\"],\n \"FlashCam\": [\"flashcam\", \"flash-cam\"],\n \"NectarCam\": [\"nectarcam\", \"nectar-cam\"],\n \"SCT\": [\"sct\"],\n \"LST\": [\"lst\"],\n}\n\nall_structure_names = {\"Structure\": [\"Structure\", \"structure\"]}\n\nall_site_names = {\"South\": [\"paranal\", \"south\"], \"North\": [\"lapalma\", \"north\"]}\n\nall_model_version_names = {\n \"2015-07-21\": [\"\"],\n \"2015-10-20-p1\": [\"\"],\n \"prod4-v0.0\": [\"\"],\n \"prod4-v0.1\": [\"\"],\n \"2018-02-16\": [\"\"],\n \"prod3_compatible\": [\"p3\", \"prod3\", \"prod3b\"],\n \"prod4\": [\"p4\"],\n \"post_prod3_updates\": [\"\"],\n \"2016-12-20\": [\"\"],\n \"2018-11-07\": [\"\"],\n \"2019-02-22\": [\"\"],\n \"2019-05-13\": [\"\"],\n \"2019-11-20\": [\"\"],\n \"2019-12-30\": [\"\"],\n \"2020-02-26\": [\"\"],\n \"2020-06-28\": [\"prod5\"],\n \"prod4-prototype\": [\"\"],\n \"default\": [],\n \"Current\": [],\n \"Latest\": [],\n}\n\nall_simtel_mode_names = {\n \"RayTracing\": [\"raytracing\", \"ray-tracing\"],\n \"RayTracingSingleMirror\": [\n \"raytracing-singlemirror\",\n \"ray-tracing-singlemirror\",\n \"ray-tracing-single-mirror\",\n ],\n \"Trigger\": [\"trigger\"],\n}\n\nall_layout_array_names = {\n \"4LST\": [\"4-lst\", \"4lst\"],\n \"1LST\": [\"1-lst\", \"1lst\"],\n \"4MST\": [\"4-mst\", \"4mst\"],\n \"1MST\": [\"1-mst\", \"mst\"],\n \"4SST\": [\"4-sst\", \"4sst\"],\n \"1SST\": [\"1-sst\", \"sst\"],\n \"Prod5\": [\"prod5\", \"p5\"],\n \"TestLayout\": [\"test-layout\"],\n}\n\ncorsika_to_simtools_names = {\n \"OBSLEV\": \"corsika_obs_level\",\n}\n\n\ndef validate_sub_system_name(name):\n \"\"\"\n Validate a sub system name (optics structure or camera).\n\n Parameters\n ----------\n name: str\n Name of the subsystem.\n\n Returns\n -------\n str\n Validated name.\n \"\"\"\n return validate_name(name, {**all_camera_names, **all_structure_names})\n\n\ndef validate_camera_name(name):\n \"\"\"\n Validate a camera name.\n\n Parameters\n ----------\n name: str\n Camera name\n\n Returns\n -------\n str\n Validated name.\n \"\"\"\n return validate_name(name, all_camera_names)\n\n\ndef validate_telescope_id_name(name):\n \"\"\"\n Validate a telescope ID name\n\n Valid names e.g.,\n - D\n - telescope ID\n\n Parameters\n ----------\n name: str\n Telescope ID name.\n\n Returns\n -------\n str\n Validated name.\n\n Raises\n ------\n ValueError\n If name is not valid.\n \"\"\"\n\n if name == \"D\" or name.isdigit():\n return name\n\n msg = f\"Invalid telescope ID name {name}\"\n _logger.error(msg)\n raise ValueError(msg)\n\n\ndef validate_model_version_name(name):\n \"\"\"\n Validate a model version name.\n\n Parameters\n ----------\n name: str\n Model version name.\n\n Returns\n -------\n str\n Validated name.\n \"\"\"\n return validate_name(name, all_model_version_names)\n\n\ndef validate_simtel_mode_name(name):\n \"\"\"\n Validate a sim_telarray mode name.\n\n Parameters\n ----------\n name: str\n sim_telarray mode name.\n\n Returns\n -------\n str\n Validated name.\n \"\"\"\n return validate_name(name, all_simtel_mode_names)\n\n\ndef validate_site_name(name):\n \"\"\"\n Validate a site name.\n\n Parameters\n ----------\n name: str\n Site name.\n\n Returns\n -------\n str\n Validated name.\n \"\"\"\n return validate_name(name, all_site_names)\n\n\ndef validate_layout_array_name(name):\n \"\"\"\n Validate a layout array name.\n\n Parameters\n ----------\n name: str\n Layout array name.\n\n Returns\n -------\n str\n Validated name.\n \"\"\"\n return validate_name(name, all_layout_array_names)\n\n\ndef validate_name(name, all_names):\n \"\"\"\n Validate a name given the all_names options. For each key in all_names, a list of options is \\\n given. If name is in this list, the key name is returned.\n\n Parameters\n ----------\n name: str\n Name to validate.\n all_names: dict\n Dictionary with valid names.\n Returns\n -------\n str\n Validated name.\n\n Raises\n ------\n ValueError\n If name is not valid.\n \"\"\"\n\n if not is_valid_name(name, all_names):\n msg = f\"Invalid name {name}\"\n _logger.error(msg)\n raise ValueError(msg)\n for main_name, list_of_names in all_names.items():\n if name.lower() in list_of_names + [main_name.lower()]:\n if name != main_name:\n _logger.debug(f\"Correcting name {name} -> {main_name}\")\n return main_name\n return None\n\n\ndef is_valid_name(name, all_names):\n \"\"\"\n Check if name is valid.\n\n Parameters\n ----------\n name: str\n Name to validated.\n all_names: dict\n Dictionary with valid names.\n\n Returns\n -------\n bool\n True if name is valid. Otherwise, false.\n \"\"\"\n\n if not isinstance(name, str):\n return False\n for main_name in all_names.keys():\n if name.lower() in all_names[main_name] + [main_name.lower()]:\n return True\n return False\n\n\ndef validate_telescope_model_name(name):\n \"\"\"\n Validate a telescope model name.\n\n Parameters\n ----------\n name: str\n Telescope model name.\n\n Returns\n -------\n str\n Validated name.\n \"\"\"\n\n tel_class, tel_type = split_telescope_model_name(name)\n tel_class = validate_name(tel_class, all_telescope_class_names)\n if \"flashcam\" in tel_type:\n tel_type = tel_type.replace(\"flashcam\", \"FlashCam\")\n if \"nectarcam\" in tel_type:\n tel_type = tel_type.replace(\"nectarcam\", \"NectarCam\")\n if \"1m\" in tel_type:\n tel_type = tel_type.replace(\"1m\", \"1M\")\n if \"gct\" in tel_type:\n tel_type = tel_type.replace(\"gct\", \"GCT\")\n if \"astri\" in tel_type:\n tel_type = tel_type.replace(\"astri\", \"ASTRI\")\n if \"-d\" in \"-\" + tel_type:\n tel_type = tel_type.replace(\"d\", \"D\")\n\n return tel_class + \"-\" + tel_type\n\n\ndef split_telescope_model_name(name):\n \"\"\"\n Split a telescope name into class and type.\n\n Parameters\n ----------\n name: str\n Telescope name.\n\n Returns\n -------\n str, str\n class (LST, MST, SST ...) and type (any complement).\n \"\"\"\n\n name_parts = name.split(\"-\")\n tel_class = name_parts[0]\n tel_type = \"-\".join(name_parts[1:])\n return tel_class, tel_type\n\n\ndef get_site_from_telescope_name(name):\n \"\"\"\n Get site name (South or North) from the (validated) telescope name.\n\n Parameters\n ----------\n name: str\n Telescope name.\n\n Returns\n -------\n str\n Site name (South or North).\n \"\"\"\n return validate_site_name(name.split(\"-\")[0])\n\n\ndef validate_telescope_name_db(name):\n \"\"\"\n Validate a telescope DB name.\n\n Parameters\n ----------\n name: str\n\n Returns\n -------\n str\n Validated name.\n \"\"\"\n site = get_site_from_telescope_name(name)\n tel_model_name = \"-\".join(name.split(\"-\")[1:])\n\n return f\"{validate_site_name(site)}-{validate_telescope_model_name(tel_model_name)}\"\n\n\ndef convert_telescope_model_name_to_yaml(name):\n \"\"\"\n Get telescope name following the old convention (yaml files) from the current telescope name.\n\n Parameters\n ----------\n name: str\n Telescope model name.\n\n Returns\n -------\n str\n Telescope name (old convention).\n\n Raises\n ------\n ValueError\n if name is not valid.\n \"\"\"\n tel_class, tel_type = split_telescope_model_name(name)\n new_name = tel_class + \"-\" + tel_type\n old_names = {\n \"SST-D\": \"SST\",\n \"SST-1M\": \"SST-1M\",\n \"SST-ASTRI\": \"SST-2M-ASTRI\",\n \"SST-GCT\": \"SST-2M-GCT-S\",\n \"MST-FlashCam-D\": \"MST-FlashCam\",\n \"MST-NectarCam-D\": \"MST-NectarCam\",\n \"SCT-D\": \"SCT\",\n \"LST-D234\": \"LST\",\n \"LST-1\": \"LST\",\n }\n\n if new_name not in old_names:\n raise ValueError(f\"Telescope name {name} could not be converted to yml names\")\n\n return old_names[new_name]\n\n\ndef simtools_instrument_name(site, telescope_class_name, sub_system_name, telescope_id_name):\n \"\"\"\n Instrument name following simtools naming convention\n\n Parameters\n ----------\n site: str\n South or North.\n telescope_class_name: str\n LST, MST, ...\n sub_system_name: str\n FlashCam, NectarCam\n telescope_id_name: str\n telescope ID (e.g., D, numerial value)\n\n Returns\n -------\n instrument: name: str\n Instrument name.\n \"\"\"\n\n return (\n validate_site_name(site)\n + \"-\"\n + validate_name(telescope_class_name, all_telescope_class_names)\n + \"-\"\n + validate_sub_system_name(sub_system_name)\n + \"-\"\n + validate_telescope_id_name(telescope_id_name)\n )\n\n\ndef simtel_telescope_config_file_name(\n site, telescope_model_name, model_version, label, extra_label\n):\n \"\"\"\n sim_telarray config file name for a telescope.\n\n Parameters\n ----------\n site: str\n South or North.\n telescope_model_name: str\n LST-1, MST-FlashCam, ...\n model_version: str\n Version of the model.\n label: str\n Instance label.\n extra_label: str\n Extra label in case of multiple telescope config files.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = f\"CTA-{site}-{telescope_model_name}-{model_version}\"\n name += f\"_{label}\" if label is not None else \"\"\n name += f\"_{extra_label}\" if extra_label is not None else \"\"\n name += \".cfg\"\n return name\n\n\ndef simtel_array_config_file_name(array_name, site, version, label):\n \"\"\"\n sim_telarray config file name for an array.\n\n Parameters\n ----------\n array_name: str\n Prod5, ...\n site: str\n South or North.\n version: str\n Version of the model.\n label: str\n Instance label.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = f\"CTA-{array_name}-{site}-{version}\"\n name += f\"_{label}\" if label is not None else \"\"\n name += \".cfg\"\n return name\n\n\ndef simtel_single_mirror_list_file_name(\n site, telescope_model_name, model_version, mirror_number, label\n):\n \"\"\"\n sim_telarray mirror list file with a single mirror.\n\n Parameters\n ----------\n site: str\n South or North.\n telescope_model_name: str\n North-LST-1, South-MST-FlashCam, ...\n model_version: str\n Version of the model.\n mirror_number: int\n Mirror number.\n label: str\n Instance label.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = f\"CTA-single-mirror-list-{site}-{telescope_model_name}-{model_version}\"\n name += f\"-mirror{mirror_number}\"\n name += f\"_{label}\" if label is not None else \"\"\n name += \".dat\"\n return name\n\n\ndef layout_telescope_list_file_name(name, label):\n \"\"\"\n File name for files required at the RayTracing class.\n\n Parameters\n ----------\n name: str\n Name of the array.\n label: str\n Instance label.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n file_name = f\"telescope_positions-{name}\"\n file_name += f\"_{label}\" if label is not None else \"\"\n file_name += \".ecsv\"\n return file_name\n\n\ndef ray_tracing_file_name(\n site,\n telescope_model_name,\n source_distance,\n zenith_angle,\n off_axis_angle,\n mirror_number,\n label,\n base,\n):\n \"\"\"\n File name for files required at the RayTracing class.\n\n Parameters\n ----------\n site: str\n South or North.\n telescope_model_name: str\n LST-1, MST-FlashCam, ...\n source_distance: float\n Source distance (km).\n zenith_angle: float\n Zenith angle (deg).\n off_axis_angle: float\n Off-axis angle (deg).\n mirror_number: int\n Mirror number. None if not single mirror case.\n label: str\n Instance label.\n base: str\n Photons, stars or log.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = (\n f\"{base}-{site}-{telescope_model_name}-d{source_distance:.1f}\"\n f\"-za{zenith_angle:.1f}-off{off_axis_angle:.3f}\"\n )\n name += f\"_mirror{mirror_number}\" if mirror_number is not None else \"\"\n name += f\"_{label}\" if label is not None else \"\"\n name += \".log\" if base == \"log\" else \".lis\"\n return name\n\n\ndef ray_tracing_results_file_name(site, telescope_model_name, source_distance, zenith_angle, label):\n \"\"\"\n Ray tracing results file name.\n\n Parameters\n ----------\n site: str\n South or North.\n telescope_model_name: str\n LST-1, MST-FlashCam, ...\n source_distance: float\n Source distance (km).\n zenith_angle: float\n Zenith angle (deg).\n label: str\n Instance label.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = f\"ray-tracing-{site}-{telescope_model_name}-d{source_distance:.1f}-za{zenith_angle:.1f}\"\n name += f\"_{label}\" if label is not None else \"\"\n name += \".ecsv\"\n return name\n\n\ndef ray_tracing_plot_file_name(\n key, site, telescope_model_name, source_distance, zenith_angle, label\n):\n \"\"\"\n Ray tracing plot file name.\n\n Parameters\n ----------\n key: str\n Quantity to be plotted (d80_cm, d80_deg, eff_area or eff_flen)\n site: str\n South or North.\n telescope_model_name: str\n LST-1, MST-FlashCam, ...\n source_distance: float\n Source distance (km).\n zenith_angle: float\n Zenith angle (deg).\n label: str\n Instance label.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = (\n f\"ray-tracing-{site}-{telescope_model_name}-{key}-\"\n f\"d{source_distance:.1f}-za{zenith_angle:.1f}\"\n )\n name += f\"_{label}\" if label is not None else \"\"\n name += \".pdf\"\n return name\n\n\ndef camera_efficiency_results_file_name(site, telescope_model_name, zenith_angle, label):\n \"\"\"\n Camera efficiency results file name.\n\n Parameters\n ----------\n site: str\n South or North.\n telescope_model_name: str\n LST-1, MST-FlashCam, ...\n zenith_angle: float\n Zenith angle (deg).\n label: str\n Instance label.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = f\"camera-efficiency-{site}-{telescope_model_name}-za{zenith_angle:.1f}\"\n name += f\"_{label}\" if label is not None else \"\"\n name += \".ecsv\"\n return name\n\n\ndef camera_efficiency_simtel_file_name(site, telescope_model_name, zenith_angle, label):\n \"\"\"\n Camera efficiency simtel output file name.\n\n Parameters\n ----------\n site: str\n South or North.\n telescope_model_name: str\n LST-1, MST-FlashCam-D, ...\n zenith_angle: float\n Zenith angle (deg).\n label: str\n Instance label.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = f\"camera-efficiency-{site}-{telescope_model_name}-za{zenith_angle:.1f}\"\n name += f\"_{label}\" if label is not None else \"\"\n name += \".dat\"\n return name\n\n\ndef camera_efficiency_log_file_name(site, telescope_model_name, zenith_angle, label):\n \"\"\"\n Camera efficiency log file name.\n\n Parameters\n ----------\n site: str\n South or North.\n telescope_model_name: str\n LST-1, MST-FlashCam-D, ...\n zenith_angle: float\n Zenith angle (deg).\n label: str\n Instance label.\n\n Returns\n -------\n str\n File name.\n \"\"\"\n name = f\"camera-efficiency-{site}-{telescope_model_name}-za{zenith_angle:.1f}\"\n name += f\"_{label}\" if label is not None else \"\"\n name += \".log\"\n return name\n\n\ndef get_telescope_type(telescope_name):\n \"\"\"\n Guess telescope type from name, e.g. \"LST\", \"MST\", ...\n\n Parameters\n ----------\n telescope_name: str\n Telescope name\n\n Returns\n -------\n str\n Telescope type.\n \"\"\"\n\n _class, _ = split_telescope_model_name(telescope_name)\n try:\n if _class[0:3] in all_telescope_class_names:\n return _class[0:3]\n\n except IndexError:\n pass\n\n return \"\"\n\n\ndef translate_corsika_to_simtools(corsika_par):\n \"\"\"\n Translate the name of a CORSIKA parameter to the name used in simtools.\n\n Parameters\n ----------\n corsika_par: str\n Name of the corsika parameter to be translated.\n\n \"\"\"\n\n try:\n return corsika_to_simtools_names[corsika_par]\n except KeyError:\n msg = f\"Translation not found. We will proceed with the original parameter name:\\\n {corsika_par}.\"\n _logger.debug(msg)\n return corsika_par\n\n\ndef translate_simtools_to_corsika(simtools_par):\n \"\"\"\n Translate the name of a simtools parameter to the name used in CORSIKA.\n\n Parameters\n ----------\n simtools_par: str\n Name of the simtools parameter to be translated.\n \"\"\"\n\n simtools_to_corsika_names = {\n new_key: new_value for new_value, new_key in corsika_to_simtools_names.items()\n }\n try:\n return simtools_to_corsika_names[simtools_par]\n except KeyError:\n msg = f\"Translation not found. We will proceed with the original parameter name:\\\n {simtools_par}.\"\n _logger.debug(msg)\n return simtools_par\n","sub_path":"simtools/util/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":18218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"217515745","text":"def main():\n\n n = int(input(\"Número: \"))\n resultado(n)\n\n\ndef resultado(n):\n perfeito = 0 # contador\n while n > 0:\n quadrado = n ** 0.5 # tirar raiz quadrada\n if quadrado % 1 == 0: # verifica o resto da divisão\n print(f'{n} - {quadrado}')\n perfeito += 1 # incrementa o contador\n if perfeito == 1: # para a função se achar o primeiro resultado\n break\n n -= 1\n\n\nmain()\n","sub_path":"Python/Fabio03_while/parte_1/q14-menor_quadrado.py","file_name":"q14-menor_quadrado.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"486071465","text":"import time\nimport os\nimport numpy as np\nnp.seterr(all='ignore')\n\nfrom functools import partial\nfrom collections import OrderedDict\nimport json\n\nimport wx\nimport wx.lib.scrolledpanel as scrolled\nimport wx.lib.agw.flatnotebook as flat_nb\n\nimport wx.dataview as dv\n\nfrom wxutils import (SimpleText, pack, Button, HLine, Choice, Check,\n MenuItem, GUIColors, GridPanel, CEN, RCEN, LCEN,\n FRAMESTYLE, Font, FileSave, FileOpen)\n\nfrom lmfit import Parameter, Parameters, fit_report\ntry:\n from lmfit.model import save_modelresult, load_modelresult\n HAS_MODELSAVE = True\nexcept ImportError:\n HAS_MODELSAVE = False\n\nimport lmfit.models as lm_models\nfrom lmfit.printfuncs import gformat, CORREL_HEAD\n\nfrom larch import Group, site_config\nfrom larch.utils import index_of\nfrom larch.utils.jsonutils import encode4js, decode4js\n\nfrom larch.wxlib import (ReportFrame, BitmapButton, ParameterWidgets,\n FloatCtrl, SetTip)\n\nfrom larch_plugins.std import group2dict\nfrom larch_plugins.io.export_modelresult import export_modelresult\nfrom larch_plugins.wx.icons import get_icon\nfrom larch_plugins.wx.parameter import ParameterPanel\n\nLCEN = wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL\nCEN |= wx.ALL\n\nFNB_STYLE = flat_nb.FNB_NO_X_BUTTON|flat_nb.FNB_NO_NAV_BUTTONS\n\nModelTypes = ('Peaks', 'General', 'Steps')\n\nModelChoices = {'steps': ('', 'Linear Step', 'Arctan Step',\n 'ErrorFunction Step', 'Logistic Step', 'Rectangle'),\n 'general': ('', 'Constant', 'Linear',\n 'Quadratic', 'Exponential', 'PowerLaw'),\n 'peaks': ('', 'Gaussian', 'Lorentzian',\n 'Voigt', 'PseudoVoigt', 'DampedHarmonicOscillator',\n 'Pearson7', 'StudentsT', 'SkewedGaussian',\n 'Moffat', 'BreitWigner', 'Donaich', 'Lognormal'),\n }\n\nFitMethods = (\"Levenberg-Marquardt\", \"Nelder-Mead\", \"Powell\")\n\nMIN_CORREL = 0.0010\n\nclass FitResultFrame(wx.Frame):\n def __init__(self, parent=None, controller=None, datagroup=None, **kws):\n\n wx.Frame.__init__(self, None, -1, title='Fit Results',\n style=FRAMESTYLE, size=(600, 675), **kws)\n self.parent = parent\n self.controller = controller\n self.larch = controller.larch\n self.datagroup = datagroup\n self.build()\n self.show()\n\n def build(self):\n sizer = wx.GridBagSizer(10, 5)\n sizer.SetVGap(2)\n sizer.SetHGap(2)\n\n panel = scrolled.ScrolledPanel(self)\n self.SetMinSize((600, 450))\n self.colors = GUIColors()\n\n # title row\n self.wids = wids = {}\n title = SimpleText(panel, 'Fit Results', font=Font(12),\n colour=self.colors.title, style=LCEN)\n\n wids['data_title'] = SimpleText(panel, '< > ', font=Font(12),\n colour=self.colors.title, style=LCEN)\n\n wids['hist_tag'] = SimpleText(panel, 'Fit #1', font=Font(12),\n colour=self.colors.title, style=LCEN)\n\n wids['hist_info'] = SimpleText(panel, ' ___ ', font=Font(12),\n colour=self.colors.title, style=LCEN)\n\n sizer.Add(title, (0, 0), (1, 2), LCEN)\n sizer.Add(wids['data_title'], (0, 2), (1, 2), LCEN)\n sizer.Add(wids['hist_tag'], (0, 4), (1, 1), LCEN)\n sizer.Add(wids['hist_info'], (0, 5), (1, 1), LCEN)\n\n irow = 1\n wids['model_desc'] = SimpleText(panel, '', font=Font(12))\n sizer.Add(wids['model_desc'], (irow, 0), (1, 5), LCEN)\n\n irow += 1\n sizer.Add(HLine(panel, size=(400, 3)), (irow, 0), (1, 5), LCEN)\n\n irow += 1\n title = SimpleText(panel, '[[Fit Statistics]]', font=Font(12),\n colour=self.colors.title, style=LCEN)\n sizer.Add(title, (irow, 0), (1, 4), LCEN)\n\n for label, attr in (('Fit method', 'method'),\n ('# Fit Evaluations', 'nfev'),\n ('# Data Points', 'ndata'),\n ('# Fit Variables', 'nvarys'),\n ('# Free Points', 'nfree'),\n ('Chi-square', 'chisqr'),\n ('Reduced Chi-square', 'redchi'),\n ('Akaike Info Criteria', 'aic'),\n ('Bayesian Info Criteria', 'bic')):\n irow += 1\n wids[attr] = SimpleText(panel, '?')\n sizer.Add(SimpleText(panel, \" %s = \" % label), (irow, 0), (1, 1), LCEN)\n sizer.Add(wids[attr], (irow, 1), (1, 1), LCEN)\n\n irow += 1\n sizer.Add(HLine(panel, size=(400, 3)), (irow, 0), (1, 5), LCEN)\n\n irow += 1\n title = SimpleText(panel, '[[Variables]]', font=Font(12),\n colour=self.colors.title, style=LCEN)\n sizer.Add(title, (irow, 0), (1, 1), LCEN)\n\n self.wids['copy_params'] = Button(panel, 'Update Model with Best Fit Values',\n size=(250, -1), action=self.onCopyParams)\n\n sizer.Add(self.wids['copy_params'], (irow, 1), (1, 3), LCEN)\n\n dvstyle = dv.DV_SINGLE|dv.DV_VERT_RULES|dv.DV_ROW_LINES\n pview = self.wids['params'] = dv.DataViewListCtrl(panel, style=dvstyle)\n self.wids['paramsdata'] = []\n pview.AppendTextColumn('Parameter', width=150)\n pview.AppendTextColumn('Best-Fit Value', width=100)\n pview.AppendTextColumn('Standard Error', width=100)\n pview.AppendTextColumn('Info ', width=275)\n\n for col in (0, 1, 2, 3):\n this = pview.Columns[col]\n isort, align = True, wx.ALIGN_LEFT\n if col in (1, 2):\n isort, align = False, wx.ALIGN_RIGHT\n this.Sortable = isort\n this.Alignment = this.Renderer.Alignment = align\n\n pview.SetMinSize((650, 200))\n pview.Bind(dv.EVT_DATAVIEW_SELECTION_CHANGED, self.onSelectParameter)\n\n irow += 1\n sizer.Add(pview, (irow, 0), (1, 5), LCEN)\n\n irow += 1\n sizer.Add(HLine(panel, size=(400, 3)), (irow, 0), (1, 5), LCEN)\n\n irow += 1\n title = SimpleText(panel, '[[Correlations]]', font=Font(12),\n colour=self.colors.title, style=LCEN)\n\n self.wids['all_correl'] = Button(panel, 'Show All',\n size=(100, -1), action=self.onAllCorrel)\n\n self.wids['min_correl'] = FloatCtrl(panel, value=MIN_CORREL,\n minval=0, size=(60, -1), gformat=True)\n\n ctitle = SimpleText(panel, 'minimum correlation: ')\n sizer.Add(title, (irow, 0), (1, 1), LCEN)\n sizer.Add(ctitle, (irow, 1), (1, 1), LCEN)\n sizer.Add(self.wids['min_correl'], (irow, 2), (1, 1), LCEN)\n sizer.Add(self.wids['all_correl'], (irow, 3), (1, 1), LCEN)\n\n irow += 1\n\n cview = self.wids['correl'] = dv.DataViewListCtrl(panel, style=dvstyle)\n\n cview.AppendTextColumn('Parameter 1', width=150)\n cview.AppendTextColumn('Parameter 2', width=150)\n cview.AppendTextColumn('Correlation', width=100)\n\n for col in (0, 1, 2):\n this = cview.Columns[col]\n isort, align = True, wx.ALIGN_LEFT\n if col == 1:\n isort = False\n if col == 2:\n align = wx.ALIGN_RIGHT\n this.Sortable = isort\n this.Alignment = this.Renderer.Alignment = align\n cview.SetMinSize((450, 200))\n\n irow += 1\n sizer.Add(cview, (irow, 0), (1, 5), LCEN)\n irow += 1\n sizer.Add(HLine(panel, size=(400, 3)), (irow, 0), (1, 5), LCEN)\n\n pack(panel, sizer)\n panel.SetupScrolling()\n\n mainsizer = wx.BoxSizer(wx.VERTICAL)\n mainsizer.Add(panel, 1, wx.GROW|wx.ALL, 1)\n\n pack(self, mainsizer)\n self.Show()\n self.Raise()\n\n def onSelectParameter(self, evt=None):\n if self.wids['params'] is None:\n return\n if not self.wids['params'].HasSelection():\n return\n item = self.wids['params'].GetSelectedRow()\n pname = self.wids['paramsdata'][item]\n\n cormin= self.wids['min_correl'].GetValue()\n self.wids['correl'].DeleteAllItems()\n\n fit_history = getattr(self.datagroup, 'fit_history', [])\n result = fit_history[-1]\n this = result.params[pname]\n if this.correl is not None:\n sort_correl = sorted(this.correl.items(), key=lambda it: abs(it[1]))\n for name, corval in reversed(sort_correl):\n if abs(corval) > cormin:\n self.wids['correl'].AppendItem((pname, name, \"% .4f\" % corval))\n\n def onAllCorrel(self, evt=None):\n fit_history = getattr(self.datagroup, 'fit_history', [])\n params = fit_history[-1].params\n parnames = list(params.keys())\n\n cormin= self.wids['min_correl'].GetValue()\n correls = {}\n for i, name in enumerate(parnames):\n par = params[name]\n if not par.vary:\n continue\n if hasattr(par, 'correl') and par.correl is not None:\n # print(par, par.correl)\n for name2 in parnames[i+1:]:\n if (name != name2 and name2 in par.correl and\n abs(par.correl[name2]) > cormin):\n correls[\"%s$$%s\" % (name, name2)] = par.correl[name2]\n\n sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))\n sort_correl.reverse()\n\n self.wids['correl'].DeleteAllItems()\n\n for namepair, corval in sort_correl:\n name1, name2 = namepair.split('$$')\n self.wids['correl'].AppendItem((name1, name2, \"% .4f\" % corval))\n\n def onCopyParams(self, evt=None):\n fit_history = getattr(self.datagroup, 'fit_history', [])\n self.parent.fit_panel.update_start_values(fit_history[-1])\n\n def show(self, datagroup=None):\n if datagroup is not None:\n self.datagroup = datagroup\n\n fit_history = getattr(self.datagroup, 'fit_history', [])\n if len(fit_history) < 1:\n print(\"No fit reults to show for datagroup \", self.datagroup)\n result = fit_history[-1]\n wids = self.wids\n wids['method'].SetLabel(result.method)\n wids['ndata'].SetLabel(\"%d\" % result.ndata)\n wids['nvarys'].SetLabel(\"%d\" % result.nvarys)\n wids['nfree'].SetLabel(\"%d\" % result.nfree)\n wids['nfev'].SetLabel(\"%d\" % result.nfev)\n wids['redchi'].SetLabel(\"%f\" % result.redchi)\n wids['chisqr'].SetLabel(\"%f\" % result.chisqr)\n wids['aic'].SetLabel(\"%f\" % result.aic)\n wids['bic'].SetLabel(\"%f\" % result.bic)\n wids['hist_info'].SetLabel(\"%d\" % len(fit_history))\n wids['hist_tag'].SetLabel(\"Latest Fit\") #\n\n wids['data_title'].SetLabel(self.datagroup.filename)\n\n wids['model_desc'].SetLabel(result.model_repr)\n wids['params'].DeleteAllItems()\n wids['paramsdata'] = []\n for i, param in enumerate(result.params.values()):\n pname = param.name\n try:\n val = gformat(param.value)\n except (TypeError, ValueError):\n val = ' ??? '\n\n serr = ' N/A '\n if param.stderr is not None:\n serr = gformat(param.stderr, length=9)\n\n extra = ' '\n if param.expr is not None:\n extra = ' = %s ' % param.expr\n elif param.init_value is not None:\n extra = ' (init=% .7g)' % param.init_value\n elif not param.vary:\n extra = ' (fixed)'\n\n wids['params'].AppendItem((pname, val, serr, extra))\n wids['paramsdata'].append(pname)\n\n self.Refresh()\n\nclass PrePeakPanel(wx.Panel):\n def __init__(self, parent=None, controller=None, **kws):\n\n wx.Panel.__init__(self, parent, -1, size=(550, 625), **kws)\n self.parent = parent\n self.controller = controller\n self.larch = controller.larch\n self.fit_components = OrderedDict()\n self.fit_model = None\n self.fit_params = None\n self.user_added_params = None\n self.summary = None\n self.sizer = wx.GridBagSizer(10, 6)\n self.build_display()\n self.pick2_timer = wx.Timer(self)\n self.pick2_group = None\n self.Bind(wx.EVT_TIMER, self.onPick2Timer, self.pick2_timer)\n self.pick2_t0 = 0.\n self.pick2_timeout = 15.\n\n self.pick2erase_timer = wx.Timer(self)\n self.pick2erase_panel = None\n self.Bind(wx.EVT_TIMER, self.onPick2EraseTimer, self.pick2erase_timer)\n\n def build_display(self):\n\n self.mod_nb = flat_nb.FlatNotebook(self, -1, agwStyle=FNB_STYLE)\n self.mod_nb.SetTabAreaColour(wx.Colour(250,250,250))\n self.mod_nb.SetActiveTabColour(wx.Colour(254,254,195))\n\n self.mod_nb.SetNonActiveTabTextColour(wx.Colour(10,10,128))\n self.mod_nb.SetActiveTabTextColour(wx.Colour(128,0,0))\n self.mod_nb.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.onNBChanged)\n\n pan = self.panel = GridPanel(self, ncols=4, nrows=4, pad=2, itemstyle=LCEN)\n\n self.btns = {}\n for name in ('ppeak_elo', 'ppeak_emin', 'ppeak_emax', 'ppeak_ehi'):\n bb = BitmapButton(pan, get_icon('plus'),\n action=partial(self.on_selpoint, opt=name),\n tooltip='use last point selected from plot')\n self.btns[name] = bb\n\n opts = dict(size=(65, -1), gformat=True, precision=1,\n # action=self.UpdatePlot,\n )\n\n self.ppeak_emin = FloatCtrl(pan, value=-30, **opts)\n self.ppeak_emax = FloatCtrl(pan, value=0, **opts)\n self.ppeak_elo = FloatCtrl(pan, value=-15, **opts)\n self.ppeak_ehi = FloatCtrl(pan, value=-5, **opts)\n\n self.ppeak_bkgfit = Button(pan, 'Fit Pre-edge Baseline', size=(175, 30),\n action=self.onPreedgeBaseline)\n\n self.model_type = Choice(pan, size=(100, -1),\n choices=ModelTypes,\n action=self.onModelTypes)\n\n self.model_func = Choice(pan, size=(200, -1),\n choices=ModelChoices['peaks'],\n action=self.addModel)\n\n pan.Add(SimpleText(pan, 'Fit Energy Range: '), newrow=True)\n pan.Add(self.btns['ppeak_emin'])\n pan.Add(self.ppeak_emin)\n pan.Add(SimpleText(pan, ':'))\n pan.Add(self.btns['ppeak_emax'])\n pan.Add(self.ppeak_emax)\n\n t = SimpleText(pan, 'Pre-edge Peak Range: ')\n t.SetToolTip('Range used as mask for background')\n\n pan.Add(t, newrow=True)\n pan.Add(self.btns['ppeak_elo'])\n pan.Add(self.ppeak_elo)\n pan.Add(SimpleText(pan, ':'))\n pan.Add(self.btns['ppeak_ehi'])\n pan.Add(self.ppeak_ehi)\n pan.Add(self.ppeak_bkgfit)\n\n pan.Add(SimpleText(pan, ' Add Model Type: '), newrow=True)\n pan.Add(self.model_type, dcol=3)\n pan.Add(SimpleText(pan, ' Model: '), dcol=2)\n pan.Add(self.model_func)\n\n\n pan.pack()\n\n# rsizer.Add(SimpleText(range_row, 'Fit Range X=[ '), 0, LCEN, 3)\n# rsizer.Add(xmin_sel, 0, LCEN, 3)\n# rsizer.Add(self.xmin, 0, LCEN, 3)\n# rsizer.Add(SimpleText(range_row, ' : '), 0, LCEN, 3)\n# rsizer.Add(xmax_sel, 0, LCEN, 3)\n# rsizer.Add(self.xmax, 0, LCEN, 3)\n# rsizer.Add(SimpleText(range_row, ' ] '), 0, LCEN, 3)\n# rsizer.Add(Button(range_row, 'Full Data Range', size=(150, -1),\n# action=self.onResetRange), 0, LCEN, 3)\n# pack(range_row, rsizer)\n\n\n# self.plot_comps = Check(pan, label='Plot Components?',\n# default=True, size=(150, -1))\n#\n# rsizer.Add(Button(a, 'Run Fit',\n# size=(100, -1), action=self.onRunFit), 0, RCEN, 3)\n# self.savebtn = Button(action_row, 'Save Fit',\n# size=(100, -1), action=self.onSaveFitResult)\n# self.savebtn.Disable()\n# rsizer.Add(self.savebtn, 0, LCEN, 3)\n#\n# rsizer.Add(Button(action_row, 'Plot Current Model',\n# size=(175, -1), action=self.onShowModel), 0, LCEN, 3)\n# rsizer.Add(self.plot_comps, 0, LCEN, 3)\n#\n# pack(action_row, rsizer)\n#\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.AddMany([((10, 10), 0, LCEN, 10), (pan, 0, LCEN, 10),\n ((10, 10), 0, LCEN, 10),\n (HLine(self, size=(550, 3)), 0, LCEN, 4),\n ((10,10), 0, LCEN, 2),\n (self.mod_nb, 1, LCEN|wx.GROW, 10)])\n\n pack(self, sizer)\n\n def onPreedgeBaseline(self, evt=None):\n print(\" on preedge baseline\")\n\n\n def onNBChanged(self, event=None):\n idx = self.mod_nb.GetSelection()\n\n def onModelTypes(self, event=None):\n modtype = event.GetString().lower()\n self.model_func.SetChoices(ModelChoices[modtype])\n\n def addModel(self, event=None, model=None):\n if model is None and event is not None:\n model = event.GetString()\n if model is None or model.startswith('<'):\n return\n\n p = model[0].lower()\n curmodels = [\"%s%i_\" % (p, i+1) for i in range(1+len(self.fit_components))]\n for comp in self.fit_components:\n if comp in curmodels:\n curmodels.remove(comp)\n\n prefix = curmodels[0]\n\n label = \"%s(prefix='%s')\" % (model, prefix)\n title = \"%s: %s\" % (prefix[:-1], (model+' '*8)[:8])\n mclass_kws = {'prefix': prefix}\n if 'step' in model.lower():\n form = model.lower().replace('step', '').strip()\n\n if form.startswith('err'): form = 'erf'\n label = \"Step(form='%s', prefix='%s')\" % (form, prefix)\n title = \"%s: Step %s\" % (prefix[:-1], form[:3])\n mclass = lm_models.StepModel\n mclass_kws['form'] = form\n minst = mclass(form=form, prefix=prefix)\n else:\n mclass = getattr(lm_models, model+'Model')\n minst = mclass(prefix=prefix)\n\n panel = GridPanel(self.mod_nb, ncols=1, nrows=1, pad=1, itemstyle=CEN)\n\n def SLabel(label, size=(80, -1), **kws):\n return SimpleText(panel, label,\n size=size, style=wx.ALIGN_LEFT, **kws)\n usebox = Check(panel, default=True, label='Use in Fit?', size=(150, -1))\n bkgbox = Check(panel, default=False, label='Is Background?', size=(150, -1))\n\n delbtn = Button(panel, 'Delete Component', size=(150, -1),\n action=partial(self.onDeleteComponent, prefix=prefix))\n\n pick2msg = SimpleText(panel, \" \", size=(125, -1))\n pick2btn = Button(panel, 'Pick Values from Data', size=(200, -1),\n action=partial(self.onPick2Points, prefix=prefix))\n\n # SetTip(mname, 'Label for the model component')\n SetTip(usebox, 'Use this component in fit?')\n SetTip(bkgbox, 'Label this component as \"background\" when plotting?')\n SetTip(delbtn, 'Delete this model component')\n SetTip(pick2btn, 'Select X range on Plot to Guess Initial Values')\n\n panel.Add(SLabel(label, size=(275, -1), colour='#0000AA'),\n dcol=3, style=wx.ALIGN_LEFT, newrow=True)\n panel.Add(usebox, dcol=2)\n panel.Add(bkgbox, dcol=2)\n\n panel.Add(pick2btn, dcol=3, style=wx.ALIGN_LEFT, newrow=True)\n panel.Add(pick2msg, dcol=3, style=wx.ALIGN_RIGHT)\n panel.Add(delbtn, style=wx.ALIGN_LEFT)\n\n # panel.Add((10, 10), newrow=True)\n # panel.Add(HLine(panel, size=(150, 3)), dcol=4, style=wx.ALIGN_CENTER)\n\n panel.Add(SLabel(\"Parameter \"), style=wx.ALIGN_LEFT, newrow=True)\n panel.AddMany((SLabel(\" Value\"), SLabel(\" Type\"), SLabel(' Bounds'),\n SLabel(\" Min\", size=(60, -1)),\n SLabel(\" Max\", size=(60, -1)),\n SLabel(\" Expression\")))\n\n parwids = OrderedDict()\n parnames = sorted(minst.param_names)\n\n for a in minst._func_allargs:\n pname = \"%s%s\" % (prefix, a)\n if (pname not in parnames and\n a in minst.param_hints and\n a not in minst.independent_vars):\n parnames.append(pname)\n\n for pname in parnames:\n sname = pname[len(prefix):]\n hints = minst.param_hints.get(sname, {})\n\n par = Parameter(name=pname, value=0, vary=True)\n if 'min' in hints:\n par.min = hints['min']\n if 'max' in hints:\n par.max = hints['max']\n if 'value' in hints:\n par.value = hints['value']\n if 'expr' in hints:\n par.expr = hints['expr']\n\n pwids = ParameterWidgets(panel, par, name_size=100, expr_size=175,\n float_size=80, prefix=prefix,\n widgets=('name', 'value', 'minval',\n 'maxval', 'vary', 'expr'))\n parwids[par.name] = pwids\n panel.Add(pwids.name, newrow=True)\n\n panel.AddMany((pwids.value, pwids.vary, pwids.bounds,\n pwids.minval, pwids.maxval, pwids.expr))\n\n for sname, hint in minst.param_hints.items():\n pname = \"%s%s\" % (prefix, sname)\n if 'expr' in hint and pname not in parnames:\n par = Parameter(name=pname, value=0, expr=hint['expr'])\n\n pwids = ParameterWidgets(panel, par, name_size=100, expr_size=400,\n float_size=80, prefix=prefix,\n widgets=('name', 'value', 'expr'))\n parwids[par.name] = pwids\n panel.Add(pwids.name, newrow=True)\n panel.Add(pwids.value)\n panel.Add(pwids.expr, dcol=5, style=wx.ALIGN_RIGHT)\n pwids.value.Disable()\n\n\n # panel.Add(delbtn, dcol=2)\n # panel.Add(HLine(panel, size=(250, 3)), dcol=3, style=wx.ALIGN_CENTER)\n\n fgroup = Group(prefix=prefix, title=title, mclass=mclass,\n mclass_kws=mclass_kws, usebox=usebox, panel=panel,\n parwids=parwids, float_size=65, expr_size=150,\n pick2_msg=pick2msg)\n\n self.fit_components[prefix] = fgroup\n panel.pack()\n\n self.mod_nb.AddPage(panel, title, True)\n sx,sy = self.GetSize()\n self.SetSize((sx, sy+1))\n self.SetSize((sx, sy))\n\n def onDeleteComponent(self, evt=None, prefix=None):\n fgroup = self.fit_components.get(prefix, None)\n if fgroup is None:\n return\n\n for i in range(self.mod_nb.GetPageCount()):\n if fgroup.title == self.mod_nb.GetPageText(i):\n self.mod_nb.DeletePage(i)\n\n for attr in dir(fgroup):\n setattr(fgroup, attr, None)\n\n self.fit_components.pop(prefix)\n\n sx,sy = self.GetSize()\n self.SetSize((sx, sy+1))\n self.SetSize((sx, sy))\n\n def onPick2EraseTimer(self, evt=None):\n \"\"\"erases line trace showing automated 'Pick 2' guess \"\"\"\n self.pick2erase_timer.Stop()\n panel = self.pick2erase_panel\n ntrace = panel.conf.ntrace - 1\n trace = panel.conf.get_mpl_line(ntrace)\n panel.conf.get_mpl_line(ntrace).set_data(np.array([]), np.array([]))\n panel.conf.ntrace = ntrace\n panel.draw()\n\n def onPick2Timer(self, evt=None):\n \"\"\"checks for 'Pick 2' events, and initiates 'Pick 2' guess\n for a model from the selected data range\n \"\"\"\n try:\n plotframe = self.controller.get_display(stacked=False)\n curhist = plotframe.cursor_hist[:]\n plotframe.Raise()\n except:\n return\n\n if (time.time() - self.pick2_t0) > self.pick2_timeout:\n msg = self.pick2_group.pick2_msg.SetLabel(\" \")\n plotframe.cursor_hist = []\n self.pick2_timer.Stop()\n return\n\n if len(curhist) < 2:\n self.pick2_group.pick2_msg.SetLabel(\"%i/2\" % (len(curhist)))\n return\n\n self.pick2_group.pick2_msg.SetLabel(\"done.\")\n self.pick2_timer.Stop()\n\n # guess param values\n xcur = (curhist[0][0], curhist[1][0])\n xmin, xmax = min(xcur), max(xcur)\n\n dgroup = getattr(self.larch.symtable, self.controller.groupname)\n x, y = dgroup.x, dgroup.y\n i0 = index_of(dgroup.x, xmin)\n i1 = index_of(dgroup.x, xmax)\n x, y = dgroup.x[i0:i1+1], dgroup.y[i0:i1+1]\n\n mod = self.pick2_group.mclass(prefix=self.pick2_group.prefix)\n parwids = self.pick2_group.parwids\n try:\n guesses = mod.guess(y, x=x)\n except:\n return\n\n for name, param in guesses.items():\n if name in parwids:\n parwids[name].value.SetValue(param.value)\n\n dgroup._tmp = mod.eval(guesses, x=dgroup.x)\n plotframe = self.controller.get_display(stacked=False)\n plotframe.cursor_hist = []\n plotframe.oplot(dgroup.x, dgroup._tmp)\n self.pick2erase_panel = plotframe.panel\n\n self.pick2erase_timer.Start(5000)\n\n\n def onPick2Points(self, evt=None, prefix=None):\n fgroup = self.fit_components.get(prefix, None)\n if fgroup is None:\n return\n\n plotframe = self.controller.get_display(stacked=False)\n plotframe.Raise()\n\n plotframe.cursor_hist = []\n fgroup.npts = 0\n self.pick2_group = fgroup\n\n if fgroup.pick2_msg is not None:\n fgroup.pick2_msg.SetLabel(\"0/2\")\n\n self.pick2_t0 = time.time()\n self.pick2_timer.Start(250)\n\n def onSaveFitResult(self, event=None):\n dgroup = self.get_datagroup()\n deffile = dgroup.filename.replace('.', '_') + '.fitresult'\n wcards = 'Fit Results(*.fitresult)|*.fitresult|All files (*.*)|*.*'\n\n outfile = FileSave(self, 'Save Fit Result',\n default_file=deffile,\n wildcard=wcards)\n\n if outfile is not None:\n try:\n save_modelresult(dgroup.fit_history[-1], outfile)\n except IOError:\n print('could not write %s' % outfile)\n\n def onLoadFitResult(self, event=None):\n\n wcards = 'Fit Results(*.fitresult)|*.fitresult|All files (*.*)|*.*'\n\n mfile = FileOpen(self, 'Load Fit Result',\n default_file='', wildcard=wcards)\n model = None\n\n if mfile is not None:\n try:\n model = load_modelresult(mfile)\n except IOError:\n print('could not read model result %s' % mfile)\n return\n if model is None:\n return\n print(\" Loading Model (work in progress) \", model)\n\n def onExportFitResult(self, event=None):\n dgroup = self.get_datagroup()\n deffile = dgroup.filename.replace('.', '_') + '_result.xdi'\n wcards = 'All files (*.*)|*.*'\n\n outfile = FileSave(self, 'Export Fit Result',\n default_file=deffile,\n wildcard=wcards)\n\n if outfile is None:\n return\n\n dgroup = self.get_datagroup()\n\n i1, i2, xv1, xv2 = self.get_xranges(dgroup.x)\n x = dgroup.x[slice(i1, i2)]\n y = dgroup.y[slice(i1, i2)]\n yerr = None\n if hasattr(dgroup, 'yerr'):\n yerr = dgroup.yerr\n if not isinstance(yerr, np.ndarray):\n yerr = yerr * np.ones(len(y))\n else:\n yerr = yerr[slice(i1, i2)]\n\n export_modelresult(dgroup.fit_history[-1], filename=outfile,\n datafile=dgroup.filename,\n ydata=y, yerr=yerr, x=x)\n\n\n def onResetRange(self, event=None):\n dgroup = self.get_datagroup()\n self.xmin.SetValue(min(dgroup.x))\n self.xmax.SetValue(max(dgroup.x))\n\n def on_selpoint(self, evt=None, opt='xmin'):\n xval = None\n try:\n xval = self.larch.symtable._plotter.plot1_x\n except:\n xval = None\n if xval is not None:\n if opt == 'xmin':\n self.xmin.SetValue(xval)\n elif opt == 'xmax':\n self.xmax.SetValue(xval)\n\n def get_datagroup(self):\n dgroup = None\n if self.controller.groupname is not None:\n try:\n dgroup = getattr(self.larch.symtable,\n self.controller.groupname)\n except:\n pass\n return dgroup\n\n def get_xranges(self, x):\n xmin, xmax = min(x), max(x)\n i1, i2 = 0, len(x)\n _xmin = self.xmin.GetValue()\n _xmax = self.xmax.GetValue()\n if _xmin > min(x):\n i1 = index_of(x, _xmin)\n xmin = x[i1]\n if _xmax < max(x):\n i2 = index_of(x, _xmax) + 1\n xmax = x[i2]\n xv1 = max(min(x), xmin - (xmax-xmin)/5.0)\n xv2 = min(max(x), xmax + (xmax-xmin)/5.0)\n return i1, i2, xv1, xv2\n\n def build_fitmodel(self):\n \"\"\" use fit components to build model\"\"\"\n dgroup = self.get_datagroup()\n fullmodel = None\n params = Parameters()\n self.summary = {'components': [], 'options': {}}\n for comp in self.fit_components.values():\n if comp.usebox is not None and comp.usebox.IsChecked():\n for parwids in comp.parwids.values():\n params.add(parwids.param)\n self.summary['components'].append((comp.mclass.__name__, comp.mclass_kws))\n thismodel = comp.mclass(**comp.mclass_kws)\n if fullmodel is None:\n fullmodel = thismodel\n else:\n fullmodel += thismodel\n\n self.fit_model = fullmodel\n self.fit_params = params\n\n if dgroup is not None:\n i1, i2, xv1, xv2 = self.get_xranges(dgroup.x)\n xsel = dgroup.x[slice(i1, i2)]\n dgroup.xfit = xsel\n dgroup.yfit = self.fit_model.eval(self.fit_params, x=xsel)\n dgroup.ycomps = self.fit_model.eval_components(params=self.fit_params,\n x=xsel)\n return dgroup\n\n def onShowModel(self, event=None):\n dgroup = self.build_fitmodel()\n if dgroup is not None:\n with_components = (self.plot_comps.IsChecked() and\n len(dgroup.ycomps) > 1)\n\n self.plot_fitmodel(dgroup, show_resid=False,\n with_components=with_components)\n\n def plot_fitmodel(self, dgroup, show_resid=False, with_components=None):\n if dgroup is None:\n return\n i1, i2, xv1, xv2 = self.get_xranges(dgroup.x)\n ysel = dgroup.y[slice(i1, i2)]\n\n plotframe = self.controller.get_display(stacked=True)\n plotframe.plot(dgroup.xfit, ysel, new=True, panel='top',\n xmin=xv1, xmax=xv2, label='data',\n xlabel=dgroup.plot_xlabel, ylabel=dgroup.plot_ylabel,\n title='Fit: %s' % dgroup.filename )\n\n plotframe.oplot(dgroup.xfit, dgroup.yfit, label='fit')\n\n plotframe.plot(dgroup.xfit, ysel-dgroup.yfit, grid=False,\n marker='o', markersize=4, linewidth=1, panel='bot')\n\n if with_components is None:\n with_components = (self.plot_comps.IsChecked() and\n len(dgroup.ycomps) > 1)\n if with_components:\n for label, _y in dgroup.ycomps.items():\n plotframe.oplot(dgroup.xfit, _y, label=label,\n style='short dashed')\n\n line_opts = dict(color='#AAAAAA', label='_nolegend_',\n linewidth=1, zorder=-5)\n plotframe.panel_bot.axes.axhline(0, **line_opts)\n axvline = plotframe.panel.axes.axvline\n if i1 > 0:\n axvline(dgroup.x[i1], **line_opts)\n\n if i2 < len(dgroup.x):\n axvline(dgroup.x[i2-1], **line_opts)\n\n plotframe.panel.canvas.draw()\n\n\n def onRunFit(self, event=None):\n dgroup = self.build_fitmodel()\n if dgroup is None:\n return\n i1, i2, xv1, xv2 = self.get_xranges(dgroup.x)\n dgroup.xfit = dgroup.x[slice(i1, i2)]\n ysel = dgroup.y[slice(i1, i2)]\n weights = np.ones(len(ysel))\n\n if hasattr(dgroup, 'yerr'):\n yerr = dgroup.yerr\n if not isinstance(yerr, np.ndarray):\n yerr = yerr * np.ones(len(ysel))\n else:\n yerr = yerr[slice(i1, i2)]\n yerr_min = 1.e-9*ysel.mean()\n yerr[np.where(yerr < yerr_min)] = yerr_min\n weights = 1.0/yerr\n\n result = self.fit_model.fit(ysel, params=self.fit_params,\n x=dgroup.xfit, weights=weights,\n method='leastsq')\n self.summary['xmin'] = xv1\n self.summary['xmax'] = xv2\n for attr in ('aic', 'bic', 'chisqr', 'redchi', 'ci_out', 'covar',\n 'flatchain', 'success', 'nan_policy', 'nfev', 'ndata',\n 'nfree', 'nvarys', 'init_values'):\n self.summary[attr] = getattr(result, attr)\n self.summary['params'] = result.params\n\n\n dgroup.yfit = result.best_fit\n dgroup.ycomps = self.fit_model.eval_components(params=result.params,\n x=dgroup.xfit)\n\n\n with_components = (self.plot_comps.IsChecked() and len(dgroup.ycomps) > 1)\n\n self.plot_fitmodel(dgroup, show_resid=True, with_components=with_components)\n\n # print(\" == fit model == \", self.fit_model)\n # print(\" == fit result == \", result)\n\n result.model_repr = self.fit_model._reprstring(long=True)\n\n self.autosave_modelresult(result)\n if not hasattr(dgroup, 'fit_history'):\n dgroup.fit_history = []\n dgroup.fit_history.append(result)\n\n\n self.parent.show_subframe('result_frame', FitResultFrame,\n datagroup=dgroup,\n controller=self.controller)\n\n # self.update_start_values(result)\n self.savebtn.Enable()\n\n for m in self.parent.afterfit_menus:\n self.parent.menuitems[m].Enable(True)\n\n def update_start_values(self, result):\n \"\"\"fill parameters with best fit values\"\"\"\n allparwids = {}\n for comp in self.fit_components.values():\n if comp.usebox is not None and comp.usebox.IsChecked():\n for name, parwids in comp.parwids.items():\n allparwids[name] = parwids\n\n for pname, par in result.params.items():\n if pname in allparwids:\n allparwids[pname].value.SetValue(par.value)\n\n def autosave_modelresult(self, result, fname=None):\n \"\"\"autosave model result to user larch folder\"\"\"\n xasguidir = os.path.join(site_config.usr_larchdir, 'xasgui')\n if not os.path.exists(xasguidir):\n try:\n os.makedirs(xasguidir)\n except OSError:\n print(\"Warning: cannot create XAS GUI user folder\")\n return\n if not HAS_MODELSAVE:\n print(\"Warning: cannot save model results: upgrade lmfit\")\n return\n if fname is None:\n fname = 'autosave.fitresult'\n fname = os.path.join(xasguidir, fname)\n save_modelresult(result, fname)\n","sub_path":"plugins/xasgui/prepeak_panel.py","file_name":"prepeak_panel.py","file_ext":"py","file_size_in_byte":36077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"12372490","text":"from django.core.exceptions import ValidationError\nimport json\nfrom json_tricks import dumps\nfrom core.exceptions.customexceptions import ApiException\n\nclass ActionValidator():\n\n def validateFields(value):\n\n fields=[]\n if (not value):\n raise ValidationError(\"Invalid value. \"+str(value))\n\n if (value) and (value.__class__.__name__==\"str\"):\n return True\n\n fields=value if value.__class__.__name__ == \"list\" else [value]\n\n for field in fields:\n if field.__class__.__name__ == \"dict\":\n if \"function\" in field:\n if not \"function_name\" in field[\"function\"]:\n raise ValidationError(\"A function requires a function_name property. \"+dumps(field)) \n\n if (not \"fields\" in field[\"function\"]) or (len(field[\"function\"][\"fields\"])==0):\n raise ValidationError(\"A function requires a fields property. \"+dumps(field)) \n \n for param in field[\"function\"][\"fields\"]:\n ActionValidator.validateFields(param)\n\n elif \"event_field\" in field:\n if (field[\"event_field\"].strip()==\"\"):\n raise ValidationError(\"An event_field requires a not blank value. \"+dumps(field))\n elif \"workflow\" in field:\n if not \"id\" in field[\"workflow\"]:\n raise ValidationError(\"A workflow requires a not blank id. \"+dumps(field))\n if not \"name\" in field[\"workflow\"]:\n raise ValidationError(\"A workflow requires a not blank name. \"+dumps(field))\n if not \"parameters\" in field[\"workflow\"]:\n raise ValidationError(\"A workflow requires a not blank parameters. \"+dumps(field))\n else:\n raise ValidationError(\"Invalid value/field. \"+dumps(field)) \n else:\n return True \n\n\n def validateAction(value):\n\n action=json.loads(value)\n\n if (not \"type\" in action):\n raise ValidationError(\"An action required a type property. \"+dumps(action))\n\n if (action[\"type\"]!=\"assign\"): \n raise ValidationError(\"The action type should be one 'assign'. \"+dumps(action))\n\n if (not \"value\" in action):\n raise ValidationError(\"An 'assign' action requires a value property. \"+dumps(action))\n \n if (action[\"value\"]):\n if (\"workflow\" in action[\"value\"]):\n if (\"target\" in action):\n raise ValidationError(\"A 'workflow' action mustn't have target. \"++dumps(action))\n else:\n if (not \"target\" in action or len(action[\"target\"])==0):\n raise ValidationError(\"An 'assign' action requires a target property. \"+dumps(action))\n\n return ActionValidator.validateFields(action[\"value\"])\n","sub_path":"action/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"600076059","text":"def main():\r\n\r\n print('\\n Welcome to Square Builder ')\r\n \"sq_dimension = valid_check()\"\r\n make_square(valid_check())\r\n\r\n\r\ndef valid_check():\r\n dimension = int(input('\\n Enter Square Dimension as a single int: '))\r\n while not (dimension > 0):\r\n print('\\n Not a Valid int')\r\n dimension = int(input('\\n Enter Square Dimension as a single int: '))\r\n else:\r\n return dimension\r\n\r\n\r\ndef make_square(int, border_shape='-'):\r\n\r\n top_line = int * (border_shape+' ')\r\n print(' ' + top_line)\r\n\r\n spaces = ' ' * ((int * 3) - 4)\r\n side_length = (' ' + border_shape + spaces + border_shape)\r\n\r\n length = (int - 2)\r\n while length > 0:\r\n print(side_length)\r\n length -= 1\r\n\r\n print(' ' + top_line)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"StandaloneProjects/Shapebuilder.py","file_name":"Shapebuilder.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"452889661","text":"\"\"\"\nDescription\n\n合并(归并)排序的核心思想是:每次从中间位置将数组分组再分别排序。请实现其非递归方案。\n\n\nInput\n\n输入的每一行表示一个元素为正整数的数组,所有值用空格隔开,第一个值为数值长度,其余为数组元素值。\n\n\nOutput\n\n输出的每一行为排序结果,用空格隔开,末尾不要空格。\n\n\nSample Input 1\n\n13 24 3 56 34 3 78 12 29 49 84 51 9 100\nSample Output 1\n\n3 3 9 12 24 29 34 49 51 56 78 84 100\n\"\"\"\nimport math\nimport sys\n\ndef merge(list, low, mid, high):\n left = list[low:mid]\n right = list[mid:high]\n l = 0\n r = 0\n result = []\n while l < len(left) and r < len(right):\n if left[l] < right[r]:\n result.append(left[l])\n l += 1\n else:\n result.append(right[r])\n r += 1\n result += left[l:]\n result += right[r:]\n list[low:high]=result\n\n\ndef merge_sort():\n for line in sys.stdin:\n temp_list = line.split()\n if not temp_list:\n break\n data_list = []\n length = int(temp_list[0])\n for i in range(1, length + 1):\n data_list.append(int(temp_list[i]))\n\n # time=math.log(length,2)\n # for i in range(1,time+1):\n # merge(e for e in data_list)\n\n i = 1\n while i < length: # 子数组长度\n low = 0\n while low < length:\n mid = low + i\n high = min(mid + i, length)\n if mid Mapping[str, Any]:\n return {\n \"fingerprint\": self.fingerprint,\n \"op\": self.op,\n \"desc\": self.desc,\n \"type\": self.type.value,\n \"parent_span_ids\": self.parent_span_ids,\n \"cause_span_ids\": self.cause_span_ids,\n \"offender_span_ids\": self.offender_span_ids,\n }\n\n @property\n def title(self) -> str:\n return GROUP_TYPE_TO_TEXT.get(self.type, \"N+1 Query\")\n\n @classmethod\n def from_dict(cls, data: dict):\n return cls(\n data[\"fingerprint\"],\n data[\"op\"],\n data[\"desc\"],\n GroupType(data[\"type\"]),\n data[\"parent_span_ids\"],\n data[\"cause_span_ids\"],\n data[\"offender_span_ids\"],\n )\n\n def __eq__(self, other):\n if not isinstance(other, PerformanceProblem):\n return NotImplemented\n return (\n self.fingerprint == other.fingerprint\n and self.offender_span_ids == other.offender_span_ids\n and self.type == other.type\n )\n\n def __hash__(self):\n # This will de-duplicate on fingerprint and type and only for offending span ids.\n # Fingerprint should incorporate the 'uniqueness' enough that parent and span checks etc. are not required.\n return hash((self.fingerprint, frozenset(self.offender_span_ids), self.type))\n","sub_path":"src/sentry/utils/performance_issues/performance_problem.py","file_name":"performance_problem.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"255369843","text":"# Copyright 2014 - Savoir-Faire Linux inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport requests\n\nfrom surveil.tests.integration import integration_test\n\n\nclass TestMergedIngegrationSurveil(\n integration_test.MergedIntegrationTest\n):\n\n def test_hello(self):\n self.assertEqual(\n requests.get(\"http://localhost:8999/v2/hello\").text,\n 'Hello World!'\n )\n\n\nclass TestSeparatedIntegrationSurveil(\n integration_test.SeparatedIntegrationTests\n):\n\n def test_create_host(self):\n \"\"\"Creates a host and asserts that is is monitored by Alignak.\"\"\"\n config_hosts = (TestSeparatedIntegrationSurveil.\n client.status.hosts.list())\n\n self.assertFalse(\n any(host['host_name'] == 'integrationhosttest'\n for host in config_hosts)\n )\n\n TestSeparatedIntegrationSurveil.client.config.hosts.create(\n host_name='integrationhosttest',\n address='127.0.0.1',\n )\n\n TestSeparatedIntegrationSurveil.client.config.reload_config()\n\n def function():\n status_hosts = (TestSeparatedIntegrationSurveil.\n client.status.hosts.list())\n self.assertTrue(\n any(host['host_name'].decode() == 'integrationhosttest'\n for host in status_hosts)\n\n )\n\n self.assertTrue(\n self.try_for_x_seconds(\n function,\n time_to_wait=180,\n cooldown=10,\n exception=AssertionError,\n message=\"Could not find host in status.\"\n )\n )\n\n def test_delete_host(self):\n self.test_create_host()\n\n TestSeparatedIntegrationSurveil.client.config.hosts.delete(\n 'integrationhosttest'\n )\n\n TestSeparatedIntegrationSurveil.client.config.reload_config()\n\n def function():\n status_hosts = (TestSeparatedIntegrationSurveil.\n client.status.hosts.list())\n self.assertFalse(\n any(host['host_name'].decode() == 'integrationhosttest'\n for host in status_hosts)\n )\n\n self.assertTrue(\n self.try_for_x_seconds(\n function,\n time_to_wait=180,\n cooldown=10,\n exception=AssertionError,\n message=\"Host was not deleted\"\n )\n )\n","sub_path":"surveil/tests/integration/test_surveil.py","file_name":"test_surveil.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"311760640","text":"'''\nSymbolTable\nA symbol table that keeps a correspondence between symbolic \nlabels and numeric addresses.\n\n@author: Kyle June\n'''\n\nDEFAULT_SYMBOLS = {\n 'SP': 0,\n 'LCL': 1,\n 'ARG': 2,\n 'THIS': 3,\n 'THAT': 4,\n 'R0': 0,\n 'R1': 1,\n 'R2': 2,\n 'R3': 3,\n 'R4': 4,\n 'R5': 5,\n 'R6': 6,\n 'R7': 7,\n 'R8': 8,\n 'R9': 9,\n 'R10': 10,\n 'R11': 11,\n 'R12': 12,\n 'R13': 13,\n 'R14': 14,\n 'R15': 15,\n 'SCREEN': 16384,\n 'KBD': 24576\n}\n\nclass SymbolTable(object):\n '''\n Creates a new empty symbol table.\n '''\n def __init__(self):\n table = {}\n table.update(DEFAULT_SYMBOLS)\n self._table = table\n \n '''\n Adds the pair (symbol, address) to the table.\n \n @param: symbol string\n @param: address int\n '''\n def addEntry(self, symbol, address):\n self._table.update({symbol: address})\n \n '''\n Does the symbol table contain the given symbol?\n \n @param: symbol string\n @return: Boolean\n '''\n def contains(self, symbol):\n return symbol in self._table\n \n '''\n Returns the address associated with the symbol.\n \n @param: symbol string\n @return: int\n '''\n def getAddress(self, symbol):\n return self._table.get(symbol)\n","sub_path":"SymbolTable.py","file_name":"SymbolTable.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"218259348","text":"#%%\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom pathlib import Path\nimport argparse\nfrom ckonlpy.tag import Twitter\nfrom ckonlpy.tag import Postprocessor\ntwitter = Twitter()\nimport ast\n\n\n# 각 attr keyword가 review에 포함되면 각 attr에 해당하는 review들을 분류\n#%%\nattr_df=pd.read_excel(Path(\"G:/공유 드라이브/속성사전/Data/Electronics/RiceCooker/2. Seed D_전기밥솥_copy_jk_20201130.xlsx\")\n ,sheet_name='1B')\nreview_df=pd.read_csv(Path(\"G:/공유 드라이브/속성사전/Data/Electronics/RiceCooker/review_20201129.csv\"))\n\nreview_list=[(body,rating)for body, rating in zip(review_df['body'],review_df['rating'])]\n\n# %%\n#attr별로 리뷰 모아주려고 만든 dict\ncooker_review = {}\nfor attr in attr_df['attrs']:\n cooker_review[attr]=[]\n\n#attr별로 filter 1차로 걸 키워드 모아놓은 dict\n\n\ncooker_filter = {}\nfor _, _attr, _filter in attr_df[['attrs','filter_1']].itertuples():\n cooker_filter[_attr]=ast.literal_eval(_filter)\n\n#review_list에서 분류되지 않은 남은 리뷰들 넣는 list\nremain_review = []\n# for body in review_list[6:9]:\nn_review_list_end=len(review_list)\nn_review_list_start=1\n\nfor body in review_list:\n n_review_list_start+=1\n print(n_review_list_start/n_review_list_end*100)\n # print('\\n=====body 시작=====')\n # print(' 문장: ', body[0])\n attrBreak = False\n filter_1Break = False\n \n n_attr_start = 1\n n_attr_end = len(cooker_filter)\n for attr in cooker_filter:\n # print('attr:', attr)\n\n n_attr_start+=1\n\n n_filter_1_start=1\n n_filter_1_end=len(cooker_filter[attr])\n for filter_1 in cooker_filter[attr]:\n # print(' 1차필터: ',filter_1)\n if filter_1 in body[0]:\n # print(f' 1차필터는 \"{body[0]}\"에 포함된다')\n cooker_review[attr].append(body)\n attrBreak=True\n # 한 리뷰에 하나의 attr_2가 들어감\n break\n\n else:\n n_filter_1_start+=1\n #제일 마지막으로 남은 리뷰만 저장하기 위함\n if (n_filter_1_start == n_filter_1_end) and (n_attr_start == n_attr_end):\n remain_review.append(body)\n # continue\n \n if attrBreak is True: #다음 리뷰로 이동\n break\n\nprint('\\n*** 분류된 문서 ***\\n', cooker_review)\nprint('\\n*** 분류되지 않은 문서 ***\\n', remain_review)\n\n#%%\ntemp=pd.concat({k:pd.Series(v) for k,v in cooker_review.items()}).reset_index()\ntemp['body']=[_tuple[0] for _tuple in temp[0]]\ntemp['rating']=[_tuple[1] for _tuple in temp[0]]\ntemp.drop(columns=['level_1',0], inplace=True)\ntemp.columns = ['attr_2','name','rating']\n\n#%%\n########## 4,5점 긍정 - 1,2점 부정으로 옮기기 ########\nattr_pos = {}\nfor attr in attr_df['attrs']:\n attr_pos[attr]=[]\nattr_neg = {}\nfor attr in attr_df['attrs']:\n attr_neg[attr]=[]\n\nfor _,attr,body, rating in temp.itertuples():\n if rating in [4,5]:\n\n attr_pos[attr].append(body)\n elif rating in [1,2]:\n attr_neg[attr].append(body)\n else:\n pass\n \n\nfor attr in attr_pos:\n attr_pos[attr]='; '.join(attr_pos[attr])\nfor attr in attr_neg:\n attr_neg[attr]='; '.join(attr_neg[attr])\n\n\npos_neg = pd.concat([pd.DataFrame.from_dict(attr_pos,orient='index',columns=['pos'])\n,pd.DataFrame.from_dict(attr_neg,orient='index',columns=['neg'])]\n, axis = 1)\n\n\n# %%\n#### attr별 카운트 구하기#####\ncooker_review_attr_count = {}\nfor key in cooker_review.keys():\n cooker_review_attr_count[key] = len(cooker_review[key])\nattr2_count_df=pd.DataFrame.from_dict(cooker_review_attr_count,orient='index',columns=['count'])\npd.concat([pos_neg, attr2_count_df],axis=1).to_excel('./temp.xlsx')\n# %%\n\n#%%\n# '알려진' in '안알려진'\n# twitter.pos('안알려진', stem=True, norm=True)\n\n#%%\n# twitter.pos('가벼움', stem=True, norm=True)\n\n# twitter.add_dictionary(['가볍다','가벼움','가벼워서'], 'keyword', force=True)\n# twitter.pos('솥이 가벼워서 괜찮아요', stem=True, norm=True)\n# twitter.pos('바람이 정말 세서', stem=True, norm=True)\n# twitter.add_dictionary(['풀스텐','풀스텐 커버','가벼워서'], 'keyword', force=True)\n# twitter.pos('솥이 가벼워서 괜찮아요', stem=True, norm=True)\n\n\n#%%\n# # ['가벼' in body for body in body_list]\n# def f0002():\n# # passtags = {'Keyword'}\n# # postprocessor = Postprocessor(twitter, passtags = passtags)\n# # postprocessor.pos('풀스텐 내솥 내솥, 뚜껑 모두 스테인리스에 뚜껑 분리도 되는게 참 신기하네요. ')\n# # pass\n\n\n \n# if __name__ == '__main__':\n# # f0001(args.FILE_PATH,args.EXPORT_PATH,args.FREQ_N)\n# f0002()\n\n\n\"\"\"\n# parser = argparse.ArgumentParser()\n# parser.add_argument('--FILE_PATH','-in')\n# parser.add_argument('--EXPORT_PATH','-out')\n# parser.add_argument('--FREQ_N','-n', type=int)\n# args = parser.parse_args()\n# args.FILE_PATH = '../Data/eye_df.csv'\n# args.EXPORT_PATH = '../Data/eye_df_count.xlsx'\n# args.FREQ_N = 200\n\ndef f0001(FILE_PATH, EXPORT_PATH, FREQ_N=200):\n # Read Review File\n # FILE_PATH = '../Data/BabyProducts/MilkPowder/분유.csv'\n review=pd.read_csv(Path(FILE_PATH))\n N_review = len(review)\n token_ls = []\n \n for i in range(2080,2083):\n # for i in range(N_review):\n # print(i)\n body = review['body'][i]\n if pd.isna(body):\n continue\n word_pos=okt.pos(body, stem=True, norm=True)\n [token_ls.append(token) for token, pos in word_pos if pos=='Noun']\n count_obj = Counter(token_ls)\n count = count_obj.most_common(FREQ_N)\n # save excel\n temp_dict = dict(count)\n temp_df=pd.DataFrame({'keyword':temp_dict.keys(), 'count':temp_dict.values()})\n temp_df.sort_values(by='count',ascending=False,inplace=True)\n temp_df.to_excel(Path(EXPORT_PATH),index=False)\n return \n\n# count_200=f0001('../Data/BabyProducts/MilkPowder/분유.csv'\n# ,'../Data/BabyProducts/MilkPowder/_분유_count.xlsx'\n# ,200)\n\"\"\"","sub_path":"Code/0001to0002_NLP.py","file_name":"0001to0002_NLP.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"517042290","text":"from rest_framework import serializers\nfrom .models import *\n\nimport json\nimport requests\n\nclass PipelinesSerializer(serializers.Serializer):\n pipelineschemas = serializers.JSONField()\n\n\nclass SessionSerializer(serializers.Serializer):\n id = serializers.IntegerField(read_only=True)\n \n email = serializers.CharField(required=False, allow_blank=True, max_length=100)\n description = serializers.CharField(max_length=1000, default = \"\")\n pipeline = serializers.CharField(max_length=100)\n config = serializers.JSONField()\n observation = serializers.CharField(max_length=100000, default = \"\")\n observation2 = serializers.CharField(max_length=100000, default = \"\")\n \n status = serializers.CharField(max_length = 20, default = \"Staging\")\n staging = serializers.CharField(max_length = 20, default = \"new\")\n pipeline_version = serializers.CharField(max_length=100, default = \"\", read_only=True)\n pipeline_response = serializers.CharField(max_length = 1000, default = \"\")\n date_created = serializers.DateTimeField(read_only=True)\n date_modified = serializers.DateTimeField(read_only=True)\n \n # di_image = serializers.ImageField(required=False)\n di_fits = serializers.CharField(max_length=100, default = \"\")\n rw_fits = serializers.CharField(max_length=100, default = \"\")\n# stageid = serializers.CharField(max_length=30, default = \"\")\n stage_reqid = serializers.IntegerField(default = 0)\n stage2_reqid = serializers.IntegerField(default = 0)\n transfer_id = serializers.IntegerField(default = 0)\n transfer2_id = serializers.IntegerField(default = 0)\n\n\n def create(self, validated_data):\n return Session.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.email = validated_data.get('email', instance.email)\n instance.description = validated_data.get('description', instance.description)\n instance.pipeline = validated_data.get('pipeline', instance.pipeline)\n instance.config = validated_data.get('config', instance.config)\n instance.observation = validated_data.get('observation', instance.observation)\n instance.observation2 = validated_data.get('observation2', instance.observation2)\n\n instance.pipeline_version = validated_data.get('pipeline_version', instance.pipeline_version)\n instance.pipeline_response = validated_data.get('pipeline_response', instance.pipeline_response)\n instance.status = validated_data.get('status', instance.status)\n instance.staging = validated_data.get('staging', instance.staging)\n\n instance.date_created = validated_data.get('date_created', instance.date_created)\n instance.date_modified = validated_data.get('date_modified', instance.date_modified)\n \n instance.di_fits = validated_data.get('di_fits', instance.di_fits)\n instance.rw_fits = validated_data.get('rw_fits', instance.rw_fits)\n \n# instance.stageid = validated_data.get('stageid', instance.stageid)\n instance.stage_reqid = validated_data.get('stage_reqid', instance.stage_reqid)\n instance.stage2_reqid = validated_data.get('stage2_reqid', instance.stage2_reqid)\n instance.transfer_id = validated_data.get('transfer_id', instance.transfer_id)\n instance.transfer2_id = validated_data.get('transfer2_id', instance.transfer2_id)\n \n instance.save()\n return instance\n","sub_path":"lofar_workflow_api/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"537241942","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/7/24 10:32\n\nfrom flask_script import Manager,Server\nfrom app import app\nfrom app.models import Words\n\nmanager = Manager(app)\nmanager.add_command('runserver',Server(host='0.0.0.0',port=5000,use_debugger=True))\n\n@manager.command\ndef save_word():\n words = Words(name='wz11',content='this is first words')\n words.save()\n\nif __name__ == '__main__':\n app.run()","sub_path":"24/my-words-pad/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"281179726","text":"import math\nimport sys\nimport time\nsys.path.insert(0, \"..\\\\..\\\\eulerlib-py\")\nimport FactorTable\n\nMAX = 19999999\nMAXPRIME = MAX//100\nstart = time.time()\nft = FactorTable.FactorTable(MAX)\nprint(\"FT Done.\")\nprint(time.time()-start)\n##strprimes = [str(i) for i in ft.PrimesInRange(0,MAX)]\n##for i in range(len(strprimes)-1,-1,-1):\n## if len(strprimes[i]) <= len(str(MAX))-2:\n## MAXINDEX = i+1\n## print(i,strprimes[i])\n## break\n\nprint(\"Tables Done\")\ndef intconcat(x,y):\n return int(x*10**(1+math.floor(math.log10(y)))+y)\ndef IsPrimePair(x,y):\n a = intconcat(x,y)\n b = intconcat(y,x)\n if a > MAX or b > MAX:\n return None\n return ft.IsPrime(a) and ft.IsPrime(b)\n\nres = {}\nfor p in ft.PrimesInRange(3,int(math.sqrt(MAX))+1):\n for pp in ft.PrimesInRange(3,MAXPRIME):\n if IsPrimePair(p,pp):\n if p not in res:\n res[p] = []\n if pp not in res:\n res[pp] = []\n res[p].append(pp)\n## res[pp].append(p)\n##print(res)\nsums = []\nfor b in res:\n for c in res[b]:\n intr = set(res[b]) & set(res[c])\n if len(intr) > 0:\n## res[c].remove(b)\n for d in list(intr):\n if d == b or d == c: continue\n intr = intr & set(res[d])\n if len(intr) > 0:\n print(b,c,d,intr)\n## res[d].remove(c)\n for e in list(intr):\n if e==b or e==c or e==d: continue\n intr = intr & set(res[e])\n if len(intr) > 0:\n sums.append(sum([b,c,d,e,min(intr)]))\n print(sums[-1],b,c,d,e,intr)\n##print(min(sums))\nprint(time.time()-start) \n####def Search(pairList,lastindex,MaxDepth,CurrentDepth):\n#### if CurrentDepth >= MaxDepth: return\n#### for i,p in enumerate(strprimes[lastindex+1:MAXINDEX]):\n#### bools = [IsPrimePair(x,p) for x in pairList]\n#### if None in bools: break\n#### if all(bools):\n#### pairList.append(p)\n#### if len(pairList) >= MaxDepth:\n#### print(pairList,lastindex,MaxDepth,CurrentDepth)\n#### if CurrentDepth < MaxDepth:\n#### Search(pairList,i,MaxDepth,CurrentDepth+1)\n#### pairList.pop()\n####for i,p in enumerate(strprimes):\n#### Search([p],i,5,1)\n##for i,num in enumerate(strprimes):\n## for j,num2 in enumerate(strprimes[i+1:]):\n## pp = IsPrimePair(num,num2)\n## if pp is None: break\n## if pp:\n## for k,num3 in enumerate(strprimes[j+1:]):\n## pp1 = IsPrimePair(num3,num2)\n## pp2 = IsPrimePair(num3,num)\n## if pp1 is None or pp2 is None: break\n## if pp1 and pp2:\n#### print(num,num2,num3)\n## for l,num4 in enumerate(strprimes[k+1:]):\n## pp1 = IsPrimePair(num4,num3)\n## pp2 = IsPrimePair(num4,num2)\n## pp3 = IsPrimePair(num4,num)\n## if pp1 is None or pp2 is None or pp3 is None: break\n## if pp1 and pp2 and pp3:\n## print(num,num2,num3,num4)\n## for m,num5 in enumerate(strprimes[l+1]):\n## pp1 = IsPrimePair(num5,num4)\n## pp2 = IsPrimePair(num5,num3)\n## pp3 = IsPrimePair(num5,num2)\n## pp4 = IsPrimePair(num5,num)\n## if pp1 is None or pp2 is None or pp3 is None: break\n## if pp1 and pp2 and pp3 and pp4:\n## print(num,num2,num3,num4,num5)\n##\n##\n","sub_path":"60/60.py","file_name":"60.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"562842716","text":"import logging\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\n\ncatcols = ['protocol_type', 'service', 'flag']\n\nnumcols = ['duration', 'src_bytes', 'dst_bytes', 'wrong_fragment', 'urgent', 'hot', 'num_failed_logins',\n 'num_compromised', 'root_shell', 'su_attempted', 'num_root', 'num_file_creations',\n 'num_shells', 'num_access_files', # 'num_outbound_cmds',\n 'count', 'srv_count', 'dst_host_count', 'dst_host_srv_count',\n 'land', 'logged_in', 'is_host_login', 'is_guest_login']\n\nratecols = ['serror_rate',\n 'srv_serror_rate', 'rerror_rate', 'srv_rerror_rate', 'same_srv_rate', 'diff_srv_rate',\n 'srv_diff_host_rate', 'dst_host_same_srv_rate',\n 'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate',\n 'dst_host_serror_rate', 'dst_host_srv_serror_rate', 'dst_host_rerror_rate', 'dst_host_srv_rerror_rate']\n\n\ndef fit_encoders(dataset):\n ohe = OneHotEncoder(handle_unknown='ignore')\n ohe.fit(dataset[catcols].values)\n scaler = StandardScaler()\n scaler.fit(dataset[numcols].values)\n return ohe, scaler\n\n\ndef transform(dataset, ohe, scaler):\n a1 = scaler.transform(dataset[numcols])\n a2 = ohe.transform(dataset[catcols]).toarray()\n a3 = dataset[ratecols].values\n labels = (dataset[\"labels\"].values != 'normal').astype(int)\n return np.append(np.append(a1, a2, axis=1), a3, axis=1), labels\n\n\ndef read_files():\n train_dataset = pd.read_csv('data/kdd/kdd_train.csv')\n test_dataset = pd.read_csv('data/kdd/kdd_test.csv')\n return train_dataset, test_dataset\n\n\ndef get_coherent_split():\n train_dataset, test_dataset = read_files()\n logging.debug(train_dataset.head())\n ohe, scaler = fit_encoders(train_dataset)\n x_train, y_train = transform(train_dataset, ohe, scaler)\n x_test, y_test = transform(test_dataset, ohe, scaler)\n return x_train, y_train, x_test, y_test\n\n\ndef get_random_split():\n train_dataset_coherent, test_dataset_coherent = read_files()\n\n train_dataset_anomaly_count = len(train_dataset_coherent[train_dataset_coherent['labels'] != 'normal'])\n train_dataset_benign_count = len(train_dataset_coherent) - train_dataset_anomaly_count\n logging.debug(\"KDD train set anomaly count : \" + str(train_dataset_anomaly_count) +\n \", benign count : \" + str(train_dataset_benign_count))\n\n test_dataset_anomaly_count = len(test_dataset_coherent[test_dataset_coherent['labels'] != 'normal'])\n test_dataset_benign_count = len(test_dataset_coherent) - test_dataset_anomaly_count\n logging.debug(\"KDD test set anomaly count : \" + str(test_dataset_anomaly_count) +\n \", benign count : \" + str(test_dataset_benign_count))\n\n while True: # Try to generate splits until valid (should not be necessary with OHE handle_unknown='ignore')\n try:\n # Combine datasets, build new index\n combined_dataset = pd.concat([train_dataset_coherent, test_dataset_coherent], ignore_index=True)\n combined_dataset_benign = combined_dataset[combined_dataset['labels'] == 'normal']\n combined_dataset_anomaly = combined_dataset[combined_dataset['labels'] != 'normal']\n\n # Randomly sample 22544 test samples, rest are train samples\n # Keep anomaly/benign ratio from original split\n test_dataset_benign = combined_dataset_benign.sample(n=test_dataset_benign_count)\n train_dataset_benign = combined_dataset_benign.drop(test_dataset_benign.index)\n test_dataset_anomaly = combined_dataset_anomaly.sample(n=test_dataset_anomaly_count)\n train_dataset_anomaly = combined_dataset_anomaly.drop(test_dataset_anomaly.index)\n train_dataset = pd.concat([train_dataset_benign, train_dataset_anomaly])\n test_dataset = pd.concat([test_dataset_benign, test_dataset_anomaly])\n logging.debug(\"(Shape KDD Random Split) Train : \" + str(train_dataset.shape) +\n \", Test : \" + str(test_dataset.shape))\n logging.debug(train_dataset.head())\n ohe, scaler = fit_encoders(train_dataset)\n x_train, y_train = transform(train_dataset, ohe, scaler)\n x_test, y_test = transform(test_dataset, ohe, scaler)\n return x_train, y_train, x_test, y_test\n except ValueError:\n logging.info(\"Discarding split, retrying...\")\n","sub_path":"data/kdd.py","file_name":"kdd.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"205841282","text":"import os\nimport random\nimport string\n\nimport pytest\n\nfrom rpi import operating_system\nfrom rpi.connections import Connections\nfrom rpi.exceptions import NeccessaryArgumentError, UserNotFoundError, InvalidMailAddressError, \\\n SpreadsheetNotFoundError, SheetNotFoundError\n\n\ndef test_connections_disable():\n status = operating_system() == 'W'\n assert Connections.DISABLE == status\n\n\ndef test_notification():\n Connections.DISABLE = False\n\n with pytest.raises(NeccessaryArgumentError, match=\"The 'file' argument is needed in\"):\n Connections.notify('test_title', 'test_message', 'test_user')\n\n with pytest.raises(NeccessaryArgumentError, match=\"Multicast can't be used without\"):\n Connections.notify('test_title', 'test_message', 'multicast')\n\n with pytest.raises(PermissionError, match=\"'force' must be True in order to use broadcast.\"):\n Connections.notify('test_title', 'test_message', 'broadcast')\n\n random_user = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(15))\n\n with pytest.raises(UserNotFoundError, match=random_user):\n Connections.notify('test_title', 'test_message', random_user, force=True)\n\n assert Connections.notify('test_title', 'test_message', 'test', force=True) is True\n\n\ndef test_mail():\n with pytest.raises(InvalidMailAddressError, match=\"'test_destination' is not a valid email\"):\n Connections.send_email('test_destination', 'test_subject', 'test_message')\n\n with pytest.raises(TypeError, match=\"destinations must be iterable or str\"):\n Connections.send_email(18, 'test_subject', 'test_message')\n\n\ndef test_to_google_spreadsheets():\n data = tuple((random.randint(0, 10) for _ in range(10)))\n\n with pytest.raises(SpreadsheetNotFoundError, match='test_file'):\n Connections.to_google_spreadsheets('test_file', 'test_sheet', data)\n\n with pytest.raises(SheetNotFoundError, match='test_sheet'):\n Connections.to_google_spreadsheets('pylog', 'test_sheet', data)\n\n\ndef test_from_google_spreadsheets():\n with pytest.raises(SpreadsheetNotFoundError, match='test_file'):\n Connections.from_google_spreadsheets('test_file', 'test_sheet')\n\n with pytest.raises(SheetNotFoundError, match='test_sheet'):\n Connections.from_google_spreadsheets('pylog', 'test_sheet')\n\n data = Connections.from_google_spreadsheets('pylog', 'puertos')\n\n assert len(data) > 0\n\n\nif __name__ == '__main__':\n pytest.main([os.path.basename(__file__), '-v'])\n","sub_path":"tests/test_connections.py","file_name":"test_connections.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"514650849","text":"class Solution:\n '''Simple algorithm to iterate through the list finding the elements which sum to the target.'''\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n index = []\n for i in range(len(nums)):\n for j in range((i+1), len(nums)):\n if nums[i] + nums[j] == target:\n index.append(i)\n index.append(j)\n \n return index\n\n\n\n def twoSum_ideal(nums, target):\n '''Creates key-value pair of the number and the index, if the difference between any subsequent numbers in the array and the target is in the dictionary, it's index can quickly be identified.'''\n h = {}\n for i, num in enumerate(nums):\n n = target - num\n if n not in h:\n h[num] = i\n else:\n return [h[n], i]\n\nif __name__ == '__main__':\n print(Solution.twoSum(nums = [2,15,11,7],target = 9))\n print(Solution.twoSum_ideal(nums = [2,15,11,7],target = 9))\n\n \n","sub_path":"Easy/TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"45447137","text":"import math\ndef gcd(y,x):\n q=y//x\n r=y-q*x\n print(y,'=',q,'*',x,'+',r)\n if r==0:\n if x<0:\n x=1\n return x\n else:\n return gcd(x,r)\n\ndef gcd_comp(y,x):\n\tz=y/x\n\tq=complex(round(z.real),round(z.imag))\n\tr=y-q*x\n\tprint(y,'=',q,'*',x,'+',r)\n\tif r==0:\n\t\treturn x\n\telse:\n\t\treturn gcd_comp(x,r)\n \n\ndef is_num(x):\n try:\n int(x)\n return True\n except:\n return False\n\ndef norm():\n y=input(\"Insert first number: \")\n while not is_num(y):\n print(\"Not an integer!!\")\n y=input(\"Insert first number: \")\n x=input(\"Insert second number: \")\n while not is_num(y):\n print(\"Not an integer!!\")\n x=input(\"Insert second number: \")\n print('gcd(',y,',',x,')=',gcd(int(y),int(x)))\n\ndef comp():\n y_r=input(\"Insert real part of first number: \")\n while not is_num(y_r):\n print(\"Not an integer!!\")\n y_r=input(\"Insert real part of first number: \")\n y_i=input(\"Insert imaginary part of first number: \")\n while not is_num(y_i):\n print(\"Not an integer!!\")\n y_i=input(\"Insert imaginary part of first number: \")\n\n x_r=input(\"Insert real part of second number: \")\n while not is_num(x_r):\n print(\"Not an integer!!\")\n x_r=input(\"Insert real part of second number: \")\n x_i=input(\"Insert imaginary part of second number: \")\n while not is_num(x_i):\n print(\"Not an integer!!\")\n x_i=input(\"Insert imaginary part of second number: \")\n \n print('gcd(',complex(int(y_r),int(y_i)),',',complex(int(x_r),int(x_i)), \\\n ')=',gcd_comp(complex(int(y_r),int(y_i)),complex(int(x_r),int(x_i))))\n \n\nchoice=input(\"insert 'z' for regular integer, 'c' for complex integers: \")\nwhile choice!='z' and choice!='c':\n print('bad input!')\n choice=input(\"insert 'z' for regular integer, 'c' for complex integers: \")\n \nif choice=='z':\n norm()\nelse:\n comp()\n\n\n\n","sub_path":"Python/gcd func.py","file_name":"gcd func.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"177498630","text":"# Read tremor counts into a named tuple\n# We would like to read both Wech and Idesan's tremor catalogs. \n\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport sys\nimport collections\nimport glob\nimport datetime as dt \n\nTremorCat = collections.namedtuple(\"TremorCat\",['dtarray','lonarray','latarray']);\n\ndef read_input_tremor(tremortype):\n\t# The driver of the inputs (optional)\n\treadfuncs={\"wech\":read_wech,\n\t\"wech_custom\":read_wech_custom,\n\t\"ide\":read_ide,\n\t\"pnsn_052019\":read_pnsn052019};\n\tfilenames={\"wech\":\"../../GPS_POS_DATA/tremor/08_01_2009_10_31_2018.txt\",\n\t\"wech_custom\":\"../../GPS_POS_DATA/tremor/revised_Wech_2015_2017.txt\",\n\t\"ide\":\"../../GPS_POS_DATA/tremor/trm_Cascadia.20050101.3652.92921871.csv\",\n\t\"pnsn_052019\":\"../../GPS_POS_DATA/tremor/PNSN_052019/\"};\n\ttremor=readfuncs[tremortype](filenames[tremortype]);\n\treturn tremor;\n\n\n\n\n\ndef read_wech(filename):\n\tdtarray=[]; lonarray=[]; latarray=[];\n\tstart=0;\n\n\tifile=open(filename,'r');\n\tfor line in ifile:\n\t\ttemp=line.split();\n\t\tif 'yyyy-mm-dd' in line or 'DateTime' in line: # If the header is still inside. \n\t\t\tstart=1; continue;\n\t\tif len(temp)==5: # If we've removed the header already. \n\t\t\tstart=1;\n\t\tif start==1 and len(temp)>0:\n\t\t\tdtarray.append(dt.datetime.strptime(temp[0]+' '+temp[1].split('.')[0],\"%Y-%m-%d %H:%M:%S\"));\n\t\t\tlonarray.append(float(temp[3]));\n\t\t\tlatarray.append(float(temp[2]));\n\t\tif len(latarray)==180000:\n\t\t\tbreak;\n\tifile.close();\n\n\twech_tremor = TremorCat(dtarray=np.flipud(dtarray), lonarray=np.flipud(lonarray), latarray=np.flipud(latarray));\n\tprint(\"Successfully read %d tremor counts from %s \" % (len(wech_tremor.dtarray),filename));\n\treturn wech_tremor;\n\n\ndef read_wech_custom(filename):\n\tdtarray=[]; lonarray=[]; latarray=[];\n\tstart=0;\n\n\tifile=open(filename,'r');\n\tfor line in ifile:\n\t\ttemp=line.split();\n\t\tif 'DateTime' in line: # If the header is still inside. \n\t\t\tstart=1; continue;\n\t\tif len(temp)==5: # If we've removed the header already. \n\t\t\tstart=1;\n\t\tif start==1 and len(temp)>0:\n\t\t\tdtarray.append(dt.datetime.strptime(temp[0]+' '+temp[1].split('.')[0],\"%Y-%m-%d %H:%M:%S\"));\n\t\t\tlonarray.append(float(temp[2]));\n\t\t\tlatarray.append(float(temp[3]));\n\t\tif len(latarray)==180000:\n\t\t\tbreak;\n\tifile.close();\n\n\twech_tremor = TremorCat(dtarray=dtarray, lonarray=lonarray, latarray=latarray);\n\tprint(\"Successfully read %d tremor counts from %s \" % (len(wech_tremor.dtarray),filename));\n\treturn wech_tremor;\n\n\ndef read_ide(filename):\n\tdtarray=[]; lonarray=[]; latarray=[];\n\tifile=open(filename,'r');\n\tfor line in ifile:\n\t\ttemp=line.split(',');\n\t\tif len(temp)>1:\n\t\t\tdtarray.append(dt.datetime.strptime(temp[0]+' '+temp[1],\"%Y-%m-%d %H:%M:%S\"));\n\t\t\tlonarray.append(float(temp[3]));\n\t\t\tlatarray.append(float(temp[2]));\n\tifile.close();\t\n\n\tide_tremor = TremorCat(dtarray=dtarray, lonarray=lonarray, latarray=latarray);\n\tprint(\"Successfully read %d tremor counts from %s \" % (len(ide_tremor.dtarray),filename));\n\treturn ide_tremor;\n\ndef read_pnsn052019_file(filename):\n\tdtarray=[]; lonarray=[]; latarray=[]; \n\tifile=open(filename,'r');\n\tifile.readline();\n\tfor line in ifile:\n\t\ttemp=line.split(',');\n\t\tif len(temp)<=2:\n\t\t\tcontinue;\n\t\tif temp[0]=='lat':\n\t\t\tcontinue;\n\t\tlonarray.append(float(temp[1]));\n\t\tlatarray.append(float(temp[0]));\n\t\tdtarray.append(dt.datetime.strptime(temp[3],\" %Y-%m-%d %H:%M:%S \"));\n\tifile.close();\n\ttremor=TremorCat(dtarray=dtarray, lonarray=lonarray, latarray=latarray);\n\treturn tremor;\n\n\ndef read_pnsn052019(dirname):\n\tdtarray=[]; lonarray=[]; latarray=[];\n\tfiles=glob.glob(dirname+\"*.csv\");\n\tfiles=sorted(files);\n\tfor i in range(len(files)):\n\t\ttremor_1yr = read_pnsn052019_file(files[i]);\n\t\tprint(len(tremor_1yr.dtarray));\n\t\tfor i in range(len(tremor_1yr.dtarray)):\n\t\t\tdtarray.append(tremor_1yr.dtarray[i]);\n\t\t\tlonarray.append(tremor_1yr.lonarray[i]);\n\t\t\tlatarray.append(tremor_1yr.latarray[i]);\n\ttremor=TremorCat(dtarray=dtarray, lonarray=lonarray, latarray=latarray);\n\tprint(\"Successfully read %d tremor counts from %s\" % (len(tremor.dtarray), dirname) );\n\treturn tremor;\n\n\n\ndef write_tremor_as_txt(tremor, filename):\n\tofile=open(filename,'w');\n\tfor i in range(len(tremor.dtarray)):\n\t\tofile.write(\"%s %f %f\\n\" % (dt.datetime.strftime(tremor.dtarray[i],\"%Y-%m-%d\"), tremor.lonarray[i], tremor.latarray[i]) );\n\tofile.close();\n\treturn;\n\n\n\n","sub_path":"Tremor/tremor/tremor_io.py","file_name":"tremor_io.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"593708863","text":"# ==================================================\n#\n# Lection 13.\n#\n# asymptotics: O(N)\n#\n# ==================================================\n\nfrom module import A_stack\nimport ast\n\ndef isint(item):\n try:\n int(item)\n return True\n except ValueError:\n return False\n\ndef backPolandNotation(notation:list):\n for token in notation:\n if isint(token):\n A_stack.push(token)\n else:\n y = A_stack.pop()\n x = A_stack.pop()\n z = eval(str(x) + str(token) + str(y))\n A_stack.push(z)\n \n return A_stack.pop()\n\nnotation = ['2', '7', \"+\", 5, \"*\"]\n\nprint(backPolandNotation(notation))\n\n","sub_path":"Lection_13_check_brackets_sequence/Lection_13_back_poland_notation.py","file_name":"Lection_13_back_poland_notation.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"1348201","text":"from .svr_libs import *\r\n\r\ndef getData(csv):\r\n data = pd.read_csv(csv)\r\n\r\n data['heat_fuel'] = data['heat_fuel'].apply({'gas': 0, 'cogeneration': 1}.get)\r\n data['heat_type'] = data['heat_type'].apply({'individual': 0, 'central': 1, ' district': 2}.get)\r\n data['front_door_structure'] = data['front_door_structure'].apply({'corridor': 0, 'stairway': 1, 'mixed': 2}.get)\r\n\r\n\r\n data['city'] = data['city'].astype('category', copy = False)\r\n data['heat_fuel'] = data['heat_fuel'].astype('category', copy = False)\r\n data['heat_type'] = data['heat_type'].astype('category', copy = False)\r\n data['transaction_month'] = data['transaction_month'].astype('category', copy = False)\r\n data['year_of_completion'] = data['year_of_completion'].astype('category', copy = False)\r\n data['front_door_structure'] = data['front_door_structure'].astype('category', copy = False)\r\n data['address_by_law_first5'] = data['address_by_law_first5'].astype('category', copy = False)\r\n\r\n data = data.drop(['Unnamed: 0'], axis = 1)\r\n return data\r\n\r\ndef splitData(csv):\r\n data = getData(csv)\r\n\r\n X = data.drop(['real_price'], axis = 1)\r\n Y = data[['real_price']]\r\n\r\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.3, random_state = 0)\r\n return x_train, x_test, y_train, y_test\r\n\r\ndef svrMain(csv, epsilon):\r\n x_train, x_test, y_train, y_test = splitData(csv)\r\n\r\n model = LinearSVR(epsilon = epsilon)\r\n model.fit(x_train, y_train)\r\n\r\n pred = model.predict(x_test)\r\n\r\n svr_r2 = r2_score(pred, y_test)\r\n svr_mse = mean_squared_error(pred, y_test)\r\n svr_rmse = np.sqrt(svr_mse)\r\n return print('r2 : ', svr_r2, ', mse : ', svr_mse, ', rmse : ', svr_rmse)","sub_path":"encore_academy/apt real transaction price prediction/algorithmcodes/Python/svr/svr_funcs.py","file_name":"svr_funcs.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"407707622","text":"##############################################################################\n# Parte do livro Introdução à Programação com Python\n# Autor: Nilo Ney Coutinho Menezes\n# Editora Novatec (c) 2010-2020\n# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8\n# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3\n# Terceira Edição - Janeiro/2019 - ISBN 978-85-7522-718-3\n#\n# Site: https://python.nilo.pro.br/\n#\n# Arquivo: exercicios3\\capitulo 09\\exercicio-09-05.py\n##############################################################################\n\npares = open(\"pares.txt\", \"r\")\nsaída = open(\"pares_invertido.txt\", \"w\")\n\nL = pares.readlines()\nL.reverse()\nfor l in L:\n saída.write(l)\n\npares.close()\nsaída.close()\n\n# Observe que lemos todas as linhas antes de fazer a inversão\n# Esta abordagem não funciona com arquivos grandes\n# Alternativa usando with:\n#\n##with open(\"pares.txt\",\"r\") as pares, open(\"pares_invertido.txt\",\"w\") as saída:\n## L = pares.readlines()\n## L.reverse()\n## for l in L:\n## saída.write(l)\n","sub_path":"exercicios_resolvidos3/exercicios3/capitulo 09/exercicio-09-05.py","file_name":"exercicio-09-05.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"270118300","text":"# -*- coding: utf-8 -*-\n# file: lca_glove.py\n# author: yangheng \n# Copyright (C) 2020. All Rights Reserved.\n\nimport torch\nimport torch.nn as nn\n\nimport numpy as np\nfrom pytorch_transformers.modeling_bert import BertPooler, BertSelfAttention, BertConfig\n\nclass SelfAttention(nn.Module):\n def __init__(self, config, opt):\n super(SelfAttention, self).__init__()\n self.opt = opt\n self.config = config\n self.SA = BertSelfAttention(config)\n self.tanh = torch.nn.Tanh()\n\n def forward(self, inputs):\n zero_vec = np.zeros((inputs.size(0), 1, 1, self.opt.max_seq_len))\n zero_tensor = torch.tensor(zero_vec).float().to(self.opt.device)\n SA_out = self.SA(inputs, zero_tensor)\n return self.tanh(SA_out[0])\n\nclass LCE_GLOVE(nn.Module):\n\n def __init__(self, embedding_matrix, opt):\n super(LCE_GLOVE, self).__init__()\n # Only few of the parameters are necessary in the config.json, such as hidden_size, num_attention_heads\n self.config = BertConfig.from_json_file(\"utils/bert_config.json\")\n self.opt = opt\n self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))\n self.lc_embed = nn.Embedding(2, opt.embed_dim)\n self.global_encoder1 = SelfAttention(self.config, opt)\n self.local_encoder1 = SelfAttention(self.config, opt)\n self.local_encoder2 = SelfAttention(self.config, opt)\n self.mha = SelfAttention(self.config, opt)\n self.pool = BertPooler(self.config)\n self.dropout = nn.Dropout(opt.dropout)\n self.linear = nn.Linear(opt.embed_dim * 2, opt.embed_dim)\n self.dense = nn.Linear(opt.embed_dim, opt.polarities_dim)\n self.classifier = nn.Linear(opt.embed_dim, 2)\n\n def forward(self, inputs):\n text_global_indices = inputs[0]\n text_local_indices = inputs[1]\n lce_ids = inputs[2]\n mask_matrix = inputs[3]\n\n # embedding layer\n global_out = self.embed(text_global_indices)\n local_out = self.embed(text_local_indices)\n\n global_out = self.global_encoder1(global_out)\n\n if self.opt.lce:\n lc_embedding = self.lc_embed(lce_ids)\n global_out = torch.mul(lc_embedding, global_out)\n local_out = self.local_encoder1(local_out)\n\n # dropout\n global_out = self.dropout(global_out).to(self.opt.device)\n local_out = self.dropout(local_out).to(self.opt.device)\n\n # LCF layer\n local_out = torch.mul(local_out, mask_matrix)\n local_out = self.local_encoder2(local_out)\n\n # dropout\n global_out = self.dropout(global_out).to(self.opt.device)\n local_out = self.dropout(local_out).to(self.opt.device)\n\n cat_features = torch.cat((local_out, global_out), dim=-1)\n cat_features = self.linear(cat_features)\n\n lce_logits = self.classifier(cat_features)\n lce_logits = lce_logits.view(-1, 2)\n lce_ids = lce_ids.view(-1)\n\n # output layer\n pooled_out = self.pool(cat_features)\n sen_logits = self.dense(pooled_out)\n\n if self.opt.lcp:\n return sen_logits, lce_logits, lce_ids\n else:\n return sen_logits\n","sub_path":"models/lc_apc/lce_glove.py","file_name":"lce_glove.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"366380407","text":"\"\"\"!\nSimulation of Moller scattering events and detector signals (using SLIC).\n\"\"\"\nfrom hpsmc.generators import EGS5\nfrom hpsmc.tools import BeamCoords, RandomSample, SLIC\n\njob.description = 'Moller from generation to slic'\n\n## Get job input file targets\ninputs = list(job.input_files.values())\n\nif 'nevents' in job.params:\n nevents = job.params['nevents']\nelse:\n nevents = 250000\n\n## Generate beam\negs5 = EGS5(name=\"moller_v3\")\n\n## Rotate events into beam coordinates\nrot = BeamCoords()\n\n## Simulate events\nslic = SLIC(nevents=nevents+1)\n\n## Run the job\njob.add([egs5, rot, slic])\n","sub_path":"python/jobs/moller_gen_to_slic_job.py","file_name":"moller_gen_to_slic_job.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"237483476","text":"import os\nfrom pathlib import Path\n\nimport pandas as pd\nimport pytest\nfrom rdkit import Chem\n\nimport janitor.chemistry\n\ntest_data_dir = Path(os.path.dirname(os.path.abspath(__file__))) / \"test_data\"\n\n\n@pytest.fixture\ndef chemdf():\n df = pd.read_csv(\n test_data_dir / \"corrected_smiles.txt\", sep=\"\\t\", header=None\n ).head(10)\n df.columns = [\"id\", \"smiles\"]\n return df\n\n\n@pytest.mark.parametrize(\"progressbar\", [None, \"terminal\"])\n@pytest.mark.chemistry\ndef test_smiles2mol(chemdf, progressbar):\n chemdf = chemdf.smiles2mol(\"smiles\", \"mol\", progressbar)\n assert \"mol\" in chemdf.columns\n for elem in chemdf[\"mol\"]:\n assert isinstance(elem, Chem.rdchem.Mol)\n\n\n@pytest.mark.chemistry\ndef test_morganbits(chemdf):\n morgans = chemdf.smiles2mol(\"smiles\", \"mol\").morganbits(\"mol\")\n assert morgans.shape == (10, 2048)\n","sub_path":"tests/test_chemistry.py","file_name":"test_chemistry.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"431737990","text":"from __future__ import print_function\nimport sys\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom flask import Flask, request, send_file, redirect, jsonify, Response\nimport json\nimport datetime\nimport os\n\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\nfootprint={datetime.date(year=2019, month=10, day=19): 25,datetime.date(year=2019, month=10, day=18): 23,\n\tdatetime.date(year=2019, month=10, day=17): 40,datetime.date(year=2019, month=10, day=16): 10}\n\nfoodar = []\ntransportationar = []\nelectricityar = []\nfoodMap = {\"meat\": 27, \"diary\": 1.9, \"grain\": 2.7, \"vegetables\": 2.0}\n\n\ndef root_dir(): # pragma: no cover\n return os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file(filename): # pragma: no cover\n try:\n src = os.path.join(root_dir(), filename)\n # Figure out how flask returns static files\n # Tried:\n # - render_template\n # - send_file\n # This should not be so non-obvious\n return open(src).read()\n except IOError as exc:\n return str(exc)\n\n\n# No caching at all for API endpoints.\n@app.after_request\ndef add_header(response):\n # response.cache_control.no_store = True\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response\n\n@app.route(\"/\", methods=[\"GET\"])\ndef main():\n\tcontent = get_file('../index.html')\n\treturn Response(content, mimetype=\"text/html\")\n\n@app.route(\"/food\", methods=[\"GET\"])\ndef main1():\n\tcontent = get_file('../food.html')\n\treturn Response(content, mimetype=\"text/html\")\n\n@app.route(\"/transportation\", methods=[\"GET\"])\ndef main2():\n\tcontent = get_file('../transportation.html')\n\treturn Response(content, mimetype=\"text/html\")\n\n@app.route(\"/electricity\", methods=[\"GET\"])\ndef main3():\n\tcontent = get_file('../electricity.html')\n\treturn Response(content, mimetype=\"text/html\")\n\n@app.route(\"/daily\", methods=[\"GET\"])\ndef daily():\n\treturn str(footprint[datetime.date.today()] if datetime.date.today() in footprint else 0)\n\n@app.route(\"/graph\", methods=[\"GET\"])\ndef graph():\n #data = json.loads(request.json)\n xvals = sorted(list(footprint.keys()))\n #print(str(xvals) + \" \" + str([footprint[x] for x in xvals]), file=sys.stderr)\n yvals = [footprint[x] for x in xvals]\n plt.figure(figsize=(12,9))\n plt.plot(xvals, yvals, \"ko-\")\n\n plt.xlabel(\"day\")\n plt.ylabel(\"carbon emission (lbs)\")\n plt.title(\"Your daily carbon emission history\")\n plt.xlim([min(xvals) - datetime.timedelta(days=2), max(xvals)+datetime.timedelta(days=2)])\n plt.ylim([min(yvals) - 5, max(yvals) + 5])\n os.remove(\"./graph.png\")\n plt.savefig(\"./graph.png\")\n return send_file(\"./graph.png\", mimetype='image/gif')\n\n@app.route(\"/getFood\", methods=[\"GET\"])\ndef getFood():\n\treturn jsonify(foodar)\n\n@app.route(\"/getTransportation\", methods=[\"GET\"])\ndef getTransportation():\n\treturn jsonify(transportationar)\n\n@app.route(\"/getElectricity\", methods=[\"GET\"])\ndef getElectricity():\n\treturn jsonify(electricityar)\n\n@app.route(\"/foodAdd\", methods=[\"POST\"])\ndef food():\n\tdata = request.form\n\td = datetime.date.today()\n\tif(not d in footprint):\n\t\tfootprint[d] = 0\n\tfootprint[d] += float(foodMap[data[\"food type\"]]) * float(data[\"amount\"])\n\tfoodar.append(\"You ate \" + str(data[\"amount\"]) + \" ozs of \" + data[\"food type\"])\n\treturn redirect(\"/\", code=302)\n\n@app.route(\"/transportationAdd\", methods=[\"POST\"])\ndef transportation():\n\tdata = request.form\n\td = datetime.date.today()\n\tprint(data, file=sys.stderr)\n\tif(not d in footprint):\n\t\tfootprint[d] = 0\n\tfootprint[d] += float(data[\"miles\"]) / float(data[\"mpg\"]) * 20\n\ttransportationar.append(\"You traveled \" + str(data[\"miles\"]) + \" miles\")\n\treturn redirect(\"/\", code=302)\n\n@app.route(\"/electricityAdd\", methods=[\"POST\"])\ndef electricity():\n\tdata = request.form\n\td = datetime.date.today()\n\tif(not d in footprint):\n\t\tfootprint[d] = 0\n\tfootprint[d] += float(data[\"electricity\"])\n\telectricityar.append(\"You used \" + str(data[\"electricity\"]) + \" kilowatt-hours of electricity.\")\n\treturn redirect(\"/\", code=302)\n\nif __name__ == \"__main__\":\n app.run(debug=True,host=\"0.0.0.0\",port=\"5000\")","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"146913422","text":"# Author: Xu Wang\r\n# Email: i@xuwang.im\r\n#\r\n# Filename: database.py\r\n#\r\n#\r\n\r\nfrom sqlalchemy import *\r\nfrom twitter import Tweet\r\nfrom utils import log\r\nimport config\r\nimport logging\r\n# logging.basicConfig()\r\n# logging.getLogger('sqlalchemy').setLevel(logging.DEBUG)\r\n\r\ndef engine_connection():\r\n \"\"\"\r\n Return a meta data of the connection\r\n :return:\r\n \"\"\"\r\n connection = 'mysql+pymysql://%s:%s@%s/%s?charset=utf8'\\\r\n % (config.db_user, config.db_passwd, config.db_host, config.db_database)\r\n return MetaData(bind=create_engine(connection, pool_recycle=5))\r\n\r\n\r\ndef get_table(table, metadata):\r\n \"\"\"\r\n Return the object of Table table in database bind to metadata\r\n :param metadata: metadata bind to a database connection\r\n :param table: table name\r\n :return: table object in sqlalchemy\r\n \"\"\"\r\n return Table(table, metadata, autoload=True)\r\n\r\n\r\nclass NewUsers:\r\n \"\"\"\r\n Operations on New_Users table.\r\n \"\"\"\r\n def __init__(self, table=None):\r\n \"\"\"\r\n\r\n :param table: Name of New_Users table\r\n :return:\r\n \"\"\"\r\n if table is None:\r\n name = config.New_Users\r\n else:\r\n name = table\r\n self.__table = get_table(name, engine_connection())\r\n\r\n def __lock_by_screen(self, screen):\r\n \"\"\"\r\n Set inuse of screen to be 1\r\n :param screen:\r\n :return: None\r\n \"\"\"\r\n self.__set_inuse_by_screen(1, screen)\r\n\r\n def __set_inuse_by_screen(self, inuse, screen):\r\n \"\"\"\r\n Set inuse of screen in table\r\n :param inuse: 0 or 1\r\n :param screen: screen name\r\n :return:\r\n \"\"\"\r\n if inuse not in [0, 1]:\r\n raise ValueError('Illegal value for inuse')\r\n print(screen)\r\n self.__table.update(self.__table.c.screen == screen).execute(inuse=inuse)\r\n\r\n def remove_by_screen(self, screen):\r\n \"\"\"\r\n Remove user by screen\r\n :param screen:\r\n :return:\r\n \"\"\"\r\n try:\r\n self.__table.delete(self.__table.c.screen == screen).execute()\r\n log(screen)\r\n except Exception as e:\r\n # in the case screen is not in table\r\n log(str(e))\r\n\r\n def add_by_screen(self, screen):\r\n \"\"\"\r\n Add a user by screen into table\r\n :param screen:\r\n :return:\r\n \"\"\"\r\n try:\r\n self.__table.insert().execute(screen=screen)\r\n log(screen)\r\n except Exception as e:\r\n # will receive an exception when user exists in the table, ignore it\r\n log(str(e))\r\n\r\n def get_user_screen(self):\r\n \"\"\"\r\n Get a unlocked (inuse = 0) user and lock it\r\n :return: a user by screen\r\n \"\"\"\r\n screen = self.__get_free_screen()\r\n if screen is None:\r\n return None\r\n else:\r\n self.__lock_by_screen(screen)\r\n return screen\r\n\r\n def add_from_mentions(self, tweet, Users):\r\n \"\"\"\r\n Add screen from mentions in tweet\r\n :param tweet: Tweet object\r\n :param Users: Users table\r\n :return:\r\n \"\"\"\r\n for s in tweet.mentions:\r\n if not Users.exist_user_by_screen(s):\r\n self.add_by_screen(s)\r\n\r\n\r\n def __get_free_screen(self):\r\n \"\"\"\r\n Get a unlocked (inuse = 0) user\r\n :return:\r\n \"\"\"\r\n entry = self.__table.select(self.__table.c.inuse == 0).execute().fetchone()\r\n if entry is None:\r\n return None\r\n else:\r\n return entry['screen']\r\n\r\n\r\n\r\nclass Users:\r\n \"\"\"\r\n Operations on Users table\r\n \"\"\"\r\n def __init__(self, table=None):\r\n \"\"\"\r\n\r\n :param table: Name of Users table\r\n :return:\r\n \"\"\"\r\n if table is None:\r\n name = config.Users\r\n else:\r\n name = table\r\n self.__table = get_table(name, engine_connection())\r\n\r\n def exist_user_by_screen(self, screen):\r\n \"\"\"\r\n Whether a user exits in table Users identified by screen name.\r\n :param screen: the screen name\r\n :return: True if exist, False otherwise\r\n \"\"\"\r\n current_user = self.__table.select(self.__table.c.screen == screen)\\\r\n .execute().fetchone()\r\n if current_user is not None:\r\n return True\r\n else:\r\n return False\r\n\r\n def update_followers(self, user):\r\n \"\"\"\r\n Update the number of followers of user with user.followers\r\n :param user:\r\n :return: None\r\n \"\"\"\r\n pass\r\n\r\n def __add_user(self, user):\r\n \"\"\"\r\n\r\n :param user: user of User class\r\n :return:\r\n \"\"\"\r\n self.__table.insert().execute(id=user.id,\r\n name=user.name,\r\n screen=user.screen,\r\n follower=user.followers,\r\n last_update=user.last_update)\r\n\r\n def add_user(self, user):\r\n \"\"\"\r\n Add user if not exists\r\n :param user:\r\n :return:\r\n \"\"\"\r\n try:\r\n self.__add_user(user)\r\n log(user.screen)\r\n except Exception as e:\r\n log(str(e))\r\n\r\n\r\nclass Tweets:\r\n \"\"\"\r\n Table of Tweets\r\n \"\"\"\r\n def __init__(self, table=None):\r\n \"\"\"\r\n\r\n :param table: name of tweets table\r\n :return:\r\n \"\"\"\r\n if table is None:\r\n name = config.Tweets\r\n else:\r\n name = table\r\n self.__table = get_table(name, engine_connection())\r\n\r\n def __add_tweet(self, tweet):\r\n \"\"\"\r\n\r\n :param tweet: tweet of Tweet class\r\n :return:\r\n \"\"\"\r\n self.__table.insert().execute(id=tweet.id,\r\n uid=tweet.uid,\r\n text=tweet.text,\r\n timestamp=tweet.timestamp,\r\n date=tweet.date,\r\n time=tweet.time,\r\n retweets=tweet.retweets,\r\n favorite=tweet.favorite)\r\n\r\n def add_tweet(self, tweet):\r\n \"\"\"\r\n\r\n :param tweet: Tweet object\r\n :return:\r\n \"\"\"\r\n try:\r\n self.__add_tweet(tweet)\r\n log(tweet.id)\r\n except Exception as e:\r\n log(str(e))\r\n\r\n\r\nclass Mentions:\r\n \"\"\"\r\n Table of Tweet_Mentions\r\n \"\"\"\r\n def __init__(self, table=None):\r\n if table is None:\r\n name = config.Tweet_Mentions\r\n else:\r\n name = table\r\n self.__table = get_table(name, engine_connection())\r\n\r\n def __add_mention(self, tweet_id, screen):\r\n \"\"\"\r\n Add a pair tweet_id and mention\r\n :param tweet_id:\r\n :param screen: is a user screen\r\n :return:\r\n \"\"\"\r\n self.__table.insert().execute(id=tweet_id, screen=screen)\r\n\r\n def __add_mentions(self, tweet):\r\n \"\"\"\r\n Add all mentions in tweet\r\n :param tweet: Tweet object\r\n :return:\r\n \"\"\"\r\n for s in tweet.mentions:\r\n self.__add_mention(tweet.id, s)\r\n\r\n def add_mentions(self, tweet):\r\n \"\"\"\r\n\r\n :param tweet:\r\n :return:\r\n \"\"\"\r\n try:\r\n self.__add_mentions(tweet)\r\n log(tweet.id)\r\n except Exception as e:\r\n log(str(e), str(tweet.id), ' '.join(tweet.mentions))\r\n\r\n\r\nclass Tags:\r\n \"\"\"\r\n Table of Tweet_Tags\r\n \"\"\"\r\n def __init__(self, table=None):\r\n \"\"\"\r\n\r\n :param table: table name of Tweet_Tags\r\n :return:\r\n \"\"\"\r\n if table is None:\r\n name = config.Tweet_Tags\r\n else:\r\n name = table\r\n self.__table = get_table(name, engine_connection())\r\n\r\n def __add_tag(self, tweet_id, tag):\r\n \"\"\"\r\n Add a pair tweet_id and tag\r\n :param tweet_id:\r\n :param tag:\r\n :return:\r\n \"\"\"\r\n self.__table.insert().execute(id=tweet_id, tag=tag)\r\n\r\n def __add_tags(self, tweet):\r\n \"\"\"\r\n\r\n :param tweet: Tweet object\r\n :return:\r\n \"\"\"\r\n for t in tweet.tags:\r\n self.__add_tag(tweet.id, t)\r\n\r\n def add_tags(self, tweet):\r\n \"\"\"\r\n\r\n :param tweet:\r\n :return:\r\n \"\"\"\r\n try:\r\n self.__add_tags(tweet)\r\n log(tweet.id)\r\n except Exception as e:\r\n log(str(e), str(tweet.id), ' '.join(tweet.tags))\r\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":8589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"340781718","text":"import time\nimport random\nfrom Bots.Functions.API import login\n\n\ntry:\n api = login.login()\n print(\"Logged in.\\n\")\nexcept:\n api=login.login()\n\nunf_list = open(\"unf.txt\", \"a\")\n\n\ndef get_list(username):\n name = api.username_info(username)\n id = name[\"user\"][\"pk\"]\n rank_token = api.generate_uuid()\n f_list = api.user_following(id, rank_token)\n\n return f_list\n\n\ndef unfollow_script(username, no_check):\n list = get_list(username)\n self_time = api.username_feed(username)[\"items\"][0][\"taken_at\"]\n count=0\n month_sec = 2628000.0\n\n\n for i in range(len(list[\"users\"])):\n rand = random.randint(1, 10)\n o_user = list[\"users\"][i][\"username\"]\n o_user_id = list[\"users\"][i][\"pk\"]\n if len(api.username_feed(o_user)[\"items\"]) > 0:\n last_post = api.username_feed(o_user)[\"items\"][0]\n o_time=last_post[\"taken_at\"]\n if (self_time-o_time) >= month_sec and o_user not in no_check:\n api.friendships_destroy(o_user_id)\n print(\"Unfollowed {}.\".format(o_user))\n unf_list.write(o_user)\n count+=1\n else:\n print(\"{} is fine.\".format(o_user))\n else:\n print(\"{} is fine.\".format(o_user))\n time.sleep(rand/10)\n print(\"Unfollowed {} users.\".format(count))\n\n","sub_path":"Bots/unfollow_inactives.py","file_name":"unfollow_inactives.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"518502704","text":"import socket\nimport PyQt5\nfrom PyQt5 import QtCore\nimport threading\nimport time\nimport pickle\nimport query_struct_class as qs\nimport sys\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QPushButton, QApplication, QLabel, QPlainTextEdit, QLineEdit, QGridLayout, QComboBox\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtWidgets import QGroupBox, QVBoxLayout, QHBoxLayout, QCheckBox, QSpinBox, QMessageBox\nfrom PyQt5.QtChart import QChart, QChartView, QLineSeries\nfrom PyQt5.QtGui import QPolygonF, QPainter, QPen, QBrush, QColor, QTextCursor\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtChart import QScatterSeries, QChartView, QLegend\n\n\n########################################################################################\n############################### INTERFACE CLASS ######################################\n########################################################################################\nclass Interface(QMainWindow):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Interface, self).__init__()\n\t\tglobal g_server_message\n\t\tself.__ax = 100\n\t\tself.__ay = 60\n\t\tself.__aw = 1000\n\t\tself.__ah = 600\n\t\tself.__margin = 30\n\t\tself.__wid = QWidget(self)\n\t\tself.setCentralWidget(self.__wid)\n\t\tself.__layout = QGridLayout()\n\t\tself.__layout.setSpacing(5)\n\t\tself.__data1 = []\n\t\tself.__data2 = []\n\t\tself.start = False\n\t\tself.stop = False\n\t\tself.__cl = client(\"192.168.30.101\", 10001, self)\n\t\tself.__cl.data_come_pareto.connect(self.on_data_come_pareto)\n\t\tself.__cl.data_come_times.connect(self.on_data_come_times)\n\t\tself.initUI()\n\t\tthreading.Thread(target = self.__cl.start).start()\n\t\n\tdef on_data_come_pareto(self, data):\n\t\tself.drawParetoChart(data)\n\n\tdef on_data_come_times(self, data1, data2):\n\t\tself.writeTimeLog(data1, data2)\n\t\tself.drawComparisonChart(data1, data2)\n\n\tdef __setWindowProperties(self, parrent):\n\t\tself.setWindowTitle(\"Title\") #название окна\n\t\tself.__setWindowSize(self.__ax, self.__ay, self.__aw, self.__ah)\n\n\tdef __setWindowSize(self, ax, ay, aw, ah):\n\t\tself.setGeometry(ax, ay, aw, ah) #размеры окна\n\n\tdef drawParetoChart(self, data):\n\t\tself.__pareto_chart.removeAllSeries()\n\t\tfor i in data[\"database_clients\"]:\n\t\t\tseries = QScatterSeries()\n\t\t\tseries.setName(\"database_client\")\n\t\t\tseries.setMarkerShape(QScatterSeries.MarkerShapeCircle)\n\t\t\tseries.setMarkerSize(5)\n\t\t\tseries.append(i[0],i[1])\n\t\t\tseries.setBrush(QColor(Qt.red))\n\t\t\tself.__pareto_chart.addSeries(series)\n\t\tseries = QScatterSeries()\n\t\tseries.setName(\"optimal_client\")\n\t\tseries.setMarkerShape(QScatterSeries.MarkerShapeCircle)\n\t\tseries.setMarkerSize(7)\n\t\tseries.append(data[\"optimal_client\"][0], data[\"optimal_client\"][1])\n\t\tself.__pareto_chart.addSeries(series)\n\t\tseries = QScatterSeries()\n\t\tseries.setName(\"utopian_point\")\n\t\tseries.setMarkerShape(QScatterSeries.MarkerShapeCircle)\n\t\tseries.setMarkerSize(10)\n\t\tseries.append(data[\"utopian_point\"][0], data[\"utopian_point\"][1])\n\t\tself.__pareto_chart.addSeries(series)\n\t\tself.__pareto_chart.createDefaultAxes()\n\n\tdef __createOptimisationChartGroupBox(self):\n\t\tgroupBox = QGroupBox(\"Optimisation Chart\")\n\t\tlayout = QGridLayout()\n\t\tlayout.setSpacing(0)\n\t\t\n\t\tself.__pareto_chart = QChart()\n\t\tself.__pareto_chart_series = []\n\t\t# self.__pareto_chart.createDefaultAxes()\n\t\t# legend = QLegend()\n\t\t# chart.legend.setMarkerShape(QLegend.MarkerShapeFromSeries)\n\t\tview = QChartView(self.__pareto_chart)\n\t\tlayout.addWidget(view)\n\t\tgroupBox.setLayout(layout)\n\t\treturn groupBox\n\n\tdef __createButtonsGroupBox(self):\n\t\tgroupBox = QGroupBox(\"Buttons\")\n\t\thbox = QHBoxLayout()\n\t\tself.__btn_start = QPushButton(\"Start\", self) #создание кнопки\n\t\tself.__btn_stop = QPushButton(\"Stop\", self)\n\t\thbox.addWidget(self.__btn_start)\n\t\thbox.addWidget(self.__btn_stop)\n\t\tgroupBox.setLayout(hbox)\n\t\treturn groupBox\n\n\tdef __createQueryParametersGroupBox(self):\n\t\tgroupBox = QGroupBox(\"Query Parameters\")\n\t\tlayout = QGridLayout()\n\t\tlayout.setSpacing(5)\n\t\tlabel_query_count = QLabel(\"Query Count: \")\n\t\tself.spinbox_query_count = QSpinBox()\n\t\tself.spinbox_query_count.setMinimum(100)\n\t\tself.spinbox_query_count.setMaximum(1000)\n\t\tlayout.addWidget(label_query_count, 1, 1, 1, 1)\n\t\tlayout.addWidget(self.spinbox_query_count, 1, 2, 1, 3)\n\t\tlabel_package_size = QLabel(\"Package Size: \")\n\t\tgroupBox.setLayout(layout)\n\t\treturn groupBox\n\n\tdef writeTimeLog(self, data1, data2):\n\t\tself.__plainTextEdit_time_logs.moveCursor(QTextCursor.End)\n\t\tself.__plainTextEdit_time_logs.insertPlainText(str(\"optimal:\" + str(data1[0]) + '\\n'))\n\t\tself.__plainTextEdit_time_logs.insertPlainText(str(\"drugoi:\" + str(data2[0]) + '\\n'))\n\t\tself.__plainTextEdit_time_logs.moveCursor(QTextCursor.End)\n\n\tdef __createLogGroupBox(self):\n\t\tgroupBox = QGroupBox(\"Time Logs\")\n\t\tself.__plainTextEdit_time_logs = QPlainTextEdit(\"\")\n\t\tvbox = QVBoxLayout()\n\t\tvbox.addWidget(self.__plainTextEdit_time_logs)\n\t\tvbox.addStretch(1)\n\t\tgroupBox.setLayout(vbox)\n\t\treturn groupBox\n\n\tdef drawComparisonChart(self, data1, data2):\n\t\tself.__comparison_chart.removeAllSeries()\n\t\t#for i in self.__comparison_chart_series:\n\t\t#\tself.__comparison_chart.removeSeries(i)\n\t\t#\tdel i\n\t\tseries1 = QLineSeries()\n\t\tseries1.setName(\"optimal\")\n\t\tseries2 = QLineSeries()\n\t\tseries2.setName(\"drugoi\")\n\t\tprint(\"[drawComparisonChart]: \", data1, data2)\n\t\ttemp1 = {}\n\t\ttemp1[\"time\"] = data1[0]\n\t\ttemp1[\"count\"] = data1[1]\n\t\ttemp2 = {}\n\t\ttemp2[\"time\"] = data2[0]\n\t\ttemp2[\"count\"] = data2[1]\n\t\tif self.__data1:\n\t\t\ttemp1[\"count\"] += self.__data1[len(self.__data1) - 1][\"count\"]\n\t\t\ttemp1[\"time\"] += self.__data1[len(self.__data1) - 1][\"time\"]\n\t\tif self.__data2:\n\t\t\ttemp2[\"count\"] += self.__data2[len(self.__data2) - 1][\"count\"]\n\t\t\ttemp2[\"time\"] += self.__data2[len(self.__data2) - 1][\"time\"]\n\t\tself.__data1.append(temp1)\n\t\tself.__data2.append(temp2)\n\t\tprint(\"[drawComparisonChart]: \", self.__data1, self.__data2)\n\n\t\tfor i in self.__data1:\n\t\t\tseries1.append(i[\"time\"], i[\"count\"])\n\t\tfor i in self.__data2:\n\t\t\tseries2.append(i[\"time\"], i[\"count\"])\n\n\t\tself.__comparison_chart.addSeries(series1)\n\t\t#self.__comparison_chart_series.append(series1)\n\t\tself.__comparison_chart.addSeries(series2)\n\t\t#self.__comparison_chart_series.append(series2)\n\t\tself.__comparison_chart.createDefaultAxes()\n\n\tdef __createComparisonChartGroupBox(self):\n\t\tgroupBox = QGroupBox(\"Comparison Chart\")\n\t\tlayout = QGridLayout()\n\t\tlayout.setSpacing(0)\n\t\tself.__comparison_chart = QChart()\n\t\tself.__comparison_chart_series = []\n\t\tself.__comparison_view = QChartView(self.__comparison_chart)\n\t\tlayout.addWidget(self.__comparison_view)\n\t\tgroupBox.setLayout(layout)\n\t\treturn groupBox\n\n\tdef __start_btn_clicked(self):\n\t\tprint(\"[init]: start\")\n\t\tthreading.Thread(target = self.__cl.start2).start()\n\n\tdef initUI(self):\n\t\tself.__layout.addWidget(self.__createOptimisationChartGroupBox(), 1, 1, 8, 5)\n\t\tself.__layout.addWidget(self.__createLogGroupBox(), 9, 1, 2, 5)\n\t\tself.__layout.addWidget(self.__createButtonsGroupBox(), 1, 6, 1, 5)\n\t\tself.__layout.addWidget(self.__createQueryParametersGroupBox(), 2, 6, 2, 5)\n\t\tself.__layout.addWidget(self.__createComparisonChartGroupBox(), 4, 6, 7, 5)\n\t\tself.__wid.setLayout(self.__layout)\n\t\tself.__btn_start.clicked.connect(self.__start_btn_clicked)\n\t\tself.__btn_stop.clicked.connect(self.__cl.stop2)\n\t\tself.__setWindowProperties(self)\n\t\tself.show()\n\n\tdef closeEvent(self, event): #событие закрытия окна\n\t\treply = QMessageBox.question(self, 'Message',\n\t\t\t\"Are you sure to quit?\", QMessageBox.Yes |\n\t\t\tQMessageBox.No, QMessageBox.No)\n\t\tif reply == QMessageBox.Yes:\n\t\t\tevent.accept()\n\t\telse:\n\t\t\tevent.ignore()\n\n########################################################################################\n################################# CLIENT CLASS #######################################\n########################################################################################\nclass client(QObject):\n\tdata_come_pareto = QtCore.pyqtSignal(dict)\n\tdata_come_times = QtCore.pyqtSignal(list, list)\n\n\tdef __init__(self, server_ip, server_port, inter):\n\t\tsuper(client, self).__init__()\n\t\ttry:\n\t\t\tself.__connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\tself.__server_address = (server_ip, server_port)\n\t\t\tself.__connection.connect(self.__server_address)\n\t\t\tself.__interface = inter\n\t\texcept:\n\t\t\tprint(\"[__init__]: Connection error.\")\n\n\tdef start2(self):\n\t\tprint(\"start2\")\n\t\twhile True:\n\t\t\ttime.sleep(1)\n\t\t\tmessage = pickle.dumps({\"command\": 98})\n\t\t\tself.__connection.send(message)\n\t\t\tself.comparisonCommand() #vremya ne 1 sec, a kak tol'ko pridet otvet\n\n\tdef stop2(self):\n\t\tprint(\"stop\")\n##////////////////////////////////////////////////////////////////////////////////////##\n\tdef start(self):\n\t\tself.__working = True\n\t\ttry:\n\t\t\ttemp_message = {}\n\t\t\ttemp_message[\"command\"] = -1\n\t\t\ttemp_message[\"client_type\"] = \"user\"\n\t\t\tmessage = pickle.dumps(temp_message)\n\t\t\tself.__connection.send(message)\n\t\t\tthreading.Thread(target = self.__listenServer).start()\n\t\t\twhile True:\n\t\t\t\ttime.sleep(100)\n\t\texcept (KeyboardInterrupt, SystemExit):\n\t\t\tprint(\"[__init__]: KeyboardInterrupt.\")\n\t\t\tself.__working = False\n\t\t\ttemp_message = {}\n\t\t\ttemp_message[\"command\"] = 0\n\t\t\tmessage = pickle.dumps(temp_message)\n\t\t\tself.__connection.send(message)\n\t\t\tself.__connection.close()\n##////////////////////////////////////////////////////////////////////////////////////##\n\tdef formServerAnswer(self):\n\t\tresult = \"\"\n\t\tprint(\"TYPE\", self.server_message)\n\t\tprint(self.server_message[\"command\"])\n\t\tprint(type(self.server_message[\"command\"]))\n\t\tprint(\"records number: \" + str(self.server_message[\"command\"]) + '\\n')\n\t\tfor i in self.server_message[\"query\"]:\n\t\t\tresult += str(i.getData()) + '\\n'\n\t\treturn result\n##////////////////////////////////////////////////////////////////////////////////////##\n\tdef __listenServer(self):\n\t\tprint(\"[__listenServer]\")\n\t\twhile self.__working:\n\t\t\ttry:\n\t\t\t\tif self.__connection:\n\t\t\t\t\tserver_message = pickle.loads(self.__connection.recv(4096))\n\t\t\t\t\tprint(\"[__listenServer]: \", server_message)\n\t\t\t\t\tif server_message[\"command\"] == 0:\n\t\t\t\t\t\tprint(\"[__listenServer]: server is off.\")\n\t\t\t\t\t\tself.__working = False\n\t\t\t\t\t\tself.__connection.close()\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.server_message = server_message\n\t\t\t\t\t\tprint(\"TYPE:\", type(self.server_message))\n\t\t\t\t\t\tprint(\"[__listenServer]: server message:\", server_message)\n\t\t\t\t\t\tif self.server_message[\"command\"] == 99:\n\t\t\t\t\t\t\tself.data_come_times.emit(server_message[\"data1\"], server_message[\"data2\"])\n\t\t\t\t\t\tif self.server_message[\"command\"] == 98:\n\t\t\t\t\t\t\tself.data_come_pareto.emit(server_message)\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tprint(\"[__listenServer]: exception.\")\n\t\t\t\tself.__working = False\n\t\t\t\tbreak\n##////////////////////////////////////////////////////////////////////////////////////##\n\tdef comparisonCommand(self):\n\t\ttemp_message = {}\n\t\ttemp_message[\"command\"] = 99 #command\n\t\ttemp_message[\"count\"] = self.__interface.spinbox_query_count.value() #100 #records number\n\t\ttemp_message[\"type\"] = 0 #0 - read, 1 - write\n\t\ttemp_message[\"optimisation\"] = 2 #0 - without, 1 - with, 2 - both\n\t\tmessage = pickle.dumps(temp_message)\n\t\tself.__connection.send(message)\n\t\tprint(temp_message)\n##////////////////////////////////////////////////////////////////////////////////////##\n\n\n########################################################################################\ndef main():\n\tprint(\"START\")\n\tapp = QApplication(sys.argv)\n\tex = Interface()\n\tsys.exit(app.exec_())\n\t\n##////////////////////////////////////////////////////////////////////////////////////##\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"user_client.py","file_name":"user_client.py","file_ext":"py","file_size_in_byte":11414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"609099126","text":"#!/usr/bin/python3\n#encoding=utf-8\nimport sys, argparse\nfrom numpy import *\nfrom sklearn import mixture\nimport math\nimport time\nfrom scipy.stats import multivariate_normal\nfrom functools import reduce\n\ndef read_features(txt,cols=100):\n\tfeatures=[]\n\tf = open(txt)\n\tlines = f.readlines();\n\tfor line in lines:\n\t\tline_feature=[]\n\t\tvec = line.split(' ')\n\t\tif size(vec) != cols:\n\t\t\tprint (\"cols of input is wrong\")\n\t\telse:\n\t\t#print size(vec)\n\t\t\tfor v in vec:\n\t\t\t\ttry:\n\t\t\t\t\tline_feature.append(float32(v))\n\t\t\t\texcept ValueError:\n\t\t\t\t\tprint ('invalid input %s' %(v))\n\t\t\tfeatures.append(line_feature)\n\tf.close()\n\tfeatures = array(features)\n\treturn features\n\ndef dictionary(descriptors, N):\n\tgmm = mixture.GMM(n_components=N,covariance_type='full')\n\tgmm.fit(descriptors)\n\t#save(\"means.gmm\", gmm.means_)\n\t#save(\"covs.gmm\", gmm.covars_)\n\t#save(\"weights.gmm\", gmm.weights_)\n\treturn float32(gmm.means_),float32(gmm.covars_),float32(gmm.weights_)\n\ndef likelihood_moment(x, gaussians, weights, k, moment):\t\n\tx_moment = power(float32(x), moment) if moment > 0 else float32([1])\n\tprobabilities = map(lambda i: weights[i] * gaussians[i], range(0, len(weights)))\n\tytk = probabilities[k] / sum(probabilities)\n\treturn x_moment * ytk\n\ndef likelihood_statistics(samples, means, covs, weights):\n\ts0, s1,s2 = {}, {}, {}\n\tsamples = zip(range(0, len(samples)), samples)\n\tgaussians = {}\n\tg = [multivariate_normal(mean=means[k], cov=covs[k]) for k in range(0, len(weights)) ]\n\tfor i,x in samples:\n\t\tgaussians[i] = {k : g[k].pdf(x) for k in range(0, len(weights) ) }\n\n\tfor k in range(0, len(weights)):\n\t\t#s0[k] = reduce(lambda a, (i,x): a + likelihood_moment(x, gaussians[i], weights, k, 0), samples, 0)\n\t\t#s1[k] = reduce(lambda a, (i,x): a + likelihood_moment(x, gaussians[i], weights, k, 1), samples, 0)\n\t\t#s2[k] = reduce(lambda a, (i,x): a + likelihood_moment(x, gaussians[i], weights, k, 2), samples, 0)\n\t\ts0[k] = reduce(lambda a, i,x: a + likelihood_moment(x, gaussians[i], weights, k, 0), samples, 0)\n\t\ts1[k] = reduce(lambda a, i,x: a + likelihood_moment(x, gaussians[i], weights, k, 1), samples, 0)\n\t\ts2[k] = reduce(lambda a, i,x: a + likelihood_moment(x, gaussians[i], weights, k, 2), samples, 0)\n\treturn s0, s1, s2\n\ndef fisher_vector_weights(s0, s1, s2, means, covs, w, T):\n\treturn float32([((s0[k] - T * w[k]) / sqrt(w[k]) ) for k in range(0, len(w))])\n\ndef fisher_vector_means(s0, s1, s2, means, sigma, w, T):\n\treturn float32([(s1[k] - means[k] * s0[k]) / (sqrt(w[k] * sigma[k])) for k in range(0, len(w))])\n\ndef fisher_vector_sigma(s0, s1, s2, means, sigma, w, T):\n\treturn float32([(s2[k] - 2 * means[k]*s1[k] + (means[k]*means[k] - sigma[k]) * s0[k]) / (sqrt(2*w[k])*sigma[k]) for k in range(0, len(w))])\n\ndef normalize(fisher_vector):\n\tv = sqrt(abs(fisher_vector)) * sign(fisher_vector)\n\treturn v / max(1,sqrt(dot(v, v)))\n\n\n\"\"\"\n************************Interface*****************************************\n\"\"\"\n\n\ndef generate_gmm(gmm_folder,descriptors, N):\n\t'''\n\tInterface\n\tgmm_folder\n\tdescriptors, (Train data) ,numpy.array, matrix, each row is one sample\n\tN, int ,the number of cluster center\n\t'''\n\tmeans,covs,weights = dictionary(descriptors,N)\n\tsave(gmm_folder+ '/'+ \"means.gmm\", means)\n\tsave(gmm_folder+ '/'+ \"covs.gmm\", covs)\n\tsave(gmm_folder+ '/'+ \"weights.gmm\", weights)\n\treturn means, covs, weights\t\n\ndef load_gmm(folder = \".\"):\n\t'''\n\tInterface\n\t'''\n\tfiles = [\"means.gmm.npy\", \"covs.gmm.npy\", \"weights.gmm.npy\"]\n\treturn map(lambda file: load(open(file,'rb')), map(lambda s : folder + \"/\" + s , files))\n\ndef fisher_vector(samples, means, covs, w):\n\t'''\n\tInterface: \n\tsamples: (to be encoded ),numpy.array , matrix, each row is a sample\n\tmeans: gmm.means_\n\tcovs: gmm.covars_\n\tw: gmm.weights_\n\t'''\n\ts0, s1, s2 = likelihood_statistics(samples, means, covs, w)\n\tT = samples.shape[0]\n\tcovs = float32([diagonal(covs[k]) for k in range(0, covs.shape[0])])\n\ta = fisher_vector_weights(s0, s1, s2, means, covs, w, T)\n\ta = normalize(a)\n\tb = concatenate(fisher_vector_means(s0, s1, s2, means, covs, w, T))\n\tb = normalize(b)\n\tc = concatenate(fisher_vector_sigma(s0, s1, s2, means, covs, w, T))\n\tc = normalize(c)\n\tfv = concatenate([a,b,c])\n\t#fv = concatenate([concatenate(a), concatenate(b), concatenate(c)])\n\t#fv = normalize(fv)\n\treturn fv\n\n\n\n","sub_path":"Fisher-Vector-master/fv_py3.py","file_name":"fv_py3.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"302510936","text":"#!/usr/bin/env python3\nfrom flask.ext.script import Manager\nfrom sqlalchemy_utils import create_database, database_exists, drop_database\n\nfrom scrapper import create_app\n\nmanager = Manager(create_app)\n\n@manager.command\ndef init_db():\n '''\n Initialize database. Should be run before runserver command. Database details\n can be found (and changed) on scrapper/config/server.py\n '''\n from scrapper.config.default import SQLALCHEMY_DATABASE_URI as db_url\n from scrapper import db\n\n if not database_exists(db_url):\n create_database(db_url)\n db.create_all()\n print(\"Done\")\n else:\n print(\"Database already exists\")\n exit(1)\n\n@manager.command\ndef drop_db():\n '''\n Drops database according to scrapper/config/server.py.\n '''\n from scrapper.config.default import SQLALCHEMY_DATABASE_URI as db_url\n\n if database_exists(db_url):\n drop_database(db_url)\n print(\"Done\")\n else:\n print(\"Database doesn't exist\")\n exit(1)\n\nif __name__==\"__main__\":\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"47350369","text":"'''\nCreated on May 30, 2013\n\n@package: ally core http\n@copyright: 2011 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides the paths adjustments for update invokers.\n'''\n\nfrom ally.api.operator.type import TypeModel, TypeProperty\nfrom ally.design.processor.attribute import requires, defines\nfrom ally.design.processor.context import Context\nfrom ally.design.processor.execution import Abort\nfrom ally.design.processor.handler import HandlerProcessor\nfrom ally.http.spec.server import HTTP_PUT\nimport logging\n\n# --------------------------------------------------------------------\n\nlog = logging.getLogger(__name__)\n\n# --------------------------------------------------------------------\n\nclass Register(Context):\n '''\n The register context.\n '''\n # ---------------------------------------------------------------- Required\n invokers = requires(list)\n \nclass Invoker(Context):\n '''\n The invoker context.\n '''\n # ---------------------------------------------------------------- Defined\n path = defines(list)\n # ---------------------------------------------------------------- Required\n methodHTTP = requires(str)\n target = requires(TypeModel)\n location = requires(str)\n \nclass ElementUpdate(Context):\n '''\n The element context.\n '''\n # ---------------------------------------------------------------- Defined\n name = defines(str)\n property = defines(TypeProperty)\n isInjected = defines(bool, doc='''\n @rtype: boolean\n If True indicates that the path element is actually to be injected inside a model entity.\n ''')\n # ---------------------------------------------------------------- Required\n model = requires(TypeModel)\n \n# --------------------------------------------------------------------\n\nclass PathUpdateHandler(HandlerProcessor):\n '''\n Implementation for a processor that provides the invoker adjustments for updates.\n '''\n \n def __init__(self):\n super().__init__(Invoker=Invoker)\n\n def process(self, chain, register:Register, Element:ElementUpdate, **keyargs):\n '''\n @see: HandlerProcessor.process\n \n Provides the paths adjustments based on target models.\n '''\n assert isinstance(register, Register), 'Invalid register %s' % register\n assert issubclass(Element, ElementUpdate), 'Invalid element %s' % Element\n if not register.invokers: return\n\n aborted = []\n for invoker in register.invokers:\n assert isinstance(invoker, Invoker), 'Invalid invoker %s' % invoker\n if invoker.methodHTTP != HTTP_PUT: continue\n if not invoker.target: continue\n assert isinstance(invoker.target, TypeModel), 'Invalid target %s' % invoker.target\n if not invoker.target.propertyId: continue\n \n if invoker.path is None: invoker.path = []\n for el in invoker.path:\n assert isinstance(el, ElementUpdate), 'Invalid element %s' % el\n if el.model == invoker.target:\n log.error('Cannot use for update because the %s is already present as input, at:%s',\n invoker.target, invoker.location)\n aborted.append(invoker)\n break\n else:\n invoker.path.append(Element(name=invoker.target.name, model=invoker.target))\n invoker.path.append(Element(property=invoker.target.propertyId, isInjected=True))\n\n if aborted: raise Abort(*aborted)\n \n","sub_path":"components/ally-core-http/ally/core/http/impl/processor/assembler/path_for_update.py","file_name":"path_for_update.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"523218819","text":"import httplib2\nimport os\n\nfrom bs4 import BeautifulSoup as soup\n# import requests\nimport re\n\nimport pytz\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\n\nfrom googleapiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\nSCOPES = 'https://www.googleapis.com/auth/calendar'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'UKY Schedule Webscrape'\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'uky-web-scrape-calendar.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\neventIdList = []\ncalendarId = 'primary'\n# calendarId = 'un1nmhba2l7vfm5vvie66m6nrk@group.calendar.google.com'\n\ndef getService():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n return discovery.build('calendar', 'v3', http=http)\n\ndef main():\n choiceText = \"\\n1. Add\\n2. Delete\\n3. Exit\\nChoice: \"\n choice = int(input(choiceText))\n\n while choice == 1 or choice == 2:\n if choice == 1:\n add()\n # elif choice == 2:\n # delete()\n choice = int(input(choiceText))\n\n exit()\n\ndef add():\n # url = 'https://myuk.uky.edu/zAPPS/CourseCatalog/Schedule/Print/2018/010'\n # res = requests.get(url)\n # page_html = res.text\n # page_soup = soup(page_html, \"html.parser\")\n # print(page_soup)\n\n fileName = \"calendar.html\"\n page_html = open(fileName, 'r').read()\n page_soup = soup(page_html, \"html.parser\")\n\n # httplib2.debuglevel = 4\n service = getService()\n\n six_months = date.today() + relativedelta(months=+6)\n year = str(six_months.year)\n\n timeZone = \"America/New_York\"\n\n courses = page_soup.findAll(\"div\", {\"class\": \"table-thin-row small\"})\n\n for course in courses:\n title = \" \".join(course.find(\"strong\", {\"class\": \"text-dark\"}).text.split())\n section = course.findAll(\"div\")[4].text\n courseType = course.findAll(\"div\")[5].text.strip()\n\n p = re.compile(r'Section ')\n newSection = p.sub(\"\", section)\n summary = title + \" - \" + newSection\n\n weekdays = course.findAll(\"div\")[7].text\n if weekdays == \"TBD\": # for online classes that do not have a time or location\n continue\n\n hour = course.findAll(\"div\")[8].text\n dates = course.findAll(\"div\")[9].text.strip()\n\n hourList = hour.split(\" - \")\n datesList = dates.split(\"-\")\n googleDateTimeList = []\n for i in range(0, len(datesList)):\n h = ''.join(hourList[i].split(\" \"))\n new_time = dt.strptime(h, '%I:%M%p').strftime(\"%H:%M\")\n new_time += \":00\"\n unformattedTime = datesList[0] + \" \" + year + \" \" + new_time\n naive = dt.strptime(unformattedTime, '%b %d %Y %H:%M:%S')\n local = pytz.timezone(timeZone)\n local_dt = local.localize(naive, is_dst=None)\n googleDateTimeList.append(local_dt.strftime(\"%Y-%m-%d\"+\"T\"+\"%H:%M:%S\"+\"-05:00\"))\n googleDateTimeStart = googleDateTimeList[0]\n googleDateTimeEnd = googleDateTimeList[1]\n\n classEndTime = ''.join(hourList[1].split(\" \"))\n new_time = dt.strptime(classEndTime, '%I:%M%p').strftime(\"%H:%M\")\n new_time += \":00\"\n unformattedTime = datesList[1] + \" \" + year + \" \" + new_time\n naive = dt.strptime(unformattedTime, '%b %d %Y %H:%M:%S')\n naive += timedelta(hours=48) # make two days ahead because recurrence is inclusive of end time\n local = pytz.timezone(timeZone)\n local_dt = local.localize(naive, is_dst=None)\n until = local_dt.strftime(\"%Y%m%d\"+\"T\"+\"%H%M%S\"+\"Z\")\n\n weekdays = list(weekdays)\n days = \"\"\n for char in weekdays:\n if char == 'M': days += \"MO\"\n elif char == 'T': days += \"TU\"\n elif char == 'W': days += \"WE\"\n elif char == 'R': days += \"TH\"\n else: days += \"FR\"\n days += \",\"\n days = days[:-1] # remove last comma\n\n recurrence = \"RRULE:FREQ=WEEKLY;UNTIL={};BYDAY={}\".format(until, days)\n\n building = course.findAll(\"div\")[11].text\n room = course.findAll(\"div\")[12].text\n location = building + \" \" + room\n\n instuctor = course.findAll(\"div\")[13].text.strip()\n\n description = courseType + \"\\n\" + instuctor\n\n event = {\n 'summary': summary,\n 'location': location,\n 'description': description,\n 'start': {\n 'dateTime': googleDateTimeStart,\n 'timeZone': timeZone,\n },\n 'end': {\n 'dateTime': googleDateTimeEnd,\n 'timeZone': timeZone,\n },\n 'recurrence': [\n recurrence\n ]\n }\n event = service.events().insert(calendarId=calendarId, body=event).execute()\n print('Event created for {}: {}'.format(title, event.get('htmlLink')))\n\n # eventURL = event.get('htmlLink')\n # eventIdIndex = eventURL.rfind(\"=\")\n # eventId = eventURL[eventIdIndex+1:]\n # eventIdList.append(eventId)\n\n# def delete():\n# service = getService()\n# for id in eventIdList:\n# event = service.events().delete(calendarId=calendarId, eventId=id).execute()\n# print(event)\n\nif __name__ == '__main__':\n main()\n","sub_path":"ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":6450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"172967969","text":"\n\nfrom ui.equip_ui import *\n\n\nclass Equip3(Equip_UI):\n\n def __init__(self,parent=None):\n super(Equip3,self).__init__(parent)\n paths = gol.get_value('monitor_filepath', \"E:\\\\DR\\\\\")\n self.watcher=QFileSystemWatcher()\n if isinstance(paths,list):\n self.watcher.addPaths(paths)\n else :\n self.watcher.addPath(paths)\n\n self.watcher.directoryChanged.connect(self.mes)\n # self.filemonitor.directoryChanged.connect(self.newfile)\n\n\n def mes(self,p_str):\n print(p_str)\n print(self.watcher.files())\n print(self.watcher.directories())\n\n\n\n\n\nif __name__ == '__main__':\n from utils.envir import *\n set_env()\n from ui.tj_main_ui import *\n app = QApplication(sys.argv)\n ui = Equip3()\n ui.show()\n #ui.showFullScreen()\n app.exec_()","sub_path":"filemonitor.py","file_name":"filemonitor.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"515174037","text":"from __future__ import print_function\nimport os\nimport pandas as pd\nimport xgboost as xgb\nimport time\n\nimport shutil\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import train_test_split\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom sklearn import metrics\ndef archive_results(filename,results,algo,script):\n \"\"\"\n :type algo: basestring\n :type script: basestring\n :type results: DataFrame\n \"\"\"\n #assert results == pd.DataFrame\n now=time.localtime()[0:5]\n dirname='../archive'\n subdirfmt='%4d-%02d-%02d-%02d-%02d'\n subdir=subdirfmt %now\n if not os.path.exists(os.path.join(dirname,str(algo))):\n os.mkdir(os.path.join(dirname,str(algo)))\n dir_to_create=os.path.join(dirname,str(algo),subdir)\n if not os.path.exists(dir_to_create):\n os.mkdir(dir_to_create)\n os.chdir(dir_to_create)\n\n results.to_csv(filename,index=False,float_format='%.6f')\n shutil.copy2(script,'.')\n\n return\n\n###############################################################################################\n\ndef preprocess_data(train,test):\n id_test=test['patient_id']\n\n train=train.drop(['patient_id'],axis=1)\n test=test.drop(['patient_id'],axis=1)\n #train=train.drop_duplicates()\n y=train['is_screener']\n train=train.drop(['is_screener'],axis=1)\n\n\n for f in train.columns:\n if train[f].dtype == 'object':\n print(f)\n lbl = preprocessing.LabelEncoder()\n lbl.fit(list(train[f].values) + list(test[f].values))\n train[f] = lbl.transform(list(train[f].values))\n test[f] = lbl.transform(list(test[f].values))\n\n return id_test,test,train,y\n\nos.chdir(os.getcwd())\ntrainfile=('../input/patients_train.csv.gz')\ntestfile=('../input/patients_test.csv.gz')\nsurgical_file=('../input/surgical_selected_last.csv.gz')\nactivity_file=('../input/activity_selected_last.csv.gz')\ndiagnosis_file=('../input/diagnosis_selected_last.csv.gz')\nprocedure_file=('../input/procedure_selected_last.csv.gz')\nprescription_file=('../input/prescription_selected_last.csv.gz')\nphysicians_file=('../input/physicians.csv.gz')\ndrugs_file=('../input/drugs.csv.gz')\n\ntrain=pd.read_csv(trainfile,low_memory=False )\ntest=pd.read_csv(testfile,low_memory=False )\n\n##prepare a sparse matrix#\ntrain_ex_file=('../input/train_patients_to_exclude.csv.gz')\ntrain_ex=pd.read_csv(train_ex_file,low_memory=False)\ntrain=train[train.patient_id.isin(train_ex.patient_id)==False]\ntest_ex_file=('../input/test_patients_to_exclude.csv.gz')\ntest_ex=pd.read_csv(test_ex_file,low_memory=False)\ntest=test[test.patient_id.isin(test_ex.patient_id)==False]\nprint(train.shape,test.shape)\n\nsurgical=pd.read_csv(surgical_file )\ntrain=pd.merge(train,surgical, on='patient_id',how='left')\ntest=pd.merge(test,surgical, on='patient_id',how='left')\nprint('after merging surgical')\nprint(train.shape,test.shape)\n\nactivity=pd.read_csv(activity_file )\ntrain=pd.merge(train,activity, on='patient_id',how='left')\ntest=pd.merge(test,activity, on='patient_id',how='left')\nprint('after merging activity')\nprint(train.shape,test.shape)\nprescription=pd.read_csv(prescription_file)\ndrugs=pd.read_csv(drugs_file)\nphysicians=pd.read_csv(physicians_file)\nprescription=pd.merge(prescription,drugs, left_on='patient_id',right_on='drug_id',how='left')\nprescription=pd.merge(prescription,physicians, left_on='patient_id',right_on='practitioner_id',how='left')\ntrain=pd.merge(train,prescription,on='patient_id',how='left')\ntest=pd.merge(test,prescription,on='patient_id',how='left')\nprint('after merging prescription ')\nprint(train.shape,test.shape)\n\nprocedure=pd.read_csv(procedure_file )\ndiagnosis=pd.read_csv(diagnosis_file)\ntrain=pd.merge(train,procedure,on='patient_id',how='left')\ntest=pd.merge(test,procedure,on='patient_id',how='left')\nprint('after merging procedure')\nprint(train.shape,test.shape)\ntrain=pd.merge(train,diagnosis, on='patient_id',how='left')\ntest=pd.merge(test,diagnosis, on='patient_id',how='left')\nprint('after merging diagnosis ')\nprint(train.shape,test.shape)\n\n\ntrain=train.fillna(0)\ntest=test.fillna(0)\n\nid_test,test,train,y=preprocess_data(train,test)\nprint(train.shape,test.shape)\n#print(train.columns)\n\nX=np.asarray(train)\ny=np.asarray(y)\nX_test=np.asarray(test)\nX,y=shuffle(X,y,random_state=9)\nX_train,X_val0,y_train,y_val0 = train_test_split(X,y,test_size=0.1,random_state=17)\nX_train,X_val,y_train,y_val = train_test_split(X_train,y_train,test_size=0.1,random_state=77)\n\n#from sklearn import preprocessing,decomposition\n#scl=decomposition.PCA(n_components=None,whiten=False)\n#scl=preprocessing.RobustScaler()\n#X_train=scl.fit_transform(X_train)\n#X_val=scl.transform(X_val)\n#X_test=scl.transform(X_test)\n#X_val0=scl.transform(X_val0)\n\ndval=xgb.DMatrix(data=X_val,label=y_val)\ndtrain=xgb.DMatrix(data=X_train,label=y_train)\nDTest=xgb.DMatrix(data=X_test)\nDval0=xgb.DMatrix(data=X_val0)\nwatchlist = [(dval,'eval'), (dtrain,'train')]\n\nparams = {\"objective\": \"binary:logistic\",\n \"eta\": 0.023,\n \"eta_decay\":0.5,\n \"max_depth\": 6,\n \"silent\":1,\n \"subsample\": 0.9,\n \"colsample_bytree\": 0.65,\n \"seed\": 1193,\n \"booster\": \"gbtree\",\n \"nthread\":6,\n \"eval_metric\":'auc'\n }\n#\nclf = xgb.train(params, dtrain, num_boost_round=1000, evals=watchlist, early_stopping_rounds=30,verbose_eval=True,\n maximize= True)\n\npredictions=clf.predict(DTest)\nscore=clf.best_score\nprint('best score:%s'%score)\ny_pred=clf.predict(Dval0)\n\nscore=metrics.roc_auc_score(y_val0, y_pred)\nprint('score on extra set:%s' %score)\nmodel='XGBOOST_onselected'\n#\n# predict on test set\nsubmission='%s_score_%03f.csv' %(model,score)\n# create submission file\n\npreds = pd.DataFrame({\"patient_id\": id_test, 'predict_screener': predictions})\nresults=preds.groupby(['patient_id'])['predict_screener'].mean()\nidx=preds.groupby(['patient_id'])['patient_id'].mean().astype(int)\ntest_ex['predict_screener']=0.0\n\n# Create your first submission file\nxgb_preds0 = pd.DataFrame({\"patient_id\": idx, \"predict_screener\": results})\nxgb_preds=pd.concat([xgb_preds0,test_ex],axis=0)\nxgb_preds.to_csv('../output/'+submission,index=False)\nscript=os.path.abspath(__file__)\nprint (script)\nranking=0.75\nprint ('score= %03f'%score)\nif score>ranking:\n print('score is higher than %s'%ranking)\n archive_results(submission,xgb_preds,model,script)\n","sub_path":"cervical-cancer-screening/models/src/XGBFull-selected.py","file_name":"XGBFull-selected.py","file_ext":"py","file_size_in_byte":6372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"97202044","text":"import mysql.connector\nimport csv\nimport time\nimport sys\nimport logging\n\nformat = \"%(asctime)s: %(levelname)s: %(funcName)s Line:%(lineno)d %(message)s\"\nlogging.basicConfig(filename=\"log_Py.log\", level=logging.INFO, filemode=\"a\", format=format)\n'''logger = logging.getLogger(\"meinLogger\")\nlogger.setLevel(logging.DEBUG)\nfileh = logging.FileHandler(\"log.txt\")\nform = logging.Formatter('%(name)s - %(levelname)s : %(asctime)s - %(message)s')\nfileh.setFormatter(form)\nlogger.addHandler(fileh)\nlogger.debug(\"debugging\")'''\n\ndef time_fun(sec):\n time.sleep(sec)\n\n\ndef help_function():\n print(\"You need help? Hear is my support :)\")\n print(\"You can always write a word or a sentence, what describe your intention. For example delete column or make a new table.\")\n logging.info(\"Help function was activated\")\n\n\ndef Server_connection():\n print(\"Please enter the connection data:\")\n logging.info(\"Rhe user try to connect to the server\")\n\n global connection\n condition = True\n while condition:\n try:\n host = input(\"Please input the host name: \")\n user = input(\"Please input the user name: \")\n password = input(\"Please input the password: \")\n # database = input(\"Please input the database name: \")\n\n stop_to_repeat = True\n\n while stop_to_repeat:\n\n connection = mysql.connector.connect(host=host,\n user=user,\n password=password\n )\n global mycursor\n mycursor = connection.cursor(buffered=True)\n\n print(\"\")\n mycursor.execute(\"SHOW DATABASES\")\n print(\"Databases: \")\n for db in mycursor:\n print(db)\n\n print(\"\")\n\n num = True\n while num:\n try:\n print(\"1. Connect with a database\\n\"\n \"2. Make a new database\")\n answer = int(input(\"Please answer with 1 or 2: \"))\n if answer == 1 or answer == 2:\n if answer == 2:\n name_of_databse = input(\"Name of the new database: \")\n\n sqlform = \"CREATE DATABASE \" + name_of_databse\n mycursor.execute(sqlform)\n else:\n database = input(\"Please enter the database name: \")\n connection = mysql.connector.connect(host=host,\n user=user,\n password=password,\n database=database)\n mycursor = connection.cursor(buffered=True)\n stop_to_repeat = False\n\n num = False\n\n except:\n print(\"Please answer with a number 1 or 2!\")\n logging.error(\"An error with number input\")\n\n if (connection):\n print(\"Connection successful\")\n condition = False\n time_fun(1)\n\n except:\n print(\"Not connected\")\n logging.critical(\"The logging information was not correct\")\n\n\ndef table_make():\n condition = True\n print(\"Not forget, your table must have at least 1 column\")\n\n while condition:\n try:\n tableN = input(\"Please enter the name of the new table: \")\n columns = int(input(\"How many columns do you want to make: \"))\n i = 0\n\n while i < columns:\n columnsN = input(\"Please enter the columns name: \")\n columnsT = input(\"Please enter the columns type: \")\n i = i + 1\n\n if i == 1:\n sqlform = \"CREATE TABLE \" + tableN + \"(\" + columnsN + \" \" + columnsT + \")\"\n else:\n sqlform = \"ALTER TABLE \" + tableN + \" ADD \" + columnsN + \" \" + columnsT\n\n mycursor.execute(sqlform)\n if i == columns:\n condition = False\n\n except:\n print(\"Please try again!\")\n\n\ndef table_delete():\n try:\n tableN = input(\"Please enter the table name: \")\n\n sqlform = \"DROP TABLE \" + tableN\n\n mycursor.execute(sqlform)\n except:\n print(\"Please check if the table really exist!\")\n\n\ndef show_table():\n sqlformp = \"SHOW TABLES\"\n mycursor.execute(sqlformp)\n\n for tb in mycursor:\n print(tb)\n\n\ndef columns_make():\n try:\n tableN = input(\"Please enter the table name:\")\n\n columns = int(input(\"How many columns do you want to make: \"))\n i = 0\n\n while i < columns:\n columnsN = input(\"Please enter the columns name: \")\n columnsT = input(\"Please enter the columns type: \")\n\n sqlform = \"ALTER TABLE \" + tableN + \" ADD \" + columnsN + \" \" + columnsT\n\n mycursor.execute(sqlform)\n i = + 1\n except:\n print(\"Please make sure that you gave the correct input!\")\n\n\ndef columns_drop():\n try:\n tableN = input(\"Please input the name of the table:\")\n columnN = input(\"Please enter the name of the column you want to delete: \")\n\n sqlform = \"ALTER TABLE \" + tableN + \" DROP COLUMN \" + columnN\n\n mycursor.execute(sqlform)\n except:\n print(\"Please make sure that you gave correct input or if the column exist!\")\n\n\ndef show_columns():\n try:\n tableN = input(\"Please enter the table name: \")\n\n sqlform = \"SHOW COLUMNS FROM \" + tableN\n\n mycursor.execute(sqlform)\n\n for tb in mycursor:\n print(tb)\n except:\n print(\"Please make sure that the input is correct!\")\n\n\ndef addition_to_table():\n try:\n tableN = input(\"Please enter the name of the table:\")\n sqlform = \"SHOW COLUMNS FROM \" + tableN\n mycursor.execute(sqlform)\n\n columnN = input(\"Please enter the name of the column, which you want to add the information: \")\n\n addition_text = input(\"Pleas enter what you want to add to the \" + columnN + \": \")\n\n if addition_text.isdigit():\n sqlform = \"INSERT INTO \" + tableN + \" (\" + columnN + \")\" + \"VALUES\" + \"(\" + addition_text + \")\"\n else:\n sqlform = \"INSERT INTO \" + tableN + \" (\" + columnN + \")\" + \"VALUES\" + \"(\\\"\" + addition_text + \"\\\")\"\n\n mycursor.execute(sqlform)\n connection.commit()\n\n except:\n print(\"Please make sure that the input is correct!\")\n\n ''' i = 0\n for tb in mycursor:\n print(tb)\n i = +1\n '''\n\n ''' t = 0\n b = 0\n more = True\n\n while more:\n while t < i - 1:\n more_info = input(\n \"Do you want to add information to another column in the same row? you can write the column name or 0 for no: \")\n\n more = False\n else:\n column_2 = input(\"Pleas enter the column name: \")\n b =+ 1\n '''\n\n ''' \n if b == 1:\n addition_text_one = input(\"Pleas enter what you want to add to the \" + columnN + \": \")\n addition_text_tow = input(\"Pleas enter what you want to add to the \" + columnN + \": \")\n \n if addition_text_one.isdigit():\n sqlform = \"INSERT INTO \" + tableN + \" (\" + columnN + \")\" + \"VALUES\" + \"(\" + addition_text + \")\"\n else:\n sqlform = \"INSERT INTO \" + tableN + \" (\" + columnN + \")\" + \"VALUES\" + \"(\\\"\" + addition_text + \"\\\")\"\n\n '''\n\n\ndef alter_some_inTable():\n tableN = input(\"Please enter the name of the table: \")\n sqlform = \"SHOW COLUMNS FROM \" + tableN\n mycursor.execute(sqlform)\n\n columnN = input(\"Please enter the name of the column, which you want to add information: \")\n add_to_column = input(\"Pleas enter what you want to add: \")\n\n columnC = input(\"Please enter the name of the column for the condition: \")\n condition_text = input(\"Please enter the condition: \")\n\n sqlform = \"UPDATE \" + tableN + \" SET \" + columnN + \"= %s WHERE \" + columnC + \"= %s\"\n values = (add_to_column, condition_text)\n\n mycursor.execute(sqlform, values)\n connection.commit()\n\n\ndef delete_from_table():\n try:\n tableN = input(\"Please input the name of the table:\")\n columnN = input(\"Please enter the name of the column: \")\n delete_item = input(\"Pleas enter what you want to delete: \")\n\n sqlform = \"DELETE FROM \" + tableN + \" WHERE \" + columnN + \"=\" + delete_item\n mycursor.execute(sqlform)\n connection.commit()\n except:\n print(\"Please make sure that the input is correct!\")\n\n\ndef show_data():\n global tableNS\n tableNS = input(\"Please enter tne table name: \")\n\n sqlform = \"SELECT * FROM \" + tableNS\n\n mycursor.execute(sqlform)\n\n global res\n res = mycursor.fetchall()\n\n for row in res:\n print(row)\n\n\ndef save_data():\n with open(\"mydata.txt\", \"a\") as file:\n file.write(\"The table \" + tableNS + \"H has: \\n\")\n for row in res:\n csv.writer(file).writerow(row)\n\n\ndef end_fun():\n print(\"That was fun with you :). I hope i see you again.\")\n sys.exit(0)\n\n\ndef switch_function(num):\n if num == \"0\" or num == \"end\" or num == \"exit\":\n end_fun()\n if num == \"1\" or num == \"show the tables\" or num == \"tables\" or num == \"tables show\":\n show_table()\n elif num == \"2\" or num == \"make a table\" or num == \"table make\" or num == \"make a new table\":\n table_make()\n elif num == \"3\" or num == \"delete a table\" or num == \"table delete\":\n table_delete()\n\n elif num == \"4\" or num == \"show the columns\" or num == \"columns\" or num == \"columns show\":\n show_columns()\n elif num == \"5\" or num == \"make a column\" or num == \"column make\" or num == \"make a new column\":\n columns_make()\n elif num == \"6\" or num == \"delete a column\" or num == \"column delete\":\n columns_drop()\n\n elif num == \"7\" or num == \"show the data\" or num == \"data\" or num == \"data show\":\n show_data()\n save_data()\n elif num == \"8\" or num == \"add\" or num == \"add something\" or num == \"something add\":\n addition_to_table()\n elif num == \"9\":\n alter_some_inTable()\n elif num == \"10\" or num == \"delete something\" or num == \"something delete\":\n delete_from_table()\n\n elif num == \"11\" or num == \"help\" or num == \"help me\" or num == \"-help\" or num == \"--help\":\n help_function()\n","sub_path":"py_Main_Programm/py_Programm_Classes.py","file_name":"py_Programm_Classes.py","file_ext":"py","file_size_in_byte":10819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"98815417","text":"def is_palindrome(value):\n middle_value = len(value)//2\n\n match = 0\n\n for i in range(0, middle_value):\n for j in range(-1, -middle_value-1, -1):\n if value[i] == value[j]:\n match += 1\n\n if match == len(value)//2:\n return True\n\n else:\n return False\n\n\ndef get_chinese_zodiac():\n zodiac_reference = {1: \"Rat\", 2: \"Ox\", 3: \"Tiger\", 4: \"Rabbit\", 5: \"Dragon\", 6: \"Snake\",\n 7: \"Horse\", 8: \"Goat\", 9: \"Monkey\", 10: \"Rooster\", 11: \"Dog\", 12: \"Pig\"}\n\n return [[i, zodiac_reference[(i%12)+1]] for i in range(1900, 2021)]\n\nif __name__ == \"__main__\":\n print(get_chinese_zodiac())\n","sub_path":"csc_336_data_structures/exercises/01_27_exercises.py","file_name":"01_27_exercises.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"455498172","text":"\"\"\"Plot the spanwise-averaged z-vorticity at saved time steps.\"\"\"\n\nfrom matplotlib import pyplot\nimport numpy\nimport pathlib\nimport yaml\n\nimport petibmpy\n\n\nsimudir = pathlib.Path(__file__).absolute().parents[1]\ndatadir = simudir / 'output' / 'postprocessing' / 'wz_avg'\n\n# Read the gridline coordinates from file.\nfilepath = datadir / 'grid.h5'\nx, y = petibmpy.read_grid_hdf5(filepath, 'wz-avg')\n\n# Read the boundary coordinates from file.\nfilepath = simudir / 'snake3d35.body'\nxb, yb, zb = petibmpy.read_body(filepath, skiprows=1)\n# Keep only a 2D cross-section.\nn = len(numpy.where(zb == zb[0])[0])\nxb, yb = xb[:n], yb[:n]\n\n# Get temporal parameters.\nfilepath = simudir / 'config.yaml'\nwith open(filepath, 'r') as infile:\n config = yaml.load(infile, Loader=yaml.FullLoader)['parameters']\ndt, nstart, nt, nsave = (config[k] for k in ['dt', 'startStep', 'nt', 'nsave'])\ntimesteps = list(range(nstart, nstart + nt + 1, nsave))\n\n# Create the directory to save the figures.\nfigdir = simudir / 'figures'\nfigdir.mkdir(parents=True, exist_ok=True)\n\n# Initialize the figure and axis.\npyplot.rc('font', family='serif', size=16)\nfig, ax = pyplot.subplots(figsize=(8.0, 4.0))\nax.set_xlabel('x')\nax.set_ylabel('y')\ntext = ax.text(-0.5, 0.8, '',\n bbox=dict(facecolor='white', edgecolor='white'), zorder=5)\nax.fill(xb, yb, color='black', zorder=10)\nlevels = numpy.linspace(-5.0, 5.0, num=50)\ncont = None\nax.axis('scaled', adjustable='box')\nax.set_xlim(-0.6, 4.5)\nax.set_ylim(-1.0, 1.0)\nfig.tight_layout()\n\n# Generate the filled contour at each saved time step.\nfor timestep in timesteps:\n print('[time step {:0>7}] Generating the figure...'.format(timestep))\n filepath = datadir / '{:0>7}.h5'.format(timestep)\n wz = petibmpy.read_field_hdf5(filepath, 'wz-avg')\n text.set_text('t = {}'.format(timestep * dt))\n if cont is not None:\n for collection in cont.collections:\n fig.gca().collections.remove(collection)\n cont = ax.contourf(x, y, wz, levels=levels, extend='both', zorder=0)\n filepath = figdir / 'wz_avg_wake2d_{:0>7}.png'.format(timestep)\n fig.savefig(filepath)\n","sub_path":"runs/snake/3d/1k35/scripts/plot_wz_avg_wake2d_pyplot.py","file_name":"plot_wz_avg_wake2d_pyplot.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"609436098","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def oddEvenList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head == None:\n return head\n ou_head = head.next\n ji_zhi = head\n ou_zhi = head.next\n while ji_zhi or ou_zhi:\n if ou_zhi==None or ou_zhi.next==None:\n ji_zhi.next = ou_head\n return head\n ji_zhi.next = ou_zhi.next\n ji_zhi = ji_zhi.next\n ou_zhi.next = ji_zhi.next\n ou_zhi = ou_zhi.next\n","sub_path":"算法面试题汇总/4链表/8奇偶链表-第一版.py","file_name":"8奇偶链表-第一版.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"89761717","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ncols = ['LSTAT', 'PTRATIO', 'RM', 'MEDV'] # housing price dataset\ncm = np.corrcoef(data[cols].values.T)\nsns.set(font_scale=1.5)\nhm = sns.heatmap(cm,\n cbar=True,\n annot=True,\n square=True,\n fmt='.2f',\n annot_kws={'size': 15},\n yticklabels=cols,\n xticklabels=cols)\n\nplt.show()\n","sub_path":"lessons/snippets/heat_map.py","file_name":"heat_map.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"411070712","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport collections\nfrom typing import List\n\n\nclass Solution:\n def smallestRange(self, nums: List[List[int]]) -> List[int]:\n\n n = len(nums)\n indices = collections.defaultdict(list)\n xMin, xMax = 10 ** 9, -10 ** 9\n for i, vec in enumerate(nums):\n for x in vec:\n indices[x].append(i)\n xMin = min(xMin, *vec)\n xMax = max(xMax, *vec)\n\n # 记录频次\n freq = [0] * n\n\n inside = 0\n left, right = xMin, xMin - 1\n bestLeft, bestRight = xMin, xMax\n\n while right < xMax:\n right += 1\n if right in indices:\n for x in indices[right]:\n freq[x] += 1\n if freq[x] == 1:\n inside += 1\n while inside == n:\n if right - left < bestRight - bestLeft:\n bestLeft, bestRight = left, right\n if left in indices:\n for x in indices[left]:\n freq[x] -= 1\n if freq[x] == 0:\n inside -= 1\n left += 1\n\n return [bestLeft, bestRight]\n\n\nif __name__ == '__main__':\n s = Solution()\n nums = [[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]\n print(s.smallestRange(nums))\n","sub_path":"备考/632.py","file_name":"632.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"142745232","text":"import socket\n\nBUFSIZE = 1024\nip_port = ('127.0.0.1', 8011)\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nres = s.connect_ex(ip_port)\n\ns.send('hello'.encode('utf-8'))\ns.send('feng'.encode('utf-8'))\n","sub_path":"project/python_fullstack/day19/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"544630916","text":"import pygame, math\nfrom pygame.sprite import Sprite\nfrom pygame.locals import *\n\nclass Ship(Sprite):\n def __init__(self, contenedor):\n Sprite.__init__(self)\n self.angulo = 0\n self.puntos = 0\n self.vida = 100\n self.vel = [0,0]\n self.contenedor = contenedor\n self.base_image = pygame.image.load(\"imagenes/ship.png\")\n self.image = self.base_image\n self.rect = self.image.get_rect()\n self.rect.move_ip(contenedor[0]/2, contenedor[1]/2)\n \n def update(self):\n teclas = pygame.key.get_pressed()\n if teclas[K_LEFT]:\n self.rotar(2)\n elif teclas[K_RIGHT]:\n self.rotar(-2)\n elif teclas[K_UP]:\n self.acelerar()\n elif teclas[K_DOWN]:\n pass\n \n self.vel[0] *= 0.99\n self.vel[1] *= 0.99\n self.rect = self.rect.move(self.vel)\n self.rect.x = self.rect.x % self.contenedor[0]\n self.rect.y = self.rect.y % self.contenedor[1]\n \n def acelerar(self):\n self.vel[0] += math.cos(math.radians((self.angulo)%360))\n self.vel[1] -= math.sin(math.radians((self.angulo)%360))\n\n def rotar(self, angulo):\n self.angulo += angulo\n centerx = self.rect.centerx\n centery = self.rect.centery\n self.image = pygame.transform.rotate(self.base_image, self.angulo)\n self.rect = self.image.get_rect() \n self.rect.centerx = centerx\n self.rect.centery = centery\n","sub_path":"ejercicio05/asteroids_002/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"553205580","text":"import pygame\r\nimport random\r\nimport animation \r\nclass Monster(animation.AnimateSprite):\r\n def __init__(self,game, name,size, offset=0):\r\n super().__init__(name,size)\r\n self.game = game\r\n self.health = 100\r\n self.max_health = 100\r\n self.attack = 0.3\r\n self.rect = self.image.get_rect()\r\n self.rect.x = 1000 + random.randint(0,300)\r\n self.rect.y = 500 - offset\r\n self.velocity = random.randint(1,2)\r\n self.start_animation()\r\n self.loot_amount = 10\r\n\r\n def set_loot_amount(self,amount):\r\n self.loot_amount = amount\r\n\r\n def set_speed(self, speed):\r\n self.default_speed = speed\r\n self.velocity = random.randint(1,3)\r\n\r\n def damage(self,amount):\r\n #infliger les degats\r\n self.health -= amount\r\n #vérifier si le nb de points de vie est inférieur ou égal à 0\r\n if self.health <= 0:\r\n self.rect.x = 1000 + random.randint(0,300)\r\n self.velocity = random.randint(1,self.default_speed)\r\n self.health = self.max_health\r\n #incrementer le score\r\n self.game.add_score(20)\r\n\r\n #si la barre d'event est chargé à son maxi on ne fait pas repop les monstres\r\n if self.game.comet_event.is_full_loaded():\r\n #retirer du jeu\r\n self.game.all_monsters.remove(self)\r\n\r\n #appel de la méthode pour essayer de déclencher la pluie de cometes\r\n self.game.comet_event.attempt_fall()\r\n\r\n def update_health_bar(self,surface):\r\n #définir une couleur pour une jauge de vie\r\n bar_color = (111,210,46)\r\n #définir une couleur pour l'arriere plan de la jauge (gris foncé)\r\n back_bar_color = (60, 63, 60)\r\n\r\n #definir la position de notre jauge de vie ainsi que sa largeur et son épaisseur\r\n bar_position = [self.rect.x + 10, self.rect.y - 20, self.health, 5]\r\n\r\n #definir la position de l'arrière plan de notre jauge de vie \r\n back_bar_position = [self.rect.x + 10, self.rect.y - 20, self.max_health, 5]\r\n\r\n # dessiner notre barre de vie\r\n pygame.draw.rect(surface, back_bar_color, back_bar_position)\r\n pygame.draw.rect(surface, bar_color, bar_position)\r\n\r\n def update_animation(self):\r\n self.animate(loop=True)\r\n \r\n def forward(self):\r\n #le deplacement ne se fait que si il ny'a pas de collision avec un groupe de joueur\r\n if not self.game.check_collision(self, self.game.all_players):\r\n self.rect.x -= self.velocity\r\n #si le monstre est en collision avec le joueur\r\n else:\r\n #infliger des degats\r\n self.game.player.damage(self.attack)\r\n\r\n#définir une class pour les mobs\r\nclass BadWorm(Monster):\r\n def __init__(self,game):\r\n super().__init__(game,\"bad_worms\", (130,130))\r\n self.set_speed(3)\r\n self.set_loot_amount(20)\r\n#définir une class pour le boss\r\nclass Boss(Monster):\r\n def __init__(self,game):\r\n super().__init__(game,\"boss\", (300,300), 130)\r\n self.health = 250\r\n self.max_health = 250\r\n self.attack = 0.8\r\n self.set_speed(1)\r\n self.set_loot_amount(50)\r\n\r\n","sub_path":"monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"165743701","text":"import argparse\nimport os\nimport shutil\nimport sys\n\nimport yaml\n\ntry:\n sys.path.append(os.getcwd())\n from src.log import log\n from src.patch.updater import Updater\n from src.utils import Util\nexcept ModuleNotFoundError as e:\n raise e\n\n\ndef save_file(path, config):\n try:\n yaml_dumper = yaml.CDumper\n except Exception:\n yaml_dumper = yaml.Dumper\n with open(path, 'wb') as f:\n f.write(yaml.dump(config, Dumper=yaml_dumper, encoding='utf-8', allow_unicode=True))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='WalBot config patcher', formatter_class=argparse.RawTextHelpFormatter)\n files = [\n \"config.yaml\",\n \"markov.yaml\",\n \"secret.yaml\",\n ]\n parser.add_argument(\"file\",\n choices=[\n \"all\",\n *files,\n ],\n nargs='?',\n default=\"all\",\n help='Config file to patch')\n args = parser.parse_args()\n if args.file != \"all\":\n files = [args.file]\n for file in files:\n config = Util.read_config_file(file)\n if config is None:\n log.error(\"File '{}' does not exist\".format(file))\n sys.exit(1)\n if not hasattr(config, \"version\"):\n log.error(\"{} does not have 'version' field\".format(file))\n sys.exit(1)\n version = config.version\n log.info(\"WalBot config patch tool: {}@{}\".format(file, version))\n if Updater(file, config).result():\n if not os.path.exists(\"backup\"):\n os.makedirs(\"backup\")\n shutil.copyfile(file, \"backup/\" + file + \".bak.\" + version)\n save_file(file, config)\n log.info(\"Successfully saved file: {}\".format(config.version))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"558753764","text":"import json\nfrom azure_resource_broker import AzureResourceBroker\nfrom config import AzureConfig\n\nvm_entity_sublist = []\nfor sub_name, sub_id in AzureConfig.AZURE_SUBSCRIPTION_ID_MAP.items():\n print (\"Fetching VMs from subscription\", sub_name)\n broker = AzureResourceBroker(sub_id)\n broker.load_resources()\n\n vm_names = broker.get_virtual_machine_names()\n for vm_name in vm_names:\n vm_entity_sublist.append({\n \"canonicalForm\": vm_name,\n \"list\": []\n })\n\nwith open(\"vm_entity_sublist.json\", 'w') as f:\n json.dump(vm_entity_sublist, f, indent=4)\n","sub_path":"generate_vm_entity_sublist.py","file_name":"generate_vm_entity_sublist.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"162748004","text":"from flask import Flask, request, Response, jsonify\n\nfrom db import WorkedWatcher, TaskPersist\n\nimport properties_rest\nimport json\n\nfrom queue import RedisTaskQueue, PAUSE, STOP, WORK, ERROR\n\n__author__ = 'alesha'\n\napp = Flask(__name__, static_url_path=\"\")\nlog = app.logger\n\n\nrtq = RedisTaskQueue(use_deferred_handler=False)\ntp = TaskPersist()\n\n\n@app.route(\"/tasks\", methods=[\"PUT\"])\ndef add_task():\n task_data_raw = request.data\n task_data = json.loads(task_data_raw)\n if not task_data.get('q'):\n return jsonify(**{'error': True, 'details': 'q field not passed'}), 400\n\n log.info(\"added new task: [%s] \\n%s\" % (task_data.get(\"q\"), task_data))\n task_id = rtq.add_task(q=task_data.get('q'),\n since_id=task_data.get('since_id'),\n project_id=task_data.get('project_id')\n )\n task_id = task_id\n task_data['task_id'] = task_id\n\n tp.save_task_info(task_id, task_data)\n\n return jsonify(**{'ok': True, 'id': str(task_id)})\n\n\n@app.route(\"/tasks\", methods=['GET'])\ndef show():\n state = request.args.get('state')\n tasks = rtq.get_tasks_states(state=state)\n return jsonify(**tasks)\n\n\n@app.route(\"/tasks/\", methods=['GET'])\ndef show_task(task_id):\n task_info = rtq.get_task_info(task_id)\n human_task_info = tp.get_task_info(task_id)\n if human_task_info:\n del human_task_info['_id']\n task_info.update(human_task_info)\n\n return jsonify(**task_info)\n\n\n@app.route(\"/tasks/\", methods=['POST'])\ndef change_task_status(task_id):\n change_info_raw = request.data\n change_info = json.loads(change_info_raw)\n new_status = change_info.get('status')\n if not new_status:\n return jsonify(**{'error': True, 'details': 'status field not passed'}), 400\n if new_status not in [PAUSE, STOP, WORK]:\n return jsonify(**{'error': True, 'details': 'status not supported'}), 400\n\n rtq.set_task_state(task_id, new_status)\n return jsonify(**{\"ok\": True, \"info\": \"status changed\"})\n\n\nww = WorkedWatcher()\n\n\n@app.route(\"/info\", methods=['GET'])\ndef info():\n work, error = ww.is_work()\n return jsonify(**{\"is_work\": work, \"error\": error})\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=properties_rest.ui_port, )\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"114248637","text":"from tkinter import *\r\nimport tkinter as tk\r\nfrom tkinter import filedialog, messagebox\r\nfrom tkinter import ttk\r\nfrom tkinter.font import Font\r\nimport pyautogui\r\nimport time\r\nimport os\r\nimport math\r\nimport subprocess\r\nfrom Thumbnail import create_thumbnail\r\nfrom Video import create_video\r\nfrom Upload import upload_video\r\nfrom tkinter.scrolledtext import ScrolledText\r\nfrom tkcalendar import *\r\nimport datetime\r\n\r\naudio = \"\"\r\nphotos = []\r\nphotos_thumb = []\r\nfolder_selected = \"\"\r\ninfo = \"\"\r\nartists = \"\"\r\ncredit = \"\"\r\ndate = \"\"\r\ntime_hr = 0\r\ntime_min = 0\r\n\r\ndef open_file_pics():\r\n global photos\r\n photos = filedialog.askopenfilenames(title=\"Select files\")\r\n\r\ndef open_file_pics_thumb():\r\n global photos_thumb\r\n photos_thumb = filedialog.askopenfilenames(title=\"Select files\") \r\n \r\ndef open_file_audio():\r\n global audio\r\n audio = filedialog.askopenfilename(title=\"Select file\")\r\n\r\ndef open_file_folder():\r\n global folder_selected\r\n folder_selected = filedialog.askdirectory() \r\n\r\ndef submit():\r\n global info, artists, credit, entry_box2, entry_box, entry_box3, photos, photos_thumb, audio, folder_selected\r\n info = entry_box.get('1.0', 'end-1c')\r\n artists = entry_box2.get('1.0', 'end-1c')\r\n credit = entry_box3.get('1.0', 'end-1c')\r\n global time_hr, time_min\r\n try:\r\n time_hr = int(entry_box4.get())\r\n time_min = int(entry_box5.get()) \r\n except:\r\n messagebox.showerror(title='Upload Time', message='Please provide the proper time for upload')\r\n\r\n global date\r\n date = cal.get_date()\r\n\r\n if photos == []:\r\n messagebox.showerror(title='Video', message='Please provide the path for all the pictures in the Video')\r\n\r\n elif photos_thumb == []:\r\n messagebox.showerror(title='Thumbnail', message='Please provide the path for all the pictures in the Thumbnail')\r\n\r\n elif folder_selected == '':\r\n messagebox.showerror(title='Resultant Folder', message='Please provide the path of the resultant folder') \r\n\r\n elif audio == '':\r\n messagebox.showerror(title='Audio', message='Please provide the audio path')\r\n\r\n elif info == '':\r\n messagebox.showerror(title='Basic Info', message='Please provide the info of the song')\r\n\r\n elif artists == '':\r\n messagebox.showerror(title='Artists', message='Please provide all the Artists')\r\n\r\n elif time_hr == 0 or time_hr >= 24:\r\n messagebox.showerror(title='Time Hour', message='Please provide the proper time for upload') \r\n\r\n elif time_min == 0 or time_min >= 60:\r\n messagebox.showerror(title='Time Min', message='Please provide the proper time for upload')\r\n\r\n else:\r\n artist = []\r\n for line in artists.splitlines():\r\n artist.append(line)\r\n i = 0\r\n try:\r\n main_artist = artist[0][0:artist[0].index('-')].strip()\r\n except Exception as e:\r\n i+=1\r\n messagebox.showerror(title='Artist', message='Please use \\\"-\\\"')\r\n\r\n if i == 0: \r\n root.destroy() \r\n\r\nroot = tk.Tk()\r\nroot.title(\"Video Creator\")\r\nroot.iconbitmap('logo.ico')\r\nmyFont = Font(family=\"Times New Roman\", size=12)\r\nroot.grid_columnconfigure(0, weight=1)\r\nroot.grid_rowconfigure(0, weight=1)\r\n\r\nprocess = 'ffmpeg'\r\ntry:\r\n p = subprocess.call(process, shell=True)\r\nexcept:\r\n messagebox.showerror(title='Error', message='Please install ffmpeg to continue')\r\n\r\ntext1 = tk.Text(root,height=1)\r\ntext1.insert(tk.INSERT , \"Select the pictures for the Video\")\r\ntext1.config(state=\"disabled\")\r\ntext1.configure(font=myFont)\r\ntext1.grid(row=0,column=0, sticky=N+S+E+W)\r\nbutton1 = Button(root,text=\"Open Pictures\", command=open_file_pics)\r\nbutton1.grid(row=1,column=0)\r\n\r\ntext4 = tk.Text(root,height=1)\r\ntext4.insert(tk.INSERT , \"Select the pictures for the Photos\")\r\ntext4.config(state=\"disabled\")\r\ntext4.configure(font=myFont)\r\ntext4.grid(row=2,column=0, columnspan=4, sticky=N+S+E+W)\r\nbutton4 = Button(root,text=\"Open Pictures\", command=open_file_pics_thumb)\r\nbutton4.grid(row=3,column=0)\r\n\r\ntext2 = tk.Text(root,height=1)\r\ntext2.insert(tk.INSERT, \"Select the audio file\")\r\ntext2.config(state=\"disabled\")\r\ntext2.configure(font=myFont)\r\ntext2.grid(row=4,column=0, columnspan=4, sticky=N+S+E+W)\r\nbutton2 = Button(root,text=\"Open Audio\", command=open_file_audio)\r\nbutton2.grid(row=5,column=0)\r\n\r\ntext3 = tk.Text(root,height=1)\r\ntext3.insert(tk.INSERT, \"Select the resultant folder\")\r\ntext3.config(state=\"disabled\")\r\ntext3.configure(font=myFont)\r\ntext3.grid(row=6,column=0, columnspan=4, sticky=N+S+E+W)\r\nbutton3 = Button(root,text=\"Select a folder\", command=open_file_folder)\r\nbutton3.grid(row=7,column=0)\r\n\r\nlabel1 = Label(root, text=\"Basic Info\", font=myFont)\r\nlabel1.grid(row=8,column=0, columnspan=4, sticky=N+S+E+W)\r\n#name=StringVar()\r\n#entry_box = Entry(root, textvariable=name, width=25)\r\n#entry_box.grid(row=9)\r\nentry_box = ScrolledText(root, width=50, height=2, font=myFont)\r\nentry_box.grid(row=9, columnspan=4, sticky=N+S+E+W)\r\n\r\nlabel2 = Label(root, text=\"Artists\", font=myFont)\r\nlabel2.grid(row=11,column=0, columnspan=4, sticky=N+S+E+W)\r\n#name1=StringVar()\r\n#entry_box2 = Entry(root, textvariable=name1, width=25).grid(row=11)\r\nentry_box2 = ScrolledText(root, width=50, height=2, font=myFont)\r\nentry_box2.grid(row=12, columnspan=4, sticky=N+S+E+W)\r\n\r\nlabel3 = Label(root, text=\"Credits\", font=myFont)\r\nlabel3.grid(row=13,column=0, columnspan=4, sticky=N+S+E+W)\r\n#name2=StringVar()\r\n#entry_box3 = Entry(root, textvariable=name2, width=25).grid(row=13)\r\nentry_box3 = ScrolledText(root, width=50, height=2, font=myFont)\r\nentry_box3.grid(row=14, columnspan=4, sticky=N+S+E+W)\r\n\r\ntext215 = tk.Text(root,height=1)\r\ntext215.insert(tk.INSERT, \"Select the Upload Date\")\r\ntext215.config(state=\"disabled\")\r\ntext215.configure(font=myFont)\r\ntext215.grid(row=15,column=0, columnspan=4, sticky=N+S+E+W)\r\n\r\ncal = DateEntry(root, selectmode=\"day\", year=datetime.date.today().year,\r\n month=datetime.date.today().month, day=datetime.date.today().day)\r\ncal.grid(row=16, column=0) \r\n\r\ntext216 = tk.Text(root,height=1)\r\ntext216.insert(tk.INSERT, \"Write the Upload Time\")\r\ntext216.config(state=\"disabled\")\r\ntext216.configure(font=myFont)\r\ntext216.grid(row=17,column=0, columnspan=4, sticky=N+S+E+W)\r\n\r\nentry_box4 = Entry(root, width=12, font=myFont)\r\nentry_box4.grid(row=18, column=0)\r\n\r\nentry_box5 = Entry(root, width=12, font=myFont)\r\nentry_box5.grid(row=19, column=0)\r\n\r\ntime = Button(root, text=\"Submit\", command=submit)\r\ntime.grid(row=20,column=0)\r\n\r\ndef on_closing():\r\n if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\r\n exit()\r\n\r\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\r\n\r\n#root.geometry(\"800x705\")\r\nroot.resizable(width=False, height=False)\r\nroot.mainloop()\r\n\r\nphotos = [w.replace('/', '\\\\') for w in photos]\r\nphotos_thumb = [w.replace('/', '\\\\') for w in photos_thumb]\r\naudio = audio.replace('/', '\\\\')\r\nfolder_selected = folder_selected.replace('/','\\\\')\r\n\r\ni = 0\r\nsong = ''\r\nfor line in info.splitlines():\r\n if i == 0:\r\n song = line\r\n i += 1\r\n\r\nartist = []\r\nfor line in artists.splitlines():\r\n artist.append(line)\r\n\r\ntry:\r\n main_artist = artist[0][0:artist[0].index('-')].strip()\r\nexcept Exception as e:\r\n messagebox.showerror(title='Artist', message='Please use \\\"-\\\"')\r\n exit()\r\ntitle = f'{song} | {main_artist} | Carnatic Fever'\r\ndescription = f'{info}\\n\\nArtists:\\n{artists}\\n\\n{credit}'\r\n\r\nartist.append(song)\r\nartist.append('Carnatic Music')\r\nartist.append('Carnatic Fever')\r\nartist.append('Music')\r\nartist.append('Carnatic')\r\n\r\ndate_time = datetime.datetime(date.year, date.month, date.day, time_hr, time_min, 0).isoformat() + '.000Z'\r\n\r\ncreate_thumbnail(photos_thumb, song, folder_selected)\r\ncreate_video(photos, folder_selected, audio)\r\nupload_video(folder_selected, title, description, artist, date_time)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"307734004","text":"from Node import Node\n\nclass Topo:\n def __init__(self):\n self.nodes = {}\n self.links = []\n self.rootNode = None\n self.rootPaths = []\n\n def addNode(self, name, isRoot=False):\n node = Node(name)\n self.nodes[name] = node\n if isRoot:\n self.rootNode = node\n\n def addLink(self, src, dst, isRootPath=False):\n srcNode = self.nodes[src]\n dstNode = self.nodes[dst]\n srcNode.addLink(dstNode)\n dstNode.addLink(srcNode)\n\n self.links.append((srcNode, dstNode))\n if isRootPath:\n self.rootPaths.append((srcNode, dstNode))\n","sub_path":"Topo.py","file_name":"Topo.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"210485602","text":"#\n# @lc app=leetcode.cn id=49 lang=python3\n#\n# [49] 字母异位词分组\n#\n\n# @lc code=start\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n if not strs: return []\n\n def way1():\n # sort 时间复杂度 O(K * NlogN),空间复杂度 O(K); easy to recall\n mem = defaultdict(list)\n for word in strs:\n t = ''.join(sorted(word))\n mem[t].append(word)\n return [v for v in mem.values()]\n\n def way2():\n # array 时间复杂度 O(K * N), slow in string key construct \n mem = defaultdict(list)\n ch_int = {chr(k): k for k in range(97, 123)}\n for word in strs:\n array = [0] * 26\n for c in word:\n array[ch_int[c]-ch_int['a']] += 1\n key = '#'.join([str(m) for m in array])\n mem[key].append(word)\n return [v for v in mem.values()]\n\n def way3():\n # use prime to construct key; improved on way2\n primes = [2, 3, 5, 7, 11 ,13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101]\n ch_int = {chr(k): k for k in range(97, 123)}\n mem = defaultdict(list)\n for word in strs:\n key = 1\n for c in word:\n key *= primes[ch_int[c] - ch_int['a']]\n mem[key].append(word)\n return [v for v in mem.values()]\n\n return way1()\n# @lc code=end\n\n","sub_path":"Week_02/49_字母异位分组.py","file_name":"49_字母异位分组.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"42472120","text":"from models import Order, db, User, Admin, Deliver, Product, Man, Car, Info, Alert\nfrom flask import render_template, session, redirect, url_for\nimport random, time\n\n\ndef show_user_orders():\n uid = session.get('uid')\n if not uid:\n return redirect(url_for('error', msg='未登录', info='不登录怎么知道你有什么订单呢', link='user%2flogin'))\n\n orders = Order.query.filter_by(uid=uid).all()\n\n return render_template('test.html', data=orders)\n\n\ndef show_a_orders():\n aid = session.get('aid')\n if not aid:\n return redirect(url_for('error', msg='未登录', info='不登录怎么知道你有什么订单呢', link='admin%2flogin'))\n\n orders = Order.query.filter_by(aid=aid).all()\n\n return render_template('test.html', data=orders)\n\n\ndef get_admin_name(aid):\n admin = Admin.query.filter_by(aid=aid).first()\n return admin.a_name\n\n\ndef get_user_name(uid):\n user = User.query.filter_by(uid=uid).first()\n return user.user_name\n\n\ndef order_page(oid):\n if not session.get('uid') and not session.get('aid'):\n return redirect(url_for('error', msg='请先登录', info='未登录不可查询订单', link='home'))\n order = Order.query.filter_by(oid=oid).first()\n\n if not order:\n if session.get('uid'):\n return redirect(url_for('error', msg='查无此单', info='请仔细检查订单号码', link='user'))\n if session.get('aid'):\n return redirect(url_for('error', msg='查无此单', info='请仔细检查订单号码', link='admin%2fbackstage'))\n\n user = User.query.filter_by(uid=order.uid).first()\n admin = Admin.query.filter_by(aid=order.aid).first()\n\n if session.get('uid') != order.uid and not session.get('aid'):\n return redirect(url_for('error', msg='请求错误', info='只能查询和自己相关的订单', link='home'))\n\n name = ''\n\n if session.get('uid'):\n name = get_user_name(session.get('uid'))\n else:\n name = get_admin_name(session.get('aid'))\n\n product = Product.query.filter_by(pid=order.pid).first()\n\n delivers = None\n info = None\n\n if order.status == 0:\n pass\n else:\n delivers = db.session.query(\n Deliver.did,\n Deliver.status,\n Car.cplace,\n Car.cnumber,\n Man.name,\n Man.tel,\n Deliver.created_at\n ).filter(\n Deliver.oid == order.oid,\n Deliver.cid == Car.cid,\n Car.mid == Man.mid\n ).order_by(\n Deliver.updated_at.desc()\n ).all()\n\n info = Info.query.filter_by(oid=oid).order_by(Info.created_at.desc()).all()\n\n\n data = {}\n data['user'] = {'name': name}\n\n data['order'] = {\n 'oid': oid,\n 'product': product.pname,\n 'pid': product.pid,\n 'price': (product.price * order.sum) / 100,\n 'sum': order.sum,\n 'o_price': product.price,\n 'created_at': order.created_at,\n 'addr': order.addr,\n 'tel': user.tel,\n 'status': order.status,\n 'updated_at': order.updated_at,\n }\n\n if order.status:\n data['delivers'] = list(delivers)\n data['info'] = info\n data['len1'] = len(list(delivers))\n data['len2'] = len(list(info))\n else:\n data['delivers'] = None\n data['info'] = None\n data['len1'] = 0\n data['len2'] = 0\n\n return render_template('order.html', data=data)\n\n\ndef show_orders():\n aid = session.get('aid')\n if not aid:\n return redirect(url_for('error', msg='请登录', info='这是管理员才能查看的页面', link='admin%2flogin'))\n\n send_orders = db.session.query(\n Order.oid,\n Product.pname,\n Order.sum,\n Product.price,\n User.user_name,\n User.tel,\n Order.addr,\n Order.status,\n Info,\n Deliver,\n Car,\n Man,\n Order.alevel,\n ).filter(\n Order.did == Deliver.did,\n Order.iid == Info.iid,\n User.uid == Order.uid,\n Car.cid == Deliver.cid,\n Man.mid == Car.mid,\n Order.pid == Product.pid\n ).order_by(Order.created_at.desc()).all()\n\n unsend_orders = db.session.query(\n Order.oid,\n Product.pname,\n Order.sum,\n Product.price,\n User.user_name,\n User.tel,\n Order.addr\n ).filter(\n User.uid == Order.uid,\n Order.status == 0,\n Product.pid == Order.pid\n ).order_by(Order.created_at.desc()).all()\n\n if Alert.query.filter_by(toid=aid, msgfor=2, level=3, valid=1).count() > 0: # 顶级\n level = 4\n elif Alert.query.filter_by(toid=aid, msgfor=2, level=2, valid=1).count() > 0:\n level = 3\n elif Alert.query.filter_by(toid=aid, msgfor=2, level=1, valid=1).count() > 0: # 开始弹窗\n level = 2\n elif Alert.query.filter_by(toid=aid, msgfor=2, level=0, valid=1).count() > 0:\n level = 1\n else: # 无事\n level = 0\n\n data = {}\n data['name'] = Admin.query.filter_by(aid=aid).first().a_name\n data['send'] = send_orders\n data['unsend'] = unsend_orders\n data['alert_num'] = Alert.query.filter_by(msgfor=2, toid=aid, valid=1).count()\n data['level'] = level\n\n data['send_len'] = len(send_orders)\n data['send_row_len'] = data['send_len'] // 3\n if data['send_len'] % 3 != 0:\n data['send_row_len'] += 1\n data['unsend_len'] = len(unsend_orders)\n data['unsend_row_len'] = data['unsend_len'] // 3\n if data['unsend_len'] % 3 != 0:\n data['unsend_row_len'] += 1\n data['all_len'] = len(send_orders) + len(unsend_orders)\n data['all_row_len'] = data['send_row_len'] + data['unsend_row_len']\n\n\n lenlen = 0\n for i in send_orders:\n if i[7] == 2:\n lenlen += 1\n\n data['end_len'] = lenlen\n data['end_row_len'] = data['end_len'] // 3\n if data['end_len'] % 3 != 0:\n data['end_row_len'] += 1\n\n return render_template('orders.html', data=data)\n\n\ndef admin_delivers():\n data = {}\n aid = session.get('aid')\n if not aid:\n return redirect(url_for('error', msg='请登录', info='这是管理员才能查看的页面', link='admin%2flogin'))\n\n data['alert_num'] = Alert.query.filter_by(msgfor=2, toid=aid, valid=1).count()\n if Alert.query.filter_by(toid=aid, msgfor=2, level=3, valid=1).count() > 0: # 顶级\n level = 4\n elif Alert.query.filter_by(toid=aid, msgfor=2, level=2, valid=1).count() > 0:\n level = 3\n elif Alert.query.filter_by(toid=aid, msgfor=2, level=1, valid=1).count() > 0: # 开始弹窗\n level = 2\n elif Alert.query.filter_by(toid=aid, msgfor=2, level=0, valid=1).count() > 0:\n level = 1\n else: # 无事\n level = 0\n data['level'] = level\n\n cars = db.session.query(\n Car.cplace,\n Car.cnumber,\n Man.name,\n Man.tel,\n Car.status,\n Car.active,\n Car.cid,\n ).filter(Car.mid == Man.mid).order_by(Car.active.desc()).all()\n\n delivers = []\n for i in cars:\n deliver = Deliver.query.filter_by(cid=i[6]).order_by(Deliver.created_at.desc()).all()\n delivers.append(deliver)\n data['delivers'] = delivers\n data['car_len'] = len(cars)\n data['cars'] = cars\n\n return render_template('delivers.html', data=data)","sub_path":"orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":7413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"229057060","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('poster', '0005_auto_20160228_1928'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='plant',\n name='state',\n field=models.CharField(max_length=15, choices=[(b'upcoming', b'upcoming'), (b'pending', b'pending'), (b'proposed', b'proposed'), (b'rejected', b'rejected')]),\n ),\n ]\n","sub_path":"poster/migrations/0006_auto_20160228_1947.py","file_name":"0006_auto_20160228_1947.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"564837848","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom datetime import datetime, timedelta\nfrom typing import TYPE_CHECKING\n\nfrom users.settings import JWT_EXPIRES_AFTER_IN_MINUTES\n\nif TYPE_CHECKING:\n from users.domain.entities import Credential, User # noqa\n\n\ndef get_jwt_metadata() -> dict:\n utcnow = datetime.utcnow()\n\n return {\n \"exp\": utcnow + timedelta(minutes=JWT_EXPIRES_AFTER_IN_MINUTES),\n \"iat\": utcnow,\n }\n\n\n@dataclass\nclass JWTToken:\n id: str\n email: str\n name: str\n credentials: list[\"Credential\"]\n exp: datetime\n iat: datetime\n aud: str = \"auth\"\n\n @classmethod\n def from_user(cls, user: \"User\") -> JWTToken:\n return cls(\n id=user.id,\n email=user.email,\n name=user.name,\n credentials=user.credentials.scopes,\n **get_jwt_metadata()\n )\n\n @classmethod\n def from_payload(cls, payload: dict) -> JWTToken:\n return cls(\n id=payload[\"id\"],\n email=payload[\"email\"],\n name=payload[\"name\"],\n credentials=payload[\"credentials\"],\n exp=payload[\"exp\"],\n iat=payload[\"iat\"],\n )\n","sub_path":"users/users/auth/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"84385600","text":"import os\nimport regex\nfrom doc_curation.md_helper import MdFile\n\n\ndef get_adhyaaya_md_files(md_file_path):\n md_files = MdFile.get_md_files_from_path(dir_path=md_file_path, file_pattern=\"**/*.md\", file_name_filter=lambda x: len(regex.findall(\"\\\\d\\\\d\\\\d\", os.path.basename(x))) > 0)\n return md_files\n\n\ndef get_parva_adhyaaya(md_file):\n parva = regex.findall(\"/\\\\d\\\\d-\", str(md_file.file_path))[0].replace(\"/\", \"\").replace(\"-\", \"\")\n adhyaaya = regex.findall(\"\\\\d\\\\d\\\\d\", str(md_file.file_path))[-1]\n return (parva, adhyaaya)\n\n\ndef get_adhyaaya_id(md_file):\n (parva, adhyaaya) = get_parva_adhyaaya(md_file=md_file)\n return \"%03d-%03d\" % (int(parva), int(adhyaaya))\n\n\ndef get_adhyaaya_to_source_file_map():\n md_files = get_adhyaaya_md_files(md_file_path=\"/home/vvasuki/sanskrit/raw_etexts/purANa/mahAbhArata/kumbhakonam\")\n final_map = {}\n for md_file in md_files:\n parva = regex.findall(\"/\\\\d\\\\d/\", str(md_file.file_path))[0].replace(\"/\", \"\")\n adhyaaya = regex.findall(\"\\\\d\\\\d\\\\d\", str(md_file.file_path))[0]\n adhyaaya_id = \"%s-%s\" % (parva, adhyaaya)\n final_map[adhyaaya_id] = md_file\n return final_map\n\n\n\n\n\n","sub_path":"curation_projects/mahaabhaarata/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"343759650","text":"from PyQt5 import QtWidgets, uic\nfrom pyqtgraph import PlotWidget, plot\nimport pyqtgraph as pg\nimport sys # We need sys so that we can pass argv to QApplication\nimport os\nfrom PyQt5 import QtCore\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n self.graphWidget = pg.PlotWidget()\n self.setCentralWidget(self.graphWidget)\n\n x = [1,2,3,4,5,6,7,8,9,10]\n y = [30,32,34,32,33,31,29,32,35,45]\n\n self.graphWidget.setTitle(\"Your Title Here\")\n self.graphWidget.setLabel('left', \"Temperature (°C)\")\n self.graphWidget.setLabel('bottom', \"Hour (H)\")\n\n self.graphWidget.setBackground('w')\n pen = pg.mkPen(color=(255, 0, 0), width=15, style=QtCore.Qt.DashDotLine)\n self.graphWidget.plot(x, y, pen=pen, symbol='+', symbolSize=30, symbolBrush=('b'))\n self.graphWidget.setXRange(-5, 12, padding=0)\n self.graphWidget.setYRange(25, 50, padding=0)\n self.graphWidget.showGrid(x=True, y=True)\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n main = MainWindow()\n main.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()","sub_path":"part-2/level-2.py","file_name":"level-2.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"269306379","text":"import tensorflow as tf\nfrom .solver_base import solver_base\nfrom ...utils.log import log_message\n\nclass solver_gan(solver_base):\n def __init__(self):\n solver_base.__init__(self)\n\n def build_graph(self, **kwargs):\n cfg_args = kwargs['cfg_args']\n loss_list = kwargs['loss']\n var_prefix = kwargs['var_prefix']\n\n log_message('solver_gan', '---Graph Solver..---')\n\n gpu_list = cfg_args.gpu_list\n\n tf_vars = tf.trainable_variables()\n var_g = [var for var in tf_vars if 'gen_' in var.name]\n var_d = [var for var in tf_vars if 'dis_' in var.name]\n\n loss_g_full_list = loss.public_ops['loss_g']\n loss_d_full_list = []\n for g_id in range(0, len(gpu_list)):\n with tf.device('/gpu:{}'.format(g_id)):\n _loss_d = loss.public_ops['loss_d'][g_id]\n if('loss_gp' in loss.public_ops):\n _loss_d += loss.public_ops['loss_gp'][g_id]\n\n loss_d_full_list.append(_loss_d)\n\n train_op_g = self.construct_solver(loss_g_full_list, var_g, cfg_args.param)\n train_op_d = self.construct_solver(loss_d_full_list, var_d, cfg_args.param)\n\n self.public_ops['train_op_g'] = train_op_g\n self.public_ops['train_op_d'] = train_op_d\n\nclass solver_classifier(solver_base):\n def __init__(self):\n solver_base.__init__(self)\n\n def build_graph(self, **kwargs):\n cfg_args = kwargs['cfg_args']\n phase = kwargs['phase']\n loss = kwargs['loss']\n global_data_dict = kwargs['global_data_dict']\n\n log_message('solver_classifier', '---Graph Solver..---')\n\n gpu_list = cfg_args.gpu_list\n\n tf_vars = tf.trainable_variables()\n var_ = [var for var in tf_vars if 'classifier_' in var.name]\n\n loss_full_list = loss.public_ops['loss_full']\n train_op = self.construct_solver(loss_full_list, var_, cfg_args.param)\n self.public_ops['loss_full'] = loss_full_list\n self.public_ops['train_op'] = train_op","sub_path":"framework/modules/solver/mp_gan.py","file_name":"mp_gan.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"23126095","text":"#system\nimport os\nimport time \n\n# spartan\nimport spartan.manipulation.grasp_supervisor\nimport spartan.manipulation.background_subtraction\nimport spartan.calibration.handeyecalibration\nimport spartan.utils.utils as spartanUtils\n\n\nfrom spartan.utils.taskrunner import TaskRunner\n\n# ros\nimport tf2_ros\n\n\nclass TFWrapper(object):\n\n def __init__(self):\n self.tfBuffer = None\n self.tfListener = None\n self.taskRunner = TaskRunner()\n self.taskRunner.callOnThread(self.setup)\n\n def setup(self):\n self.tfBuffer = tf2_ros.Buffer()\n self.tfListener = tf2_ros.TransformListener(self.tfBuffer)\n\n def getBuffer(self):\n while self.tfBuffer is None:\n time.sleep(0.1)\n\n return self.tfBuffer\n\n\ndef setupRLGDirector(globalsDict=None):\n\n tfWrapper = TFWrapper()\n tfBuffer = tfWrapper.getBuffer()\n\n graspSupervisor = spartan.manipulation.grasp_supervisor.GraspSupervisor.makeDefault(tfBuffer=tfBuffer)\n graspSupervisor.robotSystem = globalsDict['robotSystem'] # for visualization\n globalsDict['graspSupervisor'] = graspSupervisor\n\n \n backgroundSubtraction = spartan.manipulation.background_subtraction.BackgroundSubtractionDataCapture.makeDefault(tfBuffer=tfBuffer)\n globalsDict['backgroundSubtraction'] = backgroundSubtraction\n\n\n\n spartanSourceDir = spartanUtils.getSpartanSourceDir()\n handEyeCalibrationConfigFilename = os.path.join(spartanSourceDir, \"src/catkin_projects/station_config/RLG_iiwa_1/hand_eye_calibration/carmine_1.yaml\")\n\n\n cal = spartan.calibration.handeyecalibration.HandEyeCalibration(globalsDict['robotSystem'], configFilename=handEyeCalibrationConfigFilename)\n cal.loadConfigFromFile()\n globalsDict['cal'] = cal\n\n # set rate limit on RemoteTreeViewer\n # fix for https://github.com/RobotLocomotion/spartan/issues/244\n globalsDict['treeViewer'].subscriber.setSpeedLimit(5)","sub_path":"modules/spartan/director/iiwamanipdev.py","file_name":"iiwamanipdev.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"511735198","text":"#!/ usr/bin/env\n# coding=utf-8\n#\n# Copyright 2019 ztosec & https://sec.zto.com/\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nauthor: b5mali4\nTo use:\n>>> sq = Square(3)\n>>> sq.area \n\"\"\"\nimport os\nimport signal\nimport json\nimport time\nimport subprocess\nfrom sys import version_info\nfrom common import log\nfrom subprocess import Popen\nfrom common.http_util import header_to_lowercase\nfrom common.http_util import header_to_str\nfrom common.http_util import json_to_urlencoded\nfrom common.plugins_util import load_default_checkers, modify_default_checkers\nfrom common.settings import DEFAULT_CONTENT_TYPE\nfrom common.settings import FORM_DATA_CONTENT_TYPE\nfrom common.settings import JSON_TEXT_CONTENT_TYPE\nfrom plugins.base.vuln_enum import PluginSwith\nfrom common.plugin_config.localfile_plugin_config import LocalFilePluginConfig\nfrom argparse import Namespace\n\nif version_info < (3, 0):\n IS_WIN = subprocess.mswindows\nelse:\n IS_WIN = subprocess._mswindows\n\nlogger = log.get_default_logger()\n\n\ndef modify_checker(broadcast):\n \"\"\"\n 修改本地插件配置信息,只修改本地配置文件\n\n {\"type\": \"plugin\", \"action\": \"modify\", \"data\": {\"name\": checker_name, \"switch\": PluginSwith.ON}\n :param broadcast: \n :return: \n \"\"\"\n checker_name = broadcast[\"data\"][\"name\"]\n switch = broadcast[\"data\"][\"switch\"]\n checkers_dict = load_default_checkers()\n if checker_name in checkers_dict:\n logger.info('接收到修改插件{}状态为{}的请求'.format(checker_name, switch))\n LocalFilePluginConfig().modify_plugin_config(checker_name, \"useable\", switch)\n modify_default_checkers()\n\n\ndef scan(package, task_id, create_user, status):\n \"\"\"\n :param package: \n :param task_id: \n :param create_user: \n :param status: \n :return: \n \"\"\"\n logger.info(\"hunter task has started\")\n # 加载插件,只有一个插件\n checkers = load_default_checkers()\n logger.info('loading package success')\n logger.info('loading plugin success')\n try:\n if checkers[\"sqlmap\"].useable == PluginSwith.ON:\n sqlmap_process = SqlmapProcess(package, task_id)\n sqlmap_process.engine_start()\n while not sqlmap_process.engine_has_terminated() and sqlmap_process.process is not None:\n logger.info(\"sqlmap program is runing\")\n time.sleep(5)\n sqlmap_process.engine_kill()\n logger.warn(\"sqlmap program runs to completion\")\n except KeyboardInterrupt as e:\n logger.exception(\"scan error\")\n finally:\n logger.info(\"hunter task has done\")\n\n\nclass SqlmapProcess():\n def __init__(self, package, task_id):\n self.process = None\n self.package = package\n self.task_id = task_id\n\n def parse_package(self):\n \"\"\"\n 将从mq中获得的数据解析 ,sqlmap会自动解析参数,json还是普通data\n :return: \n \"\"\"\n header = None\n cookie = None\n url = self.package['url'] if \"url\" in self.package else None\n\n if \"headers\" in self.package:\n header = header_to_lowercase(json.loads(self.package[\"headers\"]))\n if \"Cookie\" in json.loads(self.package[\"headers\"]):\n cookie = json.loads(self.package[\"headers\"])['Cookie']\n if header:\n header = header_to_str(header)\n data = self.parse_data(self.package, header)\n return url, data, cookie, header\n\n def parse_data(self, package, header):\n \"\"\"\n 根据请求头解析数据\n :param package: \n :param header: \n :return: \n \"\"\"\n\n result = None\n\n if \"data\" not in package or package[\"data\"] == \"\":\n return result\n\n if header and \"content-type\" in header:\n if FORM_DATA_CONTENT_TYPE in header[\"content-type\"] or DEFAULT_CONTENT_TYPE in header[\"content-type\"]:\n return json_to_urlencoded(json.loads(package['data']))\n elif JSON_TEXT_CONTENT_TYPE in header[\"content-type\"]:\n return str(json.loads(package[\"data\"]))\n return json_to_urlencoded(json.loads(package['data']))\n\n def get_command(self):\n \"\"\"\n 根据数据的得到命令,超时重联3次\n :return: \n \"\"\"\n command = self.init_command_by_path()\n # status = True\n # 表示不正常,比如一个数据包中没有url\n url, data, cookie, headers = self.parse_package()\n if url is None or url == \"\":\n return False, command\n command += [\"--url\", \"{}\".format(url)]\n if data is not None and data != \"\":\n command += [\"--data\", \"{}\".format(data)]\n if cookie is not None and cookie != \"\":\n command += [\"--cookie\", \"{}\".format(cookie)]\n if headers is not None and headers != \"\":\n command += [\"--headers\", \"{}\".format(headers)]\n command += [\"--batch\"]\n command += [\"--purge-output\"]\n # print (\" \".join(command))\n return True, command\n\n def init_command_by_path(self):\n \"\"\"\n 根据路径\n :return: \n \"\"\"\n from common.path import SQLMAP_SCRIPT_PATH\n command = [\"python2\", SQLMAP_SCRIPT_PATH]\n command += [\"--celery\", \"{}\".format(self.task_id)]\n return command\n\n def engine_start(self):\n \"\"\"开始命令\"\"\"\n status, command = self.get_command()\n # print status, command\n if status:\n self.process = Popen(command, shell=False, close_fds=not IS_WIN)\n\n def engine_stop(self):\n \"\"\"\n 结束\n :return: \n \"\"\"\n if self.process:\n self.process.terminate()\n return self.process.wait()\n else:\n return None\n\n def engine_process(self):\n return self.process\n\n def engine_kill(self):\n \"\"\"\n 强制kill,删除SQLMAP扫描缓存记录\n :return: \n \"\"\"\n if self.process:\n try:\n self.process.kill()\n os.killpg(self._process.pid, signal.SIGTERM)\n return self.process.wait()\n except:\n pass\n return None\n\n def engine_get_id(self):\n \"\"\"\n 获得进程模块\n :return: \n \"\"\"\n if self.process:\n return self.process.pid\n else:\n return None\n\n def engine_get_returncode(self):\n \"\"\"\n 如果为None表示命令还在执行中,为0表示已经执行完成并退出\n :return: \n \"\"\"\n if self.process:\n self.process.poll()\n return self.process.returncode\n else:\n return None\n\n def engine_has_terminated(self):\n return isinstance(self.engine_get_returncode(), int)\n","sub_path":"SqlmapCelery/taskschedule/task_schedule.py","file_name":"task_schedule.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"121580679","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'core.views.main', name='main'), \n url(r'^(?P\\d+)/?$', 'core.views.expense', name='expense'),\n url(r'^register/?$', 'core.views.register', name='register'),\n url(r'^overview/(?P\\d*)/?$', 'core.views.overview', name='overview'),\n url(r'^login/?$', 'core.views.login_view'),\n url(r'^logout/?$', 'core.views.logout_view'),\n\n # Api\n url(r'^api/user/?$', 'core.views.api_user', name='api_user'),\n url(r'^api/userbar/?$', 'core.views.api_userbar', name='userbar'),\n url(r'^api/add_friend/?$', 'core.views.api_add_friend', name='api_add_friend'),\n url(r'^api/expense/(?P\\d*)/?$', 'core.views.api_expense', name='api_expense'),\n url(r'^api/item/(?P\\d*)/?$', 'core.views.api_item', name='api_item'),\n)\n","sub_path":"debthing/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"612544298","text":"import os,sys\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport random\nimport ipdb\nimport glob\nfrom scipy.ndimage.filters import gaussian_filter1d\nfrom scipy.interpolate import griddata\nfrom sklearn.externals import joblib\n\ndir_of_this_script = os.path.dirname(os.path.realpath(__file__))\ndemonstration_dir = os.path.join(dir_of_this_script, '..', 'datasets', 'pick_20190228')\ndemo_path_list = glob.glob(os.path.join(demonstration_dir,'*.npy'))\ndemo_path_list = sorted(demo_path_list)\nsigma = 5\n\nlen_norm=101\ndatasets_raw = []\ndatasets_filtered = []\ndatasets_norm = []\nfig = plt.figure(0)\nax = fig.gca(projection='3d')\nfor demo_path in demo_path_list:\n raw_demo = np.load(demo_path, 'r')\n filtered_demo = gaussian_filter1d(raw_demo.T, sigma=sigma).T\n grid = np.linspace(0, 2, len_norm)\n time_stamp = np.linspace(0, 2, len(raw_demo))\n norm_demo = griddata(time_stamp, filtered_demo, grid, method='linear')\n datasets_raw.append(raw_demo)\n datasets_filtered.append(datasets_raw)\n datasets_norm.append(datasets_norm)\n# ax.plot(norm_demo[:,0],norm_demo[:,1],norm_demo[:,2])\n# plt.show()\npkl_dir = os.path.join(demonstration_dir,\"pkl\")\nprint('Saving the datasets as pkl ...')\njoblib.dump(datasets_raw, os.path.join(pkl_dir, 'datasets_raw.pkl'))\njoblib.dump(datasets_filtered, os.path.join(pkl_dir, 'datasets_filtered.pkl'))\njoblib.dump(datasets_norm, os.path.join(pkl_dir, 'datasets_norm.pkl'))\nprint('Loaded, filtered, normalized, preprocessed and saved the datasets successfully!!!')\n\n\n \n\n\n","sub_path":"data_process/load_pick_trainig_data.py","file_name":"load_pick_trainig_data.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"177396758","text":"import json\nfrom pycircuit.circuit import Node\nfrom pycircuit.pcb import Pcb\nfrom pycircuit.formats import extends\n\n@extends(Node)\ndef to_pcpl(self):\n assert(self.footprint)\n width, height = self.footprint.package.size()\n return {\n 'id': self.id,\n 'width': width,\n 'height': height,\n }\n\n\n@extends(Pcb)\ndef to_pcpl(self, filename):\n nodes = []\n for node in self.circuit.iter_nodes():\n nodes.append(node.to_pcpl())\n with open(filename, 'w') as f:\n print(json.dumps(nodes), file=f)\n\n\n@extends(Pcb)\ndef from_pcpl(self, filename):\n with open(filename) as f:\n for node in json.loads(f.read()):\n self.circuit.node_by_id(node['id']).place(node['x'], node['y'])\n","sub_path":"pycircuit/formats/pcpl.py","file_name":"pcpl.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"298895973","text":"#!/usr/bin/env python\n#\n# Jiao Lin \n\nimport numpy as np, histogram as H, histogram.hdf as hh, os\n\n\nclass EnergyAxisMissingBinCenterAtZero(Exception): pass\n\n\ndef sqe2dos(sqe, T, Ecutoff, elastic_E_cutoff, M, initdos=None, update_weights=None):\n \"\"\"\n Given a single-phonon SQE, compute DOS\n\n The basic procedure is\n * construct an initial guess of DOS\n * use this DOS to compute 1-phonon SQE\n * for both exp and sim SQE, integrate along Q to obtain S(E)\n * scale the initial guess DOS by the S(E) ratio\n * optionally we can do this again\n\n Parameters\n ----------\n sqe:histogram\n S(Q,E)\n\n T:float\n Temperature (kelvin)\n\n Ecutoff:float\n Cutoff energy beyond which DOS must be zero\n \n Elastic_E_cutoff: 2-tuple of floats\n Cutoff energy bracket for removing the elastic line (unit: meV)\n \n M:float\n Atomic mass\n \n initdos:histogram\n initial guess of DOS\n\n update_weights:2-tuple of floats\n weights for DOS update strategies (continuity, area conservation)\n\n \"\"\"\n # create initial guess of dos\n Efull = sqe.E\n dE = sqe.E[1] - sqe.E[0]\n assert dE > 0, \"Energy axis must be incremental\"\n Eplus = Efull[Efull > -dE / 2]\n if abs(Eplus[0]) > dE / 1e6:\n raise EnergyAxisMissingBinCenterAtZero('\"0\" must be one of the bin centers of the energy axis')\n Eplus[0] = 0.\n if initdos is None:\n initdos = guess_init_dos(Eplus, Ecutoff)\n else:\n # make sure the energy axis is compatible with sqe\n dos_Eaxis_part1 = initdos[(Eplus[0], Eplus[-1])].E\n if dos_Eaxis_part1.size != Eplus.size or not np.allclose(dos_Eaxis_part1, Eplus):\n raise RuntimeError(\"Incompatible energy axis. DOS: %s, SQE: %s\" % (dos_Eaxis_part1, Eplus))\n pass\n # compute sqe from dos\n from ..forward.phonon import computeSQESet, kelvin2mev\n Q = sqe.Q\n dQ = Q[1] - Q[0]\n E = sqe.E\n dE = E[1] - E[0]\n beta = 1. / (T * kelvin2mev)\n Q2, E2, sqeset = computeSQESet(1, Q, dQ, initdos.E, dE, M, initdos.I, beta)\n # compute S(E) from SQE\n # - experiment\n # -- only need the positive part\n expsqe = sqe.copy()\n expsqe_Epositive = expsqe[(), (dE / 10, None)].I\n expsqeE2_Epositive = expsqe[(), (dE / 10, None)].E2\n mask = expsqe_Epositive != expsqe_Epositive\n expsqe_Epositive[mask] = 0\n expsqeE2_Epositive[mask] = 0\n expse = expsqe_Epositive.sum(0)\n expse_E2 = expsqeE2_Epositive.sum(0)\n # - simulation\n simsqe_arr = sqeset[0]\n simsqe = H.histogram('simsqe', [('Q', Q2, '1./angstrom'), ('E', E2, 'meV')], simsqe_arr)\n simsqe_Epositive = simsqe[(), (Eplus[0], Eplus[-1])]\n simsqe_Epositive.I[mask] = 0\n simse = simsqe_Epositive.I.sum(0)\n # apply scale factor to dos\n # but only at the range of the measurement\n N_Eplus = Eplus.size\n dos_in_range = initdos[(Eplus[0], Eplus[-1])].copy()\n with np.errstate(divide='ignore', invalid='ignore'):\n dos_in_range.I *= expse / simse\n # remember the relative error of the dos\n dos_relative_error = expse_E2 ** .5 / expse\n # clean up bad values\n dos_in_range.I[dos_in_range.I != dos_in_range.I] = 0\n # clean up data near elastic line\n n_small_E = (Eplus < elastic_E_cutoff[1]).sum()\n dos_in_range.I[:n_small_E] = Eplus[:n_small_E] ** 2 * dos_in_range.I[n_small_E] / Eplus[n_small_E] ** 2\n # keep positive\n dos_in_range.I[dos_in_range.I < 0] = 0\n dos_in_range.E2[:] = (dos_in_range.I * dos_relative_error)**2\n # DOS range to update should be smaller than SQE E range, so we need to\n Emin = Eplus[0]; Emax = min(Eplus[-1], Ecutoff)\n dos_to_update = dos_in_range[(Emin, min(Eplus[-1], Emax*2))]\n # update\n return update_dos(initdos, dos_to_update, Emin, Emax, weights=update_weights)\n\n\ndef update_dos(original_dos_hist, new_dos_hist, Emin, Emax, weights=None):\n # only if the spectrum is nontrivial beyond Emax, we need rescale\n \"\"\" Parameters\n ----------\n original_dos_hist:histogram\n original phonon density of states\n\n new_dos_hist:histogram\n new phonon density of states\n\n Emin:float\n minimum value for energy transfer axis\n\n Emax:float \n maximum value for energy transfer axis\n \n weights:float \n weights for DOS update strategies (continuity, area conservation)\n\n \"\"\"\n from .stitch_dos import DOSStitcher\n stitch = DOSStitcher(weights)\n return stitch(original_dos_hist, new_dos_hist, Emin, Emax)\n\n\ndef guess_init_dos(E, cutoff):\n \"\"\"return an initial DOS\n\n It is x^2 near E=0, and flat after that, until it reaches\n maximum E.\n \"\"\"\n dos = np.ones(E.size, dtype=float)\n dos[E > cutoff] = 0.\n end_of_E2_zone = cutoff / 3.\n dos[E < end_of_E2_zone] = (E * E / end_of_E2_zone / end_of_E2_zone)[E < end_of_E2_zone]\n dE = E[1] - E[0]\n norm = np.sum(dos) * dE\n g = dos / norm\n Eaxis = H.axis(\"E\", E, 'meV')\n return H.histogram(\"DOS\", [Eaxis], data=g)\n\n\n# the following methods are obsolete\n\"\"\"\ndef update_dos_continuous(original_dos_hist, Emin, Emax, g, gerr):\n return update_dos_(original_dos_hist, Emin, Emax, g, gerr, compute_scalefactor_using_continuous_criteria)\n\ndef update_dos_keep_area(original_dos_hist, Emin, Emax, g, gerr):\n \"update the lower E portion of the dos by keeping the area of the updated portion intact\"\n return update_dos_(original_dos_hist, Emin, Emax, g, gerr, compute_scalefactor_using_area_criteria)\n\ndef update_dos_(original_dos_hist, Emin, Emax, g, gerr, compute_scalefactor):\n \"update the lower E portion of the dos by using a function to compute the scale factor\" \n scale = compute_scalefactor(original_dos_hist, Emin, Emax, g)\n g *= scale\n # compute error bar\n gerr *= scale\n # compute new DOS\n newdos = original_dos_hist.copy()\n # by updating only the front portion\n newdos[(Emin, Emax)].I[:] = g\n newdos[(Emin, Emax)].E2[:] = gerr**2\n # now renormalize\n norm = newdos.I.sum()\n newdos.I/=norm\n newdos.E2/=norm*norm\n return newdos\n\"\"\"\n\n# End of file\n","sub_path":"multiphonon/backward/singlephonon_sqe2dos.py","file_name":"singlephonon_sqe2dos.py","file_ext":"py","file_size_in_byte":6076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"378440861","text":"import torch\nfrom torch import nn\n\n\nclass ESIM(nn.Module):\n def __init__(self,\n vocab_len,\n wordvc_dim,\n hidden_dim = 300,\n output_dim = 3,\n weight_matrix=None,\n pretrained=False,\n fine_tune=False,\n dropout=0.5):\n super(ESIM, self).__init__()\n self.word_embeddings = nn.Embedding(vocab_len, wordvc_dim)\n self.pretrained = pretrained\n self.weight_matrix = weight_matrix\n\n self.fine_tune = fine_tune\n\n self.encoder = nn.LSTM(input_size=wordvc_dim, hidden_size=hidden_dim, num_layers=1, bidirectional=True, batch_first=True)\n\n self.softmax_a = nn.Softmax(dim=-1)\n self.softmax_b = nn.Softmax(dim=-2)\n\n self.inference = nn.LSTM(input_size=2*hidden_dim, hidden_size=hidden_dim, num_layers=1, bidirectional=True, batch_first=True)\n\n self.mlp = nn.Linear(8*hidden_dim, output_dim)\n self.act = nn.Tanh()\n\n self.dropout = nn.Dropout(dropout)\n\n self.init_weights()\n\n def init_weights(self):\n if self.pretrained:\n self.word_embeddings.weight.data.copy_(self.weight_matrix)\n self.word_embeddings.requires_grad_(self.fine_tune)\n\n def forward(self, input_a, input_b):\n # batch first\n input_a = input_a.transpose(0, 1)\n input_b = input_b.transpose(0, 1)\n # Word embedding\n embeddings_a = self.word_embeddings(input_a) # [batch_size, seq_len_a, emb_size]\n embeddings_a = self.dropout(embeddings_a)\n embeddings_b = self.word_embeddings(input_b) # [batch_size, seq_len_b, emb_size]\n embeddings_b = self.dropout(embeddings_b)\n\n # Input encoding\n encoded_a, _ = self.encoder(embeddings_a) # [batch_size, seq_len_a, 2 * hidden_size]\n encoded_a = self.dropout(encoded_a)\n encoded_b, _ = self.encoder(embeddings_b) # [batch_size, seq_len_b, 2 * hidden_size]\n encoded_b = self.dropout(encoded_b)\n\n # Local inference modeling\n attentions = torch.matmul(encoded_a, encoded_b.transpose(-2, -1)) # [batch_size, seq_len_a, seq_len_b]\n # Local inference collected over sequences\n summation_a = self.softmax_a(attentions)\n summation_a = torch.matmul(summation_a, encoded_b) # [batch_size, seq_len_a, 2 * hidden_size]\n\n summation_b = self.softmax_b(attentions).transpose(-2, -1)\n summation_b = torch.matmul(summation_b, encoded_a) # [batch_size, seq_len_b, 2 * hidden_size]\n\n # Enhancement of local inference information\n # difference\n diff_a = torch.sub(encoded_a, summation_a) # [batch_size, seq_len_a, 2 * hidden_size]\n diff_b = torch.sub(encoded_b, summation_b) # [batch_size, seq_len_b, 2 * hidden_size]\n # element-wise produce\n ewp_a = torch.mul(encoded_a, summation_a) # [batch_size, seq_len_a, 2 * hidden_size]\n ewp_b = torch.mul(encoded_b, summation_b) # [batch_size, seq_len_b, 2 * hidden_size]\n # concatenate\n # [batch_size, 4 * seq_len_a, 2 * hidden_size]\n enhancement_a = torch.cat((encoded_a, summation_a, diff_a, ewp_a), dim=-2)\n # [batch_size, 4 * seq_len_b, 2 * hidden_size]\n enhancement_b = torch.cat((encoded_b, summation_b, diff_b, ewp_b), dim=-2)\n\n # Inference Composition\n val_a, _ = self.inference(enhancement_a) # [batch_size, 4 * seq_len_a, 2 * hidden_size]\n val_a = self.dropout(val_a)\n val_b, _ = self.inference(enhancement_b) # [batch_size, 4 * seq_len_b, 2 * hidden_size]\n val_b = self.dropout(val_b)\n\n mean_a = torch.mean(val_a, dim=-2)\n max_a, _ = torch.max(val_a, dim=-2)\n mean_b = torch.mean(val_b, dim=-2)\n max_b, _ = torch.max(val_b, dim=-2)\n\n val = torch.cat((mean_a, max_a, mean_b, max_b), dim=-1) # [batch_size, 4 * 2 * hidden_size]\n\n # predict\n output = self.mlp(val)\n output = self.dropout(output)\n output = self.act(output)\n\n return output\n\n\nif __name__ == '__main__':\n from data_helper import get_iter\n\n vector_path = 'F:/DATASET/Glove/glove.6B/glove.6B.300d.txt'\n torch_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n (text_field, label_field), (train_iter, dev_iter, test_iter) = get_iter(vector_path=vector_path,\n train_path='./data/snli_1.0_train.csv',\n dev_path='./data/snli_1.0_dev.csv',\n test_path='./data/snli_1.0_test.csv',\n file_format='csv',\n batch_size=32,\n torch_device=torch_device)\n model = ESIM(vocab_len=len(text_field.vocab), wordvc_dim=300, hidden_dim=64, output_dim=3, fine_tune=True,\n weight_matrix=text_field.vocab.vectors, pretrained=True, dropout=0.5)\n model.to(torch_device)\n model.eval()\n\n for iter in dev_iter:\n # print(iter.sentence1.size())\n print(model(iter.sentence1, iter.sentence2))\n break\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"413212429","text":"# coding=utf-8\n\"\"\"cvTesseract.py: Class to interface Tesseract OCR API\"\"\"\n\nimport tesseract\nimport cv2\nimport cv2.cv as cv\n\nclass OCR():\n\tdef __init__(self):\n\t\tself.api = tesseract.TessBaseAPI()\n\t\tself.api.Init(\".\",\"eng\",tesseract.OEM_DEFAULT)\n\t\tself.api.SetVariable(\"tessedit_char_whitelist\", \"ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ0-123456789\")\n\t\t#self.api.SetPageSegMode(tesseract.PSM_SINGLE_CHAR)\n\t\tself.api.SetPageSegMode(tesseract.PSM_SINGLE_LINE)\n\t\t\n\tdef __del__(self):\n\t\tself.api.End()\n\t\t\n\tdef recognize(self, img):\n\t\t\n\t\t#add 20 pixel border to image before processing\n\t\timg1=cv2.copyMakeBorder(img, 20,20,20,20, cv2.BORDER_CONSTANT, value=(255,255,255))\n\t\t\n\t\t#convert to IPL image \n\t\th,w = img1.shape\n\t\tiplimage = cv.CreateImageHeader((w,h), cv.IPL_DEPTH_8U, 1)\n\t\tcv.SetData(iplimage, img1.tostring(),img1.dtype.itemsize * w)\n\t\t\n\t\t#try to recognize the text\n\t\ttesseract.SetCvImage(iplimage,self.api)\n\t\ttext=self.api.GetUTF8Text()\n\t\tconf=self.api.MeanTextConf()\n\t\t\n\t\treturn text.strip(), conf\n\n","sub_path":"cvTesseract.py","file_name":"cvTesseract.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"365114760","text":"#coding:utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.common.keys import Keys\nimport unittest,time,re\nclass Youdao(unittest.TestCase):\n def setUp(self):\n self.browser = webdriver.Chrome()\n self.browser.maximize_window()\n self.browser.implicitly_wait(29)\n self.base_url = 'http://www.youdao.com/'\n self.verificationErrors = []\n self.accept_next_alert = True\n #有道搜索的测试用例\n def testyoudao_search(self):\n '''有道搜索'''\n browser = self.browser\n browser.get(self.base_url)\n browser.find_element_by_xpath('//*[@id=\"translateContent\"]').send_keys('这可能就是生活吧')\n browser.find_element_by_xpath('//*[@id=\"form\"]/button').click()\n time.sleep(4)\n #释放\n def tearDown(self):\n self.browser.close()\n self.assertEqual([],self.verificationErrors)\n#测试\nif __name__ == '__main__':\n unittest.main()\n\n#youdao.py 文件中编写一条用例\n\n","sub_path":"youdao.py","file_name":"youdao.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"636208661","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport heapq\nclass Solution:\n def trapRainWater(self, heightMap: List[List[int]]) -> int:\n if not heightMap or not heightMap[0]: return 0\n m=len(heightMap)\n n=len(heightMap[0])\n heap=[]\n seen=set()\n for i in range(n):\n heapq.heappush(heap,(heightMap[0][i],0,i))\n heapq.heappush(heap,(heightMap[m-1][i],m-1,i))\n seen.add((0,i))\n seen.add((m-1,i))\n for j in range(m):\n heapq.heappush(heap,(heightMap[j][0],j,0))\n heapq.heappush(heap,(heightMap[j][n-1],j,n-1))\n seen.add((j,0))\n seen.add((j,n-1))\n ans=0\n while heap:\n d,x,y=heapq.heappop(heap)\n for dx,dy in [(-1,0),(1,0),(0,1),(0,-1)]:\n new_x,new_y=x+dx,y+dy\n if new_x>=0 and new_x=0 and new_y 0:\n ordered_items = [v[0] for v in sorted(local_d.items(), key=lambda p:p[1], reverse=True)]\n update_tree(ordered_items, ret_tree, header_table, count)\n return ret_tree, header_table\n\n\ndef update_tree(items, in_tree, header_table, count):\n if items[0] in in_tree.children:\n in_tree.children[items[0]].inc(count)\n else:\n in_tree.children[items[0]] = tree_node(items[0], count, in_tree)\n if header_table[items[0]][1] == None:\n header_table[items[0]][1] = in_tree.children[items[0]]\n else:\n update_header(header_table[items[0]][1], in_tree.children[items[0]])\n if len(items) > 1:\n update_tree(items[1::], in_tree.children[items[0]], header_table, count)\n\n\ndef update_header(node_to_test, target_node):\n #寻找最后面的node 链表\n while (node_to_test.node_link != None):\n node_to_test = node_to_test.node_link\n node_to_test.node_link = target_node\n\n\ndef ascend_tree(leaf_node, prefix_path):\n if leaf_node.parent != None:\n prefix_path.append(leaf_node.name)\n ascend_tree(leaf_node.parent, prefix_path)\n\n\ndef find_prefix_path(base_pat, tree_node):\n cond_pats = {}\n while tree_node != None:\n prefix_path = []\n ascend_tree(tree_node, prefix_path)\n if len(prefix_path) > 1:\n cond_pats[frozenset(prefix_path[1:])] = tree_node.count\n tree_node = tree_node.node_link\n return cond_pats\n\n\ndef mine_tree(in_tree, header_table, min_sup, prefix, freq_item_list):\n bigL = [v[0] for v in sorted(header_table.items(), key=lambda p: p[1][0])]\n for base_pat in bigL:\n new_freq_set = prefix.copy()\n new_freq_set.add(base_pat)\n freq_item_list.append(new_freq_set)\n cond_patt_bases = find_prefix_path(new_freq_set, header_table[base_pat][1])\n my_cond_tree, my_head = create_tree(cond_patt_bases, min_sup)\n if my_cond_tree != None:\n my_cond_tree.disp()\n if my_head != None:\n mine_tree(my_cond_tree, my_head, min_sup, new_freq_set, freq_item_list)\n\n\ndef loadSimpDat():\n simpDat = [['r', 'z', 'h', 'j', 'p'],\n ['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],\n ['z'],\n ['r', 'x', 'n', 'o', 's'],\n ['y', 'r', 'x', 'z', 'q', 't', 'p'],\n ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]\n return simpDat\n\n\ndef createInitSet(dataSet):\n retDict = {}\n for trans in dataSet:\n retDict[frozenset(trans)] = 1\n return retDict\n\n\nif __name__ == '__main__':\n simp_dat = loadSimpDat()\n init_dat = createInitSet(simp_dat)\n tree, header_table = create_tree(init_dat, 3)\n freqItems = []\n mine_tree(tree, header_table, 3, set([]), freqItems)\n","sub_path":"Ch12/my_fp_growth.py","file_name":"my_fp_growth.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"218138790","text":"def DataManupulate(element):\n\tfilterArr = list(filter(lambda no: no % 2 == 0 ,element))\n\tprint(filterArr)\n\tsquareNums = list(map(lambda no:(no**2),filterArr))\n\tprint(squareNums)\n\tresult = reduce(lambda no1, no2 : (no1+no2),squareNums)\n\tprint(result)\n\n\n\n\nelements = []\nelementCount = int(input(\"Number of Elments : \\n\"))\nprint('Eneter Elements list : \\n')\nfor i in range(elementCount):\n\telements.append(int(input()))\nprint(elements)\n\n\n\n\n\n\n\n# elements = [5, 2, 3, 4, 3, 4, 1, 2, 8, 10]\nDataManupulate(elements)","sub_path":"PyCodes/Assignment4/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"316167242","text":"# /usr/local/bin/python3.8\n# -*- coding: utf-8 -*-\n# @Time : 2021-03-25 5:51 下午\n# @Author : 张晨旭\n# @IDE : PyCharm\n# @PROJECT : Test_Api\n# @File : test_conntact.py\nimport requests\n\n\ndef get_token():\n r = requests.get(\n \"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=ww491558834d2ff0a1&corpsecret=XifeGU1Ud2Sn8PQXIETGVOWAISwvyxNRjkzbcuU9xFA\")\n token = r.json()[\"access_token\"]\n return token\n\ndef test_get_member():\n get_member_url = f\"https://qyapi.weixin.qq.com/cgi-bin/user/get?access_token={get_token()}&userid=a001\"\n r =requests.get(get_member_url)\n print(r)\n print(r.json())\n\ndef test_update_member():\n update_member_url = f\"https://qyapi.weixin.qq.com/cgi-bin/user/update?access_token={get_token()}\"\n data = {\n \"userid\": \"a001\",\n \"name\": \"李四\",\n }\n r = requests.post(url=update_member_url, json=data)\n print(r.json())\n\ndef test_del_member():\n del_member_url = f\"https://qyapi.weixin.qq.com/cgi-bin/user/delete?access_token={get_token()}&userid=root\"\n r = requests.get(del_member_url)\n print(r.json())\n\ndef test_add_member():\n add_member_url = f\"https://qyapi.weixin.qq.com/cgi-bin/user/create?access_token={get_token()}\"\n data = {\n \"userid\": \"zhangsan\",\n \"name\": \"张三\",\n \"mobile\": \"13800000000\",\n \"department\": [1],\n }\n r = requests.post(url=add_member_url, json=data)\n print(r.json())","sub_path":"test_conntact/test_conntact.py","file_name":"test_conntact.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"57727316","text":"## ChapGPT, Lanchain\n#import openai\nfrom dotenv import load_dotenv\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import HumanMessage\n\nfrom fastapi import FastAPI, Request\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi import FastAPI, WebSocket\n\n## ChapGPT, Lanchain\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import HumanMessage\n\n## SQlAlchemy to connect Postgres\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import text\n\nSQLALCHEMY_DATABASE_URL = \"postgresql://test_dev:jhinchak_2023#@127.0.0.1/test_dev\"\n\nengine = create_engine(SQLALCHEMY_DATABASE_URL);\n\nwith engine.connect() as conn:\n result = conn.execute(text(\"select * from first;\"))\n print(result.all())\n\n\n## Initiate openAI and langchain\nload_dotenv('.env')\nchat = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\nmessages = []\n\napp = FastAPI()\n\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\n## https://stackoverflow.com/questions/63667466/video-streaming-app-using-fastapi-and-opencv#63667607\n\n@app.get(\"/\")\nasync def index(request: Request):\n print ('I am here')\n #return HTMLResponse(html)\n return templates.TemplateResponse(\"item.html\", {\"request\": request})\n\n@app.websocket(\"/ws\")\nasync def websocket_endpoint(websocket: WebSocket):\n print ('I am here too')\n await websocket.accept()\n print ('I am here as well')\n while True:\n data = await websocket.receive_text()\n print ('I am here repeat')\n\n ## Reponse from chatGPT\n ## https://www.haihai.ai/langchain/\n message = data;\n usr_msg = HumanMessage(content=message)\n messages.append(usr_msg)\n ai_msg = chat(messages)\n print(ai_msg.content)\n messages.append(ai_msg)\n\n with engine.connect() as conn:\n ## insert user message\n result = conn.execute(text(\"CALL SP_Ins_chatcomment(:user_id, :session_id, :user_type, :user_comment );\"), {\"user_id\":'test_usr', \"session_id\":'sessionTetsum',\"user_type\":'user',\"user_comment\":message} );\n ## insert ai message ##WORKS!!!\n #result = conn.execute(text(\"CALL SP_Ins_chatcomment(:user_id, :session_id, :user_type, :user_comment );\"), {\"user_id\":'test_usr', \"session_id\":'sessionTetsum',\"user_type\":'chatGPT',\"user_comment\":ai_msg.content} );\n ## Use named parameters!!! insert ai message ##WORKS!!!\n result = conn.execute(text(\"CALL SP_Ins_chatcomment(p_user_id => :user_id, p_session_id => :session_id, p_user_type => :user_type, p_user_comment => :user_comment );\"), {\"user_id\":'test_usr', \"session_id\":'sessionTetsum',\"user_type\":'chatGPT',\"user_comment\":ai_msg.content} );\n conn.commit();\n\n #await websocket.send_text(f\"Message text was: {data}\")\n await websocket.send_text(f\"{ai_msg.content}\")\n\n","sub_path":"try/fastAPI/src/example_websocket6_ninja_chatGPT_Postgres/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"410570164","text":"\n\nfrom xai.brain.wordbase.nouns._sluggard import _SLUGGARD\n\n#calss header\nclass _SLUGGARDS(_SLUGGARD, ):\n\tdef __init__(self,): \n\t\t_SLUGGARD.__init__(self)\n\t\tself.name = \"SLUGGARDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"sluggard\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sluggards.py","file_name":"_sluggards.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"77641868","text":"#!/usr/bin/python\n\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import WebDriverException\nfrom time import sleep\nfrom urllib.parse import urlparse\nfrom ui.login.zbUILoginCore import Login\nfrom ui.zbUIShared import *\nfrom common.zbCommon import validateDataNotEmpty\nimport pdb, time\n\n# global CSS parameters for Policies/Alert > Notify page\nCSS_SELECTOR_CHIP_INPUT = \"input.md-input[type='search']\"\nCSS_SELECTOR_INPUT_OPTIONS_CHIPS = \"md-virtual-repeat-container > div.md-virtual-repeat-scroller > div.md-virtual-repeat-offsetter > ul > li\"\n\nCSS_NOTIFY_SELECT_SYSTEM = \"[md-search-text='notifyCtrl.systemText'][md-selected-item='notifyCtrl.selectedSystemItem'] [name=''][type='search']\"\nCSS_NOTIFY_SELECT_THREAT = \"[md-search-text='notifyCtrl.threatText'] [name=''][type='search']\"\nCSS_NOTIFY_SELECT = \"[type='search']\"\nCSS_NOTIFY_THREAT_SECTION = 'div.threat-section'\nCSS_NOTIFY_SYSTEM_SECTION = 'div.system-section'\nCSS_NOTIFY_USERS = \"md-autocomplete-parent-scope strong.ng-binding\"\nCSS_NOTIFY_USER_CHIP = \"md-chip.ng-scope\" #\"[ng-model='notifyCtrl.threatUserList'] md-chip[ng-repeat='$chip in $mdChipsCtrl.items']\" #'md-chips > * > md-chip'\nCSS_CHIP_REMOVE_BUTTON = 'button.md-chip-remove'\nCSS_NOTIFY_SAVE_BUTTON = 'button[ng-click=\"notifyCtrl.submitTenantNotificationSettings()\"]'\nCSS_BUTTON_WRAPPER = \"li.ng-scope\"\n\n\nclass Notify():\n def __init__(self, **kwargs):\n self.params = kwargs\n self.selenium = Login(**kwargs).login()\n\n\n def gotoNotify(self):\n # go to Policies/Alerts > Notify\n url = urlparse(self.params[\"url\"])\n rcode = self.selenium.getURL(url.scheme+'://'+url.netloc+'/policiesalerts/notifications')\n waitLoadProgressDone(self.selenium)\n\n\n def verifyThreatNotifications(self, **kwargs):\n self.gotoNotify()\n rcode = self.configAddThreatNotifications(**kwargs)\n return rcode\n\n\n def verifySystemNotifications(self, **kwargs): \n self.gotoNotify() \n rcode = self.configAddSystemNotifications(**kwargs)\n return rcode\n\n def configAddThreatNotifications(self, **kwargs):\n \n # set default user if none entered\n # Using 'aaa Zbat Automation' in list because other accounts could not be found in dropdown or due to the need to scroll down\n\n user = kwargs[\"user\"] if \"user\" in kwargs else [\"aaa Zbat Automation\"]\n user = [x.lower() for x in user]\n user = ''.join(user)\n\n # initialize by delete all pre-existing user first\n self.configDeleteNotifyUser(user)\n\n # configure threat notification\n params = {\"selector\": CSS_NOTIFY_THREAT_SECTION, \"waittype\":\"visibility\", \"timeout\":3}\n threat = self.selenium.findSingleCSS(**params)\n params = {\"selector\": CSS_NOTIFY_SELECT_THREAT, \"waittype\":\"visibility\", \"timeout\":3}\n threatNotify = self.selenium.findSingleCSS(**params)\n if not threatNotify:\n print(\"Notification Threats not able to find recipient fields.\")\n return False\n threatNotify.click()\n time.sleep(1)\n \n \n threatNotify.send_keys(\"aaa\")\n params = {\"selector\":CSS_NOTIFY_USERS, \"waittype\":\"located\", \"timeout\":3, \"err_msg\": \"Unable to find users again\"}\n rcode = self.selenium.findMultiCSS(**params)\n if not rcode:\n print(\"Notification not able to find any users for System recipient\")\n for index, item in enumerate(rcode):\n if item.text.strip().lower() in user:\n item.click()\n time.sleep(1)\n break\n\n threatNotify.click()\n threatNotify.send_keys(Keys.ESCAPE)\n time.sleep(1)\n\n params = {\"selector\": CSS_NOTIFY_SAVE_BUTTON, \"waittype\":\"visibility\", \"timeout\":3}\n rcode = self.selenium.click(**params)\n\n # make sure that it's properly added\n rcode = self.configCheckUserExist(user, usertype=\"threat\")\n \n return rcode\n\n\n def configAddSystemNotifications(self, **kwargs):\n # set default user if none entered\n # Using 'aaa Zbat Automation' in list because other accounts could not be found in dropdown or due to the need to scroll down\n user = kwargs[\"user\"] if \"user\" in kwargs else [\"aaa Zbat Automation\"]\n user = [x.lower() for x in user]\n\n # initialize by delete all pre-existing user first\n self.configDeleteNotifyUser(user)\n\n # configure System notification\n params = {\"selector\": CSS_NOTIFY_SYSTEM_SECTION, \"waittype\":\"visibility\", \"timeout\":3}\n system = self.selenium.findSingleCSS(**params)\n params = {\"selector\": CSS_NOTIFY_SELECT_SYSTEM, \"waittype\":\"located\", \"timeout\":3}\n systemNotify = self.selenium.findSingleCSS(**params)\n if not systemNotify:\n print(\"Notification System not able to find recipient fields.\")\n return False\n \n \n systemNotify.click()\n time.sleep(1)\n \n \n systemNotify.send_keys(\"aaa\")\n params = {\"selector\":CSS_NOTIFY_USERS, \"waittype\":\"located\", \"timeout\":3, \"err_msg\": \"Unable to find users again\"}\n rcode = self.selenium.findMultiCSS(**params)\n if not rcode:\n print(\"Notification not able to find any users for System recipient\")\n for index, item in enumerate(rcode):\n if item.text.strip().lower() in user:\n item.click()\n time.sleep(1)\n break\n\n systemNotify.click()\n systemNotify.send_keys(Keys.ESCAPE)\n time.sleep(1)\n waitLoadProgressDone(self.selenium)\n params = {\"selector\": CSS_NOTIFY_SAVE_BUTTON, \"waittype\":\"visibility\", \"timeout\":3}\n rcode = self.selenium.click(**params)\n\n # make sure that it's properly added\n rcode = self.configCheckUserExist(user, usertype=\"system\")\n return rcode\n\n\n def configDeleteNotifyUser(self, user):\n userlist = [user] if type(user) == str else user\n #userlist = [x.lower() for x in userlist]\n\n '''\n params = {\"selector\": CSS_NOTIFY_THREAT_SECTION, \"waittype\":\"visibility\", \"timeout\":3}\n section = self.selenium.findSingleCSS(**params)\n params = {\"browserobj\": section, \"selector\": CSS_NOTIFY_SELECT, \"waittype\":\"visibility\", \"timeout\":3}\n field = self.selenium.findSingleCSS(**params)\n if not field:\n print \"Notification field not able to find any recipient\"\n return False\n '''\n params = {\"selector\": CSS_NOTIFY_USER_CHIP, \"waittype\":\"located\", \"timeout\":5}\n \n chips = self.selenium.findMultiCSS(**params)\n \n if not chips:\n print(\"No chips found\")\n # if no chips found, then no need to delete, return True.\n return True\n for chip in chips:\n chiptext = chip.text.split('\\n')[0]\n if chiptext.strip().lower() in userlist:\n params = {\"browserobj\":chip, \"selector\":CSS_CHIP_REMOVE_BUTTON, \"waittype\":\"clickable\", \"timeout\":3}\n rcode = self.selenium.click(**params)\n #field.send_keys(Keys.ENTER)\n time.sleep(1)\n params = {\"selector\": CSS_NOTIFY_SAVE_BUTTON, \"waittype\":\"visibility\", \"timeout\":3}\n rcode = self.selenium.click(**params)\n return rcode\n\n\n\n def configCheckUserExist(self, userlist, usertype=\"threat\"):\n self.gotoNotify()\n\n #userlist = [x.lower() for x in userlist]\n print(userlist)\n\n # configure threat notification\n if usertype == \"threat\":\n params = {\"selector\": CSS_NOTIFY_THREAT_SECTION, \"waittype\":\"visibility\", \"timeout\":3}\n if usertype == \"system\":\n params = {\"selector\": CSS_NOTIFY_SYSTEM_SECTION, \"waittype\":\"visibility\", \"timeout\":3}\n\n section = self.selenium.findSingleCSS(**params)\n params = {\"browserobj\": section, \"selector\": CSS_NOTIFY_SELECT, \"waittype\":\"visibility\", \"timeout\":3}\n field = self.selenium.findSingleCSS(**params)\n if not field:\n return False\n params = {\"browserobj\": section, \"selector\": CSS_NOTIFY_USER_CHIP, \"waittype\":\"visibility\", \"timeout\":5}\n chips = self.selenium.findMultiCSS(**params)\n\n if not chips:\n print(\"no matching user chip found\")\n return False\n for chip in chips:\n chiptext = chip.text.split('\\n')[0]\n if chiptext.strip().lower() in userlist:\n # found user match\n return True\n return False\n\n def close(self):\n if self.selenium:\n self.selenium.quit()\n","sub_path":"lib/ui/zbUINotify.py","file_name":"zbUINotify.py","file_ext":"py","file_size_in_byte":8642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"246247953","text":"#coding:utf-8\n\nimport numpy as np\nimport os\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport cv2\n\nfrom collections import defaultdict\nfrom io import StringIO\n# from matplotlib import pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nsys.path.append(\"..\")\nfrom object_detection.utils import ops as utils_ops\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\n# MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'\n# MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'\nMODEL_NAME = 'mask_rcnn_inception_v2_coco_2018_01_28'\nMODEL_FILE = MODEL_NAME + '.tar.gz'\nDOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\nPATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\nNUM_CLASSES = 90\n\n# Mamually Install\n# opener = urllib.request.URLopener()\n# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)\n\n# Load a Tensorflow mode into memory\ndef load_frozenmodel():\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return detection_graph\n\ndef load_labelmap():\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n return category_index\n\nclass ObjectDetection(object):\n def __init__(self, detection_graph, category_index):\n self.image_sub = rospy.Subscriber(\"/usb_cam/image_raw\", Image, self.imageCallback, queue_size=10)\n self.image_pub = rospy.Publisher(\"/object_detection/image\", Image, queue_size=10)\n self.detection_graph = detection_graph\n self.category_index = category_index\n\n def imageCallback(self, image_msg):\n try:\n self.cv_image = CvBridge().imgmsg_to_cv2(image_msg, \"bgr8\")\n except CvBridgeError as e:\n print (e)\n\n def main(self):\n rospy.init_node(\"object_detection_ros\")\n rate = rospy.Rate(30)\n with self.detection_graph.as_default():\n with tf.Session(graph=self.detection_graph) as sess:\n while not rospy.is_shutdown():\n image_np = self.cv_image\n image_np_expanded = np.expand_dims(image_np, axis=0)\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image_np.shape[0], image_np.shape[1])\n detection_masks_reframed = tf.cast(\n tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n tensor_dict['detection_masks'] = tf.expand_dims(\n detection_masks_reframed, 0)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: np.expand_dims(image_np, 0)})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n \n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n self.category_index,\n instance_masks=output_dict.get('detection_masks'),\n use_normalized_coordinates=True,\n line_thickness=8)\n \n image_height = self.cv_image.shape[0]\n image_width = self.cv_image.shape[1]\n resize_image = cv2.resize(image_np, (image_width, image_height))\n pub_image = CvBridge().cv2_to_imgmsg(resize_image, \"bgr8\")\n self.image_pub.publish(pub_image)\n\ndef main():\n # Load \n category = load_labelmap()\n graph = load_frozenmodel()\n # Detection\n detection = ObjectDetection(graph, category)\n detection.main()\n\nif __name__ == '__main__':\n print(\"start\")\n main()\n","sub_path":"object_detection/mask_rcnn_ros.py","file_name":"mask_rcnn_ros.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"106638282","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pygame\nfrom ondas import *\nfrom circulo import *\nfrom pygame.locals import *\n\ndef eventos():\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\texit()\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == K_ESCAPE:\n\t\t\t\texit()\n\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tif pygame.mouse.get_pressed()[0]:\n\t\t\t\tlistado.put(circulo(2,2,pygame.mouse.get_pos(),[0,0,255]))\n\npygame.init()\nscreen = pygame.display.set_mode((1366,768))\ntimer = pygame.time.Clock()\nlistado = ondas()\n#c1 = circulo(200,2,[320,240])\n#listado.put(c1)\nlistado.test()\nwhile True:\n\teventos()\n\n\tlistado.pintar(screen)\n\tlistado.avance()\n\tpygame.display.flip()\n\ttimer.tick(100)\n","sub_path":"4- Efects/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"234174292","text":"\"\"\"\nVisualizador.\n\"\"\"\n\nimport glfw\nfrom OpenGL.GL import *\nimport numpy as np\nimport sys\n\nimport transformations2 as tr2\nimport easy_shaders as es\n\nfrom model import Tpose, Axis\nfrom controller import Controller\n\nif __name__ == '__main__':\n\n # Initialize glfw\n if not glfw.init():\n sys.exit()\n\n width = 600\n height = 600\n\n window = glfw.create_window(width, height, 'TPOSE EPIC', None, None)\n\n if not window:\n glfw.terminate()\n sys.exit()\n\n glfw.make_context_current(window)\n\n # Creamos el controlador\n controller = Controller()\n\n # Connecting the callback function 'on_key' to handle keyboard events\n glfw.set_key_callback(window, controller.on_key)\n\n # Creating shader programs for textures and for colores\n textureShaderProgram = es.SimpleTextureModelViewProjectionShaderProgram()\n colorShaderProgram = es.SimpleModelViewProjectionShaderProgram()\n\n # Setting up the clear screen color\n glClearColor(0.15, 0.15, 0.15, 1.0)\n\n # As we work in 3D, we need to check which part is in front,\n # and which one is at the back\n glEnable(GL_DEPTH_TEST)\n\n # Creamos los objetos\n axis = Axis()\n tpose = Tpose('img/ricardo.png', 'img/sad.png', 'img/mememan.png')\n # ricardo = feliz\n # sad = triste\n # mememan = neutral\n\n controller.set_toggle(tpose, 'face')\n controller.set_toggle(axis, 'axis')\n\n # Creamos la camara y la proyección\n projection = tr2.ortho(-1, 1, -1, 1, 0.1, 100)\n view = tr2.lookAt(\n np.array([10, 10, 5]), # Donde está parada la cámara\n np.array([0, 0, 0]), # Donde estoy mirando\n np.array([0, 0, 1]) # Cual es vector UP\n )\n\n while not glfw.window_should_close(window):\n\n # Using GLFW to check for input events\n glfw.poll_events()\n\n # Filling or not the shapes depending on the controller state\n if controller.fill_polygon:\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n else:\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n\n # Clearing the screen in both, color and depth\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Dibujamos\n axis.draw(colorShaderProgram, projection, view)\n tpose.draw(colorShaderProgram, textureShaderProgram, projection, view)\n\n # Once the drawing is rendered, buffers are swap so an uncomplete drawing is never seen.\n glfw.swap_buffers(window)\n\n glfw.terminate()\n","sub_path":"décimas/decima5/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"643186082","text":"\nimport pandas as pd\nimport numpy as np\nimport json\nimport itertools\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport sys\n\nn = len(sys.argv)\nif n < 2:\n print(\"Please input an anime name\")\n sys.exit()\n\npath_in_str = 'data/anime_list_final_231.json'\njson_file = open(path_in_str)\ndata = json.load(json_file)\nanime_df_raw = pd.DataFrame.from_dict(data, orient='index')\nanime_df_raw = anime_df_raw[anime_df_raw['error'] != 'not_found']\nanime_df = anime_df_raw[['id', 'title', 'mean', 'genres', 'statistics', 'num_episodes', 'synopsis']]\nanime_df = anime_df.dropna()\n\n\nanime_df['genres_name'] = anime_df['genres'].apply(lambda x : [a['name'] for a in x])\nanime_df['genres_id'] = anime_df['genres'].apply(lambda x : [a['id'] for a in x])\n\n\nanime_df['watching'] = anime_df['statistics'].apply(lambda x : x['status']['watching'])\nanime_df['num_list_users'] = anime_df['statistics'].apply(lambda x : x['num_list_users'])\nanime_df['completed'] = anime_df['statistics'].apply(lambda x : x['status']['completed'])\nanime_df['plan_to_watch'] = anime_df['statistics'].apply(lambda x : x['status']['plan_to_watch'])\nanime_df['dropped'] = anime_df['statistics'].apply(lambda x : x['status']['dropped'])\nanime_df['on_hold'] = anime_df['statistics'].apply(lambda x : x['status']['on_hold'])\n\n\nanime_df.drop(['genres', 'statistics'], axis=1, inplace=True)\n\n\nanime_df.rename({'id': 'anime_id'}, axis=1, inplace=True)\n\nnltk.download('wordnet', quiet = True)\nnltk.download('stopwords', quiet = True)\nnltk.download('averaged_perceptron_tagger', quiet = True)\nnltk.download('punkt', quiet = True)\n \n \n \nstop_words = set(stopwords.words('english'))\n\nlemmatizer = WordNetLemmatizer()\n\nverbs = {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'}\n\n\ndef extract_tokens(text):\n text = text.lower()\n\n sentence =[]\n tokens = nltk.word_tokenize(text)\n tags = nltk.pos_tag(tokens)\n\n i = 0\n for token in tokens:\n if tags[i][1] not in verbs: \n lemma_tag = lemmatizer.lemmatize(token)\n else:\n lemma_tag = lemmatizer.lemmatize(token, 'v')\n\n\n if lemma_tag not in stop_words:\n if lemma_tag.isalpha():\n sentence.append(lemma_tag)\n i = i+1\n \n lemma_sentence = ' '.join(sentence)\n lemma_sentence = lemma_sentence.replace(\"'s\", \" is\")\n lemma_sentence = lemma_sentence.replace(\"'ve\", \" have\")\n lemma_sentence = lemma_sentence.replace(\"'ll\", \" will\")\n lemma_sentence = lemma_sentence.replace(\"'m\", \" am\")\n lemma_sentence = lemma_sentence.replace(\"n't\", \" not\")\n lemma_sentence = lemma_sentence.replace(\"'d\", \" would\")\n lemma_sentence = lemma_sentence.replace(\"'re\", \" are\")\n return lemma_sentence\n \nanime_df[\"lemma_synopsis\"]= anime_df[\"synopsis\"].apply(extract_tokens)\n\ntf_idf_vectorizer = TfidfVectorizer()\ntf_idf_anime_id = tf_idf_vectorizer.fit_transform((anime_df[\"lemma_synopsis\"]))\n \ncos_sim = cosine_similarity(tf_idf_anime_id, tf_idf_anime_id)\ntf_idf_vectorizer.get_feature_names_out().shape\n\n\n\n\ndef genre_agg(genres):\n return [genre for i in range(1, 6) for genre in itertools.combinations(genres, r=i)]\n\ntf_genre_vec = TfidfVectorizer(analyzer=genre_agg)\ntf_idf_genre = tf_genre_vec.fit_transform(anime_df['genres_name'])\ncos_sim_genre = cosine_similarity(tf_idf_genre, tf_idf_genre)\n\n\n\nanime_df['anime_id'] = anime_df['anime_id'].astype(int)\nanime_names = pd.Series(np.array(anime_df['title']))\n \ndef recommend_anime(title, max_reco = 10, cosine_sim = cos_sim, cosine_sim_genre = cos_sim_genre):\n recommended_animes = []\n index = anime_names[anime_names == title].index[0]\n # print(index)\n # print(anime_df.iloc[index])\n \n similar_scores = pd.Series(cosine_sim[index])\n similar_scores_genre = pd.Series(cosine_sim_genre[index])\n\n similar_scores_mul = similar_scores.mul(similar_scores_genre)\n similar_scores_mul = similar_scores_mul.sort_values(ascending=False)\n\n top_animes = list(similar_scores_mul.iloc[1:max_reco+1].index)\n for anime_index in top_animes:\n anime_row = anime_df.iloc[anime_index]\n anime_name = anime_row['title']\n recommended_animes.append(anime_name)\n return recommended_animes\n\n\n\nfor i in range(1, n):\n print(\"Anime recommendations for: \"+sys.argv[i])\n\n try:\n print(recommend_anime(sys.argv[i]))\n except:\n print(\"Invalid anime name according to MyAnimeList, please enter a valid name\")\n","sub_path":"tf_idf_syn_genre_recommender.py","file_name":"tf_idf_syn_genre_recommender.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"424843139","text":"import re\nimport htmlentitydefs\n\n##\n# From http://effbot.org/zone/re-sub.htm#unescape-html\n# Removes HTML or XML character references and entities from a text string.\n#\n# @param text The HTML (or XML) source text.\n# @return The plain text, as a Unicode string, if necessary.\n\ndef unescape(text):\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"\":\n # character reference\n try:\n if text[:3] == \"\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"?\\w+;\", fixup, text)\n\ndef remove_quote(text, replacement=u\"\", quote_prefix=u\">\"):\n lines = []\n put_replacement = True\n for line in text.splitlines():\n if line.strip().startswith(quote_prefix):\n if put_replacement:\n lines.append(replacement)\n put_replacement = False\n else:\n lines.append(line)\n put_replacement = True\n return u\"\\n\".join(lines)\n\ndef remove_signature(text, dividers=[re.compile(r'^--\\s+')]):\n lines = []\n found = False\n for line in text.splitlines():\n for divider in dividers:\n if divider.match(line) is not None:\n found = True\n break\n if found:\n break\n lines.append(line)\n return u\"\\n\".join(lines)\n\nEMAIL_NAME_RE = re.compile(r'[,:]? \"?.*?\"? <[^@]+@[^>]+>')\n\ndef replace_email_name(text, replacement=u\"\"):\n return EMAIL_NAME_RE.sub(replacement, text)\n\nEMAIL_RE = re.compile(r'[^\\s]+@[^\\s]+')\n\ndef replace_email(text, replacement=u\"\"):\n return EMAIL_RE.sub(replacement, text)\n","sub_path":"froide/helper/text_utils.py","file_name":"text_utils.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"349343554","text":"# Import libraries\nimport math\nfrom src import *\nimport Space_Object\n\nclass Ship(Space_Object):\n # keys are passed as dictionary\n # {\"up\":, \"left\":, \"right\":, \"shoot\":}\n def __init__(self, pos, vel, rad, rot, ang, img, siz, cen, sca, sid, keys):\n Space_Object.__init__(self, pos, vel, rad, rot, ang, img, siz, cen, sca)\n self.sid = sid\n self.keys = keys\n self.lives = 3\n self.score = 0\n self.health = 100\n self.sheild = 100\n self.float_count = 0\n\n def update(self, game):\n # Get key presses\n key_U = (game.key_map.get(simplegui.KEY_MAP[self.keys[\"up\"]]) == True)\n key_L = (game.key_map.get(simplegui.KEY_MAP[self.keys[\"left\"]]) == True)\n key_R = (game.key_map.get(simplegui.KEY_MAP[self.keys[\"right\"]]) == True)\n\n # Check forward movement\n if (key_U):\n self.vel.x += math.cos(self.rot - (math.pi / 2)) * game.SHIP_FORWARD_THRUST\n self.vel.y += math.sin(self.rot - (math.pi / 2)) * game.SHIP_FORWARD_THRUST\n self.cen = [255,85]\n else:\n self.vel.x *= game.SHIP_BACKWARDS_DRAG\n self.vel.y *= game.SHIP_BACKWARDS_DRAG\n self.cen = [85,85]\n\n # Check rotation\n if (key_L and key_R):\n self.ang = 0\n self.cen = [255,85]\n elif (key_L):\n self.ang = -game.SHIP_ROTATION_SPEED\n self.cen = [595,85]\n elif (key_R):\n self.ang = game.SHIP_ROTATION_SPEED\n self.cen = [425,85]\n else:\n self.ang = 0\n\n # If no thrusters on, then drift slowly\n self.float_count += 1 \t\n if ((not key_U) and (not key_L) and (not key_R) and self.float_count % 5 == 0):\n self.pos.x += random.randint(-1, 1)\n self.pos.y += random.randint(-1, 1)\n\n Space_Object.update(self, game)\n \n \n def draw(self, game, canvas):\n # Draw ship stats\n if self.sid == \"L\":\n canvas.draw_text('Score: 0', (10,20), 20, COLOR, 'monospace')\n canvas.draw_line([10,35],[210,35], 10, \"Red\")\n canvas.draw_line([10,50],[210,50], 10, \"Blue\")\n else:\n canvas.draw_text('Score: 0', (game.CANVAS_W-210,20), 20, game.COLOR, 'monospace')\n canvas.draw_line([game.CANVAS_W-10,35],[game.CANVAS_W-210,35], 10, \"Red\")\n canvas.draw_line([game.CANVAS_W-10,50],[game.CANVAS_W-210,50], 10, \"Blue\")\n\n Space_Object.draw(self, canvas)","sub_path":"Asteroids/src/Ship.py","file_name":"Ship.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"512289067","text":"#!/usr/bin/env python\n#\n# Build dynamic library with JNI using user-provided arguments and place it to resources directory\n# of Maven package\n#\n# NOTE: this script must be python2/3 compatible\n#\n# How to use: build_native_for_maven.py []\n#\n\n\nfrom __future__ import absolute_import, print_function\n\nimport contextlib\nimport errno\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport zipfile\n\n\n@contextlib.contextmanager\ndef _tempdir(prefix=None):\n tmp_dir = tempfile.mkdtemp(prefix=prefix)\n yield tmp_dir\n # TODO(yazevnul): log error\n shutil.rmtree(tmp_dir, ignore_errors=True)\n\n\ndef _get_platform():\n if sys.platform.startswith('linux'):\n return 'linux'\n return sys.platform\n\ndef _get_arch():\n machine = platform.machine()\n if machine.lower() == 'amd64':\n return 'x86_64'\n return machine\n\n\ndef _get_arcadia_root():\n arcadia_root = None\n path = os.path.dirname(os.path.abspath(sys.argv[0]))\n while True:\n if os.path.isfile(os.path.join(path, '.arcadia.root')):\n arcadia_root = path\n break\n\n if path == os.path.dirname(path):\n break\n\n path = os.path.dirname(path)\n\n assert arcadia_root is not None, 'you are probably trying to use this script with repository being checkout not from the root'\n return arcadia_root\n\n\ndef _get_ya_path():\n ya_path = os.path.join(_get_arcadia_root(), 'ya')\n assert os.path.isfile(ya_path), 'no `ya` in arcadia root'\n assert os.access(ya_path, os.X_OK), '`ya` must be executable'\n return ya_path\n\n\ndef _get_package_resources_dir(base_dir):\n return os.path.join(base_dir, 'src', 'main', 'resources')\n\n\ndef _get_native_lib_dir(root_dir, package_arcadia_path):\n return os.path.join(root_dir, package_arcadia_path, 'src', 'native_impl')\n\n\ndef _ensure_dir_exists(path):\n try:\n os.makedirs(path)\n except OSError as e:\n import errno\n if e.errno != errno.EEXIST:\n raise\n\n\ndef _get_current_machine_resources_dir():\n return ''.join((_get_platform(), '-', _get_arch()))\n\n\ndef _extract_classes_from_jar(jar_file, dst_dir):\n with zipfile.ZipFile(jar_file, 'r') as zf:\n for member_name in zf.namelist():\n if member_name.endswith('.class'):\n zf.extract(member_name, dst_dir)\n\n\ndef _main():\n if len(sys.argv) < 3:\n raise Exception('Required basedir and library_name arguments is not specified')\n\n base_dir = sys.argv[1]\n lib_name = sys.argv[2]\n package_name = os.path.basename(os.path.abspath(base_dir))\n package_arcadia_path = os.path.relpath(base_dir, _get_arcadia_root())\n ya_path = _get_ya_path()\n resources_dir = _get_package_resources_dir(base_dir)\n _ensure_dir_exists(resources_dir)\n shared_lib_dir = os.path.join(\n resources_dir,\n _get_current_machine_resources_dir(),\n 'lib')\n _ensure_dir_exists(shared_lib_dir)\n native_lib_dir = _get_native_lib_dir(_get_arcadia_root(), package_arcadia_path)\n env = os.environ.copy()\n\n print('building dynamic library with `ya`', file=sys.stderr)\n sys.stderr.flush()\n\n with _tempdir(prefix='catboost_build-') as build_output_dir:\n ya_make = ([sys.executable, ya_path, 'make', native_lib_dir]\n + ['--output', build_output_dir]\n + ['-D', 'CATBOOST_OPENSOURCE=yes']\n + ['-D', 'CFLAGS=-DCATBOOST_OPENSOURCE=yes']\n + sys.argv[3:])\n print (' '.join(ya_make))\n subprocess.check_call(\n ya_make,\n env=env,\n stdout=sys.stdout,\n stderr=sys.stderr)\n\n native_lib_build_dir = _get_native_lib_dir(build_output_dir, package_arcadia_path)\n jar_name = lib_name + '.jar'\n jar_src_path = os.path.join(native_lib_build_dir, jar_name)\n if os.path.exists(jar_src_path):\n \"\"\"\n Ya Make's DLL_JAVA packs classes generated by SWIG into it's own jar,\n put these classes into resource dir to be added in main package's jar.\n \"\"\"\n\n print('extract classes from jar to resources', file=sys.stderr)\n _extract_classes_from_jar(jar_src_path, resources_dir)\n\n \"\"\"\n Copy jar with sources to target dir (needed for documentation generators)\n \"\"\"\n print('copy sources jar to target', file=sys.stderr)\n\n target_dir = os.path.join(base_dir, 'target')\n \"\"\"\n ensure that target directory exists, can't use exist_ok flag because it is unavailable in\n python 2.7\n \"\"\"\n try:\n os.makedirs(target_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n shutil.copy(os.path.join(native_lib_build_dir, lib_name + '-sources.jar'), target_dir)\n\n native_lib_name = {\n 'darwin': 'lib{}.dylib',\n 'win32': '{}.dll',\n 'linux': 'lib{}.so',\n }[_get_platform()].format(lib_name)\n\n print('copying dynamic library to resources/lib', file=sys.stderr)\n shutil.copy(\n os.path.join(_get_native_lib_dir(build_output_dir, package_arcadia_path), native_lib_name),\n shared_lib_dir)\n\n\nif '__main__' == __name__:\n _main()\n","sub_path":"catboost/jvm-packages/tools/build_native_for_maven.py","file_name":"build_native_for_maven.py","file_ext":"py","file_size_in_byte":5380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"320334986","text":"#coding: utf-8\r\n#-----------------------------\r\n# 安装脚本\r\n#-----------------------------\r\nimport sys,os,shutil\r\npanelPath = os.getenv('BT_PANEL')\r\nos.chdir(panelPath)\r\nsys.path.append(\"class/\")\r\nimport public,tarfile,time,re\r\n\r\nclass panel_nginx:\r\n _name = 'nginx'\r\n _version = None\r\n _setup_path = None\r\n\r\n def __init__(self,name,version,setup_path):\r\n self._name = name\r\n self._version = version\r\n self._setup_path = setup_path\r\n \r\n def install_soft(self,downurl): \r\n status = public.get_server_status('W3SVC')\r\n if status >=0:\r\n public.bt_print('准备卸载IIS..')\r\n os.system(\"iisreset /stop\") \r\n public.change_server_start_type('W3SVC',-1)\r\n\r\n if public.get_server_status(self._name) >= 0: \r\n public.delete_server(self._name)\r\n\r\n path = self._setup_path \r\n temp = self._setup_path + '/temp/' + self._name + self._version +'.rar'\r\n\r\n #配置PHP上传路径\r\n public.bt_print('正在更改PHP上传路径...')\r\n self.set_php_upload_path()\r\n\r\n public.bt_print('��在下载安装包...') \r\n public.downloadFile(downurl + '/win/nginx_new/'+ self._name + '.rar',temp)\r\n if not os.path.exists(temp): return public.returnMsg(False,'文件下载失败,请检查网络!');\r\n\r\n public.bt_print('正在解压...')\r\n from unrar import rarfile\r\n try: \r\n rar = rarfile.RarFile(temp) \r\n rar.extractall(path)\r\n except :\r\n time.sleep(1)\r\n rar = rarfile.RarFile(temp) \r\n rar.extractall(path)\r\n\r\n #设置启动权限\r\n public.bt_print('正在配置目录权限...')\r\n self.set_webserver_access()\r\n\r\n public.bt_print('正在配置' + self._name + '...')\r\n phps = self.get_php_versions()\r\n public.bt_print(phps)\r\n phps_str = ','.join(phps)\r\n \r\n import psutil\r\n cpuCount = psutil.cpu_count() / 2\r\n if cpuCount < 2: cpuCount = 2\r\n if cpuCount > 6: cpuCount = 6\r\n cpuCount = int(cpuCount)\r\n\r\n iniPath = self._setup_path + '/' + self._name + '/config.ini'\r\n conf = public.readFile(iniPath)\r\n conf = re.sub('path\\s?=.+','path = ' + public.format_path(self._setup_path),conf);\r\n conf = re.sub('php_versions\\s?=.+','php_versions = ' + phps_str,conf);\r\n conf = re.sub('php_cgi_thread\\s?=.+','php_cgi_thread = ' + str(cpuCount),conf);\r\n public.writeFile(iniPath,conf)\r\n\r\n public.bt_print('正在安装' + self._name + '服务...')\r\n password = public.readFile('data/www')\r\n if os.path.exists(self._setup_path + '/' + self._name + '/version.pl'): os.remove(self._setup_path + '/' + self._name + '/version.pl') \r\n\r\n #zend需要授权c:/Windows目录,无法www用户无法授权\r\n rRet = public.create_server(self._name,self._name,self._setup_path + '/' + self._name + '/nginx_server.exe','',\"nginx是一款轻量级的Web 服务器/反向代理服务器及电子邮件(IMAP/POP3)代理服务器,并在一个BSD-like 协议下发行\")\r\n time.sleep(1);\r\n if public.get_server_status(self._name) >= 0:\r\n public.M('config').where(\"id=?\",('1',)).setField('webserver','nginx')\r\n if public.set_server_status(self._name,'start'):\r\n public.bt_print('安装成功.')\r\n return public.returnMsg(True,self._name + '安装成功')\r\n else:\r\n return public.returnMsg(False,'启动失败,请检查配置文件是否错误!') \r\n return rRet; \r\n\r\n def uninstall_soft(self):\r\n try:\r\n if os.path.exists(self._setup_path + '/phpmyadmin'): shutil.rmtree(self._setup_path + '/phpmyadmin')\r\n except :\r\n pass\r\n if public.get_server_status(self._name) >= 0: \r\n public.delete_server(self._name) \r\n time.sleep(2)\r\n shutil.rmtree(self._setup_path + '/' + self._name)\r\n\r\n return public.returnMsg(True,'卸载成功!');\r\n\r\n #更新软件\r\n def update_soft(self,downurl):\r\n files = ['config.ini','config/nginx.conf']\r\n for filename in files:\r\n filepath = self._setup_path + '/' + self._name + '/' + filename\r\n if os.path.exists(filepath): shutil.copy(filepath,filepath + '.backup');\r\n rRet = self.install_soft(downurl)\r\n if not rRet['status'] : rRet;\r\n \r\n for filename in files:\r\n filepath = self._setup_path + '/' + self._name + '/' + filename + '.backup'\r\n if os.path.exists(filepath): \r\n shutil.copy(filepath,filepath.replace('.backup',''));\r\n os.remove(filepath)\r\n \r\n return public.returnMsg(True,'更新成功!');\r\n\r\n #获取所有php版本\r\n def get_php_versions(self):\r\n phpPath = self._setup_path + '/php'\r\n phps = []\r\n if os.path.exists(phpPath): \r\n for filename in os.listdir(phpPath): \r\n if os.path.isdir(phpPath + '/' + filename):\r\n try: \r\n version = int(filename)\r\n phps.append(filename)\r\n except :\r\n pass\r\n return phps;\r\n\r\n #由于C:/Windows无法增加www权限,故修改上传目录到C:/Temp\r\n def set_php_upload_path(self): \r\n phps = self.get_php_versions()\r\n for version in phps:\r\n iniPath = self._setup_path + '/php' + '/' + version + '/php.ini'\r\n \r\n if os.path.exists(iniPath):\r\n conf = public.readFile(iniPath) \r\n conf = re.sub(';?upload_tmp_dir.+','upload_tmp_dir=\"C:/Temp\"',conf);\r\n public.writeFile(iniPath,conf)\r\n return True\r\n\r\n\r\n #恢复网站权限(仅适配nginx下www权限)\r\n def set_webserver_access(self):\r\n if not os.path.exists('C:/Temp'): os.makedirs('C:/Temp') \r\n public.set_file_access(\"C:/Temp\",\"IIS_IUSRS\",public.file_all,False)\r\n user = 'www'\r\n data = public.M('config').where(\"id=?\",('1',)).field('sites_path').find();\r\n\r\n if data['sites_path'].find('/www/') >=0 :\r\n wwwroot = os.getenv(\"BT_SETUP\")[:2] + '/wwwroot'\r\n if not os.path.exists(wwwroot): os.makedirs(wwwroot)\r\n\r\n backup_path = os.getenv(\"BT_SETUP\")[:2] + '/backup'\r\n if not os.path.exists(backup_path): os.makedirs(backup_path)\r\n \r\n public.M('config').where('id=?',(1,)).setField('backup_path',backup_path)\r\n public.M('config').where('id=?',(1,)).setField('sites_path',wwwroot)\r\n\r\n data = public.M('config').where(\"id=?\",('1',)).field('sites_path').find();\r\n\r\n #完全控制权限\r\n paths = [\"C:/Temp\", public.GetConfigValue('logs_path'), public.GetConfigValue('setup_path') + '/nginx' ,data['sites_path'] ] \r\n\r\n #只读权限\r\n flist = []\r\n for x in paths: public.get_paths(x,flist) \r\n #批量设置上层目录只读权限 \r\n for f in flist:\r\n print(\"正在设置 %s 权限\" % f)\r\n public.set_file_access(f,user,public.file_read,False)\r\n\r\n for f in paths: \r\n print(\"正在设置 %s 权限\" % f)\r\n public.set_file_access(f,user,public.file_all,False)\r\n\r\n public.set_file_access(os.getenv(\"BT_SETUP\") + '/nginx',user,public.file_all)\r\n\r\n return public.returnMsg(True,'权限恢复成功,当前仅恢复Nginx启动所需权限,网站权限需要手动恢复!')\r\n","sub_path":"PxZwlELmqyK1QsaW/c3233f6bd265232b643092f144354752a9ff24e1.py","file_name":"c3233f6bd265232b643092f144354752a9ff24e1.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"633239715","text":"try:\n from os import scandir\nexcept ImportError:\n from scandir import scandir # use scandir PyPI module on Python < 3.5\n\ndef scantree(path):\n \"\"\"Recursively yield DirEntry objects for given directory.\"\"\"\n for entry in scandir(path):\n if entry.is_dir(follow_symlinks=False):\n yield from scantree(entry.path) # see below for Python 2.x\n else:\n yield entry\n\nif __name__ == '__main__':\n import sys\n mypath = '/Users/anandihalli/Documents/01_Work/brazil'\n for entry in scantree(mypath):\n if(entry.name != '.DS_Store'):\n print(entry.name)\n","sub_path":"Pyhton/Tests and genral scripts/test_2.py","file_name":"test_2.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"294614633","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/fynance/dev/set_neuralnetwork_tools.py\n# Compiled at: 2019-02-12 11:42:02\n# Size of source mod 2**32: 4877 bytes\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, Dropout\nfrom keras.optimizers import Adam\nfrom keras import regularizers, initializers, constraints\n__all__ = [\n 'incr_seed', 'set_layer', 'set_nn_model']\n\ndef incr_seed(SEED, incr=1):\n \"\"\" Increment seed \"\"\"\n if SEED is None:\n return\n return SEED + incr\n\n\ndef set_layer(nn, n_neurons, layer=Dense, dropout=None, SEED_dropout=None, **kwargs):\n \"\"\" Set `Dense` layers \n \n Parameters\n ----------\n :nn: keras.Model\n An initilized neural network (cf `Input` keras documation).\n :n_neurons: int\n Number of neurons to set in this layer.\n :layer: keras.layer\n Kind of layers to use.\n :dropout: float\n At each iteration an part of variables is dropout. cf keras doc.\n :SEED: int\n A number to set the random weights.\n :kwargs: any parameters of `Dense` \n cf keras documentation.\n \n Returns\n -------\n :nn: keras.Model\n Neural network with one more layer.\n \n \"\"\"\n if dropout is None:\n return layer(n_neurons, **kwargs)(nn)\n nn = layer(n_neurons, **kwargs)(nn)\n return Dropout(dropout, seed=SEED_dropout)(nn)\n\n\ndef set_nn_model(X, neurons_list=[], layer=Dense, SEED=None, SEED_dropout=None, l1=0.01, l2=0.01, dropout=None, lr=0.01, b_1=0.99, b_2=0.999, decay=0.0, name=None, loss='mse', metrics=['accuracy'], l1_bias=0.01, l2_bias=0.01, l1_acti=0.01, l2_acti=0.01, m=0.0, std=1.0, **kwargs):\n \"\"\" Set a very basic neural network with `Dense` layers.\n \n Parameters\n ----------\n :X: np.ndarray[ndim=2, dtype=np.float32]\n Matrix of features of shape (T, N) with T is the number of \n observations and N the number of features.\n :neurons_list: list of int\n Each number correspond at the number of neurons in corrsponding layers.\n :layer: keras.layer\n Kind of layers to use.\n :dropout: float\n At each iteration an part of variables is dropout. cf keras doc.\n :SEED: int\n A number to set the random weights.\n For other parameters cf Keras documentation.\n \n Returns\n -------\n :model: Keras.model\n A Neural Network ready to be train !\n \n \"\"\"\n T, N = X.shape\n KERN_REG = regularizers.l1_l2(l1=l1, l2=l2)\n BIAS_REG = regularizers.l1_l2(l1=l1_bias, l2=l2_bias)\n ACTIV_REG = regularizers.l1_l2(l1=l1_acti, l2=l2_acti)\n KERN_CONS = constraints.MinMaxNorm(min_value=(-2.0),\n max_value=2.0,\n rate=1.0,\n axis=0)\n BIAS_CONS = constraints.MinMaxNorm(min_value=(-2.0),\n max_value=2.0,\n rate=1.0,\n axis=0)\n inputs = Input(shape=(N,), sparse=False)\n kern_init = initializers.RandomNormal(mean=m, stddev=std, seed=SEED)\n SEED = incr_seed(SEED, incr=1)\n nn = set_layer(\n inputs, neurons_list[0], dropout=dropout, kernel_regularizer=KERN_REG, SEED_dropout=SEED_dropout, \n bias_regularizer=BIAS_REG, activity_regularizer=ACTIV_REG, \n kernel_initializer=kern_init, kernel_constraint=KERN_CONS, \n bias_constraint=BIAS_CONS, **kwargs)\n SEED_dropout = incr_seed(SEED_dropout, incr=1)\n for n_neurons in neurons_list[1:]:\n kern_init = initializers.RandomNormal(mean=m, stddev=std, seed=SEED)\n SEED = incr_seed(SEED, incr=1)\n nn = set_layer(\n nn, n_neurons, layer=layer, dropout=dropout, SEED_dropout=SEED_dropout, \n kernel_regularizer=KERN_REG, kernel_initializer=kern_init, \n bias_regularizer=BIAS_REG, activity_regularizer=ACTIV_REG, \n kernel_constraint=KERN_CONS, bias_constraint=BIAS_CONS, **kwargs)\n SEED_dropout = incr_seed(SEED_dropout, incr=1)\n\n kern_init = initializers.RandomNormal(mean=m, stddev=std, seed=SEED)\n SEED = incr_seed(SEED, incr=1)\n outputs = set_layer(\n nn, 1, dropout=dropout, SEED_dropout=SEED_dropout, kernel_regularizer=KERN_REG, \n bias_regularizer=BIAS_REG, activity_regularizer=ACTIV_REG, \n kernel_initializer=kern_init, kernel_constraint=KERN_CONS, \n bias_constraint=BIAS_CONS, **kwargs)\n SEED_dropout = incr_seed(SEED_dropout, incr=1)\n model = Model(inputs=inputs, outputs=outputs)\n model.name = name\n model.compile(optimizer=Adam(lr=lr,\n beta_1=b_1,\n beta_2=b_2,\n decay=decay,\n amsgrad=True),\n loss=loss,\n metrics=metrics)\n return model","sub_path":"pycfiles/fynance-1.0.8-py3.7-linux-x86_64/set_neuralnetwork_tools.cpython-37.py","file_name":"set_neuralnetwork_tools.cpython-37.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"284020453","text":"import json\nimport os\nimport sys\nfrom datetime import datetime\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import dates\n\n\ndef get_struct(file_name):\n with open(file_name,'r') as f:\n DATA_FULL=json.load(f)\n return DATA_FULL\n\n\ndef main():\n plt.rcParams['pdf.fonttype'] = 42\n plt.rcParams['font.family'] = 'Calibri'\n sis = sys.platform\n if sis == 'win32':\n file_name = os.getcwd() + '\\\\GES_DATA.json'\n else:\n file_name = os.getcwd() + '/GES_DATA.json'\n DATA=get_struct(file_name)\n WHAT_GRAF=\"Level\"\n #for name in DATA:\n for name in DATA:\n date=[]\n levels=[]\n for datem in DATA[name]:\n date.append(dates.date2num(datetime.strptime(datem[:-9],'%Y-%m-%d')))\n levels.append(DATA[name][datem][WHAT_GRAF])\n plt.figure()\n axes = plt.subplot(1, 1, 1)\n axes.xaxis.set_major_formatter (dates.DateFormatter(\"%d.%m.%y\"))\n plt.plot(date,levels)\n plt.grid()\n figname=name+\"_\"+WHAT_GRAF+\".png\"\n plt.savefig(figname, bbox_inches='tight')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Grafiki_Ischod.py","file_name":"Grafiki_Ischod.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"75178214","text":"import os\nimport logging\nfrom operator import mul\nfrom functools import reduce\nfrom discord.ext import commands\nfrom datetime import datetime as dt\n\nlogging.basicConfig(level=logging.INFO)\nbot = commands.Bot(command_prefix=commands.when_mentioned_or('/'), help_command=None)\nconfig = {\n 'daug': {\n 'guild_id': 494911447420108820,\n 'guild_logs_id': 674500858054180874,\n 'role_member_id': 579591779364372511,\n 'role_contributor_id': 631299456037289984,\n 'channel_tips_id': 693388545628438538,\n 'category_issues_id': 601219955035209729,\n 'category_open_id': 575935336765456394,\n 'category_closed_id': 640090897417240576,\n 'category_archive_id': 689447835590066212,\n },\n}\n\n\n@bot.event\nasync def on_ready():\n ID = reduce(mul, (2, 7, 11, 33637, 223253, 434803))\n await bot.get_channel(ID).send(dt.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\n\n\nif __name__ == '__main__':\n bot.config = config\n bot.load_extension('jishaku')\n bot.load_extension('dispander')\n bot.load_extension('daug.extension')\n bot.load_extension('cogs.admin')\n bot.load_extension('cogs.database')\n bot.load_extension('cogs.public')\n bot.load_extension('cogs.werewolf')\n bot.run(os.environ['DISCORD_BOT_TOKEN'])\n","sub_path":"src/discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"228672357","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2019 Roberto Riggio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"API Manager (REST Northbound Interface).\"\"\"\n\nimport inspect\nimport json\nimport base64\nimport re\n\nfrom uuid import UUID\n\nimport tornado.web\nimport tornado.httpserver\n\nfrom tornado.web import Application\nfrom pymodm.errors import ValidationError\n\nfrom empower_core.serialize import serialize\nfrom empower_core.service import EService\nfrom empower_core.launcher import srv_or_die, srv\nfrom empower_core.launcher import SERVICES\n\nDEBUG = True\nDEFAULT_PORT = 8888\nDEFAULT_WEBUI = \"/var/www/webui/\"\nCOOKIE_SECRET = b'xyRTvZpRSUyk8/9/McQAvsQPB4Rqv0w9mBtIpH9lf1o='\nLOGIN_URL = \"/auth/login\"\n\n\ndef validate(returncode=200, min_args=0, max_args=0):\n \"\"\"Validate REST method.\"\"\"\n\n def decorator(func):\n\n def magic(self, *args):\n\n try:\n\n if len(args) < min_args or len(args) > max_args:\n msg = \"Invalid url (%u, %u)\" % (min_args, max_args)\n raise ValueError(msg)\n\n params = {}\n\n if self.request.body and json.loads(self.request.body):\n params = json.loads(self.request.body)\n\n if \"version\" in params:\n del params[\"version\"]\n\n output = func(self, *args, **params)\n\n if returncode == 200:\n self.write_as_json(output)\n\n except KeyError as ex:\n self.send_error(404, message=str(ex))\n\n except ValueError as ex:\n self.send_error(400, message=str(ex))\n\n except AttributeError as ex:\n self.send_error(400, message=str(ex))\n\n except TypeError as ex:\n self.send_error(400, message=str(ex))\n\n except ValidationError as ex:\n self.send_error(400, message=ex.message)\n\n self.set_status(returncode, None)\n\n magic.__doc__ = func.__doc__\n\n return magic\n\n return decorator\n\n\n# pylint: disable=W0223\nclass BaseHandler(tornado.web.RequestHandler):\n \"\"\"Base Handler.\"\"\"\n\n # service associated to this handler\n service = None\n\n URLS = []\n\n def get_current_user(self):\n \"\"\"Return username of the currently logged user or None.\"\"\"\n\n return self.get_secure_cookie(\"username\")\n\n @classmethod\n def auth_based(cls):\n \"\"\"Return true if both account and project managers are available\"\"\"\n\n pmngr = srv(\"projectsmanager\")\n amngr = srv(\"accountsmanager\")\n\n return bool(pmngr and amngr)\n\n\nclass IndexHandler(BaseHandler):\n \"\"\"Index page handler.\"\"\"\n\n URLS = [r\"/\", r\"/([a-z]*).html\"]\n\n def get_project(self):\n \"\"\"Get the current project or return None if not project is set.\"\"\"\n\n # check if a project has been selected\n project_id = self.get_secure_cookie(\"project_id\")\n\n if not project_id:\n return None\n\n project_id = UUID(project_id.decode('UTF-8'))\n projects_manager = srv_or_die(\"projectsmanager\")\n\n if project_id not in projects_manager.projects:\n self.clear_cookie(\"project_id\")\n return None\n\n return projects_manager.projects[project_id]\n\n @tornado.web.authenticated\n def get(self, args=None):\n \"\"\"Render index page.\"\"\"\n\n try:\n\n if self.auth_based():\n\n username = self.get_secure_cookie(\"username\").decode('UTF-8')\n accounts_manager = srv_or_die(\"accountsmanager\")\n account = accounts_manager.accounts[username]\n\n page = \"index.html\" if not args else \"%s.html\" % args\n\n self.render(page,\n username=account.username,\n password=account.password,\n name=account.name,\n email=account.email,\n project=self.get_project())\n\n else:\n\n page = \"index.html\" if not args else \"%s.html\" % args\n\n self.render(page)\n\n except KeyError as ex:\n self.send_error(404, message=str(ex))\n\n except ValueError as ex:\n self.send_error(400, message=str(ex))\n\n\nclass AuthSwitchProjectHandler(BaseHandler):\n \"\"\"Login page handler.\"\"\"\n\n URLS = [r\"/auth/switch_project\"]\n\n def get(self):\n \"\"\"Set the active project.\"\"\"\n\n username = self.get_secure_cookie(\"username\").decode('UTF-8')\n\n # if root deselect project\n if username == \"root\":\n self.clear_cookie(\"project_id\")\n self.redirect('/')\n return\n\n # check if the project id is in the URL\n project_id = self.get_argument(\"project_id\", None)\n\n # reset project selected\n if not project_id:\n self.clear_cookie(\"project_id\")\n self.redirect('/')\n return\n\n try:\n\n # set project\n project_id = UUID(project_id)\n projects_manager = srv_or_die(\"projectsmanager\")\n project = projects_manager.projects[project_id]\n\n if project.owner != username:\n self.clear_cookie(\"project_id\")\n self.redirect('/')\n return\n\n self.set_secure_cookie(\"project_id\", str(project.project_id))\n\n except KeyError:\n self.clear_cookie(\"project_id\")\n\n except ValueError:\n self.clear_cookie(\"project_id\")\n\n self.redirect('/')\n\n\nclass AuthLoginHandler(BaseHandler):\n \"\"\"Login page handler.\"\"\"\n\n URLS = [r\"/auth/login\"]\n\n def get(self):\n \"\"\"Render login page.\"\"\"\n\n if not self.auth_based():\n self.set_secure_cookie(\"username\", \"none\")\n self.redirect('/')\n return\n\n if self.get_current_user():\n self.redirect('/')\n return\n\n self.render(\"login.html\", error=self.get_argument(\"error\", \"\"))\n\n def post(self):\n \"\"\"Process login credentials.\"\"\"\n\n username = self.get_argument(\"username\", \"\")\n password = self.get_argument(\"password\", \"\")\n\n accounts_manager = srv_or_die(\"accountsmanager\")\n\n if accounts_manager.check_permission(username, password):\n self.set_secure_cookie(\"username\", username)\n self.redirect(\"/index.html\")\n else:\n self.clear_cookie(\"username\")\n self.redirect(\"/auth/login?error=Wrong credentials!\")\n\n\nclass AuthLogoutHandler(BaseHandler):\n \"\"\"Logout page handler.\"\"\"\n\n URLS = [r\"/auth/logout\"]\n\n def get(self):\n \"\"\"Process logout request.\"\"\"\n\n self.clear_cookie(\"username\")\n self.clear_cookie(\"project_id\")\n self.redirect(\"/auth/login\")\n\n\nclass APIHandler(tornado.web.RequestHandler):\n \"\"\"Base class for all the REST calls.\"\"\"\n\n # service associated to this handler\n service = None\n\n @classmethod\n def auth_based(cls):\n \"\"\"Return true if both account and project managers are available\"\"\"\n\n pmngr = srv(\"projectsmanager\")\n amngr = srv(\"accountsmanager\")\n\n return bool(pmngr and amngr)\n\n def write_error(self, status_code, **kwargs):\n \"\"\"Write error as JSON message.\"\"\"\n\n self.set_header('Content-Type', 'application/json')\n\n value = {\n \"title\": self._reason,\n \"status_code\": status_code,\n \"detail\": kwargs.get(\"message\"),\n }\n\n self.finish(json.dumps(serialize(value), indent=4))\n\n def write_as_json(self, value):\n \"\"\"Return reply as a json document.\"\"\"\n\n self.write(json.dumps(serialize(value), indent=4))\n\n def prepare(self):\n \"\"\"Prepare to handler reply.\"\"\"\n\n self.set_header('Content-Type', 'application/json')\n\n # no account manager or project manager\n if not self.auth_based():\n return\n\n # get requests do not require authentication\n if self.request.method == \"GET\":\n return\n\n accounts_manager = srv_or_die(\"accountsmanager\")\n projects_manager = srv_or_die(\"projectsmanager\")\n\n auth_header = self.request.headers.get('Authorization')\n\n if auth_header is None or not auth_header.startswith('Basic '):\n self.set_header('WWW-Authenticate', 'Basic realm=Restricted')\n self.send_error(401, message=\"Missing authorization header\")\n return\n\n auth_bytes = bytes(auth_header[6:], 'utf-8')\n auth_decoded = base64.b64decode(auth_bytes).decode()\n username, password = auth_decoded.split(':', 2)\n\n # account does not exists\n if not accounts_manager.check_permission(username, password):\n self.send_error(401, message=\"Invalid username/password\")\n return\n\n account = accounts_manager.accounts[username]\n\n # root can do everything\n if account.username == \"root\":\n return\n\n # check if logged user is accessing his/her own account\n if self.request.uri.startswith(\"/api/v1/accounts\"):\n\n pattern = re.compile(\"/api/v1/accounts/([a-zA-Z0-9:-]*)/?\")\n match = pattern.match(self.request.uri)\n\n if match and match.group(1):\n username = match.group(1)\n if username == account.username:\n return\n\n # check if logged user is accessing one of his/her projects\n if self.request.uri.startswith(\"/api/v1/projects\"):\n\n pattern = re.compile(\"/api/v1/projects/([a-zA-Z0-9-]*)/?\")\n match = pattern.match(self.request.uri)\n\n if match and match.group(1):\n project_id = UUID(match.group(1))\n if project_id in projects_manager.projects:\n project = projects_manager.projects[project_id]\n if account.username == project.owner:\n return\n\n self.send_error(401, message=\"URI not authorized\")\n\n\nBOILER_PLATE = \"\"\"# REST API\n\nThe REST API consists of a set of RESTful resources and their attributes.\nThe base URL for the REST API is the following:\n\n http{s}://{username}:{password}@{hostname}:{port}/api/v1/{resource}\n\nOf course, you need to replace hostname and port with the hostname/port\ncombination for your controller.\n\nThe current (and only) version of the API is v1.\n\nThe REST API uses HTTP basic authentication control access to RESTful resource.\n\nNotice that there are two kinds of accounts:\n\n * user accounts, which have complete CRUD access only to all the URLs that\n begins with /api/v1/projects/{project_id}.\n\n * root account, which has complete CRUD access to all URLs. All the URLs that\n DO NOT start with /api/v1/projects/{project_id} require a root account to\n be accessed. The only exception is the URL /api/v1/accounts/{user_id} which\n is fully accessible to all users.\n \"\"\"\n\n\n# pylint: disable=W0223\nclass DocHandler(APIHandler):\n \"\"\"Generates markdown documentation.\"\"\"\n\n URLS = [r\"/api/v1/doc/?\"]\n\n def get(self):\n \"\"\"Generates markdown documentation.\n\n Args:\n\n None\n\n Example URLs:\n\n GET /api/v1/doc\n \"\"\"\n\n exclude_list = [\"StaticFileHandler\", \"DocHandler\"]\n handlers = set()\n accum = [BOILER_PLATE]\n\n for rule in self.service.application.default_router.rules:\n if not rule.target.rules:\n continue\n handlers.add(rule.target.rules[0].target)\n\n handlers = sorted(handlers, key=lambda x: x.__name__)\n\n accum.append(\"## Handlers\\n\")\n\n for handler in handlers:\n\n if handler.__name__ in exclude_list:\n continue\n\n accum.append(\" * [%s](#%s)\" %\n (handler.__name__, handler.__name__))\n\n accum.append(\"\\n\")\n\n for handler in handlers:\n\n if handler.__name__ in exclude_list:\n continue\n\n accum.append(\"# %s ([Top](#handlers))\\n\" %\n (handler.__name__, handler.__name__))\n\n accum.append(\"%s\\n\" % inspect.getdoc(handler))\n\n if hasattr(handler, \"URLS\") and handler.URLS:\n accum.append(\"### URLs\\n\")\n for url in handler.URLS:\n accum.append(\" %s\" % url)\n\n accum.append(\"\\n\")\n\n if hasattr(handler, \"get\"):\n doc = inspect.getdoc(getattr(handler, \"get\"))\n if doc:\n accum.append(\"### GET\\n\")\n accum.append(doc)\n accum.append(\"\\n\")\n\n if hasattr(handler, \"put\"):\n doc = inspect.getdoc(getattr(handler, \"put\"))\n if doc:\n accum.append(\"### PUT\\n\")\n accum.append(doc)\n accum.append(\"\\n\")\n\n if hasattr(handler, \"post\"):\n doc = inspect.getdoc(getattr(handler, \"post\"))\n if doc:\n accum.append(\"### POST\\n\")\n accum.append(doc)\n accum.append(\"\\n\")\n\n if hasattr(handler, \"delete\"):\n doc = inspect.getdoc(getattr(handler, \"delete\"))\n if doc:\n accum.append(\"### DELETE\\n\")\n accum.append(doc)\n accum.append(\"\\n\")\n\n self.write('\\n'.join(accum))\n\n\n# pylint: disable=W0223\nclass ManagersHandler(APIHandler):\n \"\"\"Handle managers.\"\"\"\n\n URLS = [r\"/api/v1/managers/?\",\n r\"/api/v1/managers/([a-zA-Z0-9-]*)/?\"]\n\n @validate(min_args=0, max_args=1)\n def get(self, *args):\n \"\"\"Get the active managers\n Args:\n [0]: the manager name\n \"\"\"\n\n if not args:\n return list(SERVICES.values())\n\n return SERVICES[args[0]]\n\n\nclass APIManager(EService):\n \"\"\"Service exposing a REST API\n\n Parameters:\n port: the port on which the HTTP server should listen (optional,\n default: 8888)\n \"\"\"\n\n HANDLERS = [IndexHandler, AuthLoginHandler, AuthLogoutHandler,\n DocHandler, ManagersHandler, AuthSwitchProjectHandler]\n\n def __init__(self, context, service_id, webui, port):\n\n super().__init__(context=context, service_id=service_id, webui=webui,\n port=port)\n\n self.settings = {\n \"static_path\": self.webui + \"static/\",\n \"cookie_secret\": COOKIE_SECRET,\n \"template_path\": self.webui + \"templates/\",\n \"login_url\": LOGIN_URL,\n \"debug\": DEBUG,\n }\n\n self.application = Application([], **self.settings)\n\n self.http_server = tornado.httpserver.HTTPServer(self.application)\n\n @property\n def webui(self):\n \"\"\"Return path to Web UI.\"\"\"\n\n return self.params[\"webui\"]\n\n @webui.setter\n def webui(self, value):\n \"\"\"Set path to Web UI.\"\"\"\n\n if \"webui\" in self.params and self.params[\"webui\"]:\n raise ValueError(\"Param webui can not be changed\")\n\n self.params[\"webui\"] = value\n\n @property\n def port(self):\n \"\"\"Return port.\"\"\"\n\n return self.params[\"port\"]\n\n @port.setter\n def port(self, value):\n \"\"\"Set port.\"\"\"\n\n if \"port\" in self.params and self.params[\"port\"]:\n raise ValueError(\"Param port can not be changed\")\n\n self.params[\"port\"] = int(value)\n\n def start(self):\n \"\"\"Start api manager.\"\"\"\n\n super().start()\n\n self.http_server.listen(self.port)\n\n self.log.info(\"Listening on port %u\", self.port)\n\n self.http_server.start()\n\n def register_handler(self, handler):\n \"\"\"Add a new handler class.\"\"\"\n\n for url in handler.URLS:\n self.log.info(\"Registering URL: %s\", url)\n self.application.add_handlers(r\".*$\", [(url, handler)])\n\n\ndef launch(context, service_id, webui=DEFAULT_WEBUI, port=DEFAULT_PORT):\n \"\"\" Initialize the module. \"\"\"\n\n return APIManager(context=context, service_id=service_id, webui=webui,\n port=port)\n","sub_path":"empower_core/apimanager/apimanager.py","file_name":"apimanager.py","file_ext":"py","file_size_in_byte":16584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"424217925","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sqlite3, csv, gspread, configparser, re, glob, codecs\r\nfrom urllib.parse import _NetlocResultMixinStr\r\nfrom tkinter import filedialog, messagebox\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\n\r\n# remote_sheet_exists = False # Why is this here? Belongs in a class handling the database and/or cloud integration\r\n\r\nclass Editor: # Parent class\r\n def __init__(self):\r\n print(\"Starting editor...\")\r\n self.config = configparser.ConfigParser(allow_no_value=True) # no_value must be allowed so that we can read the list of valid pop types, which is just a list of names\r\n self.config.read('config/MapEditor.ini')\r\n\r\n self.remote_sheet = None # Set later by self.setup_remote_sheet()\r\n\r\n self.has_remote_sheet = True # If true, functions related to getting remote data will be used.\r\n\r\n self.remote_sheet_columns = self.get_remote_sheet_column_indices(self.config.items('AlphanumericColumns')) | self.get_remote_sheet_column_indices(self.config.items('NumericColumns'))\r\n sorted_tuples = sorted(self.remote_sheet_columns.items(), key=lambda item: item[1])\r\n self.remote_sheet_columns = {k: v for k, v in sorted_tuples}\r\n\r\n valid_cultures = self.get_valid_cultures(\"../../common/cultures/\")\r\n valid_religions = self.get_valid_religions(\"../../common/religions/\")\r\n valid_trade_goods = self.get_valid_trade_goods(\"../../common/trade_goods/\")\r\n\r\n # Declare the credentials var on init, but it will only be given a file at the credentials select stage by the GUI object\r\n self.remote_credentials = None # NEED TO GET THIS FROM GUI FUNCTION\r\n\r\n self.map_handler = MapHandler(\"land_input.bmp\",\"sea_input.bmp\")\r\n self.gui = EditorGUI(self.remote_sheet_columns,\r\n remote_sheet=None, # Remote sheet = none until loaded\r\n db = None, # Database = none until loaded\r\n valid_cultures = valid_cultures,\r\n valid_religions = valid_religions,\r\n valid_trade_goods = valid_trade_goods,\r\n config = self.config) \r\n # Prompt a selection of the database path\r\n db_path = self.gui.prompt_file_select()\r\n self.db = database_connection(self.remote_sheet,db_path)\r\n self.gui.db = self.db # Load database now that it has been initialised\r\n self.remote_credentials = self.gui.prompt_remote_credentials() # Get remote credentials\r\n self.setup_remote_sheet() # Load the remote data now that we have the credentials ready\r\n self.gui.remote_sheet = self.remote_sheet # Load the remote sheet for reference by GUI functions\r\n self.gui.start_main_gui()\r\n \r\n # Declare the map var on init, but it will only be given a file at the file select stage\r\n self.editor_file = None # NEED TO GET THIS FROM GUI FUNCTION\r\n\r\n def config_to_dict(self, input_config):\r\n config_as_dict = {s:dict(input_config.items(s)) for s in input_config.sections()}\r\n return config_as_dict\r\n\r\n def get_remote_sheet_column_indices(self, input_columns):\r\n remote_sheet_columns = dict(input_columns)\r\n for k, v in remote_sheet_columns.items():\r\n remote_sheet_columns[k] = int(v) # Convert the column indices to integers\r\n return remote_sheet_columns # Dict\r\n\r\n def setup_remote_sheet(self):\r\n remote_sheet_name = self.config['RemoteSheet']['SheetName']\r\n self.remote_sheet = RemoteSheet(\r\n sheet_name = remote_sheet_name, \r\n credentials = self.remote_credentials,\r\n column_indices = self.remote_sheet_columns\r\n )\r\n self.has_remote_sheet = True\r\n\r\n def get_valid_cultures(self, cultures_directory):\r\n replace_values = {\" \": \"\", \"{\": \"\", \"}\": \"\", \"=\": \"\",\r\n \"\\r\": \"\",\r\n \"\\t\": \"\"}\r\n replace_values = dict((re.escape(k), v) for k, v in replace_values.items())\r\n pattern = re.compile(\"|\".join(replace_values.keys()))\r\n valid_cultures = []\r\n for cultures_file in glob.glob(cultures_directory+\"*.txt\"):\r\n with codecs.open(cultures_file, \"r+\", \"utf-8-sig\") as culture_file_data:\r\n data = culture_file_data.read()\r\n # Find the bit that says cultures and cut off everything before\r\n #cultures = data.partition(\"culture = {\")[2]\r\n cultures = data.partition(\"culture = {\")[2]\r\n cultures = cultures.partition(\"barbarian_names\")[0]\r\n cultures = re.sub(\"#.*\", \"\", cultures)\r\n cultures = re.sub(\".*names\", \"\", cultures)\r\n cultures = re.sub(\".*family\", \"\", cultures)\r\n cultures = re.sub(\"\\\\{(.|\\\\n|\\\\r|\\\\t)*?\\\\}\", \"\", cultures)\r\n cultures = pattern.sub(lambda m: replace_values[re.escape(m.group(0))], cultures)\r\n cultures = cultures.split(\"\\n\")\r\n for culture in cultures:\r\n if len(culture) > 0 and culture not in valid_cultures:\r\n valid_cultures.append(culture)\r\n #cultures = cultures.partition(\"\")[1]\r\n #cultures = regex.sub(br'\\{[^()]*+(?:(?R)[^()]*)*+\\}', '', cultures)\r\n #print(cultures)\r\n culture_file_data.close()\r\n print(\"Valid cultures = \" + str(valid_cultures))\r\n return valid_cultures\r\n \r\n def get_valid_religions(self, religions_directory):\r\n replace_values = {\" \": \"\", \"{\": \"\", \"}\": \"\", \"=\": \"\",\r\n \"\\r\": \"\",\r\n \"\\t\": \"\",\r\n }\r\n replace_values = dict((re.escape(k), v) for k, v in replace_values.items())\r\n pattern = re.compile(\"|\".join(replace_values.keys()))\r\n valid_religions = []\r\n for religions_file in glob.glob(religions_directory+\"*.txt\"):\r\n with codecs.open(religions_file, \"r+\", \"utf-8-sig\") as religion_file_data:\r\n data = religion_file_data.read()\r\n # Find the bit that says religions and cut off everything before\r\n religions = re.sub(\" .*\", \"\", data)\r\n religions = re.sub(\" .*\", \"\", religions)\r\n religions = re.sub(\"\\\\{\\\\{[^}]*\\\\}\\\\}\", \"\", religions, re.MULTILINE | re.DOTALL)\r\n pattern = re.compile(\"|\".join(replace_values.keys()))\r\n religions = pattern.sub(lambda m: replace_values[re.escape(m.group(0))], religions)\r\n religions = re.sub(\"#.*\", \"\", religions)\r\n religions = religions.split(\"\\n\")\r\n for religion in religions:\r\n if len(religion) > 0 and religion not in valid_religions:\r\n valid_religions.append(religion)\r\n #religions = religions.partition(\"\")[1]\r\n #religions = regex.sub(br'\\{[^()]*+(?:(?R)[^()]*)*+\\}', '', religions)\r\n #print(religions)\r\n religion_file_data.close()\r\n print(\"Valid religions = \" + str(valid_religions))\r\n return valid_religions\r\n \r\n def get_valid_trade_goods(self, trade_goods_directory):\r\n replace_values = {\" \": \"\", \"{\": \"\", \"}\": \"\", \"=\": \"\",\r\n \"\\r\": \"\",\r\n \"\\t\": \"\",\r\n }\r\n replace_values = dict((re.escape(k), v) for k, v in replace_values.items())\r\n pattern = re.compile(\"|\".join(replace_values.keys()))\r\n valid_trade_goods = []\r\n for trade_goods_file in glob.glob(trade_goods_directory+\"*.txt\"):\r\n with codecs.open(trade_goods_file, \"r+\", \"utf-8-sig\") as trade_good_file_data:\r\n data = trade_good_file_data.read()\r\n # Find the bit that says trade_goods and cut off everything before\r\n trade_goods = re.sub(\" .*\", \"\", data)\r\n trade_goods = re.sub(\"\\t.*\", \"\", trade_goods)\r\n trade_goods = re.sub(\"\\\\{\\\\{[^}]*\\\\}\\\\}\", \"\", trade_goods, re.MULTILINE | re.DOTALL)\r\n pattern = re.compile(\"|\".join(replace_values.keys()))\r\n trade_goods = pattern.sub(lambda m: replace_values[re.escape(m.group(0))], trade_goods)\r\n trade_goods = re.sub(\"#.*\", \"\", trade_goods)\r\n trade_goods = trade_goods.split(\"\\n\")\r\n for trade_good in trade_goods:\r\n if len(trade_good) > 0 and trade_good not in valid_trade_goods:\r\n valid_trade_goods.append(trade_good)\r\n #trade_goods = trade_goods.partition(\"\")[1]\r\n #trade_goods = regex.sub(br'\\{[^()]*+(?:(?R)[^()]*)*+\\}', '', trade_goods)\r\n #print(trade_goods)\r\n trade_good_file_data.close()\r\n print(\"Valid tradegoods = \" + str(valid_trade_goods))\r\n return valid_trade_goods\r\n\r\n# MapHandler deals with the map image files\r\nclass MapHandler:\r\n def __init__(self, img_land, img_sea):\r\n # Images for the land and sea maps. These contain the colours of provinces and searegions\r\n self.img_land = Image.open(img_land,\"r\") # Should be PNG to save space\r\n self.img_sea = Image.open(img_sea,\"r\") # Should be PNG to save space\r\n\r\n # Create lists of colours in each file, which we can use to iterate through all the provinces/searegions\r\n # Each colour is effectively a unique ID for the province/searegion\r\n # Maxcolors set to 100,000 to exceed the default maximum, as we have over 10,000 provinces\r\n self.land_colours = self.img_land.getcolors(maxcolors=100000)\r\n self.sea_colours = self.img_sea.getcolors(maxcolors=100000)\r\n\r\n # List of colours in the land provinces file\r\n # Each item is a tuple of the RGB value of a land province's colour\r\n self.land_provinces = []\r\n # Add colours to the list. Index [1] of each is a tuple with the RGB value\r\n for colour in self.land_colours:\r\n self.land_provinces.append(colour[1])\r\n # Ignore white, which we use in the input images to delineate sea and land.\r\n self.land_provinces.remove((255,255,255))\r\n\r\n # List of colours in the sea provinces file\r\n # See above land provinces\r\n self.sea_provinces = []\r\n for colours in self.sea_provinces:\r\n self.sea_provinces.append(colour[1])\r\n\r\n # Get the total number of provinces\r\n # Why do we need this again?\r\n self.total_provinces = len(self.sea_provinces) + len(self.land_provinces)\r\n\r\n # Read back the lengths to the user\r\n print(str(len(self.sea_provinces)) + \" sea provinces found and \" +\r\n str(len(self.land_provinces)) + \" land provinces found.\")\r\n print(\"Total provinces in land and sea: \" + str(self.total_provinces))\r\n\r\n # Get the definition CSV file. If there isn't one, one will be generated on the first time it's loaded.\r\n try:\r\n self.definition_csv = open(\"definition.csv\",\"r\")\r\n except:\r\n self.definition_csv = False\r\n \r\n # Check for a local setup file - maybe not necessary if we're just pulling from the remote\r\n try:\r\n self.province_setup_csv = open(\"province_setup.csv\",'r',encoding='UTF-8')\r\n except:\r\n self.province_setup_csv = False\r\n\r\n\r\n \r\nclass RemoteSheet:\r\n # https://youtu.be/cnPlKLEGR7E?t=346\r\n def __init__(self, sheet_name, credentials, column_indices):\r\n self.credentials = credentials # Credentials are loaded from a separate file, selected when the user is prompted on startup\r\n \r\n client = gspread.service_account(filename=credentials)\r\n \r\n self.sheet = client.open(sheet_name).sheet1 # Sheet name is passed from config by the parent class\r\n\r\n self.column_indices = column_indices\r\n\r\n def write_to_sheet(self, provid, column, data):\r\n # Data is a row from the database\r\n rownum = int(provid) + 1\r\n column = column + 1 # Google sheets counts from 1. Great job, Google sheets. \r\n # colnum = self.column_indices[column]\r\n self.sheet.update_cell(rownum, column, str(data))\r\n \r\n def get_province_data(self, provid):\r\n # THOUGHTS:\r\n # Is it best to read straight from the sheet, and not rely on anything local at all?\r\n # We could just read one row at a time when we click on the corresponding province\r\n #\r\n # Process is as follows:\r\n # 1) Get the PROVID locally, from the save file using the RGB comparison\r\n # 2) Lookup the PROVID on the spreadsheet\r\n # 3) Return the data from that PROVID as a tuple/list\r\n province_data = self.sheet.row_values(int(provid) + 1) # Add one, as the first row is used by columns\r\n return province_data\r\n\r\n# Database connection class\r\n# Only needed for the definition reading so we can interpret the province map\r\n# As we will be reading directly from the remote sheet at all times\r\nclass database_connection(object):\r\n def __init__(self, column_indices, db_path):\r\n self.connection = sqlite3.connect(db_path)\r\n self.cursor = self.connection.cursor()\r\n \r\n self.load_db()\r\n\r\n self.checksum_query = \"\"\"INSERT OR IGNORE INTO province_checksums(province_checksum)\r\n VALUES (:checksum)\"\"\"\r\n\r\n self.definition_query = \"\"\"INSERT OR IGNORE INTO definition(\r\n Province_id,\r\n R,\r\n G,\r\n B,\r\n Name,\r\n x\r\n )\r\n VALUES (\r\n ?,\r\n ?,\r\n ?,\r\n ?,\r\n ?,\r\n ?\r\n )\"\"\"\r\n\r\n # A list to hold new provinces with checksums not existing in the current save\r\n self.new_sea_provinces = []\r\n self.new_land_provinces = []\r\n\r\n # A list of free province IDs which can be re-used when provinces are deleted\r\n self.free_ids = []\r\n\r\n self.column_indices = column_indices\r\n\r\n def db_fetchone(self):\r\n return self.cursor.fetchone()\r\n\r\n def setup_update_query(self, row):\r\n # Instead of defining the columns individually, just get their column index by the name\r\n for item in row:\r\n if item == \"\":\r\n item = \"0\"\r\n query = \"UPDATE province_setup SET Culture ='\" + row[self.column_indices(\"Culture\")]+\"',\" \\\r\n \" Religion ='\" + row[self.column_indices(\"Religion\")]+\"',\" \\\r\n \" TradeGoods ='\" + row[self.column_indices(\"Culture\")]+\"',\" \\\r\n \" Citizens =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" Freedmen =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" LowerStrata =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" MiddleStrata =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" Proletariat =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" Slaves =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" Tribesmen =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" UpperStrata =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" Civilization =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" SettlementRank =\" + row[self.column_indices(\"Culture\")]+\",\" \\\r\n \" NameRef ='\" + row[self.column_indices(\"Culture\")].replace(\"'\",\"’\")+\"',\" \\\r\n \" AraRef ='\" + row[self.column_indices(\"Culture\")].replace(\"'\",\"’\")+\"',\" \\\r\n \" Terrain ='\" + row[self.column_indices(\"Culture\")].replace(\"'\", \"’\") + \"' WHERE ProvID = \" + row[provid_col]\r\n return self.cursor.execute(query, \"\")\r\n\r\n def db_fetchall(self):\r\n return self.cursor.fetchall()\r\n\r\n def query(self,query,params):\r\n # For functions that don't need to create any new fields\r\n return self.cursor.execute(query,params)\r\n\r\n def commit_many(self,query,params):\r\n # For functions that create many new fields\r\n self.cursor.executemany(query, params)\r\n self.connection.commit()\r\n\r\n def db_commit(self,query):\r\n # For functions that need to create a new field\r\n self.cursor.execute(query)\r\n self.connection.commit()\r\n\r\n def load_db(self):\r\n # Populate a database with default province IDs\r\n db_schema = \"province_setup_schema.sql\"\r\n #Read the schema file and output it as a string\r\n all_schema_contents = str(open(db_schema).read())\r\n\r\n #Separate the schema contents into separate commands by semicolons\r\n individual_schema_contents = all_schema_contents.split(';')\r\n #Get rid of newline and tab indicators from the raw string\r\n for schema_command in individual_schema_contents:\r\n for ch in [\"\\n\", \"\\t\"]:\r\n schema_command.replace(ch, \"\")\r\n for schema_command in individual_schema_contents:\r\n self.db_commit(schema_command + \";\")\r\n\r\n def province_checksum(self,province):\r\n R = province[0]\r\n G = province[1]*1000\r\n B = province[2]*1000000\r\n province_checksum = R + G + B\r\n return str(province_checksum)\r\n\r\n\r\n def clear_old_provinces(self, provinces):\r\n RGB_query = \"SELECT R, G, B FROM definition;\"\r\n self.query(RGB_query, \"\")\r\n RGBs = self.db_fetchall()\r\n for RGB in RGBs:\r\n if RGB not in provinces:\r\n params = RGB\r\n search_query = \"SELECT Province_ID FROM definition WHERE R=? AND G=? AND B=?;\"\r\n self.query(search_query,params)\r\n province_id = str(self.db_fetchone()[0])\r\n print(\"Province \" + province_id + \" no longer exists\")\r\n self.free_ids.append(province_id)\r\n deletion_params = (province_id,)\r\n definition_deletion = \"DELETE FROM definition WHERE Province_ID = ?;\"\r\n self.query(definition_deletion,deletion_params)\r\n setup_deletion = \"DELETE FROM province_setup WHERE ProvID = ?;\"\r\n self.query(setup_deletion,deletion_params)\r\n # Also delete checksum from checksums table\r\n checksum_params = (self.province_checksum(RGB),)\r\n checksum_deletion = \"DELETE FROM province_checksums WHERE province_checksum = ?;\"\r\n self.query(checksum_deletion,checksum_params)\r\n if len(self.free_ids) > 0:\r\n self.db_commit(\"\")\r\n # Free up this RGB's Province ID\r\n # Add it to a list of free Province IDs to be assigned to new provinces\r\n # And make sure that other province IDs above it are shifted down\r\n\r\n def checksum_search(self, province):\r\n checksum = self.province_checksum(province)\r\n search_checksum_query = \"SELECT * FROM province_checksums WHERE province_checksum = \" + checksum + \";\"\r\n self.query(search_checksum_query,\"\")\r\n if self.db_fetchone() != None:\r\n # print(\"Checksum \" + checksum + \" verified\")\r\n return True\r\n else:\r\n # print(\"No previously existing province with checksum \" + checksum)\r\n return False \r\n\r\n def submit_province(self, province, provtype, new_province,i):\r\n R = str(province[0])\r\n G = str(province[1])\r\n B = str(province[2])\r\n # If the checksum does not exist, create the province\r\n if self.checksum_search(province) == False:\r\n if new_province == True:\r\n definition_params = (str(i), R, G, B, provtype+str(i),\"x\")\r\n definition_query = self.definition_query\r\n self.query(definition_query, definition_params)\r\n checksum_params = {\"checksum\":self.province_checksum(province)}\r\n checksum_query = self.checksum_query\r\n self.query(checksum_query, checksum_params)\r\n # print(\"Created definition for province \" + str(i))\r\n return True\r\n elif new_province == False:\r\n if provtype == \"seaprov\":\r\n self.new_sea_provinces.append(province)\r\n return False\r\n elif provtype == \"landprov\":\r\n self.new_land_provinces.append(province)\r\n return False\r\n\r\n def import_definition(self):\r\n # Import definition\r\n if definition_csv:\r\n rows = list(csv.reader(definition_csv, delimiter=\";\"))\r\n for i, row in enumerate(rows):\r\n rows[i] = list(row)\r\n for row in rows:\r\n # print(row)\r\n self.query(self.definition_query, (row[0], row[1], row[2], row[3], row[4], row[5]))\r\n self.connection.commit()\r\n\r\n def fill_definition(self):\r\n self.clear_old_provinces(land_sea_provinces)\r\n self.compensate_for_deleted_provinces()\r\n self.query(\"SELECT max(rowid) from definition;\",\"\")\r\n num_defined_provinces = self.db_fetchone()[0]\r\n if num_defined_provinces != None:\r\n i = num_defined_provinces + 1\r\n # print(\"Found \" + str(i - 1) + \" provinces already defined.\")\r\n else:\r\n i = 1\r\n while i < total_provinces:\r\n try:\r\n for province in land_provinces:\r\n if self.submit_province(province, \"landprov\", True,i) == True:\r\n i = i+1\r\n for province in sea_provinces:\r\n if self.submit_province(province, \"seaprov\", True,i) == True:\r\n i = i+1\r\n for province in self.new_land_provinces:\r\n self.submit_province(province, \"landprov\", False,i)\r\n i = i+1\r\n for province in self.new_sea_provinces:\r\n self.submit_province(province, \"seaprov\", False,i)\r\n i = i+1\r\n finally:\r\n self.db_commit(\"\")\r\n break\r\n\r\n def compensate_for_deleted_provinces(self):\r\n for free_id in self.free_ids:\r\n params = (free_id,)\r\n definition_query = \"UPDATE definition SET Province_id = (Province_id - 1) WHERE Province_id > ?;\"\r\n self.query(definition_query,params)\r\n province_setup_query = \"UPDATE province_setup SET ProvID = (ProvID - 1) WHERE ProvID > ?;\"\r\n self.query(province_setup_query,params)\r\n self.db_commit(\"\")\r\n\r\n def export_to_csv(self): # Rework - needs to export to CSV from sheet\r\n print(\"Exporting to CSV\")\r\n select_setup = \"SELECT * FROM province_setup;\"\r\n self.query(select_setup, \"\")\r\n with open(\"pyParaMapEditor_OUTPUT.csv\",\"w\", newline=\"\",encoding=\"UTF-8\") as csv_file:\r\n csv_writer = csv.writer(csv_file)\r\n csv_writer.writerow([i[0] for i in self.cursor.description])\r\n for row in self.cursor:\r\n # print(row)\r\n csv_writer.writerow(row)\r\n\r\nclass EditorGUI():\r\n def __init__(self, column_indices, remote_sheet, db, valid_cultures, valid_religions, valid_trade_goods, config):\r\n self.root = Tk()\r\n\r\n self.im_selector = Image.open(\"selector.gif\", \"r\")\r\n\r\n self.config = config # In order to get column identities\r\n self.minority_pop_start_column = self.config['MinorityPopDef']['StartColumn'] # Column where the first minority pop is defined\r\n self.minority_pop_columns = self.config['MinorityPopColumns']\r\n self.columns_per_minority = len(self.config.items('MinorityPopColumns')) # Number of columns to iterate per minority pop\r\n self.valid_pop_types = self.config['ValidPopTypes']\r\n\r\n self.remote_sheet = remote_sheet\r\n self.db = db # Reference to database for picking out province ID from RGB values on the image\r\n\r\n # Take the column names and use them for editable fields on the GUI. The names are stored as keys in the indices dict\r\n # Unpack the dict into a list literal\r\n self.data_fields = [*column_indices]\r\n\r\n # Get lists of valid cultures, religions and tradegoods in the mod so we can refuse to submit an invalid culture\r\n self.valid_cultures = valid_cultures\r\n self.valid_religions = valid_religions\r\n self.valid_trade_goods = valid_trade_goods\r\n\r\n def start_main_gui(self): # Called by main Editor class when it's time to call up the main view\r\n self.create_mapview()\r\n self.create_minorities_button()\r\n self.frame.pack(fill=BOTH,expand=1)\r\n self.add_map_canvas()\r\n\r\n self.list_of_entries = self.create_fields()\r\n self.create_minority_pop_fields()\r\n\r\n # Empty value for comparing when field values are changed\r\n self.entry_value = None\r\n\r\n # Empty value for comparing the previous selected province\r\n self.prevprovince = None\r\n\r\n self.selector_img = ImageTk.PhotoImage(Image.open(\"selector.gif\").convert(\"RGBA\"))\r\n\r\n #mouseclick event definitions\r\n self.canvas.bind_all(\"\", self._on_mousewheel_dn)\r\n self.canvas.bind_all(\"\", self._on_mousewheel_up)\r\n self.canvas.bind_all(\"\",self.scan)\r\n self.canvas.bind_all(\"\", lambda event, fieldvar=self.list_of_entries:self.submit_entry(event, fieldvar))\r\n self.canvas.bind(\"\", self.getprovince)\r\n # self.canvas.bind(\"\", self.zoom_view) # TODO\r\n\r\n self.root.mainloop()\r\n \r\n def prompt_file_select(self): # Select the local database, used for loading and saving map data. Data will also be saved to the remote sheet if there is one\r\n # We have to initialise Tkinter so we can create GUI, but we'll just use the default file select function\r\n file_select = Tk()\r\n # Hide the default Tkinter window, as we'll be popping up a file select dialog\r\n file_select.withdraw()\r\n file_name = filedialog.asksaveasfilename( # Use saveas as this allows the user to create a new file if there isn't one, or to load & overwrite an existing one\r\n initialdir = \"./\",\r\n title = \"Select or create map editor save file\",\r\n filetypes = ((\"all files\",\"\"),(\"all files\",\"*\"))\r\n )\r\n # Update the class attribute to select the database\r\n database_file = file_name\r\n file_select.destroy()\r\n return database_file # Pass this back to the main Editor class\r\n\r\n def prompt_remote_credentials(self): # Get the JSON credentials for connecting to the Google Sheet\r\n # We have to initialise Tkinter so we can create GUI, but we'll just use the default file select function\r\n credentials_select = Tk()\r\n credentials_select.withdraw()\r\n # Check if the user wants to connect to the remote sheet\r\n file_name = filedialog.askopenfilename(\r\n initialdir = \"./\",\r\n title = \"Select credentials for remote editing. Cancel to edit remotely\",\r\n filetypes = ((\"json files\",\"*.json\"),(\"json files\",\"*.json\"))\r\n )\r\n # Update the class attribute if a file was selected\r\n if str(file_name) != \"\":\r\n self.remote_credentials = file_name\r\n credentials_select.destroy()\r\n return(self.remote_credentials) # Flag that we are indeed using remote credentials\r\n\r\n def create_mapview(self):\r\n #setting up a tkinter canvas with scrollbars\r\n self.frame = Frame(self.root, bd=2, relief=SUNKEN)\r\n self.frame.grid_rowconfigure(0, weight=1)\r\n self.frame.grid_columnconfigure(0, weight=1)\r\n xscroll = Scrollbar(self.frame, orient=HORIZONTAL)\r\n xscroll.grid(row=1, column=0, sticky=E+W)\r\n yscroll = Scrollbar(self.frame)\r\n yscroll.grid(row=0, column=1, sticky=N+S)\r\n self.canvas = Canvas(self.frame, bd=0, xscrollcommand=xscroll.set, yscrollcommand=yscroll.set)\r\n self.canvas.grid(row=0, column=0, sticky=N+S+E+W)\r\n xscroll.config(command=self.canvas.xview)\r\n yscroll.config(command=self.canvas.yview)\r\n\r\n self.editorframe = Frame(self.frame, bd=2, relief=SUNKEN, padx=30) # Main frame for basisc province data on the right of the map\r\n self.editorframe.grid(row=0, column=2)\r\n self.minoritiesframe = Frame (self.frame, bd=2, relief=SUNKEN, pady=30)# Extra frame for minority pop fields at the bottom\r\n self.minoritiesframe.grid(row=2, column = 0) # Beneath the map and X-axis scrollbar\r\n\r\n self.event2canvas = lambda e, c: (c.canvasx(e.x), c.canvasy(e.y))\r\n\r\n self.mousewheel = 0 # Mousewheel press status\r\n\r\n def export_to_csv(self):\r\n pass\r\n # Get data from Google Sheets API\r\n\r\n def create_minority_pop_fields(self):\r\n self.minority_pop_entries = []\r\n # Create 7 columns of 4 entry fields to populate each with the 4 minority pop columns\r\n entry_fields = self.config.items('MinorityPopColumns')\r\n num_minority_pops = self.config['MinorityPopDef']['NumMinorityPops']\r\n i = 1 # Value for column placement in the GUI\r\n ref_column = 0\r\n while i < ( int(num_minority_pops) * 2 ) + 1: # Double these to add space for labels between each column, and add one for space for the first label\r\n for field, value in entry_fields:\r\n self.make_minority_entry(self.minoritiesframe,field,value,i,str(int(value)+ref_column))\r\n ref_column = ref_column + self.columns_per_minority\r\n i = i+2\r\n \r\n def make_minority_entry(self, parent, caption, row_num, col_num, ref_column, **options):\r\n Label(parent, text=caption, pady=10).grid(row = row_num, column = str(int(col_num)-1))\r\n entry = Entry(parent, width=10, font=(\"Arial 18\"), **options)\r\n entry.category = \"minority_province_data\"\r\n entry.row = row_num\r\n entry.column = ref_column # Get the number of the column to search from in the remote sheet\r\n entry.name = caption\r\n entry.grid(row = row_num, column = col_num)\r\n self.minority_pop_entries.append(entry)\r\n return entry\r\n\r\n def get_minority_pop_entry_column(self, entry):\r\n column = int(self.minority_pop_start_column) + int(entry.column)\r\n return column\r\n \r\n def refresh_minority_pop_entry(self, entry):\r\n entry.config(state=\"normal\")\r\n entry.config({\"background\":\"yellow\"})\r\n entry.delete(0,999) # Clear the contents of the entry widget\r\n try:\r\n entry.insert(0,self.province_data[self.get_minority_pop_entry_column(entry)]) # Get the cell data from the remote sheet\r\n except Exception as ex: # If there is no data\r\n pass\r\n entry.config({\"background\":\"white\"}) # Reset colour as it may have been yellow or green when edited\r\n \r\n def create_fields(self):\r\n i = 1\r\n list_of_entries = []\r\n for field in self.data_fields:\r\n setting = \"normal\"\r\n if field == \"PROVID\":\r\n setting = \"readonly\"\r\n entry = self.make_entry(self.editorframe, field, i, state=setting)\r\n entry.bind(\"\", self.entry_changing)\r\n entry.bind(\"\", self.entry_changed)\r\n list_of_entries.append(entry) # Appends in column order\r\n i += 1\r\n return list_of_entries\r\n \r\n # These two functions handle how a field shows that its value has been edited but not submitted, or edited and submitted\r\n def entry_changing(self, event):\r\n value = event.widget.get()\r\n self.entry_value = value\r\n\r\n def entry_changed(self, event):\r\n value = event.widget.get()\r\n if value != self.entry_value:\r\n event.widget.config({\"background\":\"yellow\"})\r\n \r\n def create_minorities_button(self):\r\n minorities_button = Button(self.frame, command= lambda: self.view_minority_pops(), text=\"View minority pops\", bd=4, height=2, padx=2, bg=\"deep sky blue\")\r\n minorities_button.grid(row=1, column=2)\r\n\r\n # If name changes, it also needs to change in the definition.csv\r\n def change_name(self,submission):\r\n # definition.csv puts semicolons between spaces in names\r\n csv_submission = str(submission).replace(\" \",\";\")\r\n extra_query = \"UPDATE definition SET 'Name'='\" + csv_submission + \"' WHERE Province_id = \"+ self.list_of_entries[0].get() +\";\"\r\n self.db.db_commit(extra_query)\r\n print(\"Name changed in definition\")\r\n\r\n def make_entry(self, parent, caption, rownum, **options):\r\n Label(parent, text=caption, pady=10).grid(row = rownum, column = 0)\r\n entry = Entry(parent, width=16, font=(\"Arial 18\"), **options)\r\n entry.category = \"main_province_data\"\r\n entry.name = str(caption)\r\n entry.grid(row = rownum, column = 1)\r\n return entry\r\n\r\n def province_submission_warning(self, messagetitle, messagetext):\r\n messagebox.showwarning(messagetitle, messagetext)\r\n print(messagetitle + \" \" + messagetext)\r\n\r\n\r\n def validate_culture(self, event, submission):\r\n if submission not in self.valid_cultures:\r\n event.widget.config({\"background\":\"orange\"})\r\n self.province_submission_warning(\"Unrecognised culture\", \"The culture '\" + submission + \"' which you entered is invalid. Check for typos, or add the culture to the mod files before proceeding.\")\r\n return False\r\n \r\n def validate_religion(self, event, submission):\r\n if submission not in self.valid_religions:\r\n event.widget.config({\"background\":\"orange\"})\r\n self.province_submission_warning(\"Unrecognised religion\", \"The religion '\" + submission + \"' which you entered is invalid. Check for typos, or add the religion to the mod files before proceeding.\")\r\n return False\r\n\r\n def validate_trade_goods(self, event, submission):\r\n if submission not in self.valid_trade_goods:\r\n event.widget.config({\"background\":\"orange\"})\r\n self.province_submission_warning(\"Unrecognised tradegood\", \"The tradegood '\" + submission + \"' which you entered is invalid. Check for typos, or add the tradegood to the mod files before proceeding.\")\r\n return False\r\n \r\n def validate_numeric_only(self, event, submission):\r\n if not submission.isnumeric():\r\n event.widget.config({\"background\":\"orange\"})\r\n self.province_submission_warning(\"Value must be a number\", \"The value for this field must always be a number\")\r\n return False\r\n\r\n def validate_pop_type(self, event, submission):\r\n if submission not in self.valid_pop_types:\r\n event.widget.config({\"background\":\"orange\"})\r\n self.province_submission_warning(\"Invalid pop type\", \"The pop type '\" + submission + \"' which you entered is invalid. Check for typos.\")\r\n return False\r\n\r\n def submit_main_province_data(self, event, submission, fields):\r\n self.widget_id = fields.index(event.widget) # Get column number\r\n print(\"Attempting to submit value \" + submission + \" in field \" + event.widget.name)\r\n if event.widget.name == \"culture\":\r\n if self.validate_culture(event, submission) == False:\r\n return False\r\n elif event.widget.name == \"religion\":\r\n if self.validate_religion(event, submission) == False:\r\n return False\r\n elif event.widget.name == \"tradegoods\":\r\n if self.validate_trade_goods(event, submission) == False:\r\n return False\r\n elif event.widget.name == \"nameref\":\r\n self.change_name(submission)\r\n elif event.widget.name in {k.lower(): v for k, v in self.config.items('NumericColumns')}: # All others must only accept numeric data\r\n if self.validate_numeric_only(event, submission) == False:\r\n return False\r\n self.remote_sheet.write_to_sheet(provid = self.list_of_entries[0].get(), column = self.widget_id, data = submission)\r\n print(\"Submission successful\")\r\n\r\n def submit_minority_province_data(self, event, submission):\r\n print(\"Attempting to submit value \" + submission + \" in minority pop field \" + event.widget.name)\r\n if event.widget.name == \"culture\":\r\n if self.validate_culture(event, submission) == False:\r\n return False\r\n elif event.widget.name == \"religion\":\r\n if self.validate_religion(event, submission) == False:\r\n return False\r\n elif event.widget.name == \"size\":\r\n if self.validate_numeric_only(event, submission) == False:\r\n return False\r\n elif event.widget.name == \"poptype\":\r\n if self.validate_pop_type(event, submission) == False:\r\n return False\r\n self.remote_sheet.write_to_sheet(provid = self.list_of_entries[0].get(), column = self.get_minority_pop_entry_column(event.widget), data = submission)\r\n print(\"Submission successful\")\r\n \r\n def submit_entry(self, event, fields): # Refactored - write to remote only\r\n try:\r\n submission = event.widget.get()\r\n # Now find the field that corresponds to the widget\r\n # If event widget is in main province data\r\n if event.widget.category == \"main_province_data\":\r\n if self.submit_main_province_data(event, submission, fields) == False:\r\n return False # The submit data function will return False if the submission was erroneous, do not continue.\r\n # If the event widget is in minority pops\r\n elif event.widget.category == \"minority_province_data\":\r\n if self.submit_minority_province_data(event, submission) == False:\r\n return False # The submit data function will return False if the submission was erroneous, do not continue.\r\n # Send the submission to the database at the correct PROVID (list of entries 0) and column (widget_id)\r\n event.widget.config({\"background\":\"lime\"})\r\n except Exception as ex:\r\n print(\"Submission failed.\")\r\n print(ex)\r\n event.widget.config({\"background\":\"red\"})\r\n \r\n def add_map_canvas(self):\r\n self.canvas_img = ImageTk.PhotoImage(file='main_input.png', size=(1024,768))\r\n pxdata = Image.open('main_input.png','r')\r\n self.px = pxdata.load()\r\n self.canvas.create_image(0, 0, image=self.canvas_img, anchor=\"nw\")\r\n self.canvas.config(scrollregion=self.canvas.bbox(ALL))\r\n\r\n #function to be called when mouse is clicked\r\n def getprovince(self,event):\r\n #outputting x and y coords to console\r\n cx, cy = self.event2canvas(event, self.canvas)\r\n print (\"click at (%d, %d) / (%d, %d)\" % (event.x,event.y,cx,cy))\r\n colour = self.px[cx,cy]\r\n params = colour # Pass the RGB colour as a database query to find the PROVID\r\n self.refresh_selector_position(cx, cy) # Redraw selector and canvas, prevents lag\r\n province = self.lookup_province_rgb(params)\r\n # Do not refresh / overwrite the contents of data entry fields if the province is already selected\r\n if province != self.prevprovince:\r\n self.refresh_province_data_fields(province)\r\n \r\n def refresh_selector_position(self, cx, cy):\r\n # Clear the canvas and draw a selector where you last clicked\r\n self.canvas.delete(\"all\")\r\n self.canvas.create_image(0, 0, image=self.canvas_img, anchor=\"nw\")\r\n self.canvas.create_image((cx, cy), image=self.selector_img)\r\n \r\n def lookup_province_rgb(self, params):# Look in definition first to get the province ID from RGB\r\n search_query = \"SELECT Province_ID FROM definition WHERE R=? AND G=? AND B=?;\"\r\n self.db.query(search_query,params)\r\n province = str(self.db.db_fetchone()[0])\r\n return province\r\n \r\n def refresh_province_data_fields(self,province):\r\n self.prevprovince = province\r\n print(\"province ID is \" + province)\r\n # Below three lines are obsolete, as we will read directly from the remote sheet\r\n # province_data_query = \"SELECT * FROM province_setup WHERE ProvID = \" + province + \";\"\r\n # self.db.query(province_data_query, \"\")\r\n # province_data = self.db.db_fetchone()\r\n # TODO\r\n self.province_data = self.remote_sheet.get_province_data(province) # Get the row's data for this province from the remote spreadsheet\r\n for index, entry in enumerate(self.list_of_entries): # Load the new data for the relevant province from the province data sheet\r\n self.refresh_entry(index, entry)\r\n for entry in self.minority_pop_entries:\r\n self.refresh_minority_pop_entry(entry)\r\n\r\n def refresh_entry(self, index, entry):\r\n entry.config(state=\"normal\")\r\n entry.delete(0,999) # Clear the contents of the entry widget\r\n try:\r\n entry.insert(0,self.province_data[index]) # Get the cell data from the remote sheet\r\n except: # If there is no data\r\n pass\r\n entry.config({\"background\":\"white\"}) # Reset colour as it may have been yellow or green when edited\r\n if index == 0:\r\n entry.config(state=\"readonly\")\r\n \r\n def _on_mousewheel_dn(self, event):\r\n self.mousewheel = 1\r\n self.canvas.scan_mark(event.x, event.y)\r\n\r\n def _on_mousewheel_up(self, event):\r\n self.mousewheel = 0\r\n\r\n def scan(self, event):\r\n if self.mousewheel == 1:\r\n self.canvas.scan_dragto(event.x,event.y, gain = 1)\r\n\r\n\r\neditor = Editor()","sub_path":"map_data/pyParaMapEditor_imp19c/PyParaMapEditor_imp19c_v2.py","file_name":"PyParaMapEditor_imp19c_v2.py","file_ext":"py","file_size_in_byte":41933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"554704881","text":"from flask import Flask, render_template, make_response\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.linear_model import LinearRegression\nimport io\napp = Flask(__name__,static_folder='public')\n\n@app.route('/')\ndef index():\n dataset =pd.read_csv('Salary_Data.csv') #use the correct data set\n x=dataset.iloc[:,:-1].values\n y=dataset.iloc[:,1].values\n X_train, X_test, Y_train, Y_test=train_test_split(x,y,test_size=1/3,random_state=0)\n regressor = LinearRegression()\n regressor.fit(X_train,Y_train)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.scatter(X_train,Y_train,color='red')\n axis.plot(X_train,regressor.predict(X_train),color='blue')\n axis.set_title('asd')\n canvas = FigureCanvas(fig)\n output = io.BytesIO()\n canvas.print_png(output)\n response = make_response(output.getvalue())\n response.mimetype = 'image/png'\n return response\n\n@app.route('/home')\ndef home():\n return render_template('home.html')\n@app.route('/show')\ndef showGraph():\n return render_template('show.html')\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"aiml/webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"4946948","text":"def fight(text):\n \"\"\"\n >>> fight('')\n \"Let's fight again!\"\n >>> fight('abracadabra')\n 'army3 wins!'\n >>> fight('wmu')\n \"Let's fight again!\"\n \"\"\"\n\n army = {\n 'army1': {'w': 4, 'p': 3, 'b': 2, 's': 1},\n 'army2': {'m': 4, 'q': 3, 'd': 2, 'z': 1},\n 'army3': {'a': 4, 'e': 3, 'o': 2, 'u': 1, 'i': 1}\n }\n power = {}\n\n for name_army in army:\n power[name_army] = 0\n for char in text:\n power[name_army] += army[name_army].get(char, 0)\n max_power_befor = max(power.values())\n max_power_army_befor = list(power.keys())[list(power.values()).index(max_power_befor)]\n power_compare = {}\n power_compare[max_power_army_befor] = max_power_befor\n del power[max_power_army_befor]\n max_power_after = max(power.values())\n max_power_army_after = list(power.keys())[list(power.values()).index(max_power_after)]\n power_compare[max_power_army_after] = max_power_after\n if power_compare[max_power_army_befor] != power_compare[max_power_army_after]:\n return f\"{max_power_army_befor} wins!\"\n else:\n return \"Let's fight again!\"\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"Alphabet war.py","file_name":"Alphabet war.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"102384082","text":"import telnetlib\nimport re\nimport time\n\n\ndef extractip(ip, port, interface):\n import time\n import telnetlib\n import re\n ipRegexPatt = re.compile('(([2][5][0-5]\\.)|([2][0-4][0-9]\\.)|([0-1]?[0-9]?[0-9]\\.)){3}'\n + '(([2][5][0-5])|([2][0-4][0-9])|([0-1]?[0-9]?[0-9]))')\n\n telNetSession = telnetlib.Telnet()\n\n telNetSession.open(ip, port)\n\n time.sleep(2)\n\n telNetSession.write('enable\\n')\n time.sleep(2)\n\n telNetSession.write('show ip int brief ' + interface + '\\n')\n\n time.sleep(2)\n\n ResultString = telNetSession.read_very_eager()\n telNetSession.close()\n\n result = ipRegexPatt.search(ResultString)\n return result.group()\n\n\nclass IpExtractorClass:\n pass\n","sub_path":"Scratchfiles/IpExtractor.py","file_name":"IpExtractor.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"111986530","text":"#\n# === Introduction ===\n#\n# In this problem, you will again build a planner that helps a robot\n# find the best path through a warehouse filled with boxes\n# that it has to pick up and deliver to a dropzone. Unlike Part A,\n# however, in this problem the robot is moving in a continuous world\n# (albeit in discrete time steps) and has constraints on the amount\n# it can turn its wheels in a given time step.\n# \n# Your file must be called `partB.py` and must have a class\n# called `DeliveryPlanner`.\n# This class must have an `__init__` function that takes five \n# arguments: `self`, `warehouse`, `todo`, `max_distance`, and\n# `max_steering`.\n# The class must also have a function called `plan_delivery` that \n# takes a single argument, `self`.\n#\n# === Input Specifications ===\n# \n# `warehouse` will be a list of m strings, each with n characters,\n# corresponding to the layout of the warehouse. The warehouse is an\n# m x n grid. warehouse[i][j] corresponds to the spot in the ith row\n# and jth column of the warehouse, where the 0th row is the northern\n# end of the warehouse and the 0th column is the western end.\n#\n# The characters in each string will be one of the following:\n#\n# '.' (period) : traversable space.\n# '#' (hash) : a wall. If the robot contacts a wall space, it will crash.\n# '@' (dropzone): the space where all boxes must be delivered. The dropzone may be traversed like \n# a '.' space.\n#\n# Each space is a 1 x 1 block. The upper-left corner of space warehouse[i][j] is at the point (j,-i) in\n# the plane. Spaces outside the warehouse are considered walls; if any part of the robot leaves the \n# warehouse, it will be considered to have crashed into the exterior wall of the warehouse.\n# \n# For example, \n# warehouse = ['.#.',\n# '.#.',\n# '..@']\n# is a 3x3 warehouse. The dropzone is at space (2,-2) and there are walls at spaces (1,0) \n# and (1,-1). The rest of the warehouse is empty space.\n#\n# The robot is a circle of radius 0.25. The robot begins centered in the dropzone space.\n# The robot's initial bearing is 0.\n#\n# The argument `todo` is a list of points representing the center point of each box.\n# todo[0] is the first box which must be delivered, followed by todo[1], and so on.\n# Each box is a square of size 0.2 x 0.2. If the robot contacts a box, it will crash.\n#\n# The arguments `max_distance` and `max_steering` are parameters constraining the movement\n# of the robot on a given time step. They are described more below.\n#\n# === Rules for Movement ===\n#\n# - The robot may move any distance between 0 and `max_distance` per time step.\n# - The robot may set its steering angle anywhere between -`max_steering` and \n# `max_steering` per time step. A steering angle of 0 means that the robot will\n# move according to its current bearing. A positive angle means the robot will \n# turn counterclockwise by `steering_angle` radians; a negative steering_angle \n# means the robot will turn clockwise by abs(steering_angle) radians.\n# - Upon a movement, the robot will change its steering angle instantaneously to the \n# amount indicated by the move, and then it will move a distance in a straight line in its\n# new bearing according to the amount indicated move.\n# - The cost per move is 1 plus the amount of distance traversed by the robot on that move.\n#\n# - The robot may pick up a box whose center point is within 0.5 units of the robot's center point.\n# - If the robot picks up a box, it incurs a total cost of 2 for that move (this already includes \n# the 1-per-move cost incurred by the robot).\n# - While holding a box, the robot may not pick up another box.\n# - The robot may put a box down at a total cost of 1.5 for that move. The box must be placed so that:\n# - The box is not contacting any walls, the exterior of the warehouse, any other boxes, or the robot\n# - The box's center point is within 0.5 units of the robot's center point\n# - A box is always oriented so that two of its edges are horizontal and the other two are vertical.\n# - If a box is placed entirely within the '@' space, it is considered delivered and is removed from the \n# warehouse.\n# - The warehouse will be arranged so that it is always possible for the robot to move to the \n# next box on the todo list without having to rearrange any other boxes.\n#\n# - If the robot crashes, it will stop moving and incur a cost of 100*distance, where distance\n# is the length it attempted to move that move. (The regular movement cost will not apply.)\n# - If an illegal move is attempted, the robot will not move, but the standard cost will be incurred.\n# Illegal moves include (but are not necessarily limited to):\n# - picking up a box that doesn't exist or is too far away\n# - picking up a box while already holding one\n# - putting down a box too far away or so that it's touching a wall, the warehouse exterior, \n# another box, or the robot\n# - putting down a box while not holding a box\n#\n# === Output Specifications ===\n#\n# `plan_delivery` should return a LIST of strings, each in one of the following formats.\n#\n# 'move {steering} {distance}', where '{steering}' is a floating-point number between\n# -`max_steering` and `max_steering` (inclusive) and '{distance}' is a floating-point\n# number between 0 and `max_distance`\n# \n# 'lift {b}', where '{b}' is replaced by the index in the list `todo` of the box being picked up\n# (so if you intend to lift box 0, you would return the string 'lift 0')\n#\n# 'down {x} {y}', where '{x}' is replaced by the x-coordinate of the center point of where the box\n# will be placed and where '{y}' is replaced by the y-coordinate of that center point\n# (for example, 'down 1.5 -2.9' means to place the box held by the robot so that its center point\n# is (1.5,-2.9)).\n#\n# === Grading ===\n# \n# - Your planner will be graded against a set of test cases, each equally weighted.\n# - Each task will have a \"baseline\" cost. If your set of moves results in the task being completed\n# with a total cost of K times the baseline cost, you will receive 1/K of the credit for the\n# test case. (Note that if K < 1, this means you earn extra credit!)\n# - Otherwise, you will receive no credit for that test case. This could happen for one of several \n# reasons including (but not necessarily limited to):\n# - plan_delivery's moves do not deliver the boxes in the correct order.\n# - plan_delivery's output is not a list of strings in the prescribed format.\n# - plan_delivery does not return an output within the prescribed time limit.\n# - Your code raises an exception.\n#\n# === Additional Info ===\n# \n# - You may add additional classes and functions as needed provided they are all in the file `partB.py`.\n# - Your partB.py file must not execute any code when it is imported. \n# - Upload partB.py to Project 2 on T-Square in the Assignments section. Do not put it into an \n# archive with other files.\n# - Ask any questions about the directions or specifications on Piazza.\n#\nimport numpy as np\nfrom math import *\n\nPI = pi\n\ndef padding(warehouse, n=5):\n warehouse = np.array(warehouse)\n r = len(warehouse[0])*n\n c = len(warehouse)*n\n n_pad = int(0.25*n)\n new_ware = np.array([['.']*r]*c)\n for i in range(len(warehouse)):\n for j in range(len(warehouse[i])):\n if warehouse[i][j] == '#':\n for k in range(max(0,i*n-n_pad),min(r*n,(i+1)*n+n_pad)):\n for l in range(max(0,j*n-n_pad),min(c*n,(j+1)*n+n_pad)):\n try:\n new_ware[k][l] = '#'\n except IndexError:\n continue\n warehouse_padded=[]\n for row in new_ware.tolist():\n warehouse_padded.append(\"\".join(row))\n \n return warehouse_padded\n\ndef angle_trunc(a):\n \"\"\"This maps all angles to a domain of [-pi, pi]\"\"\"\n while a < 0.0:\n a += pi * 2\n return ((a + pi) % (pi * 2)) - pi\n\nclass DeliveryPlanner:\n\n def __init__(self, warehouse, todo, max_distance, max_steering):\n self.ori_todo = todo\n self.n = len(warehouse)\n self.m = len(warehouse[0])\n self.k = 4\n self.max_distance = max_distance\n self.max_steering = max_steering\n \n new_ware = padding(warehouse, self.k)\n self.warehouse = new_ware\n \n new_todo = []\n for target in todo:\n (j,i) = target\n i = -int(i*self.k)\n j = int(j*self.k)\n new_todo.append([i,j])\n self.todo = new_todo\n \n for i in range(self.n):\n for j in range(self.m):\n if warehouse[i][j]==\"@\":\n self.remove = [j+0.5, -i-0.5]\n self.dz = [i*self.k+2, j*self.k+2]\n \n\n def plan_delivery(self):\n \n moves = []\n current = self.dz\n now = 0\n for x in range(0, len(self.todo)):\n target = x\n\n goal = self.todo[x]\n \n route = self.find_route(current, goal, target)\n route1 = self.zip_route(route)\n new_route = route1[:len(route1)-1]\n new_move,now = self.route2move(new_route,now)\n moves += new_move\n \n lift = 'lift '+ str(target)\n moves.append(lift)\n \n route2 = route1[::-1]\n new_route = route2[1:]\n new_move,now = self.route2move(new_route,now)\n moves += new_move\n \n down = 'down '+str(self.remove[0])+' '+str(self.remove[1])\n moves.append(down)\n \n return moves\n\n def find_route(self, init, goal, target):\n grid = list()\n for i in range(self.n*self.k):\n line1 = list()\n for j in range(self.m*self.k):\n if self.warehouse[i][j] in [\"@\", \".\"]:\n line1.append(0)\n elif self.warehouse[i][j]==\"#\":\n line1.append(1)\n grid.append(line1)\n \n for todo1 in self.todo[target+1:]:\n x = todo1[0]\n y = todo1[1]\n grid[x][y] = 1\n try:\n grid[x+1][y] = 1\n grid[x-1][y] = 1\n grid[x][y+1] = 1\n grid[x][y-1] = 1\n grid[x+1][y+1] = 1\n grid[x-1][y+1] = 1\n grid[x-1][y-1] = 1\n grid[x+1][y-1] = 1\n except IndexError:\n continue\n \n heuristic = [[-1 for col in range(len(grid[0]))] for row in range(len(grid))]\n for i in range(self.n):\n for j in range(self.m):\n distance = ( (i-goal[0])**2 + (j-goal[1])**2 ) ** 0.5\n heuristic[i][j] = distance\n \n delta = [[-1, 0],[0,-1],[1,0],[0,1],\n [-1,-1],[-1,1],[1,-1],[1,1]]\n \n closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]\n closed[init[0]][init[1]] = 1\n \n expand = [[9999 for col in range(len(grid[0]))] for row in range(len(grid))]\n \n x = init[0]\n y = init[1]\n g = 0\n f = g + heuristic[x][y]\n \n open = [[f, g, x, y]]\n \n found = False # flag that is set when search is complete\n resign = False # flag set if we can't find expand\n count = 0\n \n while not found and not resign:\n if len(open) == 0:\n resign = True\n return \"Fail\"\n else:\n open.sort()\n open.reverse()\n next = open.pop()\n x = next[2]\n y = next[3]\n g = next[1]\n expand[x][y] = count\n count += 1\n \n if x == goal[0] and y == goal[1]:\n found = True\n else:\n for i in range(len(delta)):\n x2 = x + delta[i][0]\n y2 = y + delta[i][1]\n if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):\n if closed[x2][y2] == 0 and grid[x2][y2] == 0:\n if i < 4:\n g2 = g + 2\n else:\n g2 = g + 3\n f2 = g2 + heuristic[x2][y2]\n open.append([f2, g2, x2, y2])\n closed[x2][y2] = 1\n route = [goal]\n current = goal\n cost1 = expand[current[0]][current[1]]\n while expand[current[0]][current[1]]>0:\n for i in range(len(delta)):\n x2 = current[0] + delta[i][0]\n y2 = current[1] + delta[i][1]\n if x2 >= 0 and x2 < len(grid) and y2 >=0 and y2 < len(grid[0]):\n if expand[x2][y2]1 or abs(diff3[1])>1:\n x1 = temp[0] + copysign(abs(diff3[0])-1.3, diff3[0])\n y1 = temp[1] + copysign(abs(diff3[1])-1.3, diff3[1])\n out_route.append([x1,y1])\n out_route.append(route[len(route)-1])\n return out_route\n \n def route2move(self, route, now):\n route = np.array(route)\n turns = []\n for i in range(1,len(route)):\n x = route[i][1]-route[i-1][1]\n y = route[i-1][0]-route[i][0]\n distance = (x**2 + y**2)**0.5 / self.k\n angle = atan2(y, x)\n angle = angle_trunc(angle)\n turn = angle_trunc(angle-now)\n \n if abs(turn)>self.max_steering:\n string = \"move \"+str(turn/2)+\" 0\"\n turns.append(string)\n if distance>self.max_distance:\n string = \"move \"+str(turn/2)+\" \"+str(distance/2) \n turns.append(string)\n string = \"move 0 \"+str(distance/2)\n turns.append(string)\n else:\n string = \"move \"+str(turn/2)+\" \"+str(distance) \n turns.append(string)\n else: \n if distance>self.max_distance:\n string = \"move \"+str(turn)+\" \"+str(distance/2) \n turns.append(string)\n string = \"move 0 \"+str(distance/2)\n turns.append(string)\n else:\n string = \"move \"+str(turn)+\" \"+str(distance) \n turns.append(string)\n now = angle\n return turns,now\n\n#warehouse=['#######.',\n# '#.......',\n# '#@......']\n#todo=[(7.5, -1.5), (7.5, -0.5)]\n#max_distance=3.0\n#max_steering=PI / 2. + 0.01\n#\n#exec_count = 1\n#def execute_student_plan(warehouse, todo,\n# max_distance, max_steering):\n# global exec_count\n#\n# student_planner = DeliveryPlanner(warehouse, todo,\n# max_distance, max_steering)\n#\n# action_list = student_planner.plan_delivery()\n# drawMoves.drawWH2(exec_count, warehouse, todo, action_list)\n# exec_count += 1\n#\n# state = State(warehouse, todo, max_distance, max_steering)\n# num_delivered = 0\n# next_box_to_deliver = num_delivered\n#\n#execute_student_plan(warehouse, todo, max_distance, max_steering)\n","sub_path":"Project2/partB.py","file_name":"partB.py","file_ext":"py","file_size_in_byte":16064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
|