diff --git "a/2764.jsonl" "b/2764.jsonl" new file mode 100644--- /dev/null +++ "b/2764.jsonl" @@ -0,0 +1,1837 @@ +{"seq_id":"74276609995","text":"from parsl import python_app\n\n\n@python_app\ndef create_galaxy_lib(zphot_para, lephare_dir, lephare_sandbox, stdout=None):\n \"\"\" LePhare step 1: creating SED library\n\n Args:\n zphot_para (str): zphot_para path\n lephare_dir (str): the LePhare installation directory path\n lephare_sandbox (str): working directory path\n \"\"\"\n import os\n import subprocess\n import shlex\n from utils import get_logger\n \n logger = get_logger(\n name='sedtolib', debug=True,\n stdout=os.path.join(lephare_sandbox, stdout)\n )\n\n # import logging\n\n # logger = logging.getLogger('sedtolib')\n # if stdout:\n # handler = logging.FileHandler(os.path.join(lephare_sandbox, stdout))\n # formatter = logging.Formatter(\n # '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'\n # )\n # handler.setFormatter(formatter)\n # logger.addHandler(handler)\n\n # logger.setLevel(logging.DEBUG)\n\n logger.info('Creating SED library')\n\n origin_path = os.getcwd()\n os.chdir(lephare_sandbox)\n\n os.environ['LEPHAREWORK'] = os.getcwd()\n logger.info('LEPHAREWORK: {}'.format(os.environ['LEPHAREWORK']))\n\n cmd_phz = f'{lephare_dir}/sedtolib -t G -c {zphot_para}'\n subplog = open('sedtolib.run', 'w+')\n logger.info(f\"Executing {cmd_phz}\")\n proc = subprocess.Popen(shlex.split(cmd_phz), stdout=subplog, stderr=subplog, universal_newlines=True)\n proc.wait()\n logger.info(f\"Return code = {proc.returncode}\")\n\n os.chdir(origin_path)\n\n\n@python_app\ndef create_filter_set(zphot_para, lephare_dir, lephare_sandbox, stdout=None):\n \"\"\" LePhare step 2: creating filter transmission files\n\n Args:\n zphot_para (str): zphot_para path\n lephare_dir (str): the LePhare installation directory path\n lephare_sandbox (str): working directory path\n \"\"\"\n import os\n import subprocess\n import shlex\n from utils import get_logger\n \n logger = get_logger(\n name='filter', debug=True,\n stdout=os.path.join(lephare_sandbox, stdout)\n )\n\n origin_path = os.getcwd()\n os.chdir(lephare_sandbox)\n logger.info('Creating filter transmission files')\n\n os.environ['LEPHAREWORK'] = os.getcwd()\n logger.info('LEPHAREWORK: {}'.format(os.environ['LEPHAREWORK']))\n\n cmd_phz = f'{lephare_dir}/filter -c {zphot_para} '\n subplog = open('filter.run', 'w+')\n logger.info(f\"Executing {cmd_phz}\")\n proc = subprocess.Popen(shlex.split(cmd_phz), stdout=subplog, stderr=subplog, universal_newlines=True)\n proc.wait()\n logger.info(f\"Return code = {proc.returncode}\")\n\n os.chdir(origin_path)\n\n\n@python_app\ndef compute_galaxy_mag(zphot_para, lephare_dir, lephare_sandbox, stdout=None):\n \"\"\" LePhare step 3: theoretical magnitudes library\n\n Args:\n zphot_para (str): zphot_para path\n lephare_dir (str): the LePhare installation directory path\n lephare_sandbox (str): working directory path\n \"\"\"\n import os\n import subprocess\n import shlex\n from utils import get_logger\n \n logger = get_logger(\n name='mag_gal', debug=True,\n stdout=os.path.join(lephare_sandbox, stdout)\n )\n\n origin_path = os.getcwd()\n os.chdir(lephare_sandbox)\n logger.info('Computing theoretical magnitudes')\n\n os.environ['LEPHAREWORK'] = os.getcwd()\n logger.info('LEPHAREWORK: {}'.format(os.environ['LEPHAREWORK']))\n\n cmd_phz = f'{lephare_dir}/mag_gal -t G -c {zphot_para} '\n subplog = open('mag_gal.run', 'w+')\n logger.info(f\"Executing {cmd_phz}\")\n proc = subprocess.Popen(shlex.split(cmd_phz), stdout=subplog, 
stderr=subplog, universal_newlines=True)\n proc.wait()\n logger.info(f\"Return code = {proc.returncode}\")\n\n os.chdir(origin_path)\n\n\n@python_app\ndef run_zphot(key, filename, interval, shifts, zphot_output, photo_type, err_type, apply_corr,\n bands, zphot, col_index, cat_fmt, idxs, namephotoz, lephare_dir, lephare_sandbox, stdout=None):\n \"\"\" Runs LePhare for each input data (fits) \"\"\"\n\n import shutil\n import shlex\n import subprocess\n import pyarrow as pa\n import pyarrow.parquet as parq\n import pandas as pd\n import os\n from numpy import loadtxt\n from utils import (\n create_dir, get_photometric_columns, format_input, create_inputs_symbolic_link\n )\n from utils import get_logger\n\n lephare_run_path = os.path.join(lephare_sandbox, f'zphot-{key}')\n create_dir(lephare_run_path)\n\n logger = get_logger(\n name='mag_gal', debug=True,\n stdout=os.path.join(lephare_run_path, stdout)\n )\n\n logger.info('Running zphot ID: {}'.format(key))\n logger.info('Input file: {}'.format(filename))\n logger.info('Interval: {}'.format(interval))\n\n origin_path = os.getcwd()\n os.chdir(lephare_sandbox) \n\n # Gets the list of columns used by LePhare to filter photometric data\n columns_list = get_photometric_columns(bands, photo_type, err_type, col_index, apply_corr)\n\n # Loading in memory only the range of selected rows\n table = parq.read_table(filename, columns=columns_list)[interval[0]:interval[1]]\n tb = table.to_pandas()\n\n # Gets the index column to be added to the final result\n col_index_values = tb.get(col_index).to_numpy()\n\n # Create txt input expected by Lephare\n lephare_input = format_input(\n key, tb, bands, photo_type, err_type, col_index, apply_corr, cat_fmt\n )\n\n shutil.move(lephare_input, lephare_run_path)\n create_inputs_symbolic_link(lephare_sandbox, lephare_run_path)\n\n os.chdir(lephare_run_path)\n os.environ['LEPHAREWORK'] = os.getcwd()\n # os.environ['LEPHAREDIR'] = os.path.dirname(os.path.normpath(lephare_dir))\n\n shifts = f'-APPLY_SYSSHIFT {shifts}' if shifts else str()\n phzout = 'lephare.out'\n\n logger.info(f'LEPHAREWORK: {os.getcwd()}')\n logger.info(f'LEPHAREDIR: {os.getenv(\"LEPHAREDIR\")}')\n\n cmd_phz = f'{lephare_dir}/zphota -c {zphot} -CAT_IN {lephare_input} -CAT_OUT {phzout} {shifts}'\n\n logger.info(f\"Run zphot cmd: {cmd_phz}\")\n\n subplog = open('zphot.run', 'w+')\n\n proc = subprocess.Popen(shlex.split(cmd_phz), stdout=subplog, stderr=subplog, universal_newlines=True)\n proc.wait()\n logger.info(f\"Return code = {proc.returncode}\")\n\n # Loading lePhare output only with selected columns (idxs)\n zphotoz = loadtxt(phzout, comments='#', usecols=(idxs), unpack=True)\n\n # Calculating the photoz error as the mean of Z_BEST68_LOW and Z_BEST68_HIGH\n ihigh, ilow = namephotoz.index('Z_BEST68_HIGH'), namephotoz.index('Z_BEST68_LOW')\n\n photozerr = abs(zphotoz[ihigh]-zphotoz[ilow])/2. 
#The name of the column on file must be ERR_Z\n\n _parquet = {}\n\n for value, name in zip(zphotoz, namephotoz):\n _parquet[name.lower()] = value\n\n _parquet[col_index.lower()] = col_index_values\n _parquet['err_z'] = photozerr\n\n df = pd.DataFrame(_parquet)\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n parq.write_table(table, zphot_output)\n\n os.chdir(origin_path)\n\n return {\"name\": os.path.basename(filename), \"file\": zphot_output}","repo_name":"linea-it/photoz-parsl","sub_path":"apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":7062,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"58220300","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nOnepixelMissingPackage = np.load('../CorruptData/one_pixel_inpainting.npy')\nReconstructedData = np.load('../Plot/Data/Question3_OneMising_LSTM64/PredictedImages.npy')\nOnePixel_Image_index = np.load('../Plot/Data/Question3_OneMising_LSTM64/BestFillIndex.npy')\nCross_Entropy_ground_truth = np.load('../Plot/Data/Question3_OneMising_LSTM64/GroundTruth_Cross_entropy_Collection.npy')\nCross_Entropy_In_Painting = np.load('../Plot/Data/Question3_OneMising_LSTM64/InPainting_Cross_entropy_Collection.npy')\n\nImageIndex = 0 #0-999\n\nOringinal_Image = np.reshape(OnepixelMissingPackage[1][ImageIndex],[28,28])\nPixel_Missing_image = np.reshape(OnepixelMissingPackage[0][ImageIndex],[28,28])\nThis_selected_image_ind = OnePixel_Image_index[ImageIndex]\nReconstruted_image = np.reshape(ReconstructedData[ImageIndex][This_selected_image_ind],[28,28])\n\nfig = plt.figure(figsize=(6, 3.2))\nax = fig.add_subplot(131)\nax.set_title('Original_Image')\nplt.imshow(Oringinal_Image)\nplt.text(5,35,'Groundturth Cross_entropy\\n='+str(Cross_Entropy_ground_truth[ImageIndex]))\nax.set_aspect('equal')\nbx = fig.add_subplot(132)\nbx.set_title('Pixel_Missing_Image')\nplt.imshow(Pixel_Missing_image)\nbx.set_aspect('equal')\ncx = fig.add_subplot(133)\ncx.set_title('Reconstructed_Image')\nplt.imshow(Reconstruted_image)\nplt.text(5,35,'Inpainting Cross_entropy\\n='+str(Cross_Entropy_In_Painting[ImageIndex]))\nplt.text(50, .025, r'$\\mu=100,\\ \\sigma=15$')\ncx.set_aspect('equal')\ncax = fig.add_axes([0.42, 0.1, 1.08, 0.8])\ncax.get_xaxis().set_visible(False)\ncax.get_yaxis().set_visible(False)\ncax.patch.set_alpha(0)\ncax.set_frame_on(False)\n# plt.colorbar(orientation='vertical')\nplt.show()\n\n","repo_name":"jhwjhw0123/RNN_sequence_image_prediction_and_in-painting","sub_path":"codes/Painting/Problem3ImageShow.py","file_name":"Problem3ImageShow.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"12619328540","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport re\nimport os\nimport nltk\nfrom nltk import FreqDist\nfrom ManipulateFile import ManipulateFile\n\nclass TextStringDocument(object):\n\tdef __init__(self, url_document):\n\t\tself.url_document = url_document\t\n\t# dictStopWords = onde tem as palavras que devem ser ignoradas\t\n\tdef to_vector(self,path,dictStopWords):\t\t\n\t\t#TEM ERRO AQUI\n\t\tprint(self.url_document)\n\t\tfile =open(path+self.url_document,'r', encoding='utf-8');\n\t\tword = file.read()\t\t\t\t\t\t\t\t\t\n\t\tfile.close()\t\t\n\t\tlistDocument = ManipulateFile().filter_list_docs(word) \t\t\t\t\t\t\n\t\tprint(len(listDocument))\n\t\tquery = ManipulateFile().filter_query_document(word)\t\t\t\t\t\t\t\t\t\n\t\tstemmer = 
nltk.stem.RSLPStemmer()\t\t\t\t\t\t\n\t\tmodifiedQuery = re.sub('[^A-Za-z]+',' ',query)\t\t\t\n\t\tmodifiedQuery = modifiedQuery.lower()\t\t\n\t\tquery_tokenize = nltk.word_tokenize(modifiedQuery)\t\n\t\tresultTokenize = []\t\t\n\t\tfor toke in query_tokenize:\n\t\t\ttoke = stemmer.stem(toke)\n\t\t\tif not dictStopWords.__contains__(toke):\n\t\t\t\tresultTokenize.append(toke)\t\t\n\t\treturn (FreqDist(resultTokenize),listDocument)\t\t\t\t\t\t\t\t\t\n","repo_name":"brunosousadev/information-retrieval","sub_path":"REV/TextStringDocument.py","file_name":"TextStringDocument.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"12221854224","text":"import dir_command\nimport work_with_csv\nimport subtitle_preparation\nimport argparse\n\n#!/usr/bin/env python3\n\ndef create_parser():\n parser = argparse.ArgumentParser(description='Overlay data from scv file to video. \\n Example: \\n main.py -i input_name.avi -d data.csv -o output_name.avi')\n parser.add_argument('-i', '--input', help=\"Specify the input file\",nargs=1, type=str, required=True)\n parser.add_argument('-o', '--output', help=\"Specify the output file\",nargs=1, type=str, required=True)\n parser.add_argument('-d', '--data', help=\"Specify the data file\",nargs=1, type=str, required=True)\n parser.add_argument('-f', '--freq', help=\"Specify the data frequency in Hz\",nargs=1, type=float, default=50.0)\n parser.add_argument('-s', '--subtitle', help=\"Specify the name subtitle file\",nargs=1, type=str, default='file.ass')\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n args = create_parser()\n\n START_TIME = 0\n\n input_video_file_name = args.input[0]\n data_file_name = args.data[0]\n output_video_file_name = args.output[0]\n step_size = 1 / args.freq\n file_name_subtitle = args.subtitle\n\n\n duration = dir_command.get_duration_video_in_seconds(input_video_file_name)\n\n title = work_with_csv.get_title_from_csv(data_file_name)\n data_gen = work_with_csv.get_data_from_csv(data_file_name)\n\n\n # Creating a subtitle file\n subtitle_preparation.create_subtitle_file(duration=duration, title=title, data_gen=data_gen,\n file_name_subtitle=file_name_subtitle, \n start_time=START_TIME, \n step_size=step_size)\n\n # Video captioning\n dir_command.overlay_sub_to_video(input_video_file_name=input_video_file_name, \n output_video_file_name=output_video_file_name, \n file_name_subtitle=file_name_subtitle)\n\n\n\n\n\n","repo_name":"Shnuer/video_overlay","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"2332529296","text":"from __future__ import absolute_import\n\nimport numpy as np\nfrom sympy import Matrix, nsimplify\nfrom .material import material_property, cached_property\nfrom .mineral import Mineral\nfrom .solutionmodel import SolutionModel\nfrom .solutionmodel import MechanicalSolution, IdealSolution\nfrom .solutionmodel import SymmetricRegularSolution, AsymmetricRegularSolution\nfrom .solutionmodel import SubregularSolution\nfrom .averaging_schemes import reuss_average_function\n\nfrom ..tools.reductions import independent_row_indices\nfrom ..tools.chemistry import sum_formulae, sort_element_list_to_IUPAC_order\n\n\nclass SolidSolution(Mineral):\n \"\"\"\n This is the base class for all solid solutions.\n Site occupancies, endmember activities and the constant\n and 
pressure and temperature dependencies of the excess\n properties can be queried after using set_composition()\n States of the solid solution can only be queried after setting\n the pressure, temperature and composition using set_state().\n\n This class is available as :class:`burnman.SolidSolution`.\n It uses an instance of :class:`burnman.SolutionModel` to\n calculate interaction terms between endmembers.\n\n All the solid solution parameters are expected to be in SI units. This\n means that the interaction parameters should be in J/mol, with the T\n and P derivatives in J/K/mol and m^3/mol.\n\n The parameters are relevant to all solution models. Please\n see the documentation for individual models for details about\n other parameters.\n\n Parameters\n ----------\n name : string\n Name of the solid solution\n solution_type : string\n String determining which SolutionModel to use. One of 'mechanical',\n 'ideal', 'symmetric', 'asymmetric' or 'subregular'.\n endmembers : list of lists\n List of endmembers in this solid solution. The first item of each\n list should be a :class:`burnman.Mineral` object. The second item\n should be a string with the site formula of the endmember.\n molar_fractions : numpy array (optional)\n The molar fractions of each endmember in the solid solution.\n Can be reset using the set_composition() method.\n \"\"\"\n\n def __init__(self,\n name=None,\n solution_type=None,\n endmembers=None,\n energy_interaction=None,\n volume_interaction=None,\n entropy_interaction=None,\n energy_ternary_terms=None,\n volume_ternary_terms=None,\n entropy_ternary_terms=None,\n alphas=None,\n molar_fractions=None):\n \"\"\"\n Set up matrices to speed up calculations for when P, T, X is defined.\n \"\"\"\n Mineral.__init__(self)\n\n # SolidSolution needs a method attribute to call Mineral.set_state().\n # Note that set_method() below will not change self.method\n self.method = 'SolidSolutionMethod'\n\n if name is not None:\n self.name = name\n if solution_type is not None:\n self.solution_type = solution_type\n if endmembers is not None:\n self.endmembers = endmembers\n if energy_interaction is not None:\n self.energy_interaction = energy_interaction\n if volume_interaction is not None:\n self.volume_interaction = volume_interaction\n if entropy_interaction is not None:\n self.entropy_interaction = entropy_interaction\n if energy_ternary_terms is not None:\n self.energy_ternary_terms = energy_ternary_terms\n if volume_ternary_terms is not None:\n self.volume_ternary_terms = volume_ternary_terms\n if entropy_ternary_terms is not None:\n self.entropy_ternary_terms = entropy_ternary_terms\n if alphas is not None:\n self.alphas = alphas\n if endmembers is not None:\n self.endmembers = endmembers\n\n if hasattr(self, 'endmembers') is False:\n raise Exception(\"'endmembers' attribute missing \"\n \"from solid solution\")\n\n # Set default solution model type\n if hasattr(self, 'solution_type'):\n if self.solution_type == 'mechanical':\n self.solution_model = MechanicalSolution(self.endmembers)\n elif self.solution_type == 'ideal':\n self.solution_model = IdealSolution(self.endmembers)\n else:\n if hasattr(self, 'energy_interaction') is False:\n self.energy_interaction = None\n if hasattr(self, 'volume_interaction') is False:\n self.volume_interaction = None\n if hasattr(self, 'entropy_interaction') is False:\n self.entropy_interaction = None\n\n if self.solution_type == 'symmetric':\n self.solution_model = SymmetricRegularSolution(\n self.endmembers, self.energy_interaction,\n 
self.volume_interaction, self.entropy_interaction)\n elif self.solution_type == 'asymmetric':\n if hasattr(self, 'alphas') is False:\n raise Exception(\n \"'alphas' attribute missing from solid solution\")\n self.solution_model = AsymmetricRegularSolution(\n self.endmembers, self.alphas, self.energy_interaction,\n self.volume_interaction, self.entropy_interaction)\n elif self.solution_type == 'subregular':\n if hasattr(self, 'energy_ternary_terms') is False:\n self.energy_ternary_terms = None\n if hasattr(self, 'volume_ternary_terms') is False:\n self.volume_ternary_terms = None\n if hasattr(self, 'entropy_ternary_terms') is False:\n self.entropy_ternary_terms = None\n\n self.solution_model = SubregularSolution(\n self.endmembers,\n self.energy_interaction, self.volume_interaction,\n self.entropy_interaction,\n self.energy_ternary_terms, self.volume_ternary_terms,\n self.entropy_ternary_terms)\n else:\n raise Exception(\"Solution model type \"\n + self.solution_type + \"not recognised.\")\n else:\n self.solution_model = SolutionModel()\n\n # Equation of state\n for i in range(self.n_endmembers):\n self.endmembers[i][0].set_method(\n self.endmembers[i][0].params['equation_of_state'])\n\n # Molar fractions\n if molar_fractions is not None:\n self.set_composition(molar_fractions)\n\n def get_endmembers(self):\n return self.endmembers\n\n def set_composition(self, molar_fractions):\n \"\"\"\n Set the composition for this solid solution.\n Resets cached properties.\n\n Parameters\n ----------\n molar_fractions: list of float\n molar abundance for each endmember, needs to sum to one.\n \"\"\"\n assert(len(self.endmembers) == len(molar_fractions))\n\n if self.solution_type != 'mechanical':\n assert(sum(molar_fractions) > 0.9999)\n assert(sum(molar_fractions) < 1.0001)\n\n self.reset()\n self.molar_fractions = np.array(molar_fractions)\n\n def set_method(self, method):\n for i in range(self.n_endmembers):\n self.endmembers[i][0].set_method(method)\n # note: do not set self.method here!\n self.reset()\n\n def set_state(self, pressure, temperature):\n\n Mineral.set_state(self, pressure, temperature)\n for i in range(self.n_endmembers):\n self.endmembers[i][0].set_state(pressure, temperature)\n\n @material_property\n def formula(self):\n \"\"\"\n Returns molar chemical formula of the solid solution.\n \"\"\"\n return sum_formulae(self.endmember_formulae, self.molar_fractions)\n\n @material_property\n def activities(self):\n \"\"\"\n Returns a list of endmember activities [unitless].\n \"\"\"\n return self.solution_model.activities(self.pressure, self.temperature,\n self.molar_fractions)\n\n @material_property\n def activity_coefficients(self):\n \"\"\"\n Returns a list of endmember activity coefficients\n (gamma = activity / ideal activity) [unitless].\n \"\"\"\n return self.solution_model.activity_coefficients(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def molar_internal_energy(self):\n \"\"\"\n Returns molar internal energy of the mineral [J/mol].\n Aliased with self.energy\n \"\"\"\n return self.molar_helmholtz + self.temperature * self.molar_entropy\n\n @material_property\n def excess_partial_gibbs(self):\n \"\"\"\n Returns excess partial molar gibbs free energy [J/mol].\n Property specific to solid solutions.\n \"\"\"\n return self.solution_model.excess_partial_gibbs_free_energies(self.pressure, self.temperature, self.molar_fractions)\n\n @material_property\n def excess_partial_volumes(self):\n \"\"\"\n Returns excess partial volumes [m^3].\n Property 
specific to solid solutions.\n \"\"\"\n return self.solution_model.excess_partial_volumes(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def excess_partial_entropies(self):\n \"\"\"\n Returns excess partial entropies [J/K].\n Property specific to solid solutions.\n \"\"\"\n return self.solution_model.excess_partial_entropies(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def partial_gibbs(self):\n \"\"\"\n Returns endmember partial molar gibbs free energy [J/mol].\n Property specific to solid solutions.\n \"\"\"\n return (np.array([self.endmembers[i][0].gibbs\n for i in range(self.n_endmembers)])\n + self.excess_partial_gibbs)\n\n @material_property\n def partial_volumes(self):\n \"\"\"\n Returns endmember partial volumes [m^3].\n Property specific to solid solutions.\n \"\"\"\n return (np.array([self.endmembers[i][0].molar_volume\n for i in range(self.n_endmembers)])\n + self.excess_partial_volumes)\n\n @material_property\n def partial_entropies(self):\n \"\"\"\n Returns endmember partial entropies [J/K].\n Property specific to solid solutions.\n \"\"\"\n return (np.array([self.endmembers[i][0].molar_entropy\n for i in range(self.n_endmembers)])\n + self.excess_partial_entropies)\n\n @material_property\n def excess_gibbs(self):\n \"\"\"\n Returns molar excess gibbs free energy [J/mol].\n Property specific to solid solutions.\n \"\"\"\n return self.solution_model.excess_gibbs_free_energy(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def gibbs_hessian(self):\n \"\"\"\n Returns an array containing the second compositional derivative\n of the Gibbs free energy [J]. Property specific to solid solutions.\n \"\"\"\n return self.solution_model.gibbs_hessian(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def entropy_hessian(self):\n \"\"\"\n Returns an array containing the second compositional derivative\n of the entropy [J/K]. Property specific to solid solutions.\n \"\"\"\n return self.solution_model.entropy_hessian(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def volume_hessian(self):\n \"\"\"\n Returns an array containing the second compositional derivative\n of the volume [m^3]. 
Property specific to solid solutions.\n \"\"\"\n return self.solution_model.volume_hessian(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def molar_gibbs(self):\n \"\"\"\n Returns molar Gibbs free energy of the solid solution [J/mol].\n Aliased with self.gibbs.\n \"\"\"\n return sum([self.endmembers[i][0].gibbs * self.molar_fractions[i]\n for i in range(self.n_endmembers)]) + self.excess_gibbs\n\n @material_property\n def molar_helmholtz(self):\n \"\"\"\n Returns molar Helmholtz free energy of the solid solution [J/mol].\n Aliased with self.helmholtz.\n \"\"\"\n return self.molar_gibbs - self.pressure * self.molar_volume\n\n @material_property\n def molar_mass(self):\n \"\"\"\n Returns molar mass of the solid solution [kg/mol].\n \"\"\"\n return sum([self.endmembers[i][0].molar_mass\n * self.molar_fractions[i]\n for i in range(self.n_endmembers)])\n\n @material_property\n def excess_volume(self):\n \"\"\"\n Returns excess molar volume of the solid solution [m^3/mol].\n Specific property for solid solutions.\n \"\"\"\n return self.solution_model.excess_volume(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def molar_volume(self):\n \"\"\"\n Returns molar volume of the solid solution [m^3/mol].\n Aliased with self.V.\n \"\"\"\n return sum([self.endmembers[i][0].molar_volume\n * self.molar_fractions[i]\n for i in range(self.n_endmembers)]) + self.excess_volume\n\n @material_property\n def density(self):\n \"\"\"\n Returns density of the solid solution [kg/m^3].\n Aliased with self.rho.\n \"\"\"\n return self.molar_mass / self.molar_volume\n\n @material_property\n def excess_entropy(self):\n \"\"\"\n Returns excess molar entropy [J/K/mol].\n Property specific to solid solutions.\n \"\"\"\n return self.solution_model.excess_entropy(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def molar_entropy(self):\n \"\"\"\n Returns molar entropy of the solid solution [J/K/mol].\n Aliased with self.S.\n \"\"\"\n return sum([self.endmembers[i][0].S * self.molar_fractions[i]\n for i in range(self.n_endmembers)]) + self.excess_entropy\n\n @material_property\n def excess_enthalpy(self):\n \"\"\"\n Returns excess molar enthalpy [J/mol].\n Property specific to solid solutions.\n \"\"\"\n return self.solution_model.excess_enthalpy(self.pressure,\n self.temperature,\n self.molar_fractions)\n\n @material_property\n def molar_enthalpy(self):\n \"\"\"\n Returns molar enthalpy of the solid solution [J/mol].\n Aliased with self.H.\n \"\"\"\n return sum([self.endmembers[i][0].H * self.molar_fractions[i]\n for i in range(self.n_endmembers)]) + self.excess_enthalpy\n\n @material_property\n def isothermal_bulk_modulus(self):\n \"\"\"\n Returns isothermal bulk modulus of the solid solution [Pa].\n Aliased with self.K_T.\n \"\"\"\n return self.V * 1. 
/ (sum([self.endmembers[i][0].V\n / (self.endmembers[i][0].K_T)\n * self.molar_fractions[i]\n for i in range(self.n_endmembers)]))\n\n @material_property\n def adiabatic_bulk_modulus(self):\n \"\"\"\n Returns adiabatic bulk modulus of the solid solution [Pa].\n Aliased with self.K_S.\n \"\"\"\n if self.temperature < 1e-10:\n return self.isothermal_bulk_modulus\n else:\n return (self.isothermal_bulk_modulus\n * self.molar_heat_capacity_p / self.molar_heat_capacity_v)\n\n @material_property\n def isothermal_compressibility(self):\n \"\"\"\n Returns isothermal compressibility of the solid solution.\n (or inverse isothermal bulk modulus) [1/Pa].\n Aliased with self.K_T.\n \"\"\"\n return 1. / self.isothermal_bulk_modulus\n\n @material_property\n def adiabatic_compressibility(self):\n \"\"\"\n Returns adiabatic compressibility of the solid solution.\n (or inverse adiabatic bulk modulus) [1/Pa].\n Aliased with self.K_S.\n \"\"\"\n return 1. / self.adiabatic_bulk_modulus\n\n @material_property\n def shear_modulus(self):\n \"\"\"\n Returns shear modulus of the solid solution [Pa].\n Aliased with self.G.\n \"\"\"\n G_list = np.fromiter((e[0].G for e in self.endmembers), dtype=float,\n count=self.n_endmembers)\n return reuss_average_function(self.molar_fractions, G_list)\n\n @material_property\n def p_wave_velocity(self):\n \"\"\"\n Returns P wave speed of the solid solution [m/s].\n Aliased with self.v_p.\n \"\"\"\n return np.sqrt((self.adiabatic_bulk_modulus\n + 4. / 3. * self.shear_modulus) / self.density)\n\n @material_property\n def bulk_sound_velocity(self):\n \"\"\"\n Returns bulk sound speed of the solid solution [m/s].\n Aliased with self.v_phi.\n \"\"\"\n return np.sqrt(self.adiabatic_bulk_modulus / self.density)\n\n @material_property\n def shear_wave_velocity(self):\n \"\"\"\n Returns shear wave speed of the solid solution [m/s].\n Aliased with self.v_s.\n \"\"\"\n return np.sqrt(self.shear_modulus / self.density)\n\n @material_property\n def grueneisen_parameter(self):\n \"\"\"\n Returns grueneisen parameter of the solid solution [unitless].\n Aliased with self.gr.\n \"\"\"\n if self.temperature < 1e-10:\n return float('nan')\n else:\n return (self.thermal_expansivity * self.isothermal_bulk_modulus\n * self.molar_volume / self.molar_heat_capacity_v)\n\n @material_property\n def thermal_expansivity(self):\n \"\"\"\n Returns thermal expansion coefficient (alpha)\n of the solid solution [1/K].\n Aliased with self.alpha.\n \"\"\"\n return (1. 
/ self.V) * sum([self.endmembers[i][0].alpha\n * self.endmembers[i][0].V\n * self.molar_fractions[i]\n for i in range(self.n_endmembers)])\n\n @material_property\n def molar_heat_capacity_v(self):\n \"\"\"\n Returns molar heat capacity at constant volume of the\n solid solution [J/K/mol].\n Aliased with self.C_v.\n \"\"\"\n return (self.molar_heat_capacity_p\n - self.molar_volume * self.temperature\n * self.thermal_expansivity * self.thermal_expansivity\n * self.isothermal_bulk_modulus)\n\n @material_property\n def molar_heat_capacity_p(self):\n \"\"\"\n Returns molar heat capacity at constant pressure\n of the solid solution [J/K/mol].\n Aliased with self.C_p.\n \"\"\"\n return sum([self.endmembers[i][0].molar_heat_capacity_p\n * self.molar_fractions[i]\n for i in range(self.n_endmembers)])\n\n @cached_property\n def stoichiometric_matrix(self):\n \"\"\"\n A sympy Matrix where each element M[i,j] corresponds\n to the number of atoms of element[j] in endmember[i].\n \"\"\"\n def f(i, j):\n e = self.elements[j]\n if e in self.endmember_formulae[i]:\n return nsimplify(self.endmember_formulae[i][e])\n else:\n return 0\n return Matrix(len(self.endmember_formulae), len(self.elements), f)\n\n @cached_property\n def stoichiometric_array(self):\n \"\"\"\n An array where each element arr[i,j] corresponds\n to the number of atoms of element[j] in endmember[i].\n \"\"\"\n return np.array(self.stoichiometric_matrix)\n\n @cached_property\n def reaction_basis(self):\n \"\"\"\n An array where each element arr[i,j] corresponds\n to the number of moles of endmember[j] involved in reaction[i].\n \"\"\"\n reaction_basis = np.array([v[:] for v in\n self.stoichiometric_matrix.T.nullspace()])\n\n if len(reaction_basis) == 0:\n reaction_basis = np.empty((0, len(self.endmember_names)))\n\n return reaction_basis\n\n @cached_property\n def n_reactions(self):\n \"\"\"\n The number of reactions in reaction_basis.\n \"\"\"\n return len(self.reaction_basis[:, 0])\n\n @cached_property\n def independent_element_indices(self):\n \"\"\"\n A list of an independent set of element indices. 
If the amounts of\n these elements are known (element_amounts),\n the amounts of the other elements can be inferred by\n -compositional_null_basis[independent_element_indices].dot(element_amounts).\n \"\"\"\n return sorted(independent_row_indices(self.stoichiometric_matrix.T))\n\n @cached_property\n def dependent_element_indices(self):\n \"\"\"\n The element indices not included in the independent list.\n \"\"\"\n return [i for i in range(len(self.elements))\n if i not in self.independent_element_indices]\n\n @cached_property\n def compositional_null_basis(self):\n \"\"\"\n An array N such that N.b = 0 for all bulk compositions that can\n be produced with a linear sum of the endmembers in the solid solution.\n \"\"\"\n null_basis = np.array([v[:] for v in\n self.stoichiometric_matrix.nullspace()])\n\n M = null_basis[:, self.dependent_element_indices]\n assert (M.shape[0] == M.shape[1]) and (M == np.eye(M.shape[0])).all()\n\n return null_basis\n\n @cached_property\n def endmember_formulae(self):\n \"\"\"\n A list of formulae for all the endmember in the solid solution.\n \"\"\"\n return [mbr[0].params['formula'] for mbr in self.endmembers]\n\n @cached_property\n def endmember_names(self):\n \"\"\"\n A list of names for all the endmember in the solid solution.\n \"\"\"\n return [mbr[0].name for mbr in self.endmembers]\n\n @cached_property\n def n_endmembers(self):\n \"\"\"\n The number of endmembers in the solid solution.\n \"\"\"\n return len(self.endmembers)\n\n @cached_property\n def elements(self):\n \"\"\"\n A list of the elements which could be contained in the solid solution,\n returned in the IUPAC element order.\n \"\"\"\n keys = []\n for f in self.endmember_formulae:\n keys.extend(f.keys())\n\n return sort_element_list_to_IUPAC_order(set(keys))\n","repo_name":"sannecottaar/PartIII-practicals","sub_path":"Mars/burnman_1.1/burnman/classes/solidsolution.py","file_name":"solidsolution.py","file_ext":"py","file_size_in_byte":23992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"70878867914","text":"from osgeo import gdal\r\nimport numpy as np\r\nfrom osgeo import osr\r\n\r\n\r\n#使用gdal.Warp对MODIS数据进行重投影。\r\nmodis_lai=gdal.Open(r'D:\\DATA\\MODIS\\LAI\\MCD15A3H.A2020181.h13v11.006.2020188204620.hdf')\r\nsubdataset_one = modis_lai.GetSubDatasets()[0][0] # 第一个子数据集合\r\n# gdal.Warp(r'D:\\code\\reprojection.tif', subdataset_one, dstSRS='EPSG:4326')\r\n\r\n\r\n\r\ndef reproject(src_file, dst_file, p_width, p_height, epsg_to):\r\n \"\"\"\r\n :param src_file: 输入文件\r\n :param dst_file: 输出文件\r\n :param p_width: 输出图像像素宽度\r\n :param p_height: 输出图像像素高度\r\n :param epsg_to: 输出图像EPSG坐标代码\r\n :return:\r\n \"\"\"\r\n # 首先,读取输入数据,然后获得输入数据的投影,放射变换参数,以及图像宽高等信息\r\n src_ds = gdal.Open(src_file)\r\n src_srs = osr.SpatialReference()\r\n src_srs.ImportFromWkt(src_ds.GetProjection())\r\n\r\n srs_trans = src_ds.GetGeoTransform()\r\n x_size = src_ds.RasterXSize\r\n y_size = src_ds.RasterYSize\r\n\r\n # 获得输出数据的投影,建立两个投影直接的转换关系\r\n dst_srs = osr.SpatialReference()\r\n dst_srs.ImportFromEPSG(epsg_to)\r\n tx = osr.CoordinateTransformation(src_srs, dst_srs)\r\n\r\n # 计算输出图像四个角点的坐标\r\n (uly, ulx, _) = tx.TransformPoint(srs_trans[0], srs_trans[3])\r\n (ury, urx, _) = tx.TransformPoint(srs_trans[0] + srs_trans[1] * x_size, srs_trans[3])\r\n (lly, llx, _) = tx.TransformPoint(srs_trans[0], srs_trans[3] + srs_trans[5] * y_size)\r\n (lry, lrx, _) = tx.TransformPoint(srs_trans[0] + srs_trans[1] * x_size + srs_trans[2] * y_size,\r\n srs_trans[3] + srs_trans[4] * x_size 
+ srs_trans[5] * y_size)\r\n\r\n min_x = min(ulx, urx, llx, lrx)\r\n max_x = max(ulx, urx, llx, lrx)\r\n min_y = min(uly, ury, lly, lry)\r\n max_y = max(uly, ury, lly, lry)\r\n\r\n # 创建输出图像,需要计算输出图像的尺寸(重投影以后图像的尺寸会发生变化)\r\n driver = gdal.GetDriverByName('GTiff')\r\n dst_ds = driver.Create(dst_file,\r\n int((max_x - min_x) / p_width),\r\n int((max_y - min_y) / p_height),\r\n 1, gdal.GDT_Float32)\r\n dst_trans = (min_x, p_width, srs_trans[2],\r\n max_y, srs_trans[4], -p_height)\r\n\r\n # 设置GeoTransform和Projection信息\r\n dst_ds.SetGeoTransform(dst_trans)\r\n dst_ds.SetProjection(dst_srs.ExportToWkt())\r\n # 进行投影转换\r\n gdal.ReprojectImage(src_ds, dst_ds,\r\n src_srs.ExportToWkt(), dst_srs.ExportToWkt(),\r\n gdal.GRA_Bilinear)\r\n dst_ds.GetRasterBand(1).SetNoDataValue(0) # 设置NoData值\r\n dst_ds.FlushCache()\r\n\r\n del src_ds\r\n del dst_ds\r\n\r\n\r\nreproject(subdataset_one,\r\n r'D:\\code\\reprojection2.tif', 0.005, 0.005, 4326)","repo_name":"xiaoguo123456/gdal_example","sub_path":"modis_reproject.py","file_name":"modis_reproject.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"27275150715","text":"# -*- coding: utf-8-*-\n\nWORDS = [\"TASK\"]\n\ntasks = []\n\ndef handle(text, mic, profile):\n text = text.lower()\n if 'new' in text:\n mic.say('Ok, when?')\n date = mic.activeListen()\n mic.say('What task?')\n task = mic.activeListen()\n global tasks\n tasks.append([date, task])\n mic.say('OK, added %s at %s' % (task, date))\n elif 'next' in text:\n date, task = tasks[0]\n mic.say('Next task is %s at %s' % (task, date))\n elif 'all' in text:\n mic.say(\"Current tasks:\")\n for date, task in tasks:\n mic.say('Task: %s at %s' % (task, date))\n mic.say(\"That's all\")\n else:\n mic.say('Hmm... What?')\n\ndef isValid(text):\n return 'task' in text.lower()\n","repo_name":"alex-pat/jasper-modules","sub_path":"Schedule.py","file_name":"Schedule.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"3900046014","text":"from pygame import Surface, font\n\nfrom game.constants import Colors\n\nfont.init()\n\nfonts = {}\nfor i, size in enumerate([28, 32, 60]):\n fonts[i] = font.Font(\"assets/pixeloid-font/PixeloidMono-1G8ae.ttf\", size)\n\n\ndef render_text(\n screen: Surface,\n text: str,\n size: int = 0,\n grid_x: int = 0,\n grid_y: int = 0,\n text_align: str = \"center\",\n) -> None:\n text = fonts[size].render(text, True, Colors.UI)\n\n center = screen.get_width() / 32 * grid_x, screen.get_height() / 20 * grid_y\n\n if text_align == \"left\":\n text_rect = text.get_rect(midleft=center)\n elif text_align == \"right\":\n text_rect = text.get_rect(midright=center)\n else:\n text_rect = text.get_rect(center=center)\n\n screen.blit(text, text_rect)\n","repo_name":"a96lex/pygame-tank-game","sub_path":"game/text_renderer.py","file_name":"text_renderer.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"14488937889","text":"\"\"\"\nhttps://leetcode.com/problems/add-binary/\n\n67. 
Add Binary\n\nGiven two binary strings, return their sum (also a binary string).\n\nThe input strings are both non-empty and contains only characters 1 or 0.\n\nExample 1:\n\nInput: a = \"11\", b = \"1\"\nOutput: \"100\"\nExample 2:\n\nInput: a = \"1010\", b = \"1011\"\nOutput: \"10101\"\n \n\nConstraints:\n\nEach string consists only of '0' or '1' characters.\n1 <= a.length, b.length <= 10^4\nEach string is either \"0\" or doesn't contain any leading zero.\n\nSolution\n\n将两个字符串长度拉平,用零补充\n分别放入两个栈\n循环两个栈,每次取出一个和carry相加%2放到输出,如果和大于等于2, carry=1\n循环结束如果carry是1,将1加到输出\n\n\"\"\"\n\n\nclass Solution:\n def addBinary(self, a: str, b: str) -> str:\n len_a, len_b = len(a), len(b)\n if len_a < len_b:\n a = '0' * (len_b - len_a) + a\n else:\n b = '0' * (len_a - len_b) + b\n stack_a, stack_b = list(a), list(b)\n output = ''\n carry = 0\n while stack_a and stack_b:\n cur_sum = int(stack_a.pop()) + int(stack_b.pop()) + carry\n if cur_sum >= 2:\n cur_sum = cur_sum % 2\n carry = 1\n else:\n carry = 0\n output = str(cur_sum) + output\n if carry == 1:\n output = '1' + output\n return output\n","repo_name":"emingenc/leetcode","sub_path":"facebook/67-add-binary.py","file_name":"67-add-binary.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"27989577139","text":"def RLE(s):\n s += '.'\n list_ans = []\n last = s[0]\n count = 1\n for i in range(1, len(s)):\n if s[i] == last:\n count += 1\n else:\n list_ans.append([last, count])\n last = s[i]\n count = 1\n return list_ans\n\n\nexec(input())\n","repo_name":"Nacnano/my-chula-courses","sub_path":"2110101-com-prog/grader/04_Loop/04_Loop_F11.py","file_name":"04_Loop_F11.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"28"} +{"seq_id":"44497787030","text":"from collections import deque\nimport sys\ninput = sys.stdin.readline\n\nN, K = map(int, input().split())\nqueue = deque([i+1 for i in range(N)])\nresult = []\n\n\nwhile queue:\n count = 1\n while count != K:\n queue.append(queue.popleft())\n count += 1\n result.append(queue.popleft())\n\nprint('<', end='')\nfor i in range(len(result)):\n if i < len(result) - 1:\n print(result[i], end=', ')\n else:\n print(result[i], end='')\nprint('>')\n\n# https://www.acmicpc.net/problem/11866\n\n# 원에서 K번째가 되면 popleft 안되면 popleft후 append로 오쪽 른향으로 원이 돌아간다.","repo_name":"TUKF4-CodingTestStudy/JunsuPark","sub_path":"박준수 알고리즘/자료구조/큐,덱/11866번.py","file_name":"11866번.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"25074719960","text":"import itertools\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n def letterCombinations(self, digits: str) -> List[str]:\r\n number_dict = {\r\n \"2\": [\"a\", \"b\", \"c\"],\r\n \"3\": [\"d\", \"e\", \"f\"],\r\n \"4\": [\"g\", \"h\", \"i\"],\r\n \"5\": [\"j\", \"k\", \"l\"],\r\n \"6\": [\"m\", \"n\", \"o\"],\r\n \"7\": [\"p\", \"q\", \"r\", \"s\"],\r\n \"8\": [\"t\", \"u\", \"v\"],\r\n \"9\": [\"w\", \"x\", \"y\", \"z\"]\r\n }\r\n\r\n res_list = []\r\n tmp_list = []\r\n # t_list = []\r\n\r\n for index in range(0, len(digits)):\r\n if digits[index] in number_dict:\r\n tmp_list.append(number_dict[digits[index]])\r\n\r\n # for words in tmp_list:\r\n # t_list += words\r\n\r\n ### Compute all cartesian values from tmp_list\r\n tmp_list = list(itertools.product(*tmp_list))\r\n # print(tmp_list)\r\n\r\n for 
i in tmp_list:\r\n ### i is tuple, so need to make as string\r\n ### use \"\".join() to make a tuple as a string\r\n res_list.append(\"\".join(i))\r\n\r\n return res_list\r\n\r\n\r\nsol = Solution()\r\nstring = \"23\"\r\n\r\nprint(sol.letterCombinations(string))","repo_name":"j00hoon/PythonProjects","sub_path":"PythonProject/Test/LetterCombinations.py","file_name":"LetterCombinations.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16848554779","text":"decimal = int(input())\nbin = []\nif decimal == 0:\n print(0)\n exit()\nwhile True:\n bin.append(decimal % 2)\n decimal = int(decimal / 2)\n if(decimal == 0):\n bin.append(decimal)\n break\nif bin[len(bin) - 1] == 0:\n bin.pop()\n\nfor i in range(len(bin)):\n print(bin.pop(), end=\"\") ","repo_name":"lani009/BOJ","sub_path":"codeUp/python/1416.py","file_name":"1416.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"72541386635","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 2 15:44:20 2019\n\n@author: zelord\n\"\"\"\n \n#1\nT = int(input())\nfor i in range(T):\n a = int(input())\n list = [1, 1, 1, 2, 2, 3, 4, 5, 7, 9]\n for i in range(11,a+1):\n temp = list[i-2]+list[i-6]\n list.append(temp)\n print(list[a-1])\n \n \n \n \n#2\nlist = []\nresult = [1, 1, 1, 2, 2, 3, 4, 5, 7, 9]\nT = int(input())\nfor _ in range(T):\n list.append(int(input()))\n\nif int(max(list)) > 10:\n for i in range(11,max(list) + 1):\n result.append(int(result[i-2] + result[i-6]))\n\nfor i in list:\n print(result[i-1])\n ","repo_name":"zel0rd/goalgo.github.io","sub_path":"BaekJoon/9461_zel0rd.py","file_name":"9461_zel0rd.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"74287537995","text":"\r\n\r\n# call arp Table\r\n# func() takes arp table date and exstracts the IP and MAC addresses in pairs (with out broadcast addresses)\r\n# func() Check Mac address to see if MAC appears more than once in arp table\r\n# create a new code file and import previous function in to logic for complete program\r\n# create a func() a log with the message date and time the machine was attack\r\n\r\n\r\n\r\n\r\nimport os\r\nimport ipaddress\r\n\r\ndef getarpTable():\r\n rawArpList = []\r\n arpList ={}\r\n\r\n os.system(r\"arp -a > ArpCheck.txt\")\r\n with open(\"ArpCheck.txt\",\"r\") as arpFile:\r\n for line in arpFile: # if statements to remove unwanted lines\r\n if \"Inter\" in line: # remove lines with text Descriptors\r\n continue\r\n elif \"ff-ff-ff\" in line: # remove brodcast entries\r\n continue\r\n elif len(line) < 2: # remove lines without information\r\n continue\r\n else:\r\n rawArpList.append(line.split()) # combine raw data in to one list\r\n for line in rawArpList:# combine data in to a new Dictionary\r\n arpList[line[0]] = line[1]\r\n return arpList\r\n\r\ndef arpSpoofDetector(arpTable):\r\n macList = []\r\n spoofFound = 0\r\n for ip in arpTable: # intiates the loop to compare\r\n if arpTable[ip] in macList:\r\n print(f\"Found duplicate MAC address {arpTable[ip]} in ARP Table \")\r\n spoofFound +=1\r\n else:\r\n macList.append(arpTable[ip])\r\n if spoofFound >0 :\r\n print(\"This could be an idicator of an ARP spoof attack\")\r\n print(\"futher investoigation is required.\")\r\n else:\r\n print(\"No ARP spoof detected\")\r\n 
#print(macList)\r\n return","repo_name":"hscott1216/CSULB_Security_Prof","sub_path":"arp.py","file_name":"arp.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"27239231227","text":"def cube_root(num):\n\n left = 0\n right = num\n\n while left <= right:\n\n mid = (left + right) / 2\n\n if abs(mid**3 - num) < 0.0001:\n return mid\n elif mid**3 < num:\n left = mid\n else:\n right = mid\n return None\n \nprint(cube_root(int(input(\"Enter a number: \"))))","repo_name":"Nivedha3106/PythonProgram","sub_path":"cubeBinary.py","file_name":"cubeBinary.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"12606548519","text":"# Thanks, Jason Sun, for most of this!\n\nimport os\nimport re\nimport pickle\nimport numpy as np\nfrom ..data import ARES\nimport scipy.special as sp\nfrom types import FunctionType\nfrom scipy.integrate import quad\nfrom scipy.interpolate import interp1d, Akima1DInterpolator\nfrom ..util.ProgressBar import ProgressBar\nfrom .Constants import rho_cgs, c, cm_per_mpc\nfrom .HaloMassFunction import HaloMassFunction\n\ntry:\n import h5py\nexcept ImportError:\n pass\n\ntry:\n from mpi4py import MPI\n rank = MPI.COMM_WORLD.rank\n size = MPI.COMM_WORLD.size\nexcept ImportError:\n rank = 0\n size = 1\n\ntry:\n import hankel\n have_hankel = True\n from hankel import HankelTransform, SymmetricFourierTransform\nexcept ImportError:\n have_hankel = False\n\nfour_pi = 4 * np.pi\n\nclass HaloModel(HaloMassFunction):\n\n def mvir_to_rvir(self, m):\n return (3. * m / (4. * np.pi * self.pf['halo_delta'] \\\n * self.cosm.mean_density0)) ** (1. / 3.)\n\n def cm_relation(self, m, z, get_rs):\n \"\"\"\n The concentration-mass relation\n \"\"\"\n if self.pf['halo_cmr'] == 'duffy':\n return self._cm_duffy(m, z, get_rs)\n elif self.pf['halo_cmr'] == 'zehavi':\n return self._cm_zehavi(m, z, get_rs)\n else:\n raise NotImplemented('help!')\n\n def _cm_duffy(self, m, z, get_rs=True):\n c = 6.71 * (m / (2e12)) ** -0.091 * (1 + z) ** -0.44\n rvir = self.mvir_to_rvir(m)\n\n if get_rs:\n return c, rvir / c\n else:\n return c\n\n def _cm_zehavi(self, m, z, get_rs=True):\n c = ((m / 1.5e13) ** -0.13) * 9.0 / (1 + z)\n rvir = self.mvir_to_rvir(m)\n\n if get_rs:\n return c, rvir / c\n else:\n return c\n\n def _dc_nfw(self, c):\n return c** 3. / (4. * np.pi) / (np.log(1 + c) - c / (1 + c))\n\n def rho_nfw(self, r, m, z):\n\n c, r_s = self.cm_relation(m, z, get_rs=True)\n\n x = r / r_s\n rn = x / c\n\n if np.iterable(x):\n result = np.zeros_like(x)\n result[rn <= 1] = (self._dc_nfw(c) / (c * r_s)**3 / (x * (1 + x)**2))[rn <= 1]\n\n return result\n else:\n if rn <= 1.0:\n return self._dc_nfw(c) / (c * r_s) ** 3 / (x * (1 + x) ** 2)\n else:\n return 0.0\n\n def u_nfw(self, k, m, z):\n \"\"\"\n Normalized Fourier Transform of an NFW profile.\n\n ..note:: This is Equation 81 from Cooray & Sheth (2002).\n\n Parameters\n ----------\n k : int, float\n Wavenumber\n m :\n \"\"\"\n c, r_s = self.cm_relation(m, z, get_rs=True)\n\n K = k * r_s\n\n asi, ac = sp.sici((1 + c) * K)\n bs, bc = sp.sici(K)\n\n # The extra factor of np.log(1 + c) - c / (1 + c)) comes in because\n # there's really a normalization factor of 4 pi rho_s r_s^3 / m,\n # and m = 4 pi rho_s r_s^3 * the log term\n norm = 1. 
/ (np.log(1 + c) - c / (1 + c))\n\n return norm * (np.sin(K) * (asi - bs) - np.sin(c * K) / ((1 + c) * K) \\\n + np.cos(K) * (ac - bc))\n\n def u_isl(self, k, m, z, rmax):\n \"\"\"\n Normalized Fourier transform of an r^-2 profile.\n\n rmax : int, float\n Effective horizon. Distance a photon can travel between\n Ly-beta and Ly-alpha.\n\n \"\"\"\n\n asi, aco = sp.sici(rmax * k)\n\n return asi / rmax / k\n\n def u_isl_exp(self, k, m, z, rmax, rstar):\n return np.arctan(rstar * k) / rstar / k\n\n def u_exp(self, k, m, z, rmax):\n rs = 1.\n\n L0 = (m / 1e11)**1.\n c = rmax / rs\n\n kappa = k * rs\n\n norm = rmax / rs**3\n\n return norm / (1. + kappa**2)**2.\n\n def u_cgm_rahmati(self, k, m, z):\n rstar = 0.0025\n return np.arctan((rstar * k) ** 0.75) / (rstar * k) ** 0.75\n\n def u_cgm_steidel(self, k, m, z):\n rstar = 0.2\n return np.arctan((rstar * k) ** 0.85) / (rstar * k) ** 0.85\n\n def FluxProfile(self, r, m, z, lc=False):\n return m * self.ModulationFactor(z, r=r, lc=lc) / (4. * np.pi * r**2)\n\n #@RadialProfile.setter\n #def RadialProfile(self, value):\n # pass\n\n def FluxProfileFT(self, k, m, z, lc=False):\n _numerator = lambda r: 4. * np.pi * r**2 * np.sin(k * r) / (k * r) \\\n * self.FluxProfile(r, m, z, lc=lc)\n _denominator = lambda r: 4. * np.pi * r**2 *\\\n self.FluxProfile(r, m, z, lc=lc)\n _r_LW = 97.39 * self.ScalingFactor(z)\n temp = quad(_numerator, 0., _r_LW)[0] / quad(_denominator, 0., _r_LW)[0]\n return temp\n\n def ScalingFactor(self, z):\n return (self.cosm.h70 / 0.7)**-1 * (self.cosm.omega_m_0 / 0.27)**-0.5 * ((1. + z) / 21.)**-0.5\n\n def ModulationFactor(self, z0, z=None, r=None, lc=False):\n \"\"\"\n Return the modulation factor as a function of redshift or comoving distance\n - Reference: Ahn et al. 2009\n :param z0: source redshift\n :param z: the redshift (whose LW intensity is) of interest\n :param r: the distance from the source in cMpc\n :lc: True or False, including the light cone effect\n :return:\n \"\"\"\n if z != None and r == None:\n r_comov = self.cosm.ComovingRadialDistance(z0, z)\n elif z == None and r != None:\n r_comov = r\n else:\n raise ValueError('Must specify either \"z\" or \"r\".')\n alpha = self.ScalingFactor(z0)\n _a = 0.167\n r_star = c * _a * self.cosm.HubbleTime(z0) * (1.+z0) / cm_per_mpc\n ans = np.maximum(1.7 * np.exp(-(r_comov / 116.29 / alpha)**0.68) - 0.7, 0.0)\n if lc == True:\n ans *= np.exp(-r/r_star)\n return ans\n\n def _get_ps_integrals(self, k, iz, prof1, prof2, lum1, lum2, mmin1, mmin2,\n term):\n \"\"\"\n Compute integrals over profile, weighted by bias, dndm, etc.,\n needed for halo model.\n\n .. 
note :: This is really just a wrapper around _integrate_over_prof,\n that handles the fact that `k` can be a number or an array.\n\n \"\"\"\n\n if type(k) == np.ndarray:\n integ1 = []; integ2 = []\n for _k in k:\n _integ1, _integ2 = self._integrate_over_prof(_k, iz,\n prof1, prof2, lum1, lum2, mmin1, mmin2, term)\n integ1.append(_integ1)\n integ2.append(_integ2)\n\n integ1 = np.array(integ1)\n integ2 = np.array(integ2)\n else:\n integ1, integ2 = self._integrate_over_prof(k, iz,\n prof1, prof2, lum1, lum2, mmin1, mmin2, term)\n\n return integ1, integ2\n\n def _integrate_over_prof(self, k, iz, prof1, prof2, lum1, lum2, mmin1,\n mmin2, term):\n \"\"\"\n Compute integrals over profile, weighted by bias, dndm, etc.,\n needed for halo model.\n \"\"\"\n\n p1 = np.abs([prof1(k, M, self.tab_z[iz]) for M in self.tab_M])\n p2 = np.abs([prof2(k, M, self.tab_z[iz]) for M in self.tab_M])\n\n bias = self.tab_bias[iz]\n rho_bar = self.cosm.rho_m_z0 * rho_cgs\n dndlnm = self.tab_dndlnm[iz] # M * dndm\n\n if (mmin1 is None) and (lum1 is None):\n fcoll1 = 1.\n\n # Small halo correction. Make use of Cooray & Sheth Eq. 71\n _integrand = dndlnm * (self.tab_M / rho_bar) * bias\n corr1 = 1. - np.trapz(_integrand, x=np.log(self.tab_M))\n elif lum1 is not None:\n corr1 = 0.0\n fcoll1 = 1.\n else:\n fcoll1 = self.tab_fcoll[iz,np.argmin(np.abs(mmin1-self.tab_M))]\n corr1 = 0.0\n\n if (mmin2 is None) and (lum2 is None):\n fcoll2 = 1.#self.mgtm[iz,0] / rho_bar\n _integrand = dndlnm * (self.tab_M / rho_bar) * bias\n corr2 = 1. - np.trapz(_integrand, x=np.log(self.tab_M))\n elif lum2 is not None:\n corr2 = 0.0\n fcoll2 = 1.\n else:\n fcoll2 = self.fcoll_2d(z, np.log10(Mmin_2))#self.fcoll_Tmin[iz]\n corr2 = 0.0\n\n ok = self.tab_fcoll[iz] > 0\n\n # If luminosities passed, then we must cancel out a factor of halo\n # mass that generally normalizes the integrand.\n if lum1 is None:\n weight1 = self.tab_M\n norm1 = rho_bar * fcoll1\n else:\n weight1 = lum1\n norm1 = 1.\n\n if lum2 is None:\n weight2 = self.tab_M\n norm2 = rho_bar * fcoll2\n else:\n weight2 = lum2\n norm2 = 1.\n\n ##\n # Are we doing the 1-h or 2-h term?\n if term == 1:\n integrand = dndlnm * weight1 * weight2 * p1 * p2 / norm1 / norm2\n\n result = np.trapz(integrand[ok==1], x=np.log(self.tab_M[ok==1]))\n\n return result, None\n\n elif term == 2:\n integrand1 = dndlnm * weight1 * p1 * bias / norm1\n integrand2 = dndlnm * weight2 * p2 * bias / norm2\n\n integral1 = np.trapz(integrand1[ok==1], x=np.log(self.tab_M[ok==1]),\n axis=0)\n integral2 = np.trapz(integrand2[ok==1], x=np.log(self.tab_M[ok==1]),\n axis=0)\n\n return integral1 + corr1, integral2 + corr2\n\n else:\n raise NotImplemented('dunno man')\n\n def _prep_for_ps(self, z, k, prof1, prof2, ztol):\n \"\"\"\n Basic prep: fill prof1=None or prof2=None with defaults, determine\n the index of the requested redshift in our lookup tables.\n \"\"\"\n\n iz = np.argmin(np.abs(z - self.tab_z))\n\n if abs(self.tab_z[iz] - z) > ztol:\n raise ValueError('Requested z={} not in grid (ztol={}).'.format(z,\n ztol))\n\n if prof1 is None:\n prof1 = self.u_nfw\n if prof2 is None:\n prof2 = prof1\n\n if k is None:\n k = self.tab_k_lin\n\n return iz, k, prof1, prof2\n\n def _get_ps_lin(self, k, iz):\n \"\"\"\n Return linear matter power spectrum for requested wavenumber `k`.\n\n .. 
note :: Assumes we already know the index of the redshift of interest\n in our lookup tables, `iz`.\n\n \"\"\"\n if k is None:\n k = self.tab_k_lin\n ps_lin = self.tab_ps_lin[iz]\n else:\n ps_lin = np.exp(np.interp(np.log(k), np.log(self.tab_k_lin),\n np.log(self.tab_ps_lin[iz])))\n\n return ps_lin\n\n def get_ps_1h(self, z, k=None, prof1=None, prof2=None, lum1=None, lum2=None,\n mmin1=None, mmin2=None, ztol=1e-3):\n \"\"\"\n Compute 1-halo term of power spectrum.\n \"\"\"\n\n iz, k, prof1, prof2 = self._prep_for_ps(z, k, prof1, prof2, ztol)\n\n integ1, none = self._get_ps_integrals(k, iz, prof1, prof2,\n lum1, lum2, mmin1, mmin2, term=1)\n\n return integ1\n\n def get_ps_2h(self, z, k=None, prof1=None, prof2=None, lum1=None, lum2=None,\n mmin1=None, mmin2=None, ztol=1e-3):\n \"\"\"\n Get 2-halo term of power spectrum.\n \"\"\"\n\n iz, k, prof1, prof2 = self._prep_for_ps(z, k, prof1, prof2, ztol)\n\n ps_lin = self._get_ps_lin(k, iz)\n\n integ1, integ2 = self._get_ps_integrals(k, iz, prof1, prof2,\n lum1, lum2, mmin1, mmin2, term=2)\n\n ps = integ1 * integ2 * ps_lin\n\n return ps\n\n def get_ps_shot(self, z, k=None, lum1=None, lum2=None, mmin1=None, mmin2=None,\n ztol=1e-3):\n \"\"\"\n Compute the two halo term quickly\n \"\"\"\n\n iz, k, _prof1_, _prof2_ = self._prep_for_ps(z, k, None, None, ztol)\n\n dndlnm = self.tab_dndlnm[iz]\n integrand = dndlnm * lum1 * lum2\n shot = np.trapz(integrand, x=np.log(self.tab_M), axis=0)\n\n return shot\n\n def get_ps_tot(self, z, k=None, prof1=None, prof2=None, lum1=None, lum2=None,\n mmin1=None, mmin2=None, ztol=1e-3):\n \"\"\"\n Return total power spectrum as sum of 1h and 2h terms.\n \"\"\"\n ps_1h = self.get_ps_1h(z, k, prof1, prof2, lum1, lum2, mmin1, mmin2, ztol)\n ps_2h = self.get_ps_2h(z, k, prof1, prof2, lum1, lum2, mmin1, mmin2, ztol)\n\n return ps_1h + ps_2h\n\n def CorrelationFunction(self, z, R, k=None, Pofk=None, load=True):\n \"\"\"\n Compute the correlation function of the matter power spectrum.\n\n Parameters\n ----------\n z : int, float\n Redshift of interest.\n R : int, float, np.ndarray\n Scale(s) of interest\n\n \"\"\"\n\n ##\n # Load from table\n ##\n if self.pf['hmf_load_ps'] and load:\n iz = np.argmin(np.abs(z - self.tab_z_ps))\n assert abs(z - self.tab_z_ps[iz]) < 1e-2, \\\n 'Supplied redshift (%g) not in table!' 
% z\n if len(R) == len(self.tab_R):\n assert np.allclose(R, self.tab_R)\n return self.tab_cf_mm[iz]\n\n return np.interp(R, self.tab_R, self.tab_cf_mm[iz])\n\n ##\n # Compute from scratch\n ##\n\n # Has P(k) already been computed?\n if Pofk is not None:\n if k is None:\n k = self.tab_k\n assert len(Pofk) == len(self.tab_k), \\\n \"Mismatch in shape between Pofk and k!\"\n\n else:\n k = self.tab_k\n Pofk = self.get_ps_tot(z, self.tab_k)\n\n return self.InverseFT3D(R, Pofk, k)\n\n def InverseFT3D(self, R, ps, k=None, kmin=None, kmax=None,\n epsabs=1e-12, epsrel=1e-12, limit=500, split_by_scale=False,\n method='clenshaw-curtis', use_pb=False, suppression=np.inf):\n \"\"\"\n Take a power spectrum and perform the inverse (3-D) FT to recover\n a correlation function.\n \"\"\"\n assert type(R) == np.ndarray\n\n if (type(ps) == FunctionType) or isinstance(ps, interp1d) \\\n or isinstance(ps, Akima1DInterpolator):\n k = ps.x\n elif type(ps) == np.ndarray:\n # Setup interpolant\n\n assert k is not None, \"Must supply k vector as well!\"\n\n #if interpolant == 'akima':\n # ps = Akima1DInterpolator(k, ps)\n #elif interpolant == 'cubic':\n\n ps = interp1d(np.log(k), ps, kind='cubic', assume_sorted=True,\n bounds_error=False, fill_value=0.0)\n\n #_ps = interp1d(np.log(k), np.log(ps), kind='cubic', assume_sorted=True,\n # bounds_error=False, fill_value=-np.inf)\n #\n #ps = lambda k: np.exp(_ps.__call__(np.log(k)))\n\n else:\n raise ValueError('Do not understand type of `ps`.')\n\n if kmin is None:\n kmin = k.min()\n if kmax is None:\n kmax = k.max()\n\n norm = 1. / ps(np.log(kmax))\n\n ##\n # Use Steven Murray's `hankel` package to do the transform\n ##\n if method == 'ogata':\n assert have_hankel, \"hankel package required for this!\"\n\n integrand = lambda kk: four_pi * kk**2 * norm * ps(np.log(kk)) \\\n * np.exp(-kk * R / suppression)\n ht = HankelTransform(nu=0, N=k.size, h=0.001)\n\n #integrand = lambda kk: ps(np.log(kk)) * norm\n #ht = SymmetricFourierTransform(3, N=k.size, h=0.001)\n\n #print(ht.integrate(integrand))\n cf = ht.transform(integrand, k=R, ret_err=False, inverse=True) / norm\n\n return cf / (2. * np.pi)**3\n else:\n pass\n # Otherwise, do it by-hand.\n\n\n ##\n # Optional progress bar\n ##\n pb = ProgressBar(R.size, use=self.pf['progress_bar'] * use_pb,\n name='ps(k)->cf(R)')\n\n # Loop over R and perform integral\n cf = np.zeros_like(R)\n for i, RR in enumerate(R):\n\n if not pb.has_pb:\n pb.start()\n\n pb.update(i)\n\n # Leave sin(k*R) out -- that's the 'weight' for scipy.\n integrand = lambda kk: norm * four_pi * kk**2 * ps(np.log(kk)) \\\n * np.exp(-kk * RR / suppression) / kk / RR\n\n if method == 'clenshaw-curtis':\n\n if split_by_scale:\n kcri = np.exp(ps.x[np.argmin(np.abs(np.exp(ps.x) - 1. / RR))])\n\n # Integral over small k is easy\n lowk = np.exp(ps.x) <= kcri\n klow = np.exp(ps.x[lowk == 1])\n plow = ps.y[lowk == 1]\n sinc = np.sin(RR * klow) / klow / RR\n integ = norm * four_pi * klow**2 * plow * sinc \\\n * np.exp(-klow * RR / suppression)\n cf[i] = np.trapz(integ * klow, x=np.log(klow)) / norm\n\n kstart = kcri\n\n #print(RR, 1. 
/ RR, kcri, lowk.sum(), ps.x.size - lowk.sum())\n #\n #if lowk.sum() < 1000 and lowk.sum() % 100 == 0:\n # import matplotlib.pyplot as pl\n #\n # pl.figure(2)\n #\n # sinc = np.sin(RR * k) / k / RR\n # pl.loglog(k, integrand(k) * sinc, color='k')\n # pl.loglog([kcri]*2, [1e-4, 1e4], color='y')\n # raw_input('')\n\n else:\n kstart = kmin\n\n # Add in the wiggly part\n cf[i] += quad(integrand, kstart, kmax,\n epsrel=epsrel, epsabs=epsabs, limit=limit,\n weight='sin', wvar=RR)[0] / norm\n\n else:\n raise NotImplementedError('Unrecognized method: {}'.format(method))\n\n pb.finish()\n\n # Our FT convention\n cf /= (2 * np.pi)**3\n\n return cf\n\n def FT3D(self, k, cf, R=None, Rmin=None, Rmax=None,\n epsabs=1e-12, epsrel=1e-12, limit=500, split_by_scale=False,\n method='clenshaw-curtis', use_pb=False, suppression=np.inf):\n \"\"\"\n This is nearly identical to the inverse transform function above,\n I just got tired of having to remember to swap meanings of the\n k and R variables. Sometimes clarity is better than minimizing\n redundancy.\n \"\"\"\n assert type(k) == np.ndarray\n\n if (type(cf) == FunctionType) or isinstance(cf, interp1d) \\\n or isinstance(cf, Akima1DInterpolator):\n R = cf.x\n elif type(cf) == np.ndarray:\n # Setup interpolant\n\n assert R is not None, \"Must supply R vector as well!\"\n\n #if interpolant == 'akima':\n # ps = Akima1DInterpolator(k, ps)\n #elif interpolant == 'cubic':\n cf = interp1d(np.log(R), cf, kind='cubic', assume_sorted=True,\n bounds_error=False, fill_value=0.0)\n\n else:\n raise ValueError('Do not understand type of `cf`.')\n\n if Rmin is None:\n Rmin = R.min()\n if Rmax is None:\n Rmax = R.max()\n\n norm = 1. / cf(np.log(Rmin))\n\n if method == 'ogata':\n assert have_hankel, \"hankel package required for this!\"\n\n integrand = lambda RR: four_pi * RR**2 * norm * cf(np.log(RR))\n ht = HankelTransform(nu=0, N=k.size, h=0.1)\n\n #integrand = lambda kk: ps(np.log(kk)) * norm\n #ht = SymmetricFourierTransform(3, N=k.size, h=0.001)\n\n #print(ht.integrate(integrand))\n ps = ht.transform(integrand, k=k, ret_err=False, inverse=False) / norm\n\n return ps\n\n ##\n # Optional progress bar\n ##\n pb = ProgressBar(R.size, use=self.pf['progress_bar'] * use_pb,\n name='cf(R)->ps(k)')\n\n # Loop over k and perform integral\n ps = np.zeros_like(k)\n for i, kk in enumerate(k):\n\n if not pb.has_pb:\n pb.start()\n\n pb.update(i)\n\n if method == 'clenshaw-curtis':\n\n # Leave sin(k*R) out -- that's the 'weight' for scipy.\n # Note the minus sign.\n integrand = lambda RR: norm * four_pi * RR**2 * cf(np.log(RR)) \\\n * np.exp(-kk * RR / suppression) / kk / RR\n\n if split_by_scale:\n Rcri = np.exp(cf.x[np.argmin(np.abs(np.exp(cf.x) - 1.
/ kk))])\n\n # Integral over small R is easy\n lowR = np.exp(cf.x) <= Rcri\n Rlow = np.exp(cf.x[lowR == 1])\n clow = cf.y[lowR == 1]\n sinc = np.sin(kk * Rlow) / Rlow / kk\n integ = norm * four_pi * Rlow**2 * clow * sinc \\\n * np.exp(-kk * Rlow / suppression)\n ps[i] = np.trapz(integ * Rlow, x=np.log(Rlow)) / norm\n\n Rstart = Rcri\n\n #if lowR.sum() < 1000 and lowR.sum() % 100 == 0:\n # import matplotlib.pyplot as pl\n #\n # pl.figure(2)\n #\n # sinc = np.sin(kk * R) / kk / R\n # pl.loglog(R, integrand(R) * sinc, color='k')\n # pl.loglog([Rcri]*2, [1e-4, 1e4], color='y')\n # raw_input('')\n\n else:\n Rstart = Rmin\n\n\n # Use 'chebmo' to save Chebyshev moments and pass to next integral?\n ps[i] += quad(integrand, Rstart, Rmax,\n epsrel=epsrel, epsabs=epsabs, limit=limit,\n weight='sin', wvar=kk)[0] / norm\n\n\n else:\n raise NotImplementedError('Unrecognized method: {}'.format(method))\n\n pb.finish()\n\n return np.abs(ps)\n\n @property\n def tab_k(self):\n \"\"\"\n k-vector constructed from mps parameters.\n \"\"\"\n if not hasattr(self, '_tab_k'):\n dlogk = self.pf['hps_dlnk']\n kmi, kma = self.pf['hps_lnk_min'], self.pf['hps_lnk_max']\n logk = np.arange(kmi, kma+dlogk, dlogk)\n self._tab_k = np.exp(logk)\n\n return self._tab_k\n\n @tab_k.setter\n def tab_k(self, value):\n self._tab_k = value\n\n @property\n def tab_R(self):\n \"\"\"\n R-vector constructed from mps parameters.\n \"\"\"\n if not hasattr(self, '_tab_R'):\n dlogR = self.pf['hps_dlnR']\n Rmi, Rma = self.pf['hps_lnR_min'], self.pf['hps_lnR_max']\n logR = np.arange(Rmi, Rma+dlogR, dlogR)\n self._tab_R = np.exp(logR)\n\n return self._tab_R\n\n @property\n def tab_z_ps(self):\n \"\"\"\n Redshift array -- different from the HMF redshifts!\n \"\"\"\n if not hasattr(self, '_tab_z_ps'):\n zmin = self.pf['hps_zmin']\n zmax = self.pf['hps_zmax']\n dz = self.pf['hps_dz']\n\n Nz = int(round(((zmax - zmin) / dz) + 1, 1))\n self._tab_z_ps = np.linspace(zmin, zmax, Nz)\n\n return self._tab_z_ps\n\n @tab_z_ps.setter\n def tab_z_ps(self, value):\n self._tab_z_ps = value\n\n @tab_R.setter\n def tab_R(self, value):\n self._tab_R = value\n\n print('Setting R attribute. Should verify it matches PS.')\n\n def __getattr__(self, name):\n\n if hasattr(HaloMassFunction, name):\n return HaloMassFunction.__dict__[name].__get__(self, HaloMassFunction)\n\n if (name[0] == '_'):\n raise AttributeError('This will get caught. Don\\'t worry! {}'.format(name))\n\n if name not in self.__dict__.keys():\n if self.pf['hmf_load']:\n self._load_hmf()\n else:\n # Can generate on the fly!\n if name == 'tab_MAR':\n self.TabulateMAR()\n else:\n self.TabulateHMF(save_MAR=False)\n\n if name not in self.__dict__.keys():\n self._load_ps()\n\n return self.__dict__[name]\n\n def _load_ps(self, suffix='hdf5'):\n \"\"\" Load table from HDF5 or binary.
\"\"\"\n\n if self.pf['hps_assume_linear']:\n print(\"Assuming linear matter PS...\")\n self._tab_ps_mm = np.zeros((self.tab_z_ps.size, self.tab_k.size))\n self._tab_cf_mm = np.zeros((self.tab_z_ps.size, self.tab_R.size))\n for i, _z_ in enumerate(self.tab_z_ps):\n iz = np.argmin(np.abs(_z_ - self.tab_z))\n self._tab_ps_mm[i,:] = self._get_ps_lin(_z_, iz)\n\n return\n\n fn = '%s/input/hmf/%s.%s' % (ARES, self.tab_prefix_ps(), suffix)\n\n if re.search('.hdf5', fn) or re.search('.h5', fn):\n f = h5py.File(fn, 'r')\n self.tab_z_ps = f['tab_z_ps'].value\n self.tab_R = f['tab_R'].value\n self.tab_k = f['tab_k'].value\n self.tab_ps_mm = f['tab_ps_mm'].value\n self.tab_cf_mm = f['tab_cf_mm'].value\n f.close()\n elif re.search('.pkl', fn):\n f = open(fn, 'rb')\n self.tab_z_ps = pickle.load(f)\n self.tab_R = pickle.load(f)\n self.tab_k = pickle.load(f)\n self.tab_ps_mm = pickle.load(f)\n self.tab_cf_mm = pickle.load(f)\n f.close()\n else:\n raise IOError('Unrecognized format for hps_table.')\n\n def tab_prefix_ps(self, with_size=True):\n \"\"\"\n What should we name this table?\n\n Convention:\n ps_FIT_logM_nM_logMmin_logMmax_z_nz_\n\n Read:\n halo mass function using FIT form of the mass function\n using nM mass points between logMmin and logMmax\n using nz redshift points between zmin and zmax\n\n \"\"\"\n\n M1, M2 = self.pf['hmf_logMmin'], self.pf['hmf_logMmax']\n\n z1, z2 = self.pf['hps_zmin'], self.pf['hps_zmax']\n\n dlogk = self.pf['hps_dlnk']\n kmi, kma = self.pf['hps_lnk_min'], self.pf['hps_lnk_max']\n #logk = np.arange(kmi, kma+dlogk, dlogk)\n #karr = np.exp(logk)\n\n dlogR = self.pf['hps_dlnR']\n Rmi, Rma = self.pf['hps_lnR_min'], self.pf['hps_lnR_max']\n #logR = np.arange(np.log(Rmi), np.log(Rma)+dlogR, dlogR)\n #Rarr = np.exp(logR)\n\n\n if with_size:\n logMsize = (self.pf['hmf_logMmax'] - self.pf['hmf_logMmin']) \\\n / self.pf['hmf_dlogM']\n zsize = ((self.pf['hps_zmax'] - self.pf['hps_zmin']) \\\n / self.pf['hps_dz']) + 1\n\n assert logMsize % 1 == 0\n logMsize = int(logMsize)\n assert zsize % 1 == 0\n zsize = int(round(zsize, 1))\n\n # Should probably save NFW information etc. 
too\n return 'hps_%s_logM_%s_%i-%i_z_%s_%i-%i_lnR_%.1f-%.1f_dlnR_%.3f_lnk_%.1f-%.1f_dlnk_%.3f' \\\n % (self.hmf_func, logMsize, M1, M2, zsize, z1, z2,\n Rmi, Rma, dlogR, kmi, kma, dlogk)\n else:\n raise NotImplementedError('help')\n\n def tab_prefix_ps_check(self, with_size=True):\n \"\"\"\n A version of the prefix to be used only for checkpointing.\n\n This just means take the full prefix and hack out the bit with the\n redshift interval.\n \"\"\"\n\n prefix = self.tab_prefix_ps(with_size)\n\n iz = prefix.find('_z_')\n iR = prefix.find('_lnR_')\n\n return prefix[0:iz] + prefix[iR:]\n\n @property\n def tab_ps_mm(self):\n if not hasattr(self, '_tab_ps_mm'):\n self._load_ps()\n return self._tab_ps_mm\n\n @tab_ps_mm.setter\n def tab_ps_mm(self, value):\n self._tab_ps_mm = value\n\n @property\n def tab_cf_mm(self):\n if not hasattr(self, '_tab_cf_mm'):\n ps = self.tab_ps_mm\n return self._tab_cf_mm\n\n @tab_cf_mm.setter\n def tab_cf_mm(self, value):\n self._tab_cf_mm = value\n\n def TabulatePS(self, clobber=False, checkpoint=True, **ftkwargs):\n \"\"\"\n Tabulate the matter power spectrum as a function of redshift and k.\n \"\"\"\n\n pb = ProgressBar(len(self.tab_z_ps), 'ps_dd')\n pb.start()\n\n # Lists to store any checkpoints that are found\n _z = []\n _ps = []\n _cf = []\n if checkpoint:\n if (not os.path.exists('tmp')):\n os.mkdir('tmp')\n\n pref = self.tab_prefix_ps_check(True)\n fn = 'tmp/{}.{}.pkl'.format(pref, str(rank).zfill(3))\n\n if os.path.exists(fn) and (not clobber):\n\n # Should delete if clobber == True?\n\n if rank == 0:\n print(\"Checkpoints for this model found in tmp/.\")\n print(\"Re-run with clobber=True to overwrite.\")\n\n f = open(fn, 'rb')\n while True:\n\n try:\n tmp = pickle.load(f)\n except EOFError:\n break\n\n _z.append(tmp[0])\n _ps.append(tmp[1])\n _cf.append(tmp[2])\n\n if _z != []:\n print(\"Processor {} loaded checkpoints for z={}\".format(rank, _z))\n\n elif os.path.exists(fn):\n os.remove(fn)\n\n # Must collect checkpoints so we don't re-run something another\n # processor did!\n if size > 1 and _z != []:\n _zdone = MPI.COMM_WORLD.reduce(_z, root=0)\n zdone = MPI.COMM_WORLD.bcast(_zdone, root=0)\n _zdone_by = MPI.COMM_WORLD.reduce([rank] * len(_z), root=0)\n zdone_by = MPI.COMM_WORLD.bcast(_zdone_by, root=0)\n else:\n zdone = []\n zdone_by = []\n\n # Figure out what redshift still need to be done by somebody\n assignments = []\n for k, z in enumerate(self.tab_z_ps):\n if z in zdone:\n continue\n\n assignments.append(z)\n\n # Split up the work among processors\n my_assignments = []\n for k, z in enumerate(assignments):\n if k % size != rank:\n continue\n\n my_assignments.append(z)\n\n if size > 1:\n if len(assignments) % size != 0:\n print(\"WARNING: Uneven load: {} redshifts and {} processors!\".format(len(assignments), size))\n\n tab_ps_mm = np.zeros((len(self.tab_z_ps), len(self.tab_k)))\n tab_cf_mm = np.zeros((len(self.tab_z_ps), len(self.tab_R)))\n for i, z in enumerate(self.tab_z_ps):\n\n # Done but not by me!\n if (z in zdone) and (z not in _z):\n continue\n\n if z not in my_assignments:\n continue\n\n ##\n # Calculate from scratch\n ##\n print(\"Processor {} generating z={} PS and CF...\".format(rank, z))\n\n # Must interpolate back to fine grid (uniformly sampled\n # real-space scales) to do FFT and obtain correlation function\n tab_ps_mm[i] = self.get_ps_tot(z, self.tab_k)\n\n # Compute correlation function at native resolution to save time\n # later.\n tab_cf_mm[i] = self.InverseFT3D(self.tab_R, tab_ps_mm[i],\n self.tab_k, **ftkwargs)\n\n 
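# (annotation, not in the original) tab_ps_mm[i] now holds P(k) on self.tab_k and tab_cf_mm[i] the matching correlation function on self.tab_R; the checkpoint block below appends one (z, ps, cf) tuple per finished redshift so interrupted MPI runs can resume.\n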
pb.update(i)\n\n if not checkpoint:\n continue\n\n with open(fn, 'ab') as f:\n pickle.dump((z, tab_ps_mm[i], tab_cf_mm[i]), f)\n #print(\"Processor {} wrote checkpoint for z={}\".format(rank, z))\n\n pb.finish()\n\n # Grab checkpoints before writing to disk\n for i, z in enumerate(self.tab_z_ps):\n\n # Done but not by me! If not for this, Allreduce would sum\n # solutions from different processors.\n if (z in zdone) and (z not in _z):\n continue\n\n # Two processors did the same redshift (backward compatibility)\n if zdone.count(z) > 1:\n done_by = []\n for ii, zz in enumerate(zdone):\n if zz != z:\n continue\n done_by.append(zdone_by[ii])\n\n if rank != done_by[0]:\n continue\n\n ##\n # Load checkpoint, if one exists.\n ##\n if z in _z:\n\n j = _z.index(z)\n tab_ps_mm[i] = _ps[j]\n tab_cf_mm[i] = _cf[j]\n\n\n # Collect results!\n if size > 1:\n tmp1 = np.zeros_like(tab_ps_mm)\n nothing = MPI.COMM_WORLD.Allreduce(tab_ps_mm, tmp1)\n self.tab_ps_mm = tmp1\n\n tmp2 = np.zeros_like(tab_cf_mm)\n nothing = MPI.COMM_WORLD.Allreduce(tab_cf_mm, tmp2)\n self.tab_cf_mm = tmp2\n\n else:\n self.tab_ps_mm = tab_ps_mm\n self.tab_cf_mm = tab_cf_mm\n\n # Done!\n\n def SavePS(self, fn=None, clobber=True, destination=None, format='hdf5',\n checkpoint=True, **ftkwargs):\n \"\"\"\n Save matter power spectrum table to HDF5 or binary (via pickle).\n\n Parameters\n ----------\n fn : str (optional)\n Name of file to save results to. If None, will use\n self.tab_prefix_ps and value of format parameter to make one up.\n clobber : bool\n Overwrite pre-existing files of the same name?\n destination : str\n Path to directory (other than CWD) to save table.\n format : str\n Format of output. Can be 'hdf5' or 'pkl'\n\n \"\"\"\n\n if destination is None:\n destination = '.'\n\n # Determine filename\n if fn is None:\n fn = '%s/%s.%s' % (destination, self.tab_prefix_ps(True), format)\n else:\n if format not in fn:\n print(\"Suffix of provided filename does not match chosen format.\")\n print(\"Will go with format indicated by filename suffix.\")\n\n if os.path.exists(fn):\n if clobber:\n os.system('rm -f %s' % fn)\n else:\n raise IOError('File %s exists! Set clobber=True or remove manually.' % fn)\n\n # Do this first! (Otherwise parallel runs will be garbage)\n self.TabulatePS(clobber=clobber, checkpoint=checkpoint, **ftkwargs)\n\n if rank > 0:\n return\n\n self._write_ps(fn, clobber, format)\n\n def _write_ps(self, fn, clobber, format='hdf5'):\n\n try:\n import hmf\n hmf_v = hmf.__version__\n except (ImportError, AttributeError):\n hmf_v = 'unknown'\n\n if os.path.exists(fn):\n if clobber:\n os.system('rm -f %s' % fn)\n else:\n raise IOError('File %s exists! Set clobber=True or remove manually.' % fn)\n\n if format == 'hdf5':\n f = h5py.File(fn, 'w')\n f.create_dataset('tab_z_ps', data=self.tab_z_ps)\n f.create_dataset('tab_R', data=self.tab_R)\n f.create_dataset('tab_k', data=self.tab_k)\n f.create_dataset('tab_ps_mm', data=self.tab_ps_mm)\n f.create_dataset('tab_cf_mm', data=self.tab_cf_mm)\n\n f.close()\n # Otherwise, pickle it!\n else:\n f = open(fn, 'wb')\n pickle.dump(self.tab_z_ps, f)\n pickle.dump(self.tab_R, f)\n pickle.dump(self.tab_k, f)\n pickle.dump(self.tab_ps_mm, f)\n pickle.dump(self.tab_cf_mm, f)\n pickle.dump({'hmf-version': hmf_v}, f)\n f.close()\n\n print('Wrote %s.'
% fn)\n return\n","repo_name":"mirochaj/ares","sub_path":"ares/physics/HaloModel.py","file_name":"HaloModel.py","file_ext":"py","file_size_in_byte":35136,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"28"} +{"seq_id":"32186463865","text":"from django.test import TestCase\nfrom rest_framework.test import APIClient\n\n\n# Create your tests here.\nclass VGNControllerTest(TestCase):\n def setUp(self) -> None:\n self.client = APIClient()\n\n def test_with_coords_correct_url(self):\n response = self.client.get('/api/v1/vgn/',\n {'from_lon': 10.8911,\n 'from_lat': 49.8934,\n 'to_lon': 10.905089378356934,\n 'to_lat': 49.90699828690093\n },\n format='json')\n self.assertEqual(response.data, {\n \"url\": \"https://www.vgn.de/verbindungen/?to=coord:10.8911:49.8934:WGS84[DD.DDDDD]:Bamberg, Franz-Ludwig-Straße 8&td=coord:10.905089378356934:49.90699828690093:WGS84[DD.DDDDD]:Bamberg, Feldkirchenstraße 21\"\n })\n\n def test_without_coords_400(self):\n response = self.client.get('/api/v1/vgn/', format='json')\n self.assertEqual(response.status_code, 400)\n","repo_name":"michigg/lector_backend","sub_path":"src/apps/vgn_controller/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"74693844556","text":"# import sys\r\n# print(sys.version)\r\n# print(sys.executable)\r\n\r\nimport feedparser as fp\r\nimport json\r\nimport newspaper\r\nfrom newspaper import Article\r\nfrom time import mktime\r\nfrom datetime import datetime\r\nimport csv\r\n\r\n# print(\"Hello\")\r\n\r\n# Set the limit for number of articles to download\r\nLIMIT = 100\r\narticles_array = []\r\n\r\ndata = {}\r\ndata['newspapers'] = {}\r\n\r\n# Loads the JSON files with news sites\r\nwith open('news_links.json') as data_file:\r\n companies = json.load(data_file)\r\n\r\n# import pdb; pdb.set_trace()\r\n# print(companies)\r\n\r\n\r\n############# Extract News Article ###########\r\ncount = 1\r\n# Iterate through each news company\r\nfor company, value in companies.items():\r\n if 'rss' in value:\r\n d = fp.parse(value['rss'])\r\n print(\"Downloading articles from \", company)\r\n newsPaper = {\r\n \"rss\": value['rss'],\r\n \"link\": value['link'],\r\n \"articles\": []\r\n }\r\n for entry in d.entries:\r\n # Check if publish date is provided, if no the article is skipped.\r\n # This is done to keep consistency in the data and to keep the script from crashing.\r\n if hasattr(entry, 'published'):\r\n if count > LIMIT:\r\n break\r\n article = {}\r\n article['link'] = entry.link\r\n date = entry.published_parsed\r\n article['published'] = datetime.fromtimestamp(mktime(date)).isoformat()\r\n try:\r\n content = Article(entry.link)\r\n content.download()\r\n content.parse()\r\n except Exception as e:\r\n # If the download for some reason fails (ex. 
404) the script will continue downloading\r\n # the next article.\r\n print(e)\r\n print(\"continuing...\")\r\n continue\r\n article['title'] = content.title\r\n article['text'] = content.text\r\n article['authors'] = content.authors\r\n article['top_image'] = content.top_image\r\n article['movies'] = content.movies\r\n newsPaper['articles'].append(article)\r\n articles_array.append(article)\r\n print(count, \"articles downloaded from\", company, \", url: \", entry.link)\r\n count = count + 1\r\n else:\r\n # This is the fallback method if a RSS-feed link is not provided.\r\n # It uses the python newspaper library to extract articles\r\n print(\"Building site for \", company)\r\n paper = newspaper.build(value['link'], memoize_articles=False)\r\n newsPaper = {\r\n \"link\": value['link'],\r\n \"articles\": []\r\n }\r\n noneTypeCount = 0\r\n for content in paper.articles:\r\n if count > LIMIT:\r\n break\r\n try:\r\n content.download()\r\n content.parse()\r\n except Exception as e:\r\n print(e)\r\n print(\"continuing...\")\r\n continue\r\n # Again, for consistency, if there is no found publish date the article will be skipped.\r\n\r\n article = {}\r\n article['title'] = content.title\r\n article['authors'] = content.authors\r\n article['text'] = content.text\r\n article['top_image'] = content.top_image\r\n article['movies'] = content.movies\r\n article['link'] = content.url\r\n article['published'] = content.publish_date\r\n newsPaper['articles'].append(article)\r\n articles_array.append(article)\r\n print(count, \"articles downloaded from\", company, \" using newspaper, url: \", content.url)\r\n count = count + 1\r\n #noneTypeCount = 0\r\n count = 1\r\n data['newspapers'][company] = newsPaper\r\n\r\n\r\n\r\n######################## Save into CSV File\r\n\r\n\r\ndef one_line(value):\r\n # value.strip('\\\"')\r\n return ''.join(value.splitlines())\r\n\r\ntry:\r\n f = csv.writer(open('Scraped_data_news_output.csv', 'w', encoding='utf-8'))\r\n # f.writerow(['Title', 'Authors','Text','Image','Videos','Link','Published_Date'])\r\n #print(article)\r\n for artist_name in articles_array:\r\n # import pdb; pdb.set_trace()\r\n title = artist_name['title']\r\n authors=artist_name['authors']\r\n text=artist_name['text']\r\n image=artist_name['top_image']\r\n video=artist_name['movies']\r\n link=artist_name['link']\r\n publish_date=artist_name['published']\r\n # Add each artist’s name and associated link to a row\r\n # f.writerow([title, authors, text, image, video, link, publish_date])\r\n\r\n # Add only text for each article \r\n # import pdb; pdb.set_trace()\r\n new_text = one_line(text)\r\n f.writerow([new_text])\r\nexcept Exception as e: print(e)\r\n\r\n","repo_name":"sawal386/cs229_project","sub_path":"online_news_retrieval.py","file_name":"online_news_retrieval.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"16504250749","text":"class Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n if len(nums) == 0:\n return [[]]\n \n res = []\n for i in range(len(nums)):\n for l in self.permuteUnique(nums[0:i] + nums[i + 1:]):\n tmp = [nums[i]] + l\n if tmp not in res:\n res.append([nums[i]] + l)\n \n # res = list(set(res))\n \n return res\n","repo_name":"FoxerLee/Leetcode","sub_path":"submission/python/0047.py","file_name":"0047.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} 
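Aside (editorial note, not part of either adjacent record): the permuteUnique solution above deduplicates with `tmp not in res`, a full list scan per candidate that makes the routine quadratic in the number of permutations produced. A common alternative, sketched here against the same LeetCode 47 problem statement, backtracks over a multiset of counts so duplicates are never generated at all:

    from collections import Counter
    from typing import List

    def permute_unique(nums: List[int]) -> List[List[int]]:
        res, path, counts = [], [], Counter(nums)

        def backtrack() -> None:
            if len(path) == len(nums):
                res.append(path[:])  # snapshot the finished permutation
                return
            for x in counts:  # each distinct value is tried once per slot
                if counts[x]:
                    counts[x] -= 1
                    path.append(x)
                    backtrack()
                    path.pop()
                    counts[x] += 1

        backtrack()
        return res

    # e.g. permute_unique([1, 1, 2]) -> [[1, 1, 2], [1, 2, 1], [2, 1, 1]]

Because each distinct value fills a given slot at most once, no membership test is needed and the output is duplicate-free by construction.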
+{"seq_id":"20759408506","text":"\"\"\"\nLatihan\n\n>= 88 kriteria A\n77 >= dan < 88 kriteria B\n60 >= dan < 77 kriteria C \n45 >= dan < 60 kriteria D\nselain itu E \n\"\"\"\n\nnilai = 95\n\nif nilai >= 88:\n print('A')\nelif nilai >= 77 and nilai < 88:\n print('B')\nelif nilai >= 60 and nilai < 77:\n print('C')\nelif nilai >= 45 and nilai < 60:\n print('D')\nelse:\n print('E')\n\n\"\"\"\nBuatlah program untuk menampilkan bilangan \nyang memiliki kelipatan 3, jika sudah ditemukan sebanyak \nvariable \"sampai\", maka berhenti.\nGunakan while atau for dan break \n\"\"\"\n\nmulai = input('Mulai dari: ') #10\nsampai = input('Sampai: ') #50\ntemuan = input('Jumlah temuan? ') #7\n\nmulai = int(mulai)\nsampai = int(sampai)\ntemuan = int(temuan)\n \nfor i in range(mulai, sampai, 3):\n print(i)\n if i is temuan:\n print('angka ditemukan', i)\n break","repo_name":"lyfewithcode/python-v1","sub_path":"exercise 2.py","file_name":"exercise 2.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36299022030","text":"from django.shortcuts import *\nfrom rest_framework import viewsets\nfrom .models import *\nfrom .serializers import *\nfrom django.http import HttpResponse\nfrom rest_framework.permissions import *\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.http import JsonResponse\nfrom rest_framework.decorators import api_view, permission_classes\nfrom django.utils import timezone\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework.status import *\n# Create your views here.\n\n\n# HOME PAGE VIEW\nclass HomeView(APIView):\n #permission_classes = [IsAuthenticatedOrReadOnly]\n def get(self, request, *args, **kwargs):\n user = request.user\n qs = Food.objects.all()\n serializer = FoodSerializer(qs, many=True)\n return Response(serializer.data)\n\n\nclass FoodViewSet(viewsets.ModelViewSet):\n queryset =Food.objects.all()\n serializer_class =FoodSerializer\n permission_classes = [IsAuthenticatedOrReadOnly]\n\n\n\n@api_view(['GET'])\ndef getproduct(request, pk):\n product = Food.objects.get(pk=pk)\n indserializer = FoodSerializer(product, many=False)\n return Response(indserializer.data)\n\n\n# FUNCTION TO ADD ITEM TO CART\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef add_to_cart(request, pk):\n # ORDER ITEM TO BE ADDED TO CART\n food = get_object_or_404(Food, pk=pk)\n order_item, created = OrderItem.objects.get_or_create(\n food=food,\n user=request.user,\n ordered=False\n )\n order_qs = Order.objects.filter(user=request.user, ordered=False)\n # IF STATEMENT TO CHECK IF ORDER ITEM EXIST IN CART\n if order_qs.exists():\n order = order_qs[0]\n # IF STATEMENT TO INCREASE THE ORDER ITEM QUANTITY BY 1 IF THE USER ALREADY HAS THE food ITEM IN CART\n if order.foods.filter(food__pk=food.pk).exists():\n order_item.quantity += 1\n order_item.save()\n else:\n order.foods.add(order_item)\n return Response({\n 'cart_status': 'updated in cart'\n })\n else:\n # CREATE AN ORDER ITEM OF FOOD IF IT DOES NOT EXIST IN USER CART\n ordered_date = timezone.now()\n order = Order.objects.create(user=request.user, ordered_date=ordered_date)\n order.foods.add(order_item)\n return Response ({\n 'cart_status': 'added to cart'\n })\n\n\n\n# REMOVE FROM CART FUNCTIONS\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef remove_from_cart(request, pk):\n # ORDER ITEM TO REMOVE FROM CART\n food = get_object_or_404(Food, pk=pk)\n 
order_qs = Order.objects.filter(\n user=request.user,\n ordered=False\n )\n # CHECK IF ORDER ITEM EXIST IN CART ITEMS OF USER\n if order_qs.exists():\n order = order_qs[0]\n # check if the order item is in the order\n if order.foods.filter(food__pk=food.pk).exists():\n order_item = OrderItem.objects.filter(\n food=food,\n user=request.user,\n ordered=False\n )[0]\n # REMOVE FROM CART\n order.foods.remove(order_item)\n order_item.delete()\n return Response({\n 'cart_status': 'removed from cart'\n })\n else:\n return Response({\n 'cart_status': 'object does not exist in your cart'\n })\n else:\n return Response({\n 'cart_status': 'object does not exist in your cart'\n })\n\n\n# FUNCTION TO REMOVE SINGLE ITEM FROM CART\n@api_view(['POST'])\n@permission_classes((IsAuthenticated,))\ndef remove_from_cart_item(request, pk):\n # CHECK IF ORDERED ITEM EXIST IN CART\n food = get_object_or_404(Food, pk=pk)\n order_qs = Order.objects.filter(\n user=request.user,\n ordered=False\n )\n if order_qs.exists():\n order = order_qs[0]\n # check if the ordered item is in the order\n if order.foods.filter(food__pk=food.pk).exists():\n order_item = OrderItem.objects.filter(\n food=food,\n user=request.user,\n ordered=False\n )[0]\n # REDUCE QUANTITY OF ORDER ITEM BY 1\n if order_item.quantity <= 0:\n return Response({\n 'cart_status': 'object does not exist in your cart'\n })\n else:\n order_item.quantity -= 1\n order_item.save()\n return Response({\n 'cart_status': 'object quantity was reduced in your cart'\n })\n else:\n return Response({\n 'cart_status': 'object does not exist in your cart'\n })\n else:\n return Response({\n 'cart_status': 'object does not exist in your cart'\n })\n\n\n# VIEW TO DISPLAY ALL AVAILABLE ORDER ITEM THAT THE ORDERED STATUS IS FALSE\nclass order_summaryView(APIView):\n permission_classes = [IsAuthenticated]\n def get(self, *args, **kwargs):\n try:\n order = Order.objects.filter(user=self.request.user, ordered=False)\n serializer = OrderSerializer(order, many=True)\n return Response(serializer.data)\n except ObjectDoesNotExist:\n return Response({\n 'cart_status': 'YOU HAVE NO ORDER IN CART'\n })\n\n\nclass CheckOutView(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n order = Order.objects.get(user=request.user, ordered=False)\n serializer = OrderSerializer(order, many=False)\n return Response(serializer.data)\n\n def post(self, request):\n data = request.data\n address = data.get('address')\n order = Order.objects.get(user=request.user, ordered=False)\n order.billing_address = address\n order.ordered = True\n order.save()\n serializer = OrderSerializer(order, many=False)\n return Response(serializer.data, status=HTTP_201_CREATED)\n\n\n","repo_name":"vbello-tech/RESTUARANT-API","sub_path":"food_store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"41775364459","text":"import os\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport umap\nfrom matplotlib import pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import mutual_info_score\n\nfrom tgca import *\n\n\ndef do_pca(data):\n p = PCA(n_components=2)\n x = p.fit_transform(data.values.transpose())\n var = p.explained_variance_ratio_\n x = pd.DataFrame(x, index=data.columns)\n fig = plt.figure()\n plt.scatter(x[0], x[1])\n sns.despine(fig=fig, top=True, right=True)\n\n plt.figure()\n y = x[(x[0] < 20000) & (x[1] <
80000)]\n plt.scatter(y[0], y[1])\n plt.xlabel(round(var[0], 3) * 100)\n plt.ylabel(round(var[1], 3) * 100)\n idmax = [1148, 769, 395, 623, 907, 1081]\n plt.scatter(y.loc[idmax, 0], y.loc[idmax, 1])\n plt.show()\n return list(y.index)\n\n\ndef do_umap_by_genes(data, use_pickle=False, gene_subset=[],\n mpl=True, ply=True, pickle_file=UMAP_BY_GENE_PICKLE):\n if os.path.isfile(pickle_file) and use_pickle:\n embedding = pd.read_pickle(pickle_file)\n else:\n reducer = umap.UMAP()\n embedding = reducer.fit_transform(data)\n embedding = pd.DataFrame(embedding, index=data.index)\n embedding.to_pickle(pickle_file)\n subset = embedding[embedding.index.get_level_values(0).isin(gene_subset)]\n # print(subset)\n\n if mpl:\n fig = plt.figure()\n plt.scatter(embedding[0], embedding[1], marker='.')\n plt.scatter(subset[0], subset[1], label='subset', marker='.')\n sns.despine(fig=fig, top=True, right=True)\n plt.title('UMAP by genes')\n plt.show()\n if ply:\n embedding.columns = ['0', '1']\n embedding.index = data.index\n embedding = embedding.reset_index(level='symbol')\n print(embedding.head())\n # print(data.head())\n import plotly.express as px\n fig = px.scatter(embedding, x='0', y='1',\n template='plotly_white', hover_data=['symbol'])\n fig.show()\n return embedding\n\n\ndef do_umap_by_sample(data, clusters=[], use_pickle=False, pickle_file=UMAP_BY_SAMPLE_PICKLE):\n if os.path.isfile(pickle_file) and use_pickle:\n embedding = pd.read_pickle(pickle_file)\n else:\n reducer = umap.UMAP()\n embedding = reducer.fit_transform(data.transpose())\n embedding = pd.DataFrame(embedding, index=data.columns)\n print('embeddings')\n print(embedding)\n embedding.to_pickle(pickle_file)\n # if clusters != []:\n # num_unique_clusters = clusters['cluster'].unique()\n fig = plt.figure()\n plt.scatter(embedding.iloc[:, 0], embedding.iloc[:, 1], marker='.')\n sns.despine(fig=fig, top=True, right=True)\n plt.title('UMAP by sample')\n plt.legend()\n plt.show()\n # print(embedding)\n # import plotly.express as px\n # embedding.columns = ['0', '1']\n # fig = px.scatter(embedding, x='0', y='1')\n # hover_data=['petal_width'])\n # fig.show()\n\n\ndef get_clusterable_umap_embedding(\n data, n_neighbors=30,\n min_dist=0.0,\n n_components=50,\n min_samples=3,\n min_cluster_size=3):\n from hdbscan import HDBSCAN\n df = umap.UMAP(\n n_neighbors=n_neighbors,\n min_dist=min_dist,\n n_components=n_components,\n ).fit_transform(data)\n df = pd.DataFrame(df, index=data.index)\n print(df.head())\n labels = HDBSCAN(\n min_samples=min_samples,\n min_cluster_size=min_cluster_size,\n ).fit_predict(df)\n df['cluster'] = labels\n print(sorted(list(set(labels))))\n fig = plt.figure()\n for label, i in df.groupby(by='cluster', axis=0):\n plt.scatter(\n i.iloc[:, 0],\n i.iloc[:, 1],\n label=label,\n marker='.'\n )\n sns.despine(fig=fig, top=True, right=True)\n plt.show()\n return df\n\n\ndef get_similarity_measure(data, from_pickle=False, pickle_file=SIMILARITY_MEASURE_PICKLE):\n if from_pickle and os.path.isfile(pickle_file):\n return pd.read_pickle(pickle_file)\n else:\n N = data.shape[1]\n res = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n if i == j:\n res[i, j] = np.nan\n else:\n res[i, j] = ((data.values[:, i] - data.values[:, j]) ** 2).sum()\n res = pd.DataFrame(res)\n res = res / res.max().max()\n res.index = data.columns\n res.columns = data.columns\n\n res.to_pickle(path=pickle_file)\n return res\n\n\ndef do_scatter(data, x, y):\n fig = plt.figure()\n plt.scatter(data[x], data[y])\n
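# (annotation, not in the original) x and y are column labels of data; the despine call below strips the top/right spines, matching the other plotting helpers in this module.\n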
sns.despine(fig=fig, top=True, right=True)\n plt.xlabel(x)\n plt.ylabel(y)\n plt.show()\n\n\ndef calc_MI(x, y, bins):\n c_xy = np.histogram2d(x, y, bins)[0]\n mi = mutual_info_score(None, None, contingency=c_xy)\n return mi\n\n\ndef compute_mi_matrix(data, from_pickle=False, bins=20,\n pickle_file=MUTUAL_INFORMATION_PICKLE):\n if from_pickle and os.path.isfile(pickle_file):\n return pd.read_pickle(pickle_file)\n N = data.shape[1]\n count = 0\n res = np.zeros(shape=(N, N))\n for i in range(len(data.columns)):\n for j in range(len(data.columns)):\n count += 1\n print('calculating MI score: {:.2%} completed'.format(count / (N * N)))\n x = data.iloc[:, i]\n y = data.iloc[:, j]\n res[i, j] = calc_MI(x, y, bins=bins)\n df = pd.DataFrame(res, index=data.columns, columns=data.columns)\n df.to_pickle(pickle_file)\n\n return df\n\ndef compute_ks_matrix(\n data, from_pickle=False,\n pickle_file=PROTEOME_KS_PICKLE):\n import scipy.stats as stats\n if from_pickle and os.path.isfile(pickle_file):\n return pd.read_pickle(pickle_file)\n N = data.shape[1]\n count = 0\n ks = np.zeros(shape=(N, N))\n p_val = np.zeros(shape=(N, N))\n for i in range(len(data.columns)):\n for j in range(len(data.columns)):\n count += 1\n print('calculating KS statistic: {:.2%} completed'.format(count / (N * N)))\n x = data.iloc[:, i]\n y = data.iloc[:, j]\n k, p = stats.ks_2samp(x.values, y.values)\n ks[i, j] = k\n p_val[i, j] = p\n ks = pd.DataFrame(ks, index=data.columns, columns=data.columns)\n p_val = pd.DataFrame(p_val, index=data.columns, columns=data.columns)\n df = pd.concat({'ks': ks, 'pval': p_val}, axis=0)\n df.to_pickle(pickle_file)\n\n return df\n","repo_name":"CiaranWelsh/TGCA","sub_path":"tgca/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36610561607","text":"import math\nfrom typing import Any, Tuple\n\nimport numpy as np\nfrom pytorch_lightning import LightningDataModule\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom pl_bolts.utils import _SKLEARN_AVAILABLE\nfrom pl_bolts.utils.stability import under_review\nfrom pl_bolts.utils.warnings import warn_missing_pkg\n\nif _SKLEARN_AVAILABLE:\n from sklearn.utils import shuffle as sk_shuffle\nelse: # pragma: no cover\n warn_missing_pkg(\"sklearn\", pypi_name=\"scikit-learn\")\n\n\n@under_review()\nclass SklearnDataset(Dataset):\n \"\"\"Mapping between numpy (or sklearn) datasets to PyTorch datasets.\n\n Args:\n X: Numpy ndarray\n y: Numpy ndarray\n x_transform: Any transform that works with Numpy arrays\n y_transform: Any transform that works with Numpy arrays\n\n Example:\n >>> from sklearn.datasets import load_diabetes\n >>> from pl_bolts.datamodules import SklearnDataset\n ...\n >>> X, y = load_diabetes(return_X_y=True)\n >>> dataset = SklearnDataset(X, y)\n >>> len(dataset)\n 442\n\n \"\"\"\n\n def __init__(\n self,\n X: np.ndarray, # noqa: N803\n y: np.ndarray,\n x_transform: Any = None,\n y_transform: Any = None,\n ) -> None:\n super().__init__()\n self.data = X\n self.labels = y\n self.data_transform = x_transform\n self.labels_transform = y_transform\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __getitem__(self, idx) -> Tuple[np.ndarray, np.ndarray]:\n x = self.data[idx].astype(np.float32)\n y = self.labels[idx]\n\n # Do not convert integer to float for classification data\n if not ((y.dtype == np.int32) or (y.dtype == np.int64)):\n y = y.astype(np.float32)\n\n if self.data_transform:\n x = 
self.data_transform(x)\n\n if self.labels_transform:\n y = self.labels_transform(y)\n\n return x, y\n\n\n@under_review()\nclass SklearnDataModule(LightningDataModule):\n \"\"\"Automatically generates the train, validation and test splits for a Numpy dataset. They are set up as dataloaders\n for convenience. Optionally, you can pass in your own validation and test splits.\n\n Example:\n\n >>> from sklearn.datasets import load_diabetes\n >>> from pl_bolts.datamodules import SklearnDataModule\n ...\n >>> X, y = load_diabetes(return_X_y=True)\n >>> loaders = SklearnDataModule(X, y, batch_size=32)\n ...\n >>> # train set\n >>> train_loader = loaders.train_dataloader()\n >>> len(train_loader.dataset)\n 310\n >>> len(train_loader)\n 10\n >>> # validation set\n >>> val_loader = loaders.val_dataloader()\n >>> len(val_loader.dataset)\n 88\n >>> len(val_loader)\n 3\n >>> # test set\n >>> test_loader = loaders.test_dataloader()\n >>> len(test_loader.dataset)\n 44\n >>> len(test_loader)\n 2\n\n \"\"\"\n\n name = \"sklearn\"\n\n def __init__(\n self,\n X, # noqa: N803\n y,\n x_val=None,\n y_val=None,\n x_test=None,\n y_test=None,\n val_split=0.2,\n test_split=0.1,\n num_workers=0,\n random_state=1234,\n shuffle=True,\n batch_size: int = 16,\n pin_memory=True,\n drop_last=False,\n *args,\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n self.num_workers = num_workers\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.pin_memory = pin_memory\n self.drop_last = drop_last\n\n # shuffle x and y\n if shuffle and _SKLEARN_AVAILABLE:\n X, y = sk_shuffle(X, y, random_state=random_state) # noqa: N806\n elif shuffle and not _SKLEARN_AVAILABLE: # pragma: no cover\n raise ModuleNotFoundError(\n \"You want to use shuffle function from `scikit-learn` which is not installed yet.\"\n )\n\n val_split = 0 if x_val is not None or y_val is not None else val_split\n test_split = 0 if x_test is not None or y_test is not None else test_split\n\n hold_out_split = val_split + test_split\n if hold_out_split > 0:\n val_split = val_split / hold_out_split\n hold_out_size = math.floor(len(X) * hold_out_split)\n x_holdout, y_holdout = X[:hold_out_size], y[:hold_out_size]\n test_i_start = int(val_split * hold_out_size)\n x_val_hold_out, y_val_holdout = x_holdout[:test_i_start], y_holdout[:test_i_start]\n x_test_hold_out, y_test_holdout = x_holdout[test_i_start:], y_holdout[test_i_start:]\n X, y = X[hold_out_size:], y[hold_out_size:] # noqa: N806\n\n # if don't have x_val and y_val create split from X\n if x_val is None and y_val is None and val_split > 0:\n x_val, y_val = x_val_hold_out, y_val_holdout\n\n # if don't have x_test, y_test create split from X\n if x_test is None and y_test is None and test_split > 0:\n x_test, y_test = x_test_hold_out, y_test_holdout\n\n self._init_datasets(X, y, x_val, y_val, x_test, y_test)\n\n def _init_datasets(\n self,\n x: np.ndarray,\n y: np.ndarray,\n x_val: np.ndarray,\n y_val: np.ndarray,\n x_test: np.ndarray,\n y_test: np.ndarray,\n ) -> None:\n self.train_dataset = SklearnDataset(x, y)\n self.val_dataset = SklearnDataset(x_val, y_val)\n self.test_dataset = SklearnDataset(x_test, y_test)\n\n def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n shuffle=self.shuffle,\n num_workers=self.num_workers,\n drop_last=self.drop_last,\n pin_memory=self.pin_memory,\n )\n\n def val_dataloader(self) -> DataLoader:\n return DataLoader(\n self.val_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n 
num_workers=self.num_workers,\n drop_last=self.drop_last,\n pin_memory=self.pin_memory,\n )\n\n def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n drop_last=self.drop_last,\n pin_memory=self.pin_memory,\n )\n","repo_name":"Lightning-Universe/lightning-bolts","sub_path":"src/pl_bolts/datamodules/sklearn_datamodule.py","file_name":"sklearn_datamodule.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":1603,"dataset":"github-code","pt":"28"} +{"seq_id":"25026985910","text":"\"\"\"Tests for asr_align.py.\"\"\"\nimport string\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom espnet2.bin.asr_align import CTCSegmentation, CTCSegmentationTask, get_parser, main\nfrom espnet2.tasks.asr import ASRTask\n\n\ndef test_get_parser():\n \"\"\"Check the parser.\"\"\"\n assert isinstance(get_parser(), ArgumentParser)\n\n\ndef test_main():\n \"\"\"Run main(·) once.\"\"\"\n with pytest.raises(SystemExit):\n main()\n\n\n@pytest.fixture()\ndef token_list(tmp_path: Path):\n \"\"\"Obtain a test file with a list of tokens.\"\"\"\n with (tmp_path / \"tokens.txt\").open(\"w\") as f:\n f.write(\"\\n\")\n for c in string.ascii_letters:\n f.write(f\"{c}\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n return tmp_path / \"tokens.txt\"\n\n\n@pytest.fixture()\ndef asr_config_file(tmp_path: Path, token_list):\n \"\"\"Obtain ASR config file for test.\"\"\"\n # Write default configuration file\n ASRTask.main(\n cmd=[\n \"--dry_run\",\n \"true\",\n \"--output_dir\",\n str(tmp_path / \"asr\"),\n \"--token_list\",\n str(token_list),\n \"--token_type\",\n \"char\",\n \"--decoder\",\n \"rnn\",\n ]\n )\n return tmp_path / \"asr\" / \"config.yaml\"\n\n\n@pytest.mark.execution_timeout(5)\ndef test_CTCSegmentation(asr_config_file):\n \"\"\"Test CTC segmentation.\n\n Note that due to the random vector that is given to the CTC segmentation function,\n there is a small chance that this test might randomly fail. 
If this ever happens,\n use the test file test_utils/ctc_align_test.wav instead, or a fixed test vector.\n \"\"\"\n\n num_samples = 100000\n fs = 16000\n # text includes:\n # one blank line\n # kaldi-style utterance names\n # one char not included in char list\n text = (\n \"\\n\"\n \"utt_a HOTELS\\n\"\n \"utt_b HOLIDAY'S STRATEGY\\n\"\n \"utt_c ASSETS\\n\"\n \"utt_d PROPERTY MANAGEMENT\\n\"\n )\n # speech either from the test audio file or random\n speech = np.random.randn(num_samples)\n aligner = CTCSegmentation(\n asr_train_config=asr_config_file,\n fs=fs,\n kaldi_style_text=True,\n min_window_size=10,\n )\n segments = aligner(speech, text, fs=fs)\n # check segments\n assert isinstance(segments, CTCSegmentationTask)\n kaldi_text = str(segments)\n first_line = kaldi_text.splitlines()[0]\n assert \"utt_a\" == first_line.split(\" \")[0]\n start, end, score = segments.segments[0]\n assert start > 0.0\n assert start < (num_samples / fs)\n assert end >= start\n assert score < 0.0\n # check options and align with \"classic\" text converter\n option_dict = {\n \"fs\": 16000,\n \"time_stamps\": \"fixed\",\n \"samples_to_frames_ratio\": 512,\n \"min_window_size\": 100,\n \"max_window_size\": 20000,\n \"set_blank\": 0,\n \"scoring_length\": 10,\n \"replace_spaces_with_blanks\": True,\n \"gratis_blank\": True,\n \"kaldi_style_text\": False,\n \"text_converter\": \"classic\",\n }\n aligner.set_config(**option_dict)\n assert aligner.warned_about_misconfiguration\n text = [\"HOTELS\", \"HOLIDAY'S STRATEGY\", \"ASSETS\", \"PROPERTY MANAGEMENT\"]\n segments = aligner(speech, text, name=\"foo\")\n segments_str = str(segments)\n first_line = segments_str.splitlines()[0]\n assert \"foo_0000\" == first_line.split(\" \")[0]\n # test the ratio estimation (result: 509)\n ratio = aligner.estimate_samples_to_frames_ratio()\n assert 500 <= ratio <= 520\n","repo_name":"espnet/espnet","sub_path":"test/espnet2/bin/test_asr_align.py","file_name":"test_asr_align.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":7371,"dataset":"github-code","pt":"28"} +{"seq_id":"35516936887","text":"import pandas as pd\nimport util_functions as uf\nfrom plotly.offline import plot\nfrom plotly.graph_objs import *\nfrom plotly import tools\n\n\nif __name__ == \"__main__\":\n # Connect to AWS\n uf.set_env_path()\n conn, cur = uf.aws_connect()\n\n # Trip Frequency Count for entire pilot by Operator\n full_df = pd.read_sql(\"\"\"select distinct\n user_freqs.operatorclean,\n user_freqs.user_trips,\n count(*) as freq_user_trips\n from\n ((select distinct\n operatorclean,\n userid,\n count(*) as user_trips\n from dockless_trips\n where operatorclean in ('lime', 'spin')\n group by 1, 2\n order by operatorclean, count(*))\n union\n /*ofo users*/\n (select distinct\n 'ofo' as operatorclean,\n userid,\n sum(trips) as user_trips\n from ofo_users\n group by 1, 2\n order by operatorclean, sum(trips))\n union\n /*jump users*/\n (select distinct\n 'jump' as operatorclean,\n userid,\n sum(trips) as user_trips\n from jump_users\n group by 1, 2\n order by operatorclean, sum(trips))) as user_freqs\n group by 1, 2\n order by 1, 2;\n \"\"\", con=conn)\n\n # Initialize Excel Instance\n # operator = 'jump'\n for operator in full_df['operatorclean'].drop_duplicates().tolist():\n df = full_df[full_df['operatorclean'] == operator].copy()\n df = df[df['user_trips'] < 50]\n # Calculate Cumulative Sum and Perc\n df['cumulative_sum'] = df['freq_user_trips'].cumsum()\n df['cumulative_perc'] = 
(df['cumulative_sum'] / df['freq_user_trips'].sum()) * 100\n df['demarcation'] = 80\n\n # Set up Plotly Traces\n trace1 = Bar(x=df.user_trips, y=df.freq_user_trips, name='Users by Frequency of Trips', marker=dict(color='rgb(34,163,192)'))\n trace2 = Scatter(x=df.user_trips, y=df.cumulative_perc, name='Cumulative Percentage', yaxis='y2',\n line=dict(color='rgb(243,158,115)', width=2.4))\n trace3 = Scatter(x=df.user_trips, y=df.demarcation, name='80%', yaxis='y2',\n line=dict(color='rgba(128,128,128,.45)', dash='dash', width=1.5))\n data = [trace1, trace2, trace3]\n # Set up Plotly Layout\n layout = Layout(title='{}'.format(operator.title()), titlefont=dict(color='', family='', size=0),\n font=Font(color='rgb(128,128,128)', family='Balto, sans-serif', size=12),\n width=1500, height=623, paper_bgcolor='rgb(240, 240, 240)', plot_bgcolor='rgb(240, 240, 240)',\n hovermode='compare', margin=dict(b=120, l=60, r=60, t=65), showlegend=True,\n legend=dict(x=.83, y=1.3, font=dict(family='Balto, sans-serif', size=12, color='rgba(128,128,128,.75)'),),\n annotations=[dict(text=\"\", showarrow=False, xref=\"paper\", yref=\"paper\", textangle=90, x=1.029, y=.75,\n font=dict(family='Balto, sans-serif', size=14, color='rgba(243,158,115,.9)'),)],\n xaxis=dict(),\n yaxis=dict(title='Users by Frequency of Trips', range=[0, max(df['freq_user_trips'])],\n tickfont=dict(color='rgba(34,163,192,.75)'),\n titlefont=dict(family='Balto, sans-serif', size=14, color='rgba(34,163,192,.75)')),\n yaxis2=dict(range=[0, 101], tickfont=dict(color='rgba(243,158,115,.9)'), tickvals=[0, 20, 40, 60, 80, 100],\n overlaying='y', side='right'))\n # Save Figure\n '''fig = tools.make_subplots(rows=1, cols=1)\n fig.append_trace(trace1, 1, 1)\n fig.append_trace(trace2, 1, 1)\n fig.append_trace(trace3, 1, 1)\n fig['layout'].update(layout)'''\n fig = dict(data=data, layout=layout)\n plot(fig, auto_open=False, image='png', image_filename='plot_image',\n output_type='file', image_width=800, image_height=600,\n filename='{}_test.html'.format(operator), validate=False)\n","repo_name":"noahnewberger/Bikeshare-DC","sub_path":"archive/exploratory_analysis/dockless_user_freq_pareto.py","file_name":"dockless_user_freq_pareto.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"3898114598","text":"import torch\nimport torch.nn as nn\n\nCRIPPEN_PARAMS = [0.1441, \n 0.0000 ,\n -0.2035, \n -0.2051, \n -0.2783, \n 0.1551 ,\n 0.00170, \n 0.08452, \n -0.1444, \n -0.0516, \n 0.1193 ,\n -0.0967, \n -0.5443, \n 0.0000 ,\n 0.2450 ,\n 0.1980 ,\n 0.0000 ,\n 0.1581 ,\n 0.2955 ,\n 0.2713 ,\n 0.1360 ,\n 0.4619 ,\n 0.5437 ,\n 0.1893 ,\n -0.8186, \n 0.2640 ,\n 0.2148 ,\n 0.08129, \n 0.1230 ,\n -0.2677, \n 0.2142 ,\n 0.2980 ,\n 0.1125 ,\n -1.0190, \n -0.7096, \n -1.0270, \n -0.5188, \n 0.08387, \n 0.1836 ,\n -0.3187, \n -0.4458, \n 0.01508, \n -1.950 ,\n -0.3239, \n -1.119 ,\n -0.3396, \n 0.2887 ,\n -0.4806,\n 0.1552 ,\n -0.2893, \n -0.0684, \n -0.4195, \n 0.0335 ,\n -0.3339, \n -1.189 ,\n 0.1788 ,\n -0.1526, \n 0.1129 ,\n 0.4833 ,\n -1.326 ,\n -0.1188,\n 0.4202 ,\n 0.6895 ,\n 0.8456 ,\n 0.8857 ,\n -2.996 ,\n 0.8612 ,\n 0.6482 ,\n -0.0024, \n 0.6237 ,\n -0.3808,\n -0.0025]\n\nC_PARAMS = [0.1441, \n 0.0000 ,\n -0.2035, \n -0.2051, \n -0.2783, \n 0.1551 ,\n 0.00170, \n 0.08452, \n -0.1444, \n -0.0516, \n 0.1193 ,\n -0.0967, \n -0.5443, \n 0.0000 ,\n 0.2450 ,\n 0.1980 ,\n 0.0000 ,\n 0.1581 ,\n 0.2955 ,\n 0.2713 ,\n 0.1360 ,\n 0.4619 ,\n 0.5437 ,\n 0.1893 ,\n -0.8186, 
\n 0.2640 ,\n 0.2148 ,\n 0.08129]\n\nH_PARAMS = [0.1230 ,\n -0.2677, \n 0.2142 ,\n 0.2980 ,\n 0.1125]\n\nN_PARAMS = [-1.0190, \n -0.7096, \n -1.0270, \n -0.5188, \n 0.08387, \n 0.1836 ,\n -0.3187, \n -0.4458, \n 0.01508, \n -1.950 ,\n -0.3239, \n -1.119 ,\n -0.3396, \n 0.2887 ,\n -0.4806]\n\nO_PARAMS = [0.1552 ,\n -0.2893, \n -0.0684, \n -0.4195, \n 0.0335 ,\n -0.3339, \n -1.189 ,\n 0.1788 ,\n -0.1526, \n 0.1129 ,\n 0.4833 ,\n -1.326 ,\n -0.1188]\n\nF_PARAM = 0.4202\n\nCl_PARAM = 0.6895\n\nBr_PARAM = 0.8456\n\nI_PARAM = 0.8857\n\nHal_PARAM = -2.996\n\nP_PARAM = 0.8612\n\nS_PARAMS = [0.6482 ,\n -0.0024, \n 0.6237]\n\nMe_PARAMS = [-0.3808,\n -0.0025]\n\nzinc_PARAMS = C_PARAMS + O_PARAMS + N_PARAMS + [F_PARAM] + H_PARAMS + S_PARAMS + [Cl_PARAM] + [Br_PARAM] + [I_PARAM] + [P_PARAM]\n\nclass CrippenNet(nn.Module):\n\n def sequence(self, orig_atom_fea_len, layer_list, out_len):\n seq = (nn.Linear(orig_atom_fea_len, layer_list[0]), self.nonlinear)\n for i in range(len(layer_list)-1):\n seq += (nn.Linear(layer_list[i], layer_list[i+1]), self.nonlinear)\n seq += (nn.Linear(layer_list[-1], out_len),)\n\n return seq\n \n \n def __init__(self, orig_atom_fea_len, n_conv=6, layer_list=[144], classifier=False):\n\n super(CrippenNet, self).__init__()\n\n self.classifier = classifier\n \n self.fea_len = orig_atom_fea_len\n \n self.nonlinear = nn.Sigmoid()\n\n self.softmax = nn.Softmax(dim=2)\n \n self.n_conv = n_conv\n \n self.C_NN = nn.Sequential(*self.sequence(orig_atom_fea_len*n_conv, layer_list, len(C_PARAMS)))\n self.O_NN = nn.Sequential(*self.sequence(orig_atom_fea_len*n_conv, layer_list, len(O_PARAMS)))\n self.N_NN = nn.Sequential(*self.sequence(orig_atom_fea_len*n_conv, layer_list, len(N_PARAMS)))\n self.H_NN = nn.Sequential(*self.sequence(orig_atom_fea_len*n_conv, layer_list, len(H_PARAMS)))\n self.S_NN = nn.Sequential(*self.sequence(orig_atom_fea_len*n_conv, layer_list, len(S_PARAMS)))\n\n \n def forward(self, atom_fea, adj):\n\n atom_fea = atom_fea[:,:,:self.fea_len]\n\n atom_feas = [atom_fea]\n for _ in range(self.n_conv):\n atom_feas.append(adj.matmul(atom_feas[-1]))\n\n atom_feas = torch.cat(atom_feas[1:], dim=2)\n \n C_type = self.softmax(self.C_NN(atom_feas)) # 28\n O_type = self.softmax(self.O_NN(atom_feas)) # 13\n N_type = self.softmax(self.N_NN(atom_feas)) # 15\n H_type = self.softmax(self.H_NN(atom_feas)) # 5\n S_type = self.softmax(self.S_NN(atom_feas)) # 3\n ones = torch.ones(atom_fea.shape[:2], device = atom_fea.device).unsqueeze(2)\n\n if self.classifier:\n return torch.cat([C_type*atom_fea[:,:,0:1].expand(C_type.shape),\n O_type*atom_fea[:,:,1:2].expand(O_type.shape),\n N_type*atom_fea[:,:,2:3].expand(N_type.shape),\n ones*atom_fea[:,:,3:4],\n H_type*atom_fea[:,:,4:5].expand(H_type.shape),\n S_type*atom_fea[:,:,5:6].expand(S_type.shape),\n ones*atom_fea[:,:,6:7],\n ones*atom_fea[:,:,7:8],\n ones*atom_fea[:,:,8:9],\n ones*atom_fea[:,:,9:10]], dim=2)\n \n else:\n \n crippen_values = atom_fea*torch.cat([C_type.matmul(torch.tensor(C_PARAMS, device = atom_fea.device)).unsqueeze(2),\n O_type.matmul(torch.tensor(O_PARAMS, device = atom_fea.device)).unsqueeze(2),\n N_type.matmul(torch.tensor(N_PARAMS, device = atom_fea.device)).unsqueeze(2),\n F_PARAM*torch.ones(atom_fea.shape[:2], device = atom_fea.device).unsqueeze(2),\n H_type.matmul(torch.tensor(H_PARAMS, device = atom_fea.device)).unsqueeze(2),\n S_type.matmul(torch.tensor(S_PARAMS, device = atom_fea.device)).unsqueeze(2),\n Cl_PARAM*torch.ones(atom_fea.shape[:2], device = atom_fea.device).unsqueeze(2),\n 
Br_PARAM*torch.ones(atom_fea.shape[:2], device = atom_fea.device).unsqueeze(2),\n I_PARAM*torch.ones(atom_fea.shape[:2], device = atom_fea.device).unsqueeze(2),\n P_PARAM*torch.ones(atom_fea.shape[:2], device = atom_fea.device).unsqueeze(2)], dim=2)\n \n crippen_values = torch.sum(crippen_values, dim=2)\n \n return torch.sum(crippen_values, dim=1, keepdim=True)\n","repo_name":"ftherrien/inv-design","sub_path":"didgen/models/CrippenNet.py","file_name":"CrippenNet.py","file_ext":"py","file_size_in_byte":7865,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"41072421641","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.views import View\n\nfrom .models import Profil\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n\nclass LoginView(View):\n def get(self, request):\n return render(request, 'page-user-login.html')\n\n def post(self, request):\n user = authenticate(username=request.POST.get('login'),\n password=request.POST.get('password'))\n\n if user is None:\n return redirect('/')\n login(request, user)\n return redirect('/asosiy/')\n\nclass LogoutView(View):\n def get(self, request):\n logout(request)\n return redirect('/')\nclass RegisterView(View):\n def get(self, request):\n return render(request, 'page-user-register.html')\n def post(self, request):\n if request.POST.get('p1') != request.POST.get('p2'):\n return redirect('/user/register/')\n user = User.objects.create_user(\n username = request.POST.get('f'),\n email = request.POST.get('e'),\n password = request.POST.get('p1'),\n first_name = request.POST.get('f'),\n last_name = request.POST.get('l'),\n )\n Profil.objects.create(\n shahar = request.POST.get('sh'),\n tel = request.POST.get('t'),\n jins = request.POST.get('gender'),\n user = user\n )\n send_mail(\n subject='Welcome message',\n message='Alistyle online store',\n from_email=settings.EMAIL_HOST_USER,\n recipient_list=[user.email],\n fail_silently=True)\n\n return redirect('/user/login/')\n\n\n\n\n","repo_name":"AbdullajonOdilov/online-shopping","sub_path":"userapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"20152528280","text":"import requests\nimport pandas as pd\nimport yfinance as yf\n\ndef get_NIC_from_yFinance():\n nic = yf.Ticker(\"NIC.AX\")\n nic_df = nic.history(start=\"2018-10-31\")\n nic_df = nic_df.asfreq(freq='B', method='ffill').fillna(method='ffill')\n nic_df = nic_df[:'2022-09-30']\n return nic_df\n\ndef get_NIC_from_csv():\n nic_df = pd.read_csv('NICStockPriceMonthly.csv')\n nic_df['Date'] = [pd.Timestamp(t, tz='Australia/Sydney') for t in nic_df['Date']]\n #df['Date'] = pd.to_datetime(df['Date'], infer_datetime_format=True, utc=True)\n nic_df.set_index('Date', inplace=True)\n nic_df.drop(columns=['Open', 'High', 'Low'], inplace=True)\n return nic_df\n\ndef get_nickel_data_from_businessinsider(nic_df):\n r = requests.get('https://markets.businessinsider.com/Ajax/Chart_GetChartData?instrumentType=Commodity&tkData=300002,10,0,333&from=20170817&to={0}'.format(nic_df.index[-1].strftime('%Y%m%d')))\n nickelHist = pd.DataFrame(r.json())\n nickelHist['Date'] = [pd.Timestamp(t, tz='Australia/Sydney') for t in nickelHist['Date']]\n nickelHist.index = nickelHist['Date']\n nickelHist = nickelHist[nickelHist.index >=
nic_df.index[0]]\n nickelHist = nickelHist.asfreq(freq='B', method='ffill').fillna(method='ffill')\n nic_df = nic_df[nic_df.index <= nickelHist.index[-1]]\n\n return nic_df, nickelHist\n\ndef get_rates(nic_df):\n rates = pd.DataFrame(data=[\n {'date': nic_df.index[-1].strftime('%m/%d/%Y'),\t'rate': 3.60},\n# {'date': \"03/08/2023\",\t'rate': 3.60},\n# {'date': \"02/08/2023\",\t'rate': 3.35},\n# {'date': \"12/07/2022\",\t'rate': 3.10},\n# {'date': \"11/02/2022\",\t'rate': 2.85},\n# {'date': \"10/05/2022\",\t'rate': 2.60},\n {'date': \"09/07/2022\",\t'rate': 2.35},\n {'date': \"08/03/2022\",\t'rate': 1.85},\n {'date': \"07/06/2022\",\t'rate': 1.35},\n {'date': \"06/08/2022\",\t'rate': 0.85},\n {'date': \"05/04/2022\",\t'rate': 0.35},\n {'date': \"11/04/2020\",\t'rate': 0.10},\n {'date': \"03/20/2020\",\t'rate': 0.25},\n {'date': \"03/04/2020\",\t'rate': 0.50},\n {'date': \"10/02/2019\",\t'rate': 0.75},\n {'date': \"07/03/2019\",\t'rate': 1.00},\n {'date': \"06/05/2019\",\t'rate': 1.25},\n {'date': \"08/03/2016\",\t'rate': 1.50}\n ] )\n rates['date'] = [pd.Timestamp(t, tz='Australia/Sydney') for t in rates['date']]\n rates.index = rates['date']\n rates = rates.drop(columns=['date'])\n rates = rates.asfreq(freq='B').fillna(method='ffill')\n rates = rates[rates.index >= nic_df.index[0]]\n return rates\n\ndef union_data_in_one_df(nick, rates, nic, currency, production, mining, indonesia_metal_products):\n data = pd.DataFrame(data={\n 'NIC': nic['Close'], \n 'Nickle price': nick['Close'], \n 'Rate': rates['rate'], \n 'AUD/IDR': currency['Close'],\n 'NPI production': production['value'],\n 'Mining': mining['value'],\n 'Metal products in indonesia': indonesia_metal_products['Base metal products']\n }, index=nic.index)\n \n data = data.asfreq(freq='W-FRI').fillna(method='ffill')\n \n return data\n\ndef get_endog_and_exog(data):\n endog = 'NIC'\n exog = list(filter(lambda column: column != endog, data.columns))\n \n return (endog, exog)\n\ndef get_AUDIDR_from_yFinance():\n currency = yf.Ticker(\"AUDIDR=X\")\n currency_df = currency.history(start=\"2018-10-31\", end=\"2022-12-31\")\n currency_df = currency_df.tz_convert('Australia/Sydney')\n currency_df = currency_df[currency_df['Close'] > 7600]\n currency_df = currency_df.asfreq(freq='B', method='ffill').fillna(method='ffill')\n currency_df.index = currency_df.index.normalize()\n return currency_df\n\ndef get_AUDIDR_from_csv():\n # NOTE: original body was an unfinished stub (currency_df = currency_df); filename assumed by analogy with get_NIC_from_csv\n currency_df = pd.read_csv('AUDIDRMonthly.csv')\n return currency_df\n","repo_name":"geporys/ts-project-assignment-hse-2023","sub_path":"functions/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"40351098149","text":"#!/usr/bin/env python3\n#\n# Author: Peter Maloney\n#\n# Requires python 3.2 or newer. 3.1.2 (Ubuntu 10.04) does not work.\n#\n# Trying to prevent the head from parking on the disks, to reduce wear. \n#\n# This is useful for:\n# WD Green\n# Seagate Barracuda ST3000DM001-9YN166\n# Seagate Barracuda ST3000DM001-1CH166\n# Seagate Barracuda ST2000DM001-1CH164\n#\n#\n# goals:\n# - compatible with FreeBSD and Linux (requires ports: bash, facter, flock, gdd)\n# - only run on disks with known problem\n# dynamically detect problem... if Load_Cycle_Count is more than 50x the Power_Cycle_Count, then it's a problem disk\n# TODO: run by cron? 
by init?\n# - use flock to make sure more instances don't run\n# - when there are read errors, verify again:\n# -disk exists\n# -it is a problem disk\n# - every once in a while, look for new disks\n# - validate that dependencies are found\n# - read the middle of the disk instead of $RANDOM? it would probably lower the seeking (2 runs in a row will not seek, and runs while other IO not from this script will interfere a bit less since the middle is probably closer to the other requests)\n# DONE- make it quit so cron can restart it if the script itself is modified\n#\n# Licensed GNU GPLv2; if you did not recieve a copy of the license, get one at http://www.gnu.org/licenses/gpl-2.0.html\n\nimport sys\nimport os\nimport subprocess\nimport stat\nimport re\nimport time\nimport argparse\nimport fcntl\n\n################################################################################\n# error codes\n################################################################################\n\ne_missing_action=1\ne_bad_argparse=2 # defined by argparse, not used here\ne_missing_command=3\n\n# support python 3.2 which apparently has no subprocess.DEVNULL... using PIPE and then just not using communicate() seems to work fine, maybe just wasting some RAM for buffers, or some small CPU\nsubprocess_devnull = None\nif hasattr(subprocess, \"DEVNULL\"):\n subprocess_devnull = subprocess.DEVNULL\nelse:\n subprocess_devnull = subprocess.PIPE\n\ndef facter(key):\n p = subprocess.Popen([\"facter\", str(key)], \n stdout=subprocess.PIPE, stderr=subprocess_devnull)\n p.wait()\n if( p.returncode == 0 ):\n out, err = p.communicate()\n return out.decode(\"utf-8\").splitlines()[0]\n else:\n raise Exception(\"facter command failed; key = \\\"%s\\\"\" % (key))\n\ndef debug(text):\n if not debug_enabled:\n return\n print(\"DEBUG: %s\" % (text))\n sys.stdout.flush()\n \ndef warn(text):\n print(\"WARNING: %s\" % (text))\n sys.stdout.flush()\n \ndef info(text):\n print(\"INFO: %s\" % (text))\n sys.stdout.flush()\n \n################################################################################\n# Command line arguments\n################################################################################\n\nparser = argparse.ArgumentParser(description=\"Scan for intellipark disks and produce IO at regular intervals to prevent parking.\")\nactions=[\"install\",\"stop\",\"list\",\"run\"] # \"test\" action is purposely undocumented\nparser.add_argument('action', metavar='action', type=str,\n help='action to run %s' % actions, choices=actions)\nparser.add_argument('-d', '--debug', dest='debug', action='store_const',\n const=True, default=False,\n help='enable debug level output')\n\nargs = parser.parse_args()\n\ndebug_enabled = args.debug\naction = args.action\n\n################################################################################\n# functions\n################################################################################\n\n# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python\ndef which(cmd):\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(cmd)\n if fpath:\n if is_exe(cmd):\n return cmd\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, cmd)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\n# expression = regex or string to search for\n# lines = a string with many lines, or a list\n# regex = False is like -F\n# keep = False is like -v\n# only = keep only the 
part of the line that matches; each match on the same line becomes a new line in the returned list\n# returns a list of matches\ndef grep(expression, lines, regex=True, keep=True, only=False, ignore_case=False):\n    results = []\n    \n    #debug(\"grep expression = \\\"%s\\\", regex = %s, keep = %s, only = %s\" % (expression, regex, keep, only))\n    if( regex ):\n        # re.IGNORECASE must be passed to re.compile(); the second argument of a\n        # compiled pattern's search() is a start position, not a flags value\n        if( ignore_case ):\n            p = re.compile(expression, re.IGNORECASE)\n        else:\n            p = re.compile(expression)\n    \n    if( isinstance(lines, str) ):\n        lines = lines.splitlines()\n    for line in lines:\n        #debug(\"grep line = \\\"%s\\\"\" % (line))\n        if( regex ):\n            m = p.search(line)\n            #debug(\"grep type(m) = %s\" % type(m))\n            if keep == (m != None):\n                if( only ):\n                    results += [m.group(0)]\n                else:\n                    results += [line]\n        else:\n            if keep == (expression in line):\n                results += [line]\n    \n    if( len(results) == 0 ):\n        return None\n    return results\n\n# just a grep test\n#print( grep(\"/dev/mapper/\", [\"/dev/sda\", \"/dev/mapper/blah\"]) )\n#print( grep(\"/dev/mapper/\", [\"/dev/sda\", \"/dev/mapper/blah\"], keep=False) )\n#print( grep(\"/dev/mapper/\", [\"/dev/sda\", \"/dev/mapper/blah\"], only=True) )\n#print( grep(\"notfound\", [\"/dev/sda\", \"/dev/mapper/blah\"], only=True) )\n#exit(3)\n\ndef mystat(path):\n    mode = os.stat(path)\n    text = \"%s %s\" % (mode[stat.ST_MTIME], mode[stat.ST_SIZE])\n    #debug(\"DEBUG: stat mtime and size: %s\" % text)\n    return text\n\noriginalmtime = mystat( sys.argv[0] )\n\ndef list_all_disks():\n    alldisks=[]\n    if ( operatingsystem == \"FreeBSD\" ):\n        p = subprocess.Popen([\"geom\", \"disk\", \"list\"], stdout=subprocess.PIPE, stderr=subprocess_devnull)\n        \n        p.wait()\n        if( p.returncode == 0 ):\n            out, err = p.communicate()\n            out = out.decode(\"utf-8\")\n            out = grep(\"Geom name:\", out, regex=False)\n            if isinstance(out, str):\n                out = out.splitlines()\n            for line in out:\n                device = line.split()[2]\n                alldisks += [\"/dev/\" + device]\n    else:\n        p = subprocess.Popen([\"fdisk\", \"-l\"], stdout=subprocess.PIPE, stderr=subprocess_devnull)\n        \n        p.wait()\n        if( p.returncode == 0 ):\n            out, err = p.communicate()\n            out = out.decode(\"utf-8\")\n\n            # keep only device lines\n            out = grep(\"Disk /dev\", out, regex=False)\n            # remove virtual devices (without Load_Cycle_Count)\n            out = grep(\"/dev/mapper/|/dev/md[0-9]|/dev/md-[0-9]|/dev/bcache\", out, keep=False)\n            \n            # Keep only name of the device\n            lines = []\n            for line in out:\n                line = line.split()[1]\n                lines += [line]\n            \n            # remove the colon after the device name, or possibly other extra stuff we don't want\n            alldisks = grep(\"/dev/[a-zA-Z0-9]+\", lines, only=True)\n    \n    # DEBUG: for testing disk changes\n    #alldisks+=[\"/tmp/fakedisk\"]\n    #debug(\"list_all_disks; alldisks = %s\" % alldisks)\n\n    return alldisks\n\n# handles MegaRAID too\ndef mysmartctl(device, args=\"-iA\"):\n    p = subprocess.Popen([\"smartctl\", args, str(device)], stdout=subprocess.PIPE, stderr=subprocess_devnull)\n    p.wait()\n    \n    if( p.returncode == 2 ):\n        p = subprocess.Popen([\"smartctl\", args, \"-d\", \"megaraid,0\", str(device)], stdout=subprocess.PIPE, stderr=subprocess_devnull)\n        p.wait()\n    \n    out, err = p.communicate()\n    out = out.decode(\"utf-8\")\n    return out\n\ndef list_intellipark_disks(alldisks):\n    # find disks with the problem\n    # dynamically detect problem... 
if Load_Cycle_Count is more than 50x the Power_Cycle_Count, then it is a problem disk\n    disks=[]\n    for disk in alldisks:\n        if ( not os.path.exists(disk) ):\n            continue\n\n        #print(\"TESTING adding all disks; disk = %s\" % disk)\n        #disks += [disk]\n        #continue\n        \n        #TODO: instead of grep on a string, make a class that has the used information (model, Power_Cycle_Count, Load_Cycle_Count)\n        # Add disks by model, for ones we know are bad\n        smartid = mysmartctl(disk, \"-iA\")\n        \n        if( grep(\"Western.*Digital.*Green\", smartid, ignore_case=True) ):\n            disks += [disk]\n            debug(\"Found Western Digital Green: %s\" % disk)\n            continue\n        elif( grep(\"ST....DM001-9YN166\", smartid, ignore_case=True) ):\n            disks += [disk]\n            debug(\"Found Seagate Barracuda ST....DM001-9YN166: %s\" % disk)\n            continue\n        elif( grep(\"ST....DM001-1CH166\", smartid, ignore_case=True) ):\n            disks += [disk]\n            debug(\"Found Seagate Barracuda ST....DM001-1CH166: %s\" % disk)\n            continue\n        elif( grep(\"ST....DM001-1CH164\", smartid, ignore_case=True) ):\n            disks += [disk]\n            debug(\"Found Seagate Barracuda ST....DM001-1CH164: %s\" % disk)\n            continue\n        elif( grep(\"Hitachi Ultrastar\", smartid, ignore_case=True) ):\n            continue\n\n        startstopcount = grep(\"Power_Cycle_Count\", smartid)\n        if( startstopcount != None ):\n            startstopcount = int(startstopcount[0].split()[9])\n        \n        load = grep(\"Load_Cycle_Count\", smartid)\n        if( load != None ):\n            load = int(load[0].split()[9])\n\n        debug(\"%s : startstopcount = %s, load = %s\" % (disk, startstopcount, load) )\n        \n        if( not load ):\n            # intellipark disks tend to have that attribute... so this is most likely not one\n            continue\n\n        if( not startstopcount ):\n            # it is missing the important information to know if we can test it\n            \n            if( operatingsystem == \"FreeBSD\" ):\n                if( grep(\"Perc_Rated_Life_Used|Wear_Levelling_Count\", smartid) ):\n                    # if it's an SSD, don't test it ... 
it has no mechanical head to park\n # NOTE: I have no idea if this will test a hybrid disk\n # TODO: detect an SSD properly instead of just assuming it based on some known attributes present\n debug(\"Skipping SSD: %s\" % disk)\n continue\n else:\n diskname = disk.split(\"/\")[-1]\n rotational = None\n with open(\"/sys/block/%s/queue/rotational\" % diskname, \"r\") as f:\n b = f.read().splitlines()[0]\n debug( \"rotational b = %s\" % (b))\n if ( b == \"1\" ):\n rotational = True\n else:\n rotational = False\n \n if not rotational:\n debug(\"Skipping non-rotational disk (SSD?): %s\" % disk)\n continue\n elif( grep(\"Transport protocol:.*SAS$\", smartid) ):\n debug(\"Skipping SAS disk: %s\" % disk)\n # for a SAS disk, we only assume there is a problem if it is detected elsewhere\n continue\n \n # otherwise, do the fix on the disk, not knowing if it is necessary\n debug(\"unknown if it's an intellipark disk: %s\" % disk)\n disks += [disk]\n elif ( load > (startstopcount * 50) ):\n # if a disk has a load cycle count that is greater than 50 x the start stop count\n # then we assume it's an intellipark disk.\n debug( \"%s > %s * 50\" % (load, startstopcount))\n debug(\"Found suspected intellipark disk: %s\" % disk)\n disks += [disk]\n\n if ( len(disks) != 0):\n if debug_enabled:\n debug(\"found %d intellipark disks: %s\" % (len(disks), disks))\n else:\n info(\"found %d intellipark disks: %s\" % (len(disks), disks))\n else:\n info(\"no intellipark disks.\")\n\n return disks\n\ndef get_file_size(filename):\n \"Get the file size by seeking at end\"\n fd= os.open(filename, os.O_RDONLY)\n try:\n return os.lseek(fd, 0, os.SEEK_END)\n finally:\n os.close(fd)\n\n#def get_disk_size(device):\n #if operatingsystem == \"FreeBSD\":\n #smartid = mysmartctl(device)\n #lines = grep(\"User Capacity.*bytes\", smartid)\n #cols = lines[0].split()\n #size = cols[len(cols)-1]\n #else:\n #size=$(blockdev --getsize64 \"$d\")\n\n# cached middle in bytes; get_disk_size is slow, so don't call it often\ndisk_to_middle={}\n# number of bytes to read\nchunksize = 512\nalign = 512\n\n# this doesn't work... O_DIRECT doesn't work for os.read. 
http://bugs.python.org/issue5396\ndef read_raw(device, middle):\n    # TODO: FIXME: make sure to use directio or no cache, or this will not work\n    # if that can't be done in python, just use dd\n    # mydd if=\"$d\" of=/dev/null bs=512 count=1 iflag=direct skip=\"$middle\" &>/dev/null\n    #with os.open(device, os.O_RDONLY | os.O_DIRECT) as f:\n    if True:\n        f = os.open(device, os.O_RDONLY | os.O_DIRECT)\n        try:\n            debug(\"reading disk %s\" % device)\n            if(middle != 0):\n                os.lseek(f, middle, os.SEEK_SET)\n            chunk = os.read(f, chunksize)\n        except KeyboardInterrupt as e:\n            raise e\n        except:\n            e = sys.exc_info()[0]\n            info(\"read failed, middle = %s, chunksize = %s\" % (middle, chunksize))\n            raise e\n    \ndef read_dd(device, middle):\n    cmd = \"dd\"\n    if operatingsystem == \"FreeBSD\":\n        cmd = \"gdd\"\n\n    #dd if=\"$device\" of=/dev/null bs=512 count=1 iflag=direct skip=\"$middle\" &>/dev/null\n    p = subprocess.Popen([cmd, \"if=%s\" % device, \"of=/dev/null\", \"bs=%s\" % chunksize, \"count=1\", \"iflag=direct\", \"skip=%s\" % int(middle)], \n        stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    p.wait()\n    if( p.returncode != 0 ):\n        out, err = p.communicate()\n        warn(\"dd command failed; device = \\\"%s\\\"\" % (device))\n        warn( out and len(out) != 0 and out.decode(\"utf-8\").splitlines()[0] )\n        warn( err and len(err) != 0 and err.decode(\"utf-8\").splitlines()[0] )\n\ndef read_middle(device, middle=None):\n    if device in disk_to_middle:\n        middle = disk_to_middle[device]\n    else:\n        size = get_file_size(device)\n        middle = size // 2\n        # align the read to a likely sector size, or larger\n        middle = middle - (middle % align)\n        disk_to_middle[device] = middle\n    \n    debug(\"reading device = %s, middle = %s\" % (device, middle))\n    #read_raw(device, middle)\n    read_dd(device, middle // chunksize)\n    \n################################################################################\n# Constants\n################################################################################\n\nif not which(\"facter\"):\n    print(\"ERROR: missing command: \\\"facter\\\"\")\n    exit(e_missing_command)\n\npid = os.getpid()\n\n# If this file exists, the script will stop. 
The file is removed when starting the script unless another instance is running.\nstopFile = \"/var/run/anti-intellipark.stop\"\nstopChildrenFile = \"/var/run/anti-intellipark.stop-children\"\n# The lock file to prevent other instances from running.\nlockFile = \"/var/run/anti-intellipark.lock\"\nbasePidFile = \"/var/run/anti-intellipark-%d.pid\" % (pid)\n\n# TODO: popen to use facter\noperatingsystem = facter(\"operatingsystem\")\ndebug(\"operatingsystem = %s\" % operatingsystem)\n\n# note: stat is not the same on the 2 OSes, but we aren't using it all, just comparing the output between runs and not parsing anything\n# TODO: avoid using: flock, stat\nif ( operatingsystem == \"FreeBSD\" ):\n    deps = [\"flock\", \"facter\", \"smartctl\", \"geom\", \"stat\"]\nelse:\n    deps = [\"flock\", \"facter\", \"smartctl\", \"fdisk\", \"stat\"]\n\n\n################################################################################\n# Main\n################################################################################\n\ndef run():\n    info(\"Testing for intellipark disks\")\n    timestamp_alldisks = time.time()\n    alldisks = list_all_disks()\n    intellipark_disks = list_intellipark_disks(alldisks)\n    target_interval_alldisks = 60\n\n    # The interval to use normally\n    target_interval_default = 3\n    # the current interval used (different depending on whether there are disks or not)\n    target_interval = target_interval_default\n    if len(intellipark_disks) == 0:\n        target_interval = target_interval_alldisks\n\n    stop = False\n    # the timestamp at the start of the loop, before making sure 3 seconds passed\n    timestamp_1 = None\n    # the start time of the previous processing; stays None until the first iteration completes\n    timestamp_2 = None\n    # the previous time 2, from after the 3 seconds passed on the previous loop\n    timestamp_2_prev = None\n    # The accumulated error in the timing, which should be subtracted from the calculated sleep time to keep it down\n    # if too much is subtracted, then it will accumulate negative error, and stay close to the error from one run\n    terror = 0\n    while( not stop ):\n        timestamp_1_prev = timestamp_1\n        timestamp_1 = time.time()\n        \n        if timestamp_1_prev and timestamp_2:\n            # Sleep until the current time is 3 seconds after the start of the previous processing\n            sleeptime = timestamp_2 + target_interval - timestamp_1 - terror\n            #debug(\"timestamp_1_prev = %s, timestamp_1 = %s, timestamp_2 = %s, sleeptime = %s\" % (timestamp_1_prev, timestamp_1, timestamp_2, sleeptime))\n            if sleeptime > 0:\n                time.sleep(sleeptime)\n            else:\n                terror = 0\n        timestamp_2_prev = timestamp_2\n        \n        # The start time of the previous processing\n        timestamp_2 = time.time()\n        debug(\"after sleep, timestamp_2 = %s\" % timestamp_2)\n        \n        if timestamp_2_prev:\n            # count total error in timing, so it can be removed next loop\n            terror += timestamp_2 - timestamp_2_prev - target_interval\n            #debug(\"terror = %s\" % terror)\n        \n        ## pretend processing time\n        #debug(\"processing\")\n        #time.sleep(3.2)\n        #debug()\n        \n        # TODO: test this without threads... maybe add threads later\n        for d in intellipark_disks:\n            read_middle(d)\n\n        # watch the list of devices\n        # if the devices changed, it has to quit and restart\n        # (only re-scan once the recheck interval has elapsed)\n        if timestamp_alldisks + target_interval_alldisks < timestamp_2:\n            timestamp_alldisks = time.time()\n            alldisks2=list_all_disks()\n\n            same = True\n            if len(alldisks) != len(alldisks2):\n                same = False\n            else:\n                n = 0\n                count = len(alldisks)\n                while( n < count):\n                    if( alldisks[n] != alldisks2[n] ):\n                        same = False\n                        break\n                    n += 1\n\n            if not same:\n                info(\"Disks were changed... 
testing for intellipark disks\")\n                alldisks=alldisks2\n                intellipark_disks = list_intellipark_disks(alldisks)\n\n        # watch this script's mtime\n        # if the script was modified, it has to quit and restart\n        mtime = mystat(sys.argv[0])\n        if( mtime != originalmtime ):\n            info(\"This script was modified... quitting\")\n            break\n\ndef main():\n    # check all dependencies before exiting, so every missing command gets reported\n    fail=0\n    for dep in deps:\n        if not which(dep):\n            print(\"ERROR: missing command: \\\"%s\\\"\" % (dep))\n            fail=1\n    if( fail == 1 ):\n        exit(e_missing_command)\n\n    # stops the current run, but does not stop a new one from being run afterwards\n    if( action == \"stop\" ):\n        # TODO: find a way for one python process to kill the other.\n        print(\"ERROR: not implemented\")\n        pass\n\n    # installs the script in cron and /usr/local/bin\n    # NOTE that puppet is in use on the servers and your changes will get overridden if you install this way.\n    if ( action == \"install\" ):\n        if ( sys.argv[0] != \"/usr/local/bin/anti-intellipark.py\" ):\n            p = subprocess.Popen([\"cp\", sys.argv[0], \"/usr/local/bin/anti-intellipark.py\"], stdout=subprocess_devnull, stderr=subprocess_devnull)\n            p.wait()\n\n        p = subprocess.Popen([\"chmod\", \"a+rx\", \"/usr/local/bin/anti-intellipark.py\"], stdout=subprocess_devnull, stderr=subprocess_devnull)\n        p.wait()\n        \n        with open(\"/etc/cron.d/anti-intellipark\", \"w\") as f:\n            f.write(\"PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/games:/usr/local/sbin:/usr/local/bin:/root/bin\\n\")\n            f.write(\"* * * * * root /usr/local/bin/anti-intellipark.py run >/dev/null 2>&1\\n\")\n\n    # lists the Load_Cycle_Count for all recognized disks, and saves a log to the current directory\n    if( action == \"list\" ):\n        alldisks = list_all_disks()\n        for disk in alldisks:\n            # TODO: multithread this... smartctl can be slow\n            out = mysmartctl(disk, \"-iA\")\n            \n            model = grep(\"Device Model:\", out)\n            if( model == None or len(model) == 0 ):\n                # probably something went wrong... the device name is probably invalid\n                warn(\"Problem listing device: %s; out = %s\" % (disk, out))\n                continue\n            else:\n                model = model[0].split()[2]\n            \n            load = grep(\"Load_Cycle_Count\", out)\n            if ( load == None or len(load) == 0 ):\n                load = None\n            else:\n                load = load[0].split()\n                load = load[1] + \" \" + load[9]\n            \n            print(\"%-9s %-40s %-s\" % (disk, model, load))\n        \n        exit(0)\n\n    # just a test to see if \"time ./anti...\" includes subprocess cpu usage\n    if( action == \"test\" ):\n        p = subprocess.Popen([\"bc\"], stdin=subprocess.PIPE, stdout=subprocess_devnull, stderr=subprocess_devnull)\n        out, err = p.communicate(bytes(\"2^1000000\\n\", 'iso-8859-1'))\n        p.wait()\n        exit(0)\n    \n    if( action != \"run\" ):\n        parser.print_usage()\n        exit(e_missing_action)\n    \n    got_lock = False\n    try:\n        with open(lockFile, \"wb\") as f:\n            try:\n                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n                got_lock = True\n            except: # python3.4.x has BlockingIOError here, but python 3.2.x has IOError here... so just don't use those class names\n                print(\"Could not obtain lock; another process already running? 
quitting\")\n                exit(1)\n        run()\n    finally:\n        if got_lock:\n            os.remove(lockFile)\n\nmain()","repo_name":"petermaloney/misc","sub_path":"disk/anti-intellipark.py","file_name":"anti-intellipark.py","file_ext":"py","file_size_in_byte":22828,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"28"}
{"seq_id":"73044086794","text":"from lark import Lark, Transformer\nfrom dataclasses import dataclass, field\nfrom io import StringIO\nfrom typing import List, Optional, IO\n\n\n@dataclass\nclass Formula:\n    pass\n\n\n@dataclass\nclass Atomic(Formula):\n    pass\n\n\n@dataclass\nclass BinaryOperator:\n    left: Formula\n    right: Formula\n\n\n@dataclass\nclass UnaryOperator:\n    child: Formula\n\n\n@dataclass\nclass CTL(Formula):\n    formula: Formula\n\n\n@dataclass\nclass LTL(Formula):\n    formula: Formula\n\n\n@dataclass\nclass LTL_F(Formula):\n    formula: Formula\n\n\ndef fold(op, zero, args):\n    result = None\n    for arg in args:\n        if result is None:\n            result = arg\n        else:\n            result = op(result, arg)\n    if result is None:\n        return zero\n    else:\n        return result\n\n\n# Atoms\n\n\n@dataclass\nclass Bool(Atomic):\n    value: bool\n\n\n@dataclass\nclass Variable(Atomic):\n    name: str\n\n\n@dataclass\nclass Action(Atomic):\n    name: str\n    prefix: Optional[str] = field(default=None)\n\n\n@dataclass\nclass NotAction(Atomic):\n    name: str\n    prefix: Optional[str] = field(default=None)\n\n\n# Specific to LTLF\n\n\n@dataclass\nclass EndOfSequence(Atomic):\n    pass\n\n\nEOS = EndOfSequence()\n\n# Propositional logic\n\n\n@dataclass\nclass Not(UnaryOperator):\n    operator = \"!\"\n\n\n@dataclass\nclass And(BinaryOperator):\n    operator = \"&\"\n\n    @classmethod\n    def make(cls, args):\n        return fold(cls, Bool(True), args)\n\n\n@dataclass\nclass Equal(BinaryOperator):\n    operator = \"=\"\n\n\n@dataclass\nclass Or(BinaryOperator):\n    operator = \"|\"\n\n    @classmethod\n    def make(cls, args):\n        # the identity element of disjunction is false, so an empty Or folds to Bool(False)\n        return fold(cls, Bool(False), args)\n\n\n@dataclass\nclass Implies(BinaryOperator):\n    operator = \"->\"\n\n\n# Linear Temporal Logic\n\n\n@dataclass\nclass Next(UnaryOperator):\n    \"Must be true in the following step.\"\n    operator = \"X\"\n\n\n@dataclass\nclass Eventually(UnaryOperator):\n    \"The formula will eventually hold\"\n    operator = \"F\"\n\n\n@dataclass\nclass Always(UnaryOperator):\n    \"The formula holds at every step of the trace\"\n    operator = \"G\"\n\n\n@dataclass\nclass Until(BinaryOperator):\n    \"Left must happen until right happens; right will eventually happen.\"\n    operator = \"U\"\n\n\n# CTL\n\n\n@dataclass\nclass ExistsFinally(UnaryOperator):\n    \"\"\"\n    p is true in a state s0 if there exists a series of transitions\n    s_0 -> s_1, s_1 -> s_2, ...,s_{n−1} -> s_n such that p is true in s_n\n\n    TODO: THIS IS A CTL FORMULA RATHER THAN LTLf!\n    \"\"\"\n\n    operator = \"EF\"\n\n\n# Abbreviations\n\n\ndef Last():\n    \"The last instance of a trace\"\n    return Not(Next(Bool(True)))\n\n\ndef WeakNext(formula):\n    \"The given formula must hold unless it is last.\"\n    return Not(Next(Not(formula)))\n\n\ndef Equiv(left, right):\n    \"Logical equivalence\"\n    return And(Implies(left, right), Implies(right, left))\n\n\ndef Releases(left, right):\n    \"If left never becomes true, right must remain true forever.\"\n    return Not(Until(Not(left), Not(right)))\n\n\ndef WeakUntil(left, right):\n    \"ψ has to hold at least until φ; if φ never becomes true, ψ must remain true forever. 
<=> φ R (φ ∨ ψ) <=> (ψ U φ) ∨ G ψ\"\n # return Releases(right, Or(right, left))\n return Or(Until(left, right), Always(left))\n\n\n# Conversions\n\n\ndef ltlf_to_ltl(\n formula: Formula, eos: Variable, action: Variable, prefix_separator=\"_\"\n) -> Formula:\n \"\"\"\n Converts an LTLf formula in an LTL formula\n \"\"\"\n\n def rec(formula, mode=LTL_F):\n while type(formula) in (LTL, CTL, LTL_F):\n mode = type(formula)\n formula = formula.formula\n\n if mode is LTL or mode is CTL:\n if isinstance(formula, Atomic):\n if not (isinstance(formula, Bool) or isinstance(formula, Variable)):\n raise ValueError(\n f\"Expecting an atomic {mode.__name__} formula, but got: \",\n formula,\n )\n return formula\n elif isinstance(formula, UnaryOperator):\n return type(formula)(rec(formula.child, mode=mode))\n elif isinstance(formula, BinaryOperator):\n return type(formula)(\n rec(formula.left, mode=mode), rec(formula.right, mode=mode)\n )\n\n assert mode is LTL_F\n\n if isinstance(formula, Bool) or isinstance(formula, Variable):\n return formula\n elif isinstance(formula, NotAction) or isinstance(formula, Action):\n if formula.prefix is None:\n name = formula.name\n else:\n name = formula.prefix + prefix_separator + formula.name\n act = Equal(action, Variable(name))\n return And(\n Not(act) if isinstance(formula, NotAction) else act, Not(rec(EOS))\n )\n elif isinstance(formula, EndOfSequence):\n return eos\n elif isinstance(formula, Not):\n return Not(rec(formula.child))\n elif isinstance(formula, Or):\n return Or(rec(formula.left), rec(formula.right))\n elif isinstance(formula, Equal):\n return Equal(rec(formula.left), rec(formula.right))\n elif isinstance(formula, And):\n return And(rec(formula.left), rec(formula.right))\n elif isinstance(formula, Implies):\n return Implies(rec(formula.left), rec(formula.right))\n elif isinstance(formula, Next):\n return Next(And(rec(formula.child), Not(rec(EOS))))\n elif isinstance(formula, Eventually):\n return Eventually(And(rec(formula.child), Not(rec(EOS))))\n elif isinstance(formula, Always):\n return Always(Or(rec(formula.child), rec(EOS)))\n elif isinstance(formula, Until):\n return Until(\n left=rec(formula.left), right=And(rec(formula.right), Not(rec(EOS)))\n )\n else:\n raise ValueError(type(formula), formula, mode)\n\n return rec(formula)\n\n\ndef dump(formula: Formula, fp: IO, eos=None, nusvm_strict=True):\n \"\"\"\n Converts a formula into an NuSMV string\n \"\"\"\n\n def rec(formula):\n # Primitives\n if type(formula) in [LTL, CTL, LTL_F] and not nusvm_strict:\n formula = formula.formula\n if isinstance(formula, Bool):\n fp.write(\"TRUE\" if formula.value else \"FALSE\")\n elif isinstance(formula, Variable):\n fp.write(formula.name)\n elif isinstance(formula, Action):\n if nusvm_strict:\n raise ValueError(formula)\n if formula.prefix is not None:\n fp.write(formula.prefix)\n fp.write(\".\")\n fp.write(formula.name)\n elif isinstance(formula, EndOfSequence):\n if nusvm_strict:\n raise ValueError(formula)\n fp.write(eos if eos is not None else \"END\")\n\n elif isinstance(formula, UnaryOperator):\n # Operator\n fp.write(formula.operator)\n fp.write(\" \")\n # Child\n if isinstance(formula.child, Atomic):\n rec(formula.child)\n else:\n fp.write(\"(\")\n rec(formula.child)\n fp.write(\")\")\n\n elif isinstance(formula, BinaryOperator):\n # Left\n if isinstance(formula.left, Atomic):\n rec(formula.left)\n else:\n fp.write(\"(\")\n rec(formula.left)\n fp.write(\")\")\n # Operator\n fp.write(\" \")\n fp.write(formula.operator)\n fp.write(\" \")\n # Right\n if 
isinstance(formula.right, Atomic):\n rec(formula.right)\n else:\n fp.write(\"(\")\n rec(formula.right)\n fp.write(\")\")\n else:\n raise ValueError(type(formula), formula)\n\n # Recursive call\n rec(formula)\n\n\ndef dumps(formula, eos=None, nusvm_strict=True):\n buffer = StringIO()\n dump(\n formula=formula,\n fp=buffer,\n eos=eos,\n nusvm_strict=nusvm_strict,\n )\n return buffer.getvalue()\n\n\nparser = Lark.open(\"ltlf_grammar.lark\", rel_to=__file__, start=\"formula\")\n\n\nclass LTLParser(Transformer):\n def end(self, args) -> Last:\n return Last()\n\n def true(self, args) -> Bool:\n return True\n\n def false(self, args) -> Bool:\n return False\n\n def bool(self, args) -> Bool:\n return Bool(args[0])\n\n def paren(self, args):\n return args[0]\n\n def land(self, args) -> And:\n return And(*args)\n\n def formula(self, args):\n return args[0]\n\n def lnext(self, args) -> Next:\n return Next(*args)\n\n def ident(self, args):\n return args[0].value\n\n def act(self, args) -> Action:\n if len(args) == 2:\n return Action(prefix=args[0], name=args[1])\n return Action(args[0])\n\n def lor(self, args) -> Or:\n return Or(*args)\n\n def lnot(self, args) -> Not:\n return Not(*args)\n\n def until(self, args) -> Until:\n return Until(*args)\n\n def releases(self, args) -> Releases:\n return Releases(*args)\n\n def wuntil(self, args) -> WeakUntil:\n return WeakUntil(*args)\n\n def implies(self, args) -> Implies:\n return Implies(*args)\n\n def equiv(self, args) -> Equiv:\n return Equiv(*args)\n\n def globally(self, args) -> Always:\n return Always(*args)\n\n def eventually(self, args) -> Eventually:\n return Eventually(*args)\n\n def last(self, args) -> Last:\n return Last()\n\n def eq(self, args) -> Equal:\n return Equal(*args)\n\n def atom(self, args):\n return args[0]\n","repo_name":"cajomferro/shelley","sub_path":"shelley/parsers/ltlf_lark_parser.py","file_name":"ltlf_lark_parser.py","file_ext":"py","file_size_in_byte":9594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"4519856974","text":"import torch\n\nimport pytorch_lightning as pl\n\nfrom torch import nn\n\nfrom torch.utils.data import DataLoader\nfrom torch.distributions import Beta\nfrom torch.special import polygamma\n\nfrom environment import AllocationMDP\n\n\nclass BetaDistributionLearner(pl.LightningModule):\n \"\"\"agent that learn policy by the REINFORCE algorithm\"\"\"\n def __init__(self, **hyperparams):\n super(BetaDistributionLearner, self).__init__()\n\n self.policyNet = nn.Sequential(\n nn.Embedding(10, 6),\n nn.Linear(6, 2),\n AbsNlin()\n )\n\n # default values\n hyperparameterValues = {\n 'lr': 1e-3,\n 'baseline': 'cashout',\n\n 'timehorizon': 10,\n 'batch_size': 5000,\n 'n_experiments': 100,\n\n 'mu': 0.1,\n 'sigma': 0.1,\n 'utilityFn': 'sqrt'\n }\n\n hyperparameterValues.update(hyperparams)\n self.save_hyperparameters(hyperparameterValues)\n\n # extract the utility function\n currentUtilityFn = self.hparams.utilityFn\n if isinstance(currentUtilityFn, str):\n self.utilityFn = {\n 'sqrt': lambda x: x**0.5,\n 'log': lambda x: torch.log(x), \n }[currentUtilityFn]\n else:\n self.utilityFn = currentUtilityFn\n self.save_hyperparameters({'utilityFn': currentUtilityFn.__doc__})\n\n \"\"\"\n Forward operation functions\n \"\"\"\n\n def forward(self, states):\n times = states[..., 2].type(torch.int)\n return self.policyNet(times)\n\n def sampleActions(self, state):\n parameters = self.forward(state)\n dists = Beta(parameters[:, 0], parameters[:, 1])\n\n return 
dists.sample()[:, None]\n\n    def log_likelihood(self, states, actions):\n        \"\"\" Returns the log-likelihood of given actions at given states\n        The batching can have any shape, but we assume that \n        \"\"\"\n        parameters = self.forward(states)\n        dists = Beta(parameters[..., 0:1], parameters[..., 1:2])\n        return dists.log_prob(actions), parameters\n\n    def actionStatistics(self, states):\n        parameters = self.forward(states)\n        dists = Beta(parameters[..., 0:1], parameters[..., 1:2])\n\n        return dists.mean, dists.variance\n\n    def InverseFisherInfo(self, parameters):\n        with torch.no_grad():\n            a = parameters[..., 0:1]\n            b = parameters[..., 1:2]\n            M11 = polygamma(1, a) - polygamma(1, a+b)\n            M12 = - polygamma(1, a+b)\n            M21 = - polygamma(1, a+b)\n            M22 = polygamma(1, b) - polygamma(1, a+b)\n\n            invDeterm = (1. / (M11 * M22 - M12 * M21))[..., None]\n            # inverse of the 2x2 matrix [[M11, M12], [M21, M22]] is [[M22, -M12], [-M21, M11]] / det\n            invFisherMatrices = invDeterm * torch.cat(\n                [torch.cat([M22, -M12], dim=-1)[..., None],\n                 torch.cat([-M21, M11], dim=-1)[..., None]],\n                dim=-1\n            )\n        return invFisherMatrices\n\n    \"\"\"\n    Training \n    \"\"\"\n\n    def training_step(self, batch, batch_id):\n        \"\"\"Run a new epoch, apply the REINFORCE algorithm\"\"\"\n\n        # run new epoch\n        self.E.initRun(self.hparams.batch_size)\n\n        stop = False\n        while not stop:\n            state = self.E.state\n            actions = self.sampleActions(state)\n            stop = self.E.evolveState(actions)\n\n        observedUtility = self.utilityFn(\n            self.E.reward[:, :]\n        ).repeat((1, self.hparams.timehorizon))\n\n        if self.hparams.baseline == 'cashout':\n            # use as a baseline value function the value of cashing out now\n            baseline = self.utilityFn(self.E.stateTrace[:, :, 0:2].sum(2))\n        else:\n            baseline = 0.\n\n        logProb, parameters = self.log_likelihood(self.E.stateTrace, \n                                                  self.E.actionTrace)\n        loss = - (observedUtility - baseline)[:, :, None] * logProb\n\n        # make Fisher Information matrices for use in natural gradient descent\n        self.currentParameters = parameters\n        self.currentParameters.retain_grad()\n\n        self.log('loss', loss.mean())\n        self.log('meanUtility', self.utilityFn(self.E.reward).mean())\n\n        return loss.mean()\n\n    def backward(self, loss, **kwargs):\n        loss.backward(retain_graph=True)\n        parameterGrads = self.currentParameters.grad\n        InverseFisher = self.InverseFisherInfo(self.currentParameters)\n\n        naturalGradients = torch.einsum('nmij, nmj -> nmi', \n                                        InverseFisher, parameterGrads)\n\n        self.policyNet.zero_grad()\n        torch.autograd.backward(self.currentParameters, grad_tensors=naturalGradients)\n\n    def configure_optimizers(self):\n        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)\n\n    \"\"\"\n    Data handling\n    \"\"\"\n\n    def setup(self, stage=None):\n        \"\"\"Initialize the environment\"\"\"\n        self.E = AllocationMDP(self.hparams.timehorizon, self.hparams.mu,\n                               self.hparams.sigma)\n\n    def train_dataloader(self):\n        \"\"\"For now, we have no experience buffer, so the DL is empty\"\"\"\n        return DataLoader(range(self.hparams.n_experiments))\n\n    def validation_step(self, batch, batch_id):\n        params = self.forward(torch.arange(0, 10)[:, None].repeat(1, 3))\n        self.log('total certainty', params.sum(1).mean())\n        self.log('mean action', \n                 self.actionStatistics(torch.tensor([0.5, 0.5, 0.])\n                                       )[0].detach()\n                 )\n\n    def val_dataloader(self):\n        return DataLoader([0])\n\n\nclass ConstantBDLearner(BetaDistributionLearner):\n    \"\"\"ConstantBDL is a beta distribution learner with a constant network\"\"\"\n    def __init__(self, **hyperparams):\n        super(ConstantBDLearner, self).__init__(**hyperparams)\n\n        self.policyNet = nn.Sequential(nn.Linear(3, 6),\n                                       nn.Linear(6, 2),\n                                       
AbsNlin())\n self.policyNet[0].weight = nn.Parameter(torch.zeros(6, 3), \n requires_grad=False)\n\n def forward(self, states):\n return self.policyNet(states)\n\n def validation_step(self, batch, batch_id):\n params = self.forward(torch.randn(1, 3))\n self.log('total certainty', params.sum())\n self.log('mean action', \n self.actionStatistics(torch.tensor([0.5, 0.5, 0.])\n )[0].detach()\n )\n\n\n\"\"\"\n\n Utility: nonlinearities as layers\n\n\"\"\"\n\n\nclass AbsNlin(nn.Module):\n \"\"\"docstring for squareNlin\"\"\"\n def __init__(self):\n super(AbsNlin, self).__init__()\n\n def forward(self, x):\n return torch.abs(x) + 1E-16\n","repo_name":"lrast/portfolioRL","sub_path":"WorkingExperiments/agents_clean.py","file_name":"agents_clean.py","file_ext":"py","file_size_in_byte":6922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4588508571","text":"\"\"\"Configuration file for Casimo\"\"\"\n\n#Global constants\n\n##General\nHANDS_PER_ROUND = 20 #Number of hands played in each round (per table)\nROUNDS = 10 #Number of rounds to run the simulator\nHEADING = 'Welcome to Casimo, the casino simulator.\\n\\n'\nTAILING = 'It will simulate ' + str(ROUNDS) + ' rounds of poker hands with ' + str(HANDS_PER_ROUND) + ' hands per round.\\n'\nGREETING = HEADING + TAILING\n\n##Dealing\n\n###hands/cards\n\nCLUBS = 0\nDIAMONDS = 1\nHEARTS = 2\nSPADES = 3\nSUITS = (CLUBS, DIAMONDS, HEARTS, SPADES)\nMIN_RANK = 2 #deuces\nMAX_RANK = 14 #aces\nHAND_SIZE = 5\nNUTLOW = [13, 5, 3, 2, 1]\n\n###patterns\n\nHICARD = 0\nBWDRAW = 1 #Broadway draw\nSTRDRAW = 5 #Assumed to be open ender\nFLDRAW = 10\nSTRFLDRAW = 15\nPBWDRAW = 21\nPSTRDRAW = 52\nPFLDRAW = 102\nPSTRFLDRAW = 152\nPAIR = 211\nTWO_PAIR = 221\nTRIPS = 333\nSTRAIGHT = 12345\nFLUSH = 22222\nFULL_HOUSE = 33322\nQUADS = 44441\nSTRFL = 54321\nNO_HAND = -1\n\n###patterns that needs sorting\nUNSTR = 6\nUNFL = 11\nUNSTRFL = 16\nUNPFL = 103\nUNPSF = 153\n\n##Tables\n\n###general\n\nMIN_STAKE = 4 #Number of chips for a big bet at the smallest stakes\nMAX_STAKE = 16 #Number of chips for a big bet at the highest stakes\nBUY_IN = 120 #Number of big bets required to sit down at a table\nMAX_TABLES = 32 #Maximum amount of tables at a stake\nSEATS = 5 #Number of seats at a table\nMAX_BETS = 6 #Maximum number of big bets per player per hand\n\n###for moving between stakes\nMAX_STACK = BUY_IN * 2 #When posting the BB, this is the max amount of big bets allowed (must move up otherwise)\nMIN_STACK = BUY_IN // 2 #When posting the BB, this is the least amount of big bets allowed (must move down otherwise)\nMIN_UNITS = MAX_BETS * SEATS #When posting the BB at the lowest stakes, this is the least amount of big bets allowed (must leave otherwise)\n\n##Players\n\n###general\n\nSEATED = 0\nMOVE_UP = 1\nMOVE_DOWN = 2\nBUSTO = 3\n\n###hand rankings\n\n#K_FULL = 1\n#T_FULL = 2\n#F_FULL = 3\n#A_FLUSH = 4\n#J_FLUSH = 6\n#A_STRAIGHT = 8\n#T_STRAIGHT = 12\n#E_STRAIGHT = 16\n#TRIP_A = 24\n#TRIP_Q = 32\n#TRIP_I = 48\n#TRIP_B = 64\n#A_UP = 96\n#Q_UP = 128\n#C_UP = 192\n#P_A = 256\n#P_DELTA = 40\n#SFDRAW = 420\n#SEQDRAW = 480\n#BRDRAW = 700\n#AK_HI = 780\n#AQ_HI = 830\n#AJ_HI = 870\n#AT_HI = 940 #Includes KQ-hi\n#TRASH = 999\n\n###hand ratings (planning to replace the rankings with these)\nTRIP_K = 1\nTRIP_G = 2\nA_UP = 3\nK_UP = 4\nJ_UP = 5\nI_UP = 6\nG_UP = 7\nACES = list(range(8, 11))\nKINGS = list(range(11, 15))\nQUEENS = list(range(15, 18))\nJACKS = list(range(18, 21))\nTENS = list(range(21, 24))\nNINES = list(range(24, 28))\nEIGHTS 
= list(range(28, 31))\nSEVENS = list(range(31, 34))\nSIXES = list(range(34, 37))\nFIVES = list(range(37, 41))\nFOURS = list(range(41, 44))\nTHREES = list(range(44, 47))\nDEUCES = list(range(47, 50))\nPAIRS = [None, None, DEUCES, THREES, FOURS, FIVES, SIXES, SEVENS, EIGHTS, NINES, TENS, JACKS, QUEENS, KINGS, ACES]\nAK_HI = list(range(50, 56))\nAQ_HI = list(range(56, 62))\nAJ_HI = list(range(62, 65))\nAT_HI = list(range(65, 67))\nA_HI = list(range(67, 70))\nKQ_HI = list(range(70, 74))\nKJ_HI = list(range(74, 78))\nKT_HI = list(range(78, 80))\nK_HI = list(range(80, 83))\nQJ_HI = list(range(83, 86))\nQT_HI = list(range(86, 88))\nQ_HI = list(range(88, 91))\nJT_HI = list(range(91, 93))\nJ_HI = list(range(93, 95))\nT_HI = list(range(95, 98))\nI_HI = list(range(98, 100))\n\n##Managers\n\n###general\n\nGROWRATE = 1\n","repo_name":"jonsimonsen/Casimo","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"70228848713","text":"from yc.models import YieldCurve, CashRate, SwapRate, FuturesRate, FRA\nfrom django.contrib import admin\n\nclass CashInline(admin.TabularInline):\n model = CashRate\n extra = 1\n\nclass SwapInline(admin.TabularInline):\n model = SwapRate\n extra = 1\n\nclass FRAInline(admin.TabularInline):\n model = FRA\n extra = 1\n\nclass FuturesInline(admin.TabularInline):\n model = FuturesRate\n extra = 1\n\n\nclass YCAdmin(admin.ModelAdmin):\n fields = ['name', 'currency', 'pricing_date']\n inlines = [CashInline, SwapInline, FRAInline, FuturesInline]\n list_display = ['name','currency', 'pricing_date']\n list_filter = ['currency', 'pricing_date']\n search_fields = ['name']\n date_hierarchy = 'pricing_date'\n save_as = True\n\nadmin.site.register(YieldCurve, YCAdmin)\n","repo_name":"jwg4/pfp","sub_path":"pyprice/yc/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30902080399","text":"from django.conf.urls import url\n\nfrom .views import (ArticleDetailView, ArticlePrintView, IssueDetailView,\n PageDetailView, PagePrintView,\n PeriodicalsSearchView, PublicationIssueAjax,\n PublicationDetailView, PublicationListView)\n\nurlpatterns = [\n url(r'^search/', PeriodicalsSearchView.as_view(), name='search'),\n url((r'^(?P[-\\w]+)/'\n 'issues/(?P[-\\w]+)/'\n 'page/(?P[0-9]+)/'\n 'articles/(?P[-\\w]+)/$'),\n ArticleDetailView.as_view(), name='article-detail'),\n url((r'^(?P[-\\w]+)/'\n 'issues/(?P[-\\w]+)/'\n 'page/(?P[0-9]+)/'\n 'articles/(?P[-\\w]+)/print/$'),\n ArticlePrintView.as_view(), name='article-print'),\n url((r'^(?P[-\\w]+)/'\n 'issues/(?P[-\\w]+)/'\n 'page/(?P[0-9]+)/$'),\n PageDetailView.as_view(), name='page-detail'),\n url((r'^(?P[-\\w]+)/'\n 'issues/(?P[-\\w]+)/'\n 'page/(?P[0-9]+)/print/$'),\n PagePrintView.as_view(), name='page-print'),\n url(r'^(?P[-\\w]+)/issues/(?P[-\\w]+)/$',\n IssueDetailView.as_view(), name='issue-detail'),\n url(r'^(?P[-\\w]+)/$',\n PublicationDetailView.as_view(), name='publication-detail'),\n url(r'^(?P[-\\w]+)/(?P[-\\w]+)/$',\n PublicationIssueAjax.as_view(), name='publication-issue-ajax'),\n url(r'^$', PublicationListView.as_view(), 
name='publication-list')\n]\n","repo_name":"kingsdigitallab/mpol-django","sub_path":"periodicals/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38327967963","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 5 00:47:12 2022\n\n@author: nghia_sv\n\"\"\"\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport crafting_helper as crafter\nfrom crafting_helper import Alignment\n\n#nghiant: we need some pixel-perfect shit by scaling up every component\n#--foil: x4\n#--portrait: x4\n#--stat_summon: x3\n#--font: heavyweight (size, no-scale: 49)\n#--stat_power: x3 (size: 147)\n#--stat_health: x3 (size: 147)\n\nSCALE_FOIL = 4\nSCALE_TRIBE = 1.7\nSCALE_PORTRAIT = 4\nSCALE_STAT_SUMMON = 3\nSCALE_NAME = 4\nSCALE_SIGIL = 3.6\n\nTRIBE_X = 0\nTRIBE_Y = 0\n\nPORTRAIT_X = 6 * SCALE_FOIL\nPORTRAIT_Y = 33 * SCALE_FOIL\n\nSTAT_SUMMON_X = 119 * SCALE_FOIL\nSTAT_SUMMON_Y = 21 * SCALE_FOIL\n\nSTAT_POWER_X = 18 * SCALE_FOIL\nSTAT_POWER_Y = 133 * SCALE_FOIL\n\nSTAT_HEALTH_X = 107 * SCALE_FOIL\nSTAT_HEALTH_Y = 148 * SCALE_FOIL\n\nSIGIL_X = 62 * SCALE_FOIL\nSIGIL_Y = 157 * SCALE_FOIL\n\nNAME_FONT = 'HEAVYWEI.TTF'\nNAME_FONT_SIZE = 24 * SCALE_NAME\nNAME_LETTER_SPACING = 6 * SCALE_NAME\n\nNAME_X = 63 * SCALE_FOIL\nNAME_Y = 8 * SCALE_FOIL\n\nSTAT_FONT = 'HEAVYWEI.TTF'\nSTAT_FONT_SIZE = 49 * SCALE_STAT_SUMMON\n\nROOT = 'C:/Users/nghia_sv/Desktop/image/'\n# ROOT = './'\nCARD_FOIL_SOURCE = 'card_foil/card_empty-resources.assets-3411.png'\nCARD_TRIBE_SOURCE = 'card_tribe/tribeicon_hooved-resources.assets-1616.png'\nCARD_PORTRAIT_SOURCE = 'card_portrait/portrait_goat-resources.assets-3441.png'\nCARD_STAT_SUMMON_SOURCE = 'card_stat/cost_1blood-resources.assets-1295.png'\nCARD_STAT_POWER = 0\nCARD_STAT_HEALTH = 1\nCARD_SIGIL_SOURCE = 'card_sigil/ability_tripleblood-resources.assets-3968.png'\nCARD_NAME = 'black goat'\n\n\n# CARD_PORTRAIT_SOURCE = 'card_portrait/portrait_direwolf-resources.assets-1518.png'\n# CARD_TRIBE_SOURCE = 'card_tribe/tribeicon_canine-resources.assets-1637.png'\n# CARD_STAT_SUMMON_SOURCE = 'card_stat/cost_3blood-resources.assets-3641.png'\n# CARD_STAT_POWER = 2\n# CARD_STAT_HEALTH = 5\n# CARD_SIGIL_SOURCE = 'card_sigil/ability_doublestrike-resources.assets-1952.png'\n# CARD_NAME = 'dire wolf'\n\n\n# CARD_PORTRAIT_SOURCE = 'card_portrait/portrait_wolf-resources.assets-3815.png'\n# CARD_TRIBE_SOURCE = 'card_tribe/tribeicon_canine-resources.assets-1637.png'\n# CARD_STAT_SUMMON_SOURCE = 'card_stat/cost_2blood-resources.assets-1286.png'\n# CARD_STAT_POWER = 3\n# CARD_STAT_HEALTH = 2\n# CARD_SIGIL_SOURCE = 'card_sigil/ability_doublestrike-resources.assets-1952.png'\n# CARD_NAME = 'wolf'\n\n\nCARD_PORTRAIT_SOURCE = 'card_portrait/portrait_ravenegg-resources.assets-1855.png'\nCARD_TRIBE_SOURCE = 'card_tribe/tribeicon_bird-resources.assets-3246.png'\nCARD_STAT_SUMMON_SOURCE = 'card_stat/cost_1blood-resources.assets-1295.png'\nCARD_STAT_POWER = 0\nCARD_STAT_HEALTH = 2\nCARD_SIGIL_SOURCE = 'card_sigil/ability_evolve_1-resources.assets-2713.png'\nCARD_NAME = 'raven egg'\n\n\n# CARD_PORTRAIT_SOURCE = 'card_portrait/portrait_adder-resources.assets-3623.png'\n# CARD_TRIBE_SOURCE = 'card_tribe/tribeicon_reptile-resources.assets-2914.png'\n# CARD_STAT_SUMMON_SOURCE = 'card_stat/cost_2blood-resources.assets-1286.png'\n# CARD_STAT_POWER = 1\n# CARD_STAT_HEALTH = 1\n# CARD_SIGIL_SOURCE = 'card_sigil/ability_deathtouch-resources.assets-3909.png'\n# 
CARD_NAME = 'adder'\n\n# CARD_TRIBE_SOURCE = 'card_tribe/tribeicon_bird-resources.assets-3246.png'\n\ncard_foil = crafter.prepare_image(ROOT + CARD_FOIL_SOURCE, scale=SCALE_FOIL)\ncard_tribe = crafter.prepare_image(ROOT + CARD_TRIBE_SOURCE, scale=SCALE_TRIBE)\ncard_portrait = crafter.prepare_image(ROOT + CARD_PORTRAIT_SOURCE, scale=SCALE_PORTRAIT)\ncard_stat_summon = crafter.prepare_image(ROOT + CARD_STAT_SUMMON_SOURCE, scale=SCALE_STAT_SUMMON)\nstat_power = crafter.render_text_object(str(CARD_STAT_POWER), STAT_FONT, STAT_FONT_SIZE)\nstat_health = crafter.render_text_object(str(CARD_STAT_HEALTH), STAT_FONT, STAT_FONT_SIZE)\ncard_sigil = crafter.prepare_image(ROOT + CARD_SIGIL_SOURCE, scale=SCALE_SIGIL)\ncard_name = crafter.render_text_object(str(CARD_NAME), NAME_FONT, NAME_FONT_SIZE, letter_spacing=NAME_LETTER_SPACING)\n\n#crafting\ncard_crafted = crafter.add_overlay(card_foil, card_tribe, x=TRIBE_X, y=TRIBE_Y, align=Alignment.TOP_LEFT, overlay_alpha=0.4)\ncard_crafted = crafter.add_overlay(card_crafted, card_portrait, x=PORTRAIT_X, y=PORTRAIT_Y, align=Alignment.TOP_LEFT)\ncard_crafted = crafter.add_overlay(card_crafted, card_stat_summon, x=STAT_SUMMON_X, y=STAT_SUMMON_Y, align=Alignment.TOP_RIGHT)\ncard_crafted = crafter.add_overlay(card_crafted, stat_power, x=STAT_POWER_X, y=STAT_POWER_Y, align=Alignment.TOP_CENTER)\ncard_crafted = crafter.add_overlay(card_crafted, stat_health, x=STAT_HEALTH_X, y=STAT_HEALTH_Y, align=Alignment.TOP_CENTER)\ncard_crafted = crafter.add_overlay(card_crafted, card_sigil, x=SIGIL_X, y=SIGIL_Y, align=Alignment.MID_CENTER)\ncard_crafted = crafter.add_overlay(card_crafted, card_name, x=NAME_X, y=NAME_Y, align=Alignment.TOP_CENTER)\n\n# plt.subplots()\n# plt.imshow(stat_power)\n\nplt.subplots()\nplt.imshow(card_crafted)\nImage.fromarray(np.uint8(card_crafted * 255)).save(CARD_NAME.replace(' ', '_') + '.png')\n","repo_name":"syncerpn/inscryption_card_render","sub_path":"craft_card.py","file_name":"craft_card.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"42106097179","text":"# Problem: https://www.hackerrank.com/challenges/word-order/problem\n\nn = int(input())\n\nhashMap = {}\n# read exactly n words, not a hard-coded count\nfor i in range(n):\n    s = input()\n    hashMap[s] = hashMap.get(s, 0) + 1\n    \nprint(len(hashMap.values()))\n\nfor i in hashMap.values():\n    print(i, end=\" \")\n","repo_name":"nabroleonx/A2SV","sub_path":"word-order.py","file_name":"word-order.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"37065104425","text":"import os \n\npath = input('Enter the path to the files: ')\n\nmetricsList = (\n    'creation date',\n    'modification date',\n    'title', \n    'size'\n)\n\nfor count, i in enumerate(metricsList, start=1):\n    print(f'{count}. 
{i}')\n\nmetricsChoice = int(input(\"Enter the number corresponding to the metric: \"))\ncommonName = input('\\nEnter the common name for the files: ')\n\nfilePaths = []\n\nfor i in os.listdir(path):\n    # join properly so the result is valid whether or not the user typed a trailing slash\n    filePaths.append(os.path.join(path, i))\n\nif metricsChoice == 1:\n    filePaths.sort(key = os.path.getctime)\nelif metricsChoice == 2:\n    filePaths.sort(key=os.path.getmtime)\nelif metricsChoice == 3:\n    filePaths.sort()\nelif metricsChoice == 4:\n    filePaths.sort(key=os.path.getsize)\n\nn = 1\nfor src in filePaths:\n    ext = src.split('.')[-1]\n    os.rename(src, os.path.join(path, f'{commonName} {n}.{ext}'))\n    n += 1","repo_name":"Subhradeep10/Stunning-Scripts","sub_path":"Python/File Renamer/fileRenamer.py","file_name":"fileRenamer.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"27"}
{"seq_id":"37402747846","text":"#1.2 is one string a permutation of another string?\ndef is_permutation(str1, str2):\n    if len(str1) != len(str2):\n        return False\n\n    counts = {}\n    for i in range(0, len(str1)):\n        if str1[i] not in counts:\n            counts[str1[i]] = 1\n            #print(counts)\n        else:\n            counts[str1[i]] = counts[str1[i]] + 1\n            #print(counts)\n    for i in range(0, len(str1)):\n        if ((str2[i] not in counts) or (counts[str2[i]] <= 0)):\n            #print(counts)\n            return False\n        else:\n            counts[str2[i]] = counts[str2[i]] - 1\n            #print(counts)\n    return True\n\n\n# use sorting to save space\ndef is_permutation_2(str1, str2):\n    if len(str1) != len(str2):\n        return False\n    str1_sorted = sorted(str1)\n    str2_sorted = sorted(str2)\n    for i in range(0, len(str1)):\n        if (str1_sorted[i] != str2_sorted[i]):\n            return False\n    return True\n\n\n# instead of dictionary use list\n# this will save some space\n# assume ascii charset of 128\ndef is_permutation_3(str1, str2):\n    if len(str1) != len(str2):\n        return False\n\n    counts = [0]*128\n\n    for i in range(0, len(str1)):\n        index = ord(str1[i])\n        #print(index)\n        counts[index] += 1\n\n    for i in range(0, len(str2)):\n        index = ord(str2[i])\n        #print(index)\n        counts[index] -= 1\n        if counts[index] < 0:\n            return False\n    return True\n\n","repo_name":"ansoncoding/CTCI","sub_path":"Python/01_Strings_Arrays/IsPermutation.py","file_name":"IsPermutation.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"44411273765","text":"import logging\nfrom wmr.thriftapi import JobService\nfrom wmr.thriftapi.ttypes import JobStatus, State, PhaseStatus, DataPage, JobInfo\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.protocol import TBinaryProtocol\nfrom thrift.server import TServer\n\nclass MockJobService(JobService.Iface):\n    def storeDataset(self, name, data):\n        return \"/data/foo/nowhere\"\n    \n    def readDataPage(self, path, page):\n        dataPage = DataPage()\n        dataPage.data = \"\"\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc\n        vulputate, ligula eu aliquet pretium, mi mi posuere nisi, consequat\n        dictum mi nibh quis dui. Nullam urna dui, molestie vel aliquam vel,\n        semper eu libero. Sed ac molestie erat. Cum sociis natoque penatibus et\n        magnis dis parturient montes, nascetur ridiculus mus. Sed velit tortor,\n        consequat et commodo sed, interdum ut leo. In non orci magna. Nulla\n        facilisi. Sed pharetra justo ut enim semper convallis. Praesent rutrum\n        faucibus nulla, ut dignissim neque mollis et. Mauris eu massa purus, in\n        elementum dolor. Aliquam erat volutpat. 
Aliquam fringilla fringilla\n        massa, quis malesuada risus dictum.\"\"\"\n        dataPage.totalPages = 10\n        return dataPage\n    \n    def submit(self, request):\n        return 10010101\n    \n    def getInfo(self, id):\n        info = JobInfo()\n        info.name = 'foo'\n        info.nativeID = 'job_ZZZZZZZZZZ_ZZZZ'\n        info.test = False\n        info.inputPath = '/data/foo/nowhere'\n        info.mapper = 'def mapper(key, val): pass'\n        info.reducer = 'def reducer(key, vals): pass'\n        info.requestedMapTasks = 30\n        info.requestedReduceTasks = 5\n        return info\n    \n    def getStatus(self, id):\n        status = JobStatus()\n        status.state = State.RUNNING\n        status.info = self.getInfo(id)\n        status.mapStatus = PhaseStatus()\n        status.mapStatus.code = 0\n        status.mapStatus.progress = 65.5\n        status.mapStatus.outputPath = \"/data/foo/nowhere\"\n        status.mapStatus.errors = \"It's alive!\"\n        status.reduceStatus = status.mapStatus\n        return status\n    \n    def kill(self, id):\n        return\n\ndef run(host='localhost', port=9090):\n    mock = MockJobService()\n    processor = JobService.Processor(mock)\n    transport = TSocket.TServerSocket(port=port)\n    tfactory = TTransport.TBufferedTransportFactory()\n    pfactory = TBinaryProtocol.TBinaryProtocolFactory()\n    \n    server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)\n    logging.info(\"About to listen on %s:%d\" % (host, port))\n    server.serve()\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO)\n    \n    port = 9090\n    import sys\n    if len(sys.argv) == 2:\n        port = int(sys.argv[1])\n    # pass the port by keyword; the first positional parameter of run() is the host\n    run(port=port)\n","repo_name":"benguillet/wmr-frontend","sub_path":"wmr/mockserver.py","file_name":"mockserver.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"13337157905","text":"import argparse\nimport sys\nimport time\nimport random\nfrom color import Color\nfrom tile import Tile\nfrom player import Player\n\nrandom.seed()\n\ndef wall():\n    return Tile(False, Color(50,50,50))\n\ndef box():\n    return Tile(False, Color(0,0,0), True)\n\ndef floor():\n    return Tile(True, Color(0,200,0))\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--width', type=int)\nparser.add_argument('--height', type=int)\n\ndimensions = parser.parse_args()\n\nSCALE = 2\nWIDTH = dimensions.width/SCALE\nHEIGHT= dimensions.height/SCALE\n\nplayers = [\n    Player(3,3,Color(255,0,255)),\n    Player(WIDTH-3,3,Color(255,0,0)),\n    Player(WIDTH-3,HEIGHT-3,Color(255,255,0)),\n    Player(3,HEIGHT-3,Color(0,255,255))\n]\n\nplayfield = [[box() if random.random() > 0.4 else floor() for i in range(WIDTH)] for j in range(HEIGHT)]\nfor i in range(WIDTH):\n    playfield[0][i] = wall()\n    playfield[1][i] = wall()\n    playfield[HEIGHT-1][i] = wall()\n\nfor i in range(HEIGHT):\n    playfield[i][0] = wall()\n    playfield[i][1] = wall()\n    playfield[i][WIDTH-1] = wall()\n\nfor i in xrange(0, WIDTH, 2):\n    for j in xrange(0, HEIGHT, 2):\n        playfield[j][i] = wall()\n\nplayfield[3][3] = floor()\nplayfield[3][4] = floor()\nplayfield[4][3] = floor()\nplayfield[3][WIDTH-3] = floor()\nplayfield[4][WIDTH-3] = floor()\nplayfield[3][WIDTH-4] = floor()\nplayfield[HEIGHT-3][3] = floor()\nplayfield[HEIGHT-4][3] = floor()\nplayfield[HEIGHT-3][4] = floor()\nplayfield[HEIGHT-3][WIDTH-3] = floor()\nplayfield[HEIGHT-4][WIDTH-3] = floor()\nplayfield[HEIGHT-3][WIDTH-4] = floor()\n\nsys.stdout.write('READY\\n')\nsys.stdout.flush()\n\nlast_tick = time.time()\nwhile True:\n    line = sys.stdin.readline()\n    if line.strip() == 'SKILL':\n        exit()\n    elif line.strip() == 'STICK':\n        tick = time.time() - last_tick > 0.1\n        if tick:\n            for player in players:\n                
player.tick(playfield)\n\n        for j in range(dimensions.height):\n            for i in range(dimensions.width):\n                if tick:\n                    playfield[j/SCALE][i/SCALE].tick(playfield)\n                sys.stdout.write(playfield[j/SCALE][i/SCALE].draw())\n        sys.stdout.flush()\n\n        if tick:\n            last_tick = time.time()\n    elif line[0] == 'P' and line[2] == 'D' and line[4] == '1':\n        players[int(line[1])-1].intent = line[3]\n    elif line[0] == 'P' and line[2] == 'D' and line[4] == '0' and players[int(line[1])-1].intent == line[3]:\n        players[int(line[1])-1].intent = None\n    elif line[0] == 'P' and line[2] == 'F' and line[4] == '1':\n        players[int(line[1])-1].intent = 'B'\n","repo_name":"hgcummings/floorcade","sub_path":"games/bomberperson/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"}
{"seq_id":"32464158017","text":"from typing import NamedTuple, Sequence, Optional, Any\nfrom collections import namedtuple\n\nimport numpy as np\n\nfrom PDE.typing import (Generator as _Gen, FloatArray)\n\n\ndef new_argument_builder(\n    name: str,\n    field_names: Sequence[str],\n    defaults: Optional[Any]=None,\n    target_name: Optional[str]=None,\n    target_purpose: Optional[str]=None,\n    doc_string: Optional[str]=None\n    ):\n    \"\"\"\n    Construct FuncArg class derived from namedtuple\n    :parameter target_name: Can be function name \"foo\" or more detailed like \"foo from module bar\"\n    :parameter target_purpose: Useful if you have multiple argument builders for one function\n    \"\"\"\n    field_names = list(field_names)\n    if defaults is None:\n        defaults = list()\n    else:\n        defaults = list(defaults)\n\n    if target_name is None:\n        doc = f\"An argument constructor\\n\"\n    else:\n        doc = f\"An argument constructor for function {target_name}\\n\"\n    if target_purpose is None:\n        doc += f\"\\n\"\n    else:\n        doc += f\"{target_name}\\n\\n\"\n    if doc_string is not None:\n        doc += doc_string\n\n    class FuncArg(namedtuple(name, field_names, defaults=defaults)):\n        __doc__ = doc\n        @classmethod\n        def build(cls, *arg):\n            return cls(*arg)._asdict()\n\n    return FuncArg\n\n\ndef close_range(n: int, start: int=0, step: int=1) -> _Gen[tuple[int, int], None, None]:\n    if step <= 0:\n        raise ValueError(\"Negative step not supported\")\n    \n    head = -(start + 1)\n    queu = start\n    for _ in range(n // step):\n        yield queu, head\n        queu += step\n        head -= step\n\ndef tridiag(values: FloatArray, size: int) -> FloatArray:\n    # number of off-diagonal bands on each side of the main diagonal\n    n = len(values) // 2\n    out = np.zeros((size, size))\n    for i in range(-n, n + 1):\n        out = out + np.eye(size, k=i) * values[i + n]\n    return out\n\n\n\nif __name__ == \"__main__\":\n    print(tridiag(np.array([2, 3, 2],), 6))","repo_name":"zazbone/PDE","sub_path":"PDE/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"45369551374","text":"### Yandex, VK, Delivery, Tinkoff, Sber Service, VTB, InfoTeCS, MTS, CROC, I-Teco\nimport psycopg2\nimport requests, json\nimport config\n\n# list of company ids\ncompanies_id = [1740, 15478, 592442, 78638, 1473866, 4181, 3778, 3776, 2987, 115]\ndef get_hh_data_vacancy():\n    \"\"\"\n    Fetches vacancy data via the API\n    :return: list of data\n    \"\"\"\n    list = []\n    for id in companies_id:\n        params = {\n            'employer_id': id,\n            'per_page': 30\n        }\n        request = requests.get(\"https://api.hh.ru/vacancies\", params=params)\n        data = request.json()['items']\n        list.append(data)\n    return list\n\n\ndef get_necessary_vacancy_info():\n    
\"\"\"\n Убирает лишнюю информацию о вакансиях\n :return: список вакансий\n \"\"\"\n list = []\n for item in get_hh_data_vacancy():\n for vacancy in item:\n dict = {\"vacancy_name\": vacancy['name'],\n \"employer\": vacancy['employer']['name'], \"vacancy_url\": vacancy['alternate_url'],\n }\n list.append(dict)\n\n if vacancy['salary'] is None:\n dict['salary_from'] = 0\n dict['salary_to'] = 0\n dict['currency'] = 'Нет валюты'\n else:\n if vacancy['salary']['from'] is None:\n dict['salary_from'] = 0\n else:\n dict['salary_from'] = vacancy['salary']['from']\n\n if vacancy['salary']['to'] is None:\n dict['salary_to'] = 0\n else:\n dict['salary_to'] = vacancy['salary']['to']\n if vacancy['salary']['currency'] == 'RUR':\n dict['currency'] = 'RUB'\n else:\n dict['currency'] = vacancy['salary']['currency']\n return list\n\n\ndef get_employers_info():\n \"\"\"\n Получает данные о работодателях через API\n :return: список работодателей\n \"\"\"\n list = []\n for id in companies_id:\n request = requests.get(f\"https://api.hh.ru/employers/{id}\")\n data = request.json()\n list.append(data)\n return list\n\ndef get_necessary_employers_data():\n \"\"\"\n Убирает лишнюю информацию о работодателях\n :return: список работодателей\n \"\"\"\n list =[]\n for employer in get_employers_info():\n dict = {\"title\": employer[\"name\"], \"site_url\": employer[\"site_url\"]}\n list.append(dict)\n return list\n\ndef create_database(database_name, params):\n \"\"\"\n Создает базу данных и создает таблицы employers, vacancies в postgresql\n :param database_name: имя базы данных\n :param params: параметры подключения\n \"\"\"\n conn = psycopg2.connect(dbname='postgres', **params)\n conn.autocommit = True\n cur = conn.cursor()\n cur.execute(f\"drop database if exists {database_name}\")\n cur.execute(f\"create database {database_name}\")\n conn.close()\n\n conn = psycopg2.connect(dbname=database_name, **params)\n\n with conn.cursor() as cur:\n cur.execute(\"\"\"\n CREATE TABLE employers (\n\t employer_id SERIAL PRIMARY KEY,\n\t title VARCHAR(255) NOT NULL,\n site_url VARCHAR(255)\n )\n \"\"\")\n\n with conn.cursor() as cur:\n cur.execute(\"\"\"\n create table vacancies (\n vacancy_id serial primary key,\n vacancy_name varchar(255),\n salary_from integer,\n salary_to integer,\n currency varchar(10),\n employer varchar(255),\n employer_id int ,\n vacancy_url varchar(255)\n );\n alter table vacancies\n add constraint fk_vacancies_employers foreign key(employer_id) references employers(employer_id);\n \"\"\")\n conn.commit()\n conn.close()\n\ndef save_data_employer_to_database(data, database_name, params):\n \"\"\"\n Заполняет таблицу employers полученными данными по API\n :param data: данные о работодателях\n :param database_name: имя базы данных\n :param params: параметры подключения\n \"\"\"\n conn = psycopg2.connect(dbname=database_name, **params)\n\n with conn.cursor() as cur:\n for employer in data:\n cur.execute(\n \"\"\"\n insert into employers (title, site_url)\n values (%s, %s)\n \"\"\",\n (employer['title'], employer['site_url'])\n )\n conn.commit()\n conn.close()\n\ndef save_data_vacancy_to_database(data, database_name, params):\n \"\"\"\n Заполняет таблицу vacancies данных о вакансиях полученных по API\n :param data: данные о вакансиях\n :param database_name: имя базы данных\n :param params: параметры подключения\n \"\"\"\n conn = psycopg2.connect(dbname=database_name, **params)\n\n with conn.cursor() as cur:\n for vacancy in data:\n cur.execute(\n \"\"\"\n insert into vacancies (vacancy_name, salary_from, 
salary_to, currency, employer, vacancy_url)\n values (%s, %s, %s, %s, %s, %s)\n \"\"\",\n (vacancy['vacancy_name'], vacancy['salary_from'], vacancy['salary_to'], vacancy['currency'], vacancy['employer'], vacancy['vacancy_url'])\n )\n cur.execute(\"\"\"\n update vacancies set employer_id = 1 where employer='Яндекс';\n update vacancies set employer_id = 2 where employer='VK';\n update vacancies set employer_id = 3 where employer='Маркет Деливери';\n update vacancies set employer_id = 4 where employer='Тинькофф';\n update vacancies set employer_id = 5 where employer='Сбербанк-Сервис';\n update vacancies set employer_id = 6 where employer='Банк ВТБ (ПАО)';\n update vacancies set employer_id = 7 where employer='ИнфоТеКС';\n update vacancies set employer_id = 8 where employer='МТС';\n update vacancies set employer_id = 9 where employer='КРОК';\n update vacancies set employer_id = 10 where employer='Ай-Теко (I-Teco)'; \n \"\"\")\n\n conn.commit()\n conn.close()\n\n","repo_name":"RenatUrazbakhtin/hh_superjob_parser","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26576366712","text":"from odoo import models, _\nfrom odoo.exceptions import UserError\n\n\nclass FSMWizard(models.TransientModel):\n _inherit = 'fsm.wizard'\n\n def action_convert_location(self, partner):\n res = self.env['fsm.location'].search_count(\n [('partner_id', '=', partner.id)])\n if res == 0:\n vals = {'partner_id': partner.id,\n 'owner_id': partner.id,\n 'customer_id': partner.id,\n 'inventory_location_id':\n partner.property_stock_customer.id}\n self.env['fsm.location'].create(vals)\n partner.write({'fsm_location': True})\n else:\n raise UserError(_('A Field Service Location related to that'\n ' partner already exists.'))\n return res\n","repo_name":"Jeisonpernia/modules","sub_path":"addons/fieldservice_stock/models/fsm_wizard.py","file_name":"fsm_wizard.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20205628873","text":"import speech_recognition as sr\r\nimport pyttsx3\r\nimport pywhatkit\r\nimport datetime\r\nimport wikipedia\r\nimport pyjokes\r\n\r\n# Import additional libraries for OpenAI API\r\nimport requests\r\nimport json\r\n\r\n# Define OpenAI API key\r\nopenai_key = \"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\"\r\n\r\n# Set up SpeechRecognizer\r\nlistener = sr.Recognizer()\r\n\r\n# Set up text-to-speech engine\r\nengine = pyttsx3.init()\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice', voices[1].id)\r\n\r\n# Function to speak text\r\ndef talk(text):\r\n engine.say(text)\r\n engine.runAndWait()\r\n\r\n# Function to listen for and recognize speech commands\r\ndef take_command():\r\n command = ''\r\n try:\r\n with sr.Microphone() as source:\r\n print('listening...')\r\n voice = listener.listen(source)\r\n command = listener.recognize_google(voice)\r\n print(command)\r\n except:\r\n pass\r\n return command\r\n\r\n# Function to answer questions using OpenAI API\r\ndef answer_question(question):\r\n # Set up OpenAI API request\r\n request_data = {\r\n \"prompt\": question,\r\n \"max_tokens\": 50\r\n }\r\n headers = {\"Authorization\": \"Bearer \" + openai_key}\r\n response = requests.post(\"https://api.openai.com/v1/engines/davinci/completions\", json=request_data, headers=headers)\r\n response_data = response.json()\r\n # Get answer from response 
data\r\n answer = response_data[\"choices\"][0][\"text\"]\r\n # Return answer\r\n return answer\r\n\r\n# Function to run the Alexa skill\r\ndef run_alexa(command):\r\n # If command is a question, answer it\r\n if 'who' in command or 'what' in command or 'when' in command or 'where' in command or 'why' in command or 'how' in command:\r\n if 'why' in command:\r\n command = command.replace('why', '')\r\n answer = answer_question(command)\r\n talk(answer)\r\n # If command is not a question, respond normally\r\n else:\r\n if 'play' in command:\r\n song = command.replace('play', '')\r\n talk('playing ' + song)\r\n pywhatkit.playonyt(song)\r\n elif 'time' in command:\r\n time = datetime.datetime.now().strftime('%I:%M %p')\r\n talk('Current time is ' + time)\r\n elif 'who is' in command:\r\n person = command.replace('who is', '')\r\n info = wikipedia.summary(person, 1)\r\n print(info)\r\n talk(info)\r\n elif 'date' in command:\r\n date = datetime.datetime.now().strftime('%B %d, %Y')\r\n talk('Current date is ' + date)\r\n elif 'are you single' in command:\r\n talk('I am in a relationship with wifi')\r\n elif 'joke' in command:\r\n talk(pyjokes.get_joke())\r\n else:\r\n talk('please say the command again')\r\n\r\n# Run Alexa skill\r\nwhile True:\r\n command_type = input(\"Enter 't' for text command or 'v' for voice command: \")\r\n if command_type == 'v':\r\n run_alexa(take_command())\r\n elif command_type == 't':\r\n command = input(\"Enter your command: \")\r\n run_alexa(command)\r\n\r\n","repo_name":"mootezchachia/Alexa-Clone-in-python-using-voice-command","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"35219796709","text":"from ckeditor_uploader.widgets import CKEditorUploadingWidget\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom apps.crm.models import MyUser, Company, Project, ContactMessage\n\nUser = get_user_model()\n\n\nclass MyUserModelForm(forms.ModelForm):\n class Meta:\n model = MyUser\n fields = (\n 'username',\n 'first_name',\n 'last_name',\n 'email',\n 'profile_picture'\n )\n\n\nclass ProjectModelForm(forms.ModelForm):\n description = forms.CharField(widget=CKEditorUploadingWidget, )\n\n class Meta:\n model = Project\n fields = (\n 'name',\n 'company',\n 'description',\n 'start_date',\n 'end_date',\n 'price',\n )\n\n\nclass ContactMessageModelForm(forms.ModelForm):\n description = forms.CharField(widget=CKEditorUploadingWidget, )\n\n class Meta:\n model = ContactMessage\n fields = (\n 'title',\n 'project',\n 'type_of_message',\n 'description',\n )\n\nclass CompanyModelForm(forms.ModelForm):\n sh_description = forms.CharField(widget=CKEditorUploadingWidget, )\n description = forms.CharField(widget=CKEditorUploadingWidget, )\n\n class Meta:\n model = Company\n fields = (\n 'name',\n 'contact',\n 'sh_description',\n 'description',\n 'address',\n 'phone',\n 'ad_phone_1',\n 'ad_phone_2',\n 'email',\n 'ad_email_1',\n 'ad_email_2',\n )\n\n\nclass UserLoginForm(forms.ModelForm):\n class Meta:\n model = MyUser\n fields = ('username', 'password')\n\n def clean_first_name(self):\n data = self.cleaned_data[\"first_name\"]\n return data\n\n def clean(self):\n pass\n\n","repo_name":"OMAY/cr-case","sub_path":"apps/crm/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} 
+{"seq_id":"2308668906","text":"#!/usr/bin/env python3\n\n\n# Diagonalize 3d-block of Hamiltonian $H_0$\n# Find basis (eigenvectors) which diagonalizes non-interacting local Hamiltonian $H_0$.\n\n# Choose simulation folder to study.\n\n# The script will read `Local hamiltonian` from RSPt `out` file.\n# Then, diagonalize Hamiltonian and use eigenvectors to transform from spherical harmonics basis\n# to basis where Hamiltonian is diagonal.\n# Finally the script will save transformation vectors in RSPt-friendly format to disk.\n\n\nimport numpy as np\nimport sys\n\nfrom rspt2spectra import energies, orbitals\n\n\ndef main():\n\n if len(sys.argv) > 1 and sys.argv[1] == 'spinpol':\n # RSPt calculations are spin-polarized\n spinpol = True\n else:\n spinpol = False\n\n # Label for user defined transformation matrix.\n # E.g. name of irreducible representation.\n if spinpol and len(sys.argv) == 3:\n irrlabel = sys.argv[2]\n elif not spinpol and len(sys.argv) == 2:\n irrlabel = sys.argv[1]\n else:\n irrlabel = '1'\n\n hs, labels = energies.parse_matrices('out')\n # Loop over all the local Hamiltonians\n for clusterIndex, (h, label) in enumerate(zip(hs, labels)):\n print('------ Cluster label:', label, '------')\n # Number of correlated orbitals\n # 5 for d-orbitals, 7 for f-orbitals\n norb = np.shape(h)[0]//2\n # Name of the file to write into\n fwrite = 'proj-' + label + '-Irr' + irrlabel + '.inp'\n if spinpol:\n hd = np.copy(h)\n else:\n hd = h[0:norb, 0:norb] # dn block\n print('Hamiltonian:')\n print('Real part:')\n print(energies.print_matrix(hd.real))\n print('Imag part:')\n print(energies.print_matrix(hd.imag))\n print()\n # Eigenvalues and eigenvectors (column vectors)\n e, v = np.linalg.eigh(hd)\n # Reorder eigenvectors\n if spinpol:\n vs = np.zeros_like(v)\n es = np.zeros_like(e)\n a, b = 0, norb\n for i in range(2*norb):\n if np.sum(np.abs(v[:norb, i])**2) > 0.5:\n vs[:, a] = v[:, i]\n es[a] = e[i]\n a += 1\n else:\n vs[:, b] = v[:, i]\n es[b] = e[i]\n b += 1\n else:\n vs = np.copy(v)\n es = np.copy(e)\n print(\"Eigenvalues:\")\n print(es)\n if spinpol:\n print('Eigenvalues: e(dn)-e(up):')\n print(es[:5]-es[5:])\n print(\"Eigenvectors:\")\n print('Real part:')\n print(energies.print_matrix(vs.real))\n print('Imag part:')\n print(energies.print_matrix(vs.imag))\n print('Diagonalized Hamiltonian:')\n hdiag = np.dot(np.transpose(np.conj(vs)), np.dot(hd, vs))\n print('Real part:')\n print(energies.print_matrix(hdiag.real))\n print('Imag part:')\n print(energies.print_matrix(hdiag.imag))\n print()\n # Save rotation matrices to file in a RSPt adapted format\n orbitals.write_proj_file(vs, spinpol, fwrite)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JohanSchott/rspt2spectra","sub_path":"scripts/diagonalize_local_H0.py","file_name":"diagonalize_local_H0.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"10801580048","text":"import numpy as np\nimport math\nfrom scipy.interpolate import griddata\n#因为项目保存的图像数据是8*8,所以分析时要对图像进行插值\nrow = 32\ncol = 32\npoints = [(math.floor(i/8),i%8) for i in range(64)]\ngrid_x,grid_y = np.mgrid[0:7:32j,0:7:32j]\ndef imageInterpolate(img ,method = 'linear'):\n '''\n 默认内插方式是linear,也可以传递cubic等\n '''\n if len(img.shape) == 2:\n #只有一帧图片\n return griddata(points,img.ravel() ,(grid_x,grid_y),method=method)\n else:\n #多帧图片\n newImage = []\n for item in img:\n newImage.append(griddata(points,item.ravel(),(grid_x,grid_y),method=method))\n return 
np.array(newImage)\n\n\n\n","repo_name":"grid-eye/grideye","sub_path":"countpeople/interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21922252373","text":"#import os\nimport os\n#import csv\nimport csv\n#create a path for csv\nbudget_csv = os.path.join('Resources','budget_data.csv')\n#lists for months, profits, difference\nmonths = []\nprofits = []\ndifference = []\n#create a with statement\nwith open(budget_csv) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n csv_header = next(csvreader)\n month = 0\n data = list(csvreader)\n month = len(data)\n total = 0\n firstmonth = []\n for row in data:\n months.append(row[0])\n profits.append(row[1])\n for row in range(1,len(profits)):\n x = int(profits[row]) - int(profits[row - 1])\n difference.append(x)\n total = sum(float(row[1]) for row in data)\n maximum = max(float(row[1]) for row in data)\n minimum = min(float(row[1]) for row in data)\n for (a,b) in zip(months, profits):\n if float(b) == minimum:\n minmonth = (a)\n mindecrease = (b)\n if float(b) == maximum:\n maxmonth = (a)\n maxincrease = (b)\n averagechange = round(sum(difference)/len(difference),2)\n print('Financial Analysis\\n-------------------------------')\n print(f'Total Months: {month}')\n print(f'Total: $ {total}')\n print(f'Average Change: ${averagechange}')\n print('Greatest Increase in Profits: ' + str(maxmonth) + ' $' + str(maxincrease))\n print('Greatest Decrease in Profits: ' + str(minmonth) + ' $' +str(mindecrease))\n","repo_name":"mjodonnell95/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4576675258","text":"#!/usr/bin/env python3\r\n\r\nimport socket\r\nimport re\r\n\r\ndef isValidEmail(email):\r\n if len(email) > 7:\r\n if re.match(\"^.+@(\\[?)[a-zA-Z0-9-.]+.([a-zA-Z]{2,3}|[0-9]{1,3})(]?)$\", email) != None:\r\n return True\r\n return False\r\n\r\ndef encode(Type, email):\r\n length = len(email)\r\n message = Type+','+str(length)+','+email\r\n #turn string into byte\r\n Emessage = str.encode(message)\r\n return Emessage\r\n\r\nHOST = '127.0.0.1' # The server's hostname or IP address\r\nPORT = 65432 # The port used by the server\r\nmessage = ''\r\nType = 'Q'\r\n#email = \"jane.doe@gmail.com\"\r\nemail = input(\"Enter email address:\") # Python 3\r\nwhile isValidEmail(email) != True:\r\n print(\"This is not a valid email address try again\")\r\n email = input(\"Enter email address:\")\r\n\r\n\r\nmessage = encode(Type, email)\r\n#length = len(email)\r\n#message = Type+','+str(length)+','+email\r\n#turn string into byte\r\n#Emessage = str.encode(message)\r\n#print(message)\r\n#print(Emessage.decode())\r\nif len(message) < 257:\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n s.connect((HOST, PORT))\r\n s.sendall(message)\r\n data = s.recv(1024)\r\n #s.close() \r\n print(data)\r\n answer = data.decode()\r\n parse = answer.split(',')\r\n Tcheck = parse[0]\r\n Lcheck = parse[1]\r\n Lcheck = int(Lcheck)\r\n Mcheck = parse[2]\r\n if Tcheck != \"R\":\r\n print(\"Type Check Error:Message is corrupted\")\r\n if Lcheck > 255:\r\n print(\"Overflow Error: Message is too big\") \r\n if Tcheck == \"R\":\r\n if Lcheck < 255:\r\n print('The owner of the email is:',Mcheck )\r\nelse:\r\n print(\"Overflow Error: Message is too 
big\")\r\n","repo_name":"PCStorage/Projects","sub_path":"CSE310Assignment1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32597999070","text":"# Comparacao dos dados de reanalise do WW3 com os dados\n# da boia de Fortaleza\n# Henrique Pereira - 26/01/2022\n\nimport os, sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport importlib\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'ocean-wave'))\nimport waveplot, waveaux\nimportlib.reload(waveplot)\nimportlib.reload(waveaux)\nplt.close('all')\n\npth_ww3 = '/home/hp/gdrive/salinopolis/ww3ncep/'\npth_boia = '/media/hp/HIGGINS/database/PNBOIA/fortaleza/'\n\nfln_ww3 = 'output_ww3_pnboia_fortaleza.csv'\nfln_boia = 'fortaleza.csv'\n\nww3 = pd.read_csv(pth_ww3 + fln_ww3, parse_dates=True, index_col='valid_time')\n\nboia = pd.read_csv(pth_boia + fln_boia, parse_dates=True, index_col='Datetime')\n\n#interpola o ww3 para 1h\nww3 = ww3.resample('1H').mean().interpolate()\n\n# reamostra dados da boia para 1h e seleciona o periodo do ww3\n\n# qualificacao nos dados\nboia['Dpd'].loc[boia.Dpd < 0] = np.nan\n\nboia = boia.resample('1H').mean().interpolate()\nboia = boia.loc[ww3.index[0]:ww3.index[-1]]\n\n# calcula intensidade e direcao do vento\nww3['ws'], ww3['wd'] = waveaux.uv2id(ndr_ucomp=ww3.u, ndr_vcomp=ww3.v, str_conv='meteo')\n\n# waveplot.plot_serie_wind_hs_tp_dp(date=ww3.index,\n# \t\t\t\t\t\t\t\t ws=ww3.ws,\n# \t\t\t\t\t\t\t\t wd=ww3.wd,\n# \t\t\t\t\t\t\t\t hs=ww3.swh,\n# \t\t\t\t\t\t\t\t tp=ww3.perpw,\n# \t\t\t\t\t\t\t\t dp=ww3.dirpw,\n# \t\t\t\t\t\t\t\t title='WW3')\n\nfig = plt.figure()\nax1 = fig.add_subplot(211)\nax1.plot(boia.Wspd, label='boia')\nax1.plot(ww3.ws, label='ww3')\nax1.legend()\nax1.set_ylabel('vel vento')\nax2 = fig.add_subplot(212, sharex=ax1)\nax2.plot(boia.Wdir)\nax2.plot(ww3.wd)\nax2.set_ylabel('dir vento')\n\nfig = plt.figure()\nax1 = fig.add_subplot(311)\nax1.plot(boia.Wvht, label='boia')\nax1.plot(ww3.swh, label='ww3')\nax1.legend()\nax1.set_ylabel('altura')\nax2 = fig.add_subplot(312, sharex=ax1)\nax2.plot(boia.Dpd)\nax2.plot(ww3.perpw)\nax2.set_ylabel('periodo')\nax3 = fig.add_subplot(313, sharex=ax1)\nax3.plot(boia.Mwd)\nax3.plot(ww3.dirpw)\nax3.set_ylabel('direcao')\n\nplt.show()","repo_name":"hpppereira/wave-climate-salinopolis","sub_path":"compara_ww3_boia.py","file_name":"compara_ww3_boia.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3237281218","text":"from django.core.exceptions import ValidationError\nfrom rest_framework import status\nfrom rest_framework import serializers\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer, BooleanField\nfrom django.contrib.auth.models import User\nfrom rarerestapi.models import RareUsers, Posts, Categories\nfrom rest_framework.decorators import action\n\nclass PostView(ViewSet):\n def create(self, request):\n user = RareUsers.objects.get(user=request.auth.user)\n category = Categories.objects.get(pk=request.data['categoryId'])\n \n try: \n post = Posts.objects.create(\n user=user,\n title = request.data[\"title\"],\n category = category,\n publication_date=request.data['publicationDate'],\n content=request.data['content'],\n approved=request.data['approved'],\n 
image_url=request.data['imageUrl']\n )\n post_serializer = PostSerializer(post, context={'request': request})\n return Response(post_serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_400_BAD_REQUEST)\n\n def list(self, request):\n posts = Posts.objects.all()\n serializer = PostSerializer(\n posts, many=True, context={'request': request})\n return Response(serializer.data)\n\n \n def retrieve(self, request, pk):\n post = Posts.objects.get(pk=pk)\n post_serializer = PostSerializer(post, context={'request': request})\n return Response(post_serializer.data)\n\n @action(methods=['GET'], detail=False)\n def myPosts(self, request):\n user = RareUsers.objects.get(user=request.auth.user)\n\n try:\n post = Posts.objects.filter(user = user)\n post_serializer = PostSerializer(post, many=True, context={'request': request})\n return Response(post_serializer.data)\n except Posts.DoesNotExist:\n return Response(\n {'message': 'Post does not exist.'},\n status=status.HTTP_400_BAD_REQUEST\n ) \n\nclass UserSerializer(ModelSerializer):\n class Meta:\n model = User\n fields = ['id']\n\nclass PostSerializer(ModelSerializer):\n user = UserSerializer()\n \n class Meta:\n model = Posts\n fields = ['id', 'user', 'title', 'category', 'publication_date', 'approved', 'content', 'image_url']\n\n","repo_name":"nss-day-cohort-50/Rare-Api-TimeTravelers","sub_path":"rarerestapi/views/post_view.py","file_name":"post_view.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13883136342","text":"import os\nfrom adapters.slack import SlackAlert\nfrom domain import events\n\n\nSLACK_CHANNEL = os.environ.get(\"SLACK_CHANNEL\", \"CHANNEL_SAMPLE\")\nSLACK_TOKEN = os.environ.get(\"SLACK_BOT_TOKEN\", \"BOT_TOKEN\")\nslack_alert = SlackAlert(SLACK_TOKEN)\n\nparams = {\"channel\": SLACK_CHANNEL}\n\nNOTIFICATION_MAPPING = {\n events.PipelineStarted: [\n slack_alert,\n ],\n events.PipelineFinish: [\n slack_alert,\n ],\n}\n\n\ndef handle(event: events.Event):\n for handle in NOTIFICATION_MAPPING[type(event)]:\n handle.alert(event.msg, **params)\n","repo_name":"nickrvieira/de-jobsity","sub_path":"src/services/message_bus.py","file_name":"message_bus.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10616567048","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 20 07:42:19 2021\r\n\r\n@author: Ahmad\r\n\"\"\"\r\n#== API Description ==\r\n# Endpoint: GET /calculus?query=[input]\r\n\r\nfrom flask import Flask, request, jsonify\r\nimport base64\r\nimport re\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/calculus', methods=['GET'])\r\ndef respond():\r\n\r\n # Retrieve the query from url parameter\r\n query = request.args.get(\"query\", None)\r\n\r\n try:\r\n #decode the query string\r\n base64decodedbytes = base64.b64decode(query)\r\n \r\n # decode to utf-8 string\r\n base64decodedString = base64decodedbytes.decode('UTF-8')\r\n\r\n # verify the math string and check for non-math characters, also to counter injection\r\n # allowedOperations = ['1','2','3','4','5','6','7','8','9','0', '.' 
, '*', '+', '-','/', '(',')',' ']\r\n if not bool(re.match('[\\d\\s/\\-()*+.]+$', base64decodedString )):\r\n return jsonify({'error': True, 'message': 'Your query:\"'+ base64decodedString +'\" contains invalid characters; please use numbers and the allowed operations: +, -, /, *, (, )'})\r\n \r\n queryResult = eval(base64decodedString)\r\n return jsonify({'error': False, 'message': queryResult})\r\n\r\n # handle if length of base64string is not correct\r\n except base64.binascii.Error as err:\r\n print('wrong length of base64 string it should be a multiple of 4')\r\n return jsonify({'error': True, 'message': 'Wrong length of base64 string it should be a multiple of 4; error:' + str(err)})\r\n \r\n # handle if string is not a valid utf-8\r\n except UnicodeDecodeError:\r\n return jsonify({'error': True, 'message': 'Decoding error while decoding base64 byte to string'})\r\n \r\n # handle syntax error in the mathematical expression\r\n except SyntaxError:\r\n return jsonify({'error': True, 'message': 'Syntax error in query:'+ base64decodedString})\r\n\r\n # handle general exceptions\r\n except Exception as err:\r\n return jsonify({'error': True, 'message': 'An error occurred: '+ str(err) })\r\n\r\n # add gunicorn\r\n # add either nginx load balancer or use from Azure/Heroku\r\n\r\n# A test message\r\n@app.route('/')\r\ndef index():\r\n return \"I am a Futuricetic calculator and I work!!!\"\n\n@app.route('/health')\ndef healthCheck():\n return \"I am a healthy app!\"\n\n@app.route('/health3')\ndef healthCheck3():\n return \"I am a healthy app for MatchbaseX!\" \n\n\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(threaded=True, port=80)","repo_name":"ahmadkarim/my-futuricetic-calculator","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"28888739136","text":"# copy the two checking functions here and test combined version\n\ndef verify_input(question):\n while True:\n try:\n x = float(input(question))\n break\n except ValueError:\n print(\"Not a valid number. Try again or enter control-C to exit.\")\n return x\n\ndef check_it(x, rangelow, rangehi):\n if x >= rangelow and x <= rangehi:\n return True\n else:\n return False\n\ndef calc_payment(years, annual_rate, principal):\n months = 12 * years\n monthly_rate = annual_rate/1200\n # convert from annual percentage to monthly decimal rate\n formula1 = (1 + monthly_rate) ** months\n # print(formula)\n payment = monthly_rate * principal * formula1 / (formula1 - 1)\n return payment\n\nprint(\"This is a mortgage calculator.\")\nprint(\"Entries include years to pay off, annual interest rate in percent,\")\nprint(\"and starting principal -- don't include commas or dollar sign.\\n\")\n\n# get years to pay off\n\nrangelow = 1.0\nrangehi = 30.0\nmyflag = False\nquestion = \"Enter years to pay off (1.0 to 30.0): \"\nwhile myflag == False:\n years = verify_input(question)\n myflag = check_it(years, rangelow, rangehi)\n\n# get annual interest rate\n\nrangelow = 1.0\nrangehi = 12.0\nmyflag = False\nquestion = \"Enter annual interest rate (1.0 to 12.0): \"\nwhile myflag == False:\n annual_rate = verify_input(question)\n myflag = check_it(annual_rate, rangelow, rangehi)\n\n# get principal\n\nrangelow = 100.0\nrangehi = 1000000000.0\nmyflag = False\nquestion = \"Enter principal (no punctuation, one hundred to one million): \"\nwhile myflag == False:\n principal = verify_input(question)\n myflag = check_it(principal, rangelow, rangehi)\n\npayment = calc_payment(years, annual_rate, principal)\n\nprint(\"\\nTo pay off $\" + str(principal) + \" in \" + str(years) + \" years at \" + str(annual_rate) + \" percent interest,\")\nprint(\"the monthly payment is $\" + str(payment))\n\n \n","repo_name":"ericjfenton/CS101","sub_path":"mortgage.py","file_name":"mortgage.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34172100802","text":"from random import randrange, random\nfrom tkinter import StringVar\n\nimport pandas as pd\nimport numpy as np\nimport csv\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom math import sqrt\nfrom matplotlib import pyplot as plt\n\npd.options.mode.chained_assignment = None\nimport pyeasyga\n\nmovie_list = []\nwith open('u.txt', newline='') as movie:\n movie_reader = csv.reader(movie, delimiter='\\t')\n for movie in movie_reader:\n movie_list.append(movie)\nTrainFrame = pd.DataFrame(movie_list, columns=['userid', 'movieid', 'rating', 'timestamp'])\nTrainFrame = TrainFrame.apply(pd.to_numeric)\n\nusers = TrainFrame.userid.unique() # get unique users\n\n# create a dataframe with all users and all the rating (even the null ones)\nuser_movie_nan = pd.pivot_table(TrainFrame, index='userid', columns='movieid', values='rating')\nuser_movie = pd.pivot_table(TrainFrame, index='userid', columns='movieid', values='rating')\n\nfor user in users:\n new_mean = 
round(user_movie.loc[user].mean(skipna=True))\n user_movie.loc[user].fillna(new_mean, inplace=True)\n\nuser_ga = 0 #USER\n\n# calculate pearson correlation to find neighbours\nPearsonCorr = user_movie.T.corr(method='pearson')\nneighbour = PearsonCorr.nlargest(11, 1).index.tolist() # get the 10 closest neighbours\nneighbour = [x - 1 for x in neighbour] # iloc index starts at 0\n\nNeighbframe = user_movie.iloc[neighbour]\n# get all movies without rating\nnan_columns = user_movie_nan.columns[user_movie_nan.iloc[user_ga].apply(np.isnan)]\nnan_columns = [x - 1 for x in nan_columns] # iloc index starts at 0\nnan_columns = list(range(nan_columns[0] - 10, nan_columns[0])) + nan_columns # HOLDOUT\ndata_movie_nan = user_movie_nan.iloc[user_ga, nan_columns].array\n\n# initialize genetic algorithm with the specified parameters\nga = pyeasyga.GeneticAlgorithm(data_movie_nan,\n population_size=200,\n generations=300,\n crossover_probability=0.9,\n mutation_probability=0.01,\n elitism=True,\n maximise_fitness=True)\n\n\n# create an individual for the starting population\ndef create_individual(data):\n individual = np.random.randint(1, 6, data.shape)\n return individual\n\n\nga.create_individual = create_individual\n\n\n# define crossover operator for the genetic algorithm (single point)\ndef crossover(parent_1, parent_2):\n if type(parent_1) is np.ndarray:\n parent_1 = parent_1.tolist()\n if type(parent_2) is np.ndarray:\n parent_2 = parent_2.tolist()\n index = randrange(1, len(parent_1))\n child_1 = parent_1[:index] + parent_2[index:]\n child_2 = parent_2[:index] + parent_1[index:]\n return child_1, child_2\n\n\nga.crossover_function = crossover\n\n\n# define and set the GA's mutation operation\ndef mutate(individual):\n # change a random gene in the chromosome\n mutate_index = randrange(len(individual))\n individual[mutate_index] = randrange(1, 6)\n\n\nga.mutate_function = mutate\n\n# define and set the GA's selection operation (tournament selection)\n\nga.selection_function = ga.tournament_selection\n\n\n# define a fitness function\ndef fitness(individual, data):\n pearson_corr = 0\n for no, index in zip(individual, nan_columns):\n Neighbframe.iloc[0, index] = int(no)\n PearsonCorr = Neighbframe.T.corr(method='pearson')\n pearson_corr = ((PearsonCorr.sum().iloc[0] - 1) / 10) + 1\n return pearson_corr\n\n\nga.fitness_function = fitness # set the GA's fitness function\ntemp_chrom, holdout = ga.run() # run the GA\nbest_chrom_mean = temp_chrom\ny_actual = user_movie.iloc[0, 0:10].tolist()\nrmse = []\nmae = []\nt_rmse = []\nt_mae = []\n\nfor genes in holdout:\n genes = genes[:10]\n rmse.append(sqrt(mean_squared_error(y_actual, genes)))\n mae.append((mean_absolute_error(y_actual, genes)))\nprint(ga.best_individual())\nfor i in range(9):\n temp_chrom, holdout = ga.run()\n t_rmse = []\n t_mae = []\n for genes in holdout:\n genes = genes[:10]\n t_rmse.append(sqrt(mean_squared_error(y_actual, genes)))\n t_mae.append((mean_absolute_error(y_actual, genes)))\n rmse = [x + y for x, y in zip(rmse, t_rmse)]\n mae = [x + y for x, y in zip(mae, t_mae)]\n\n if len(temp_chrom) < len(best_chrom_mean):\n temp_chrom.extend([ga.best_individual()[0]] * (len(best_chrom_mean) - len(temp_chrom)))\n best_chrom_mean = [x + y for x, y in zip(best_chrom_mean, temp_chrom)]\n print(ga.best_individual())\nbest_chrom_mean = [number / 10 for number in best_chrom_mean]\nrmse = [number / 10 for number in rmse]\nmae = [number / 10 for number in 
mae]\nprint(best_chrom_mean)\nplt.plot(best_chrom_mean)\nplt.show()\n\nplt.plot(rmse)\nplt.show()\n\nplt.plot(mae)\nplt.show()\nprint(rmse)\nprint(mae)\n","repo_name":"kandrew5/Neural-Network-and-Genetic-Algorithms","sub_path":"Genetic_Algo2.py","file_name":"Genetic_Algo2.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"36327686756","text":"import contextlib\nimport io\nimport unittest\n\n\nclass TestCase(unittest.TestCase):\n def test_out(self):\n f = io.StringIO()\n with contextlib.redirect_stdout(f):\n import read_file\n output = f.getvalue().split('\\n')\n print(output)\n\n self.assertTrue(len(output) == 4 and output[2] == output[3] == '',\n msg='Your output should only contain 2 lines.')\n expected, actual = 'I am a temporary file. Maybe someday, I\\'ll become a real file.', output[0]\n self.assertEqual(expected, actual,\n msg='The first line of the output should be the line from input.txt.')\n expected, actual = 'My first line', output[1]\n self.assertEqual(expected, actual,\n msg='The second line of the output should be the first line from input1.txt.')\n","repo_name":"jetbrains-academy/introduction_to_python","sub_path":"File input output/Read file/tests/test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"27"} +{"seq_id":"29865803622","text":"from rest_framework.generics import CreateAPIView,RetrieveAPIView,ListAPIView\nfrom.serialziers import *\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\n\nclass ContactApiView(CreateAPIView):\n serializer_class=ContactSerializer\n queryset=Contact.objects.all()\n\n def post(self, request, *args, **kwargs):\n serializer=self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n headers=self.get_success_headers(data=serializer.data) \n return Response(serializer.data, status=status.HTTP_201_CREATED,headers=headers)\n\n \n\n\nclass ConsultantApiView(CreateAPIView):\n serializer_class=ConsultantSerializer\n queryset=Consultant.objects.all()\n\n\n def post(self, request, *args, **kwargs):\n serializer=self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n headers=self.get_success_headers(data=serializer.data) \n return Response(serializer.data, status=status.HTTP_201_CREATED,headers=headers)\n\n \n ","repo_name":"fakhri299/BreynliUpdated","sub_path":"apps/contact/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25127336349","text":"#!/usr/bin/env python\n\n## Import Modules ## \nimport pandas as pd \nimport eyed3\nimport sys, glob, os\n\nimport settings\nfrom utility import optdict, audio_ext, sepdict\n\n## Define Classes ##\n\n# DTF Object type\nclass ListFile:\n def __init__(self, name, ext):\n self.name = name\n self.ext = ext\n\n\n## Define Functions ##\n\n# Extract audio metadata and return it in a pandas dataframe\ndef get_audio_data(audio_files):\n\n try:\n audio_df = pd.DataFrame(columns=['title', 'artist', 'file'])\n\n total = len(audio_files)\n iteration = 0\n sys.stdout.write(\"Audio files parsed: 0/{}\".format(total))\n\n for file in audio_files:\n # Load audio file using absolute path and eyeD3\n a_file = 
eyed3.load(\"{path}/{file}\".format(\n path = settings.dir_PATH, file = file))\n audio_df = audio_df.append(\n {'title':a_file.tag.title, 'artist':a_file.tag.artist, 'file':file}, ignore_index=True)\n # Update progress\n iteration += 1\n sys.stdout.write('\\r')\n sys.stdout.write(\"Audio files parsed: {}/{}\".format(iteration, total))\n \n print('\\nComplete!')\n return audio_df\n\n except:\n print(\"Something went wrong: get_audio_data()\")\n\n# Evaluate dataframes and produce outputs\ndef evaluate_df(dtf_df, a_df, ext):\n try: \n # Find intersection on both title and artist fields\n int_df = pd.merge(dtf_df, a_df, how='inner', on=['title','artist'])\n # Find difference between initial dtf_df and new int_df\n dif_dtf_df = pd.concat([dtf_df, int_df]).drop_duplicates(subset=['title', 'artist'], keep=False)\n \n # Compare DTF file and locate missing audio files\n if dif_dtf_df.shape[0] == 0: # If no missing entries\n print(\"All audio files in DTF were found.\")\n else: # If missing entries exist\n print(\"{}/{} audio files were missing from DTF.\".format(dif_dtf_df.shape[0], dtf_df.shape[0]))\n dtf_ans = input(\"Print missing audio files to DTF file? y/n\\n\")\n if dtf_ans.lower() == 'y':\n # Output missing entries to DTF file of original file type\n dif_dtf_df.to_csv('missing.{}'.format(ext), sep=sepdict[ext])\n print(\"missing.{} was saved to {}\".format(ext, settings.dir_PATH)) \n\n # Find difference between initial a_df and new int_df\n dif_a_df = pd.concat([a_df, int_df]).drop_duplicates(subset=['title', 'artist'], keep=False)\n # Identify audio files not present in DTF file\n if dif_a_df.shape[0] == 0: # If no non-matching audio files\n print(\"No audio files were found that did not match an entry in DTF.\")\n else:\n print(\"{} audio files were found that did not match any entry in DTF\".format(dif_a_df.shape[0]))\n a_ans = input(\"Print unmatched audio files to DTF file? y/n\\n\")\n if a_ans.lower() == 'y':\n # Output extra audio files to DTF file of original file type\n dif_a_df.to_csv('extra.{}'.format(ext), sep=sepdict[ext])\n print(\"extra.{} was saved to {}\".format(ext, settings.dir_PATH)) \n except:\n print(\"Something went wrong: evaluate_df()\")\n\n # ! Last edit here ! #\n\n# Initial function run when program is called through terminal\ndef initial():\n\n # Clean up warnings\n eyed3.log.setLevel(\"ERROR\")\n\n arg_length = len(sys.argv)\n\n # Print Help\n if arg_length == 2 and sys.argv[1] == '-h':\n optdict['-h']()\n\n # Normal Usage\n elif arg_length >= 3:\n\n # Parse optional flags\n if arg_length > 3:\n optargs = sys.argv[3:arg_length]\n # Run optional flags\n for arg in optargs:\n try:\n # Run optional flag if it exists in optdict\n optdict[arg]()\n except KeyError:\n # Stop program if flag does not exist\n error_msg = \"Invalid flag \\\"{0}\\\"\".format(arg)\n raise SystemExit(error_msg)\n \n # Set DTF and DIR arguments. 
\n dtf_arg = str(sys.argv[1])\n dir_arg = str(sys.argv[2])\n\n try:\n # Try split DTF argument string\n split_dtf = dtf_arg.split(\".\")\n dtf = ListFile(split_dtf[0], split_dtf[1])\n except IndexError:\n # Stop program if argument is invalid\n error_msg = \"Invalid file name \\\"{0}\\\"\".format(dtf_arg)\n raise SystemExit(error_msg)\n\n try:\n # Try load DTF into Pandas Dataframe\n dtf_df = pd.read_csv(dtf_arg, header=None, sep=sepdict[dtf.ext], skiprows=settings.skip)\n dtf_df = dtf_df.rename(columns={0:\"title\", 1:\"artist\"})\n except FileNotFoundError:\n # Stop program if file does not exist\n error_msg = \"File \\\"{0}\\\" does not exist!\".format(dtf_arg)\n raise SystemExit(error_msg)\n \n try: \n # Try locate Directory \n settings.dir_PATH = os.path.abspath(dir_arg) # Set global absolute PATH for directory\n os.chdir(dir_arg) # Change directory\n audio_files = []\n for files in audio_ext:\n # Appends all files to audio_files array that match *.ext_type from utility.audio_ext\n audio_files.extend(glob.glob(files)) \n\n # If any audio files are appended, run get_audio_data function\n if bool(audio_files):\n audio_df = get_audio_data(audio_files)\n else:\n error_msg = \"No suitable audio files found in directory.\"\n raise SystemExit(error_msg)\n # Stop if directory is invalid\n \n # Run evaluation function on both dataframes\n evaluate_df(dtf_df, audio_df, dtf.ext)\n\n except FileNotFoundError:\n error_msg = \"Directory \\\"{0}\\\" does not exist!\".format(dir_arg)\n raise SystemExit(error_msg)\n\n else:\n # Stop program if args are not at least 2\n print(\"\\nAt least two arguments required!\")\n print(\"Required:\\n- Delimeted-Text File: \\\"PATH\\\\file.ext\\\"\")\n print(\"- Audio Directory: \\\"PATH\\\\dir\\\"\\n\")\n print(\" Use -h for help with optional flags. 
\\n\")\n raise SystemExit\n\n# Process\ninitial()","repo_name":"adaeo/MP3-List","sub_path":"mp3_list.py","file_name":"mp3_list.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27724477545","text":"import random\nfrom collections import deque\n\nimport numpy as np\n\n\nclass Experience:\n def __init__(self, state, action, reward, next_state, done):\n self.state = state\n self.action = action\n self.reward = reward\n self.next_state = next_state\n self.done = done\n\n\nclass ReplayBuffer:\n def __init__(self, buffer_size, seed):\n self.seed = random.seed\n self.memory = deque(maxlen=buffer_size)\n\n def sample(self, batch_size):\n experiences = random.sample(self.memory, k=batch_size)\n states = np.vstack([e.state for e in experiences if e is not None])\n actions = np.vstack([e.action for e in experiences if e is not None])\n rewards = np.vstack([e.reward for e in experiences if e is not None])\n next_states = np.vstack([e.next_state for e in experiences if e is not None])\n dones = np.vstack([e.done for e in experiences if e is not None])\n return states, actions, rewards, next_states, dones\n\n def remember(self, states, actions, rewards, next_states, dones):\n for i in range(states.shape[0]):\n self.memory.append(\n Experience(states[i], actions[i], rewards[i], next_states[i], dones[i])\n )\n\n def reset(self):\n self.memory.clear()\n\n def __len__(self):\n return len(self.memory)\n","repo_name":"jarvick257/udacity-drl-collab","sub_path":"memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30819249834","text":"#!/usr/bin/env python3\n\"\"\"\nThis test case will verify if the provided solution by a student for SongList.py is correct.\n\"\"\"\nfrom SongList import *\n\n# Instantiate a song list\nlinkedlist = SongList()\nprint(linkedlist)\n\n# Instantiate some nodes with values \nlinkedlist.head = SongNode(\"A Hard Day's Night\")\nsecond = SongNode('A Day in the Life')\nthird = SongNode(\"Strawberry Fields Forever\")\n\n# Link nodes instances \nlinkedlist.head.next = second\nsecond.next = third\nthird.next = None\n\n# Traverse through the list and print each song title\nlinkedlist.printSongs()\n\n# Insert more songs\nlinkedlist.AddNewSong(\"She Loves You\")\nlinkedlist.AddNewSong(\"Something\")\nlinkedlist.printSongs()","repo_name":"aym00n-djrak/Blockchain","sub_path":"Homeworks/WK2/203_EX1_A03_Song_LinkedList/203_EX1_A03_Song_LinkedList/SongList_t.py","file_name":"SongList_t.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"37512230846","text":"from sensor import Sensor\nimport requests\nimport json\nfrom sensor_event import SensorEvent\nfrom time import time\n\nclass JsonSensor(Sensor):\n\n def getValue(self):\n try:\n response = requests.get(self.url)\n if response.status_code == 200:\n self.logger.debug(response.text)\n data = json.loads(response.text)\n if 'value' in data:\n self.value = data['value']\n except requests.exceptions.ConnectionError as e:\n self.logger.error(str(e))\n return\n except ValueError as e:\n self.logger.error(str(e))\n 
return\n\n\n","repo_name":"sonologic/thermo2","sub_path":"src/py/sensor_json.py","file_name":"sensor_json.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26933141516","text":"names = []\np = 0\nwhile p<2:\n cat = input('веди имя')\n names.append(cat)\n p = p + 1\np = len(names)-1\nwhile p>=0:\n print('привет' + ' ' + names[p])\n p = p - 1","repo_name":"denisshustov/Masha","sub_path":"py/task20.py","file_name":"task20.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12779967349","text":"import sys\nsys.stdin = open('gear.txt', 'r')\n\nmat = []\nfor i in range(0,4):\n x=str(input())\n mat.append(x)\nprint(mat)\nk = int(input())\nfor _ in range(k):\n num, direction=map(int, input().split())\n num-=1\n check = [0]*4\n check[num]=direction\n for i in range(num, 3):\n if mat[i][2]!=mat[i+1][6]:\n check[i+1]=check[i]*(-1)\n else:\n break\n for i in range(num, 0, -1):\n if mat[i][6]!=mat[i-1][2]:\n check[i-1]=check[i]*(-1)\n else:\n break\n print(check)\n for i in range(4):\n if check[i]==1:\n mat[i]=mat[i][7]+mat[i][:7]\n elif check[i]==-1:\n mat[i]=mat[i][1:]+mat[i][0]\nprint(mat)\nans=0\nfor i in range(4):\n ans += int(mat[i][0])*pow(2,i)\nprint(ans)","repo_name":"ggaem97/study","sub_path":"2022-01-22/gearAnswer.py","file_name":"gearAnswer.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"98102230","text":"import sys\nr=sys.stdin.readline\nN=int(r())\nswitchs=list(map(int,r().split()))\nfor _ in range(int(r())):\n sex,pos=map(int,r().split())\n if sex==1:#남학생\n for i in range(pos-1,len(switchs),pos):\n switchs[i]=int(not bool(switchs[i]))\n else:#여학생\n if pos-1==0 or pos-1==len(switchs)-1:\n switchs[pos-1]=int(not bool(switchs[pos-1]))\n else:\n a,b=pos-1,pos-1\n while a!=0 and b!=len(switchs)-1:\n a-=1\n b+=1\n if switchs[a]!=switchs[b]:\n a+=1\n b-=1\n break\n for i in range(a,b+1):\n switchs[i]=int(not bool(switchs[i]))\n\nfor i in range(1,len(switchs)+1):\n print(switchs[i-1],end=\" \")\n if i%20==0:\n print(\"\")\n","repo_name":"murane/PS","sub_path":"Python/BOJ/1244.py","file_name":"1244.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33562961147","text":"#!/bin/env python\n\"\"\"\nExtrapolation nowcast\n=====================\n\nThis tutorial shows how to compute and plot an extrapolation nowcast using \nFinnish radar data.\n\n\"\"\"\n\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pprint import pprint\nfrom pysteps import io, motion, nowcasts, rcparams, verification\nfrom pysteps.utils import conversion, transformation\nfrom pysteps.visualization import plot_precip_field, quiver\n\n###############################################################################\n# Read the radar input images\n# ---------------------------\n#\n# First, we will import the sequence of radar composites.\n# You need the pysteps-data archive downloaded and the pystepsrc file\n# configured with the data_source paths pointing to data folders.\n\n# Selected case\ndate = datetime.strptime(\"201609281600\", \"%Y%m%d%H%M\")\ndata_source = rcparams.data_sources[\"fmi\"]\nn_leadtimes = 
12\n\n###############################################################################\n# Load the data from the archive\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nroot_path = data_source[\"root_path\"]\npath_fmt = data_source[\"path_fmt\"]\nfn_pattern = data_source[\"fn_pattern\"]\nfn_ext = data_source[\"fn_ext\"]\nimporter_name = data_source[\"importer\"]\nimporter_kwargs = data_source[\"importer_kwargs\"]\ntimestep = data_source[\"timestep\"]\n\n# Find the input files from the archive\nfns = io.archive.find_by_date(\n date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_prev_files=2\n)\n\n# Read the radar composites\nimporter = io.get_method(importer_name, \"importer\")\nZ, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs)\n\n# Convert to rain rate\nR, metadata = conversion.to_rainrate(Z, metadata)\n\n# Plot the rainfall field\nplot_precip_field(R[-1, :, :], geodata=metadata)\nplt.show()\n\n# Store the last frame for plotting it later later\nR_ = R[-1, :, :].copy()\n\n# Log-transform the data to unit of dBR, set the threshold to 0.1 mm/h,\n# set the fill value to -15 dBR\nR, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)\n\n# Nicely print the metadata\npprint(metadata)\n\n###############################################################################\n# Compute the nowcast\n# -------------------\n#\n# The extrapolation nowcast is based on the estimation of the motion field,\n# which is here performed using a local tracking approach (Lucas-Kanade).\n# The most recent radar rainfall field is then simply advected along this motion\n# field in oder to produce an extrapolation forecast.\n\n# Estimate the motion field with Lucas-Kanade\noflow_method = motion.get_method(\"LK\")\nV = oflow_method(R[-3:, :, :])\n\n# Extrapolate the last radar observation\nextrapolate = nowcasts.get_method(\"extrapolation\")\nR[~np.isfinite(R)] = metadata[\"zerovalue\"]\nR_f = extrapolate(R[-1, :, :], V, n_leadtimes)\n\n# Back-transform to rain rate\nR_f = transformation.dB_transform(R_f, threshold=-10.0, inverse=True)[0]\n\n# Plot the motion field\nplot_precip_field(R_, geodata=metadata)\nquiver(V, geodata=metadata, step=50)\nplt.show()\n\n###############################################################################\n# Verify with FSS\n# ---------------\n#\n# The fractions skill score (FSS) provides an intuitive assessment of the\n# dependency of skill on spatial scale and intensity, which makes it an ideal\n# skill score for high-resolution precipitation forecasts.\n\n# Find observations in the data archive\nfns = io.archive.find_by_date(\n date,\n root_path,\n path_fmt,\n fn_pattern,\n fn_ext,\n timestep,\n num_prev_files=0,\n num_next_files=n_leadtimes,\n)\n# Read the radar composites\nR_o, _, metadata_o = io.read_timeseries(fns, importer, **importer_kwargs)\nR_o, metadata_o = conversion.to_rainrate(R_o, metadata_o, 223.0, 1.53)\n\n# Compute fractions skill score (FSS) for all lead times, a set of scales and 1 mm/h\nfss = verification.get_method(\"FSS\")\nscales = [2, 4, 8, 16, 32, 64, 128, 256, 512]\nthr = 1.0\nscore = []\nfor i in range(n_leadtimes):\n score_ = []\n for scale in scales:\n score_.append(fss(R_f[i, :, :], R_o[i + 1, :, :], thr, scale))\n score.append(score_)\n\nplt.figure()\nx = np.arange(1, n_leadtimes + 1) * timestep\nplt.plot(x, score)\nplt.legend(scales, title=\"Scale [km]\")\nplt.xlabel(\"Lead time [min]\")\nplt.ylabel(\"FSS ( > 1.0 mm/h ) \")\nplt.title(\"Fractions skill score\")\nplt.show()\n\n# sphinx_gallery_thumbnail_number = 
3\n","repo_name":"pySTEPS/pysteps","sub_path":"examples/plot_extrapolation_nowcast.py","file_name":"plot_extrapolation_nowcast.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","stars":381,"dataset":"github-code","pt":"27"} +{"seq_id":"3899736760","text":"\nfrom DB_connectors.MySql_connect import Database\nfrom telethon import TelegramClient, events\n\nimport openai\nimport asyncio\nimport os\nusers_message = {}\nclients = {}\n# from main import gs\nfrom google_sheets import GS\ndb = Database(\"swm\")\ngs=GS()\nopenai.api_key = \"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\"\nfrom speech_to_text import speech_to_text\nasync def disconnect(phone):\n clients[phone].disconnect()\n\nusers_message = {}\n\n\nasync def start_client(a):\n\n api_id = a[2]\n api_hash = a[3]\n phone = a[4]\n is_active = a[-1]\n client = TelegramClient(\"sessions/\"+phone, api_id, api_hash)\n if is_active:\n try:\n await main(client)\n except Exception as ex:\n print(ex)\n else:\n print(phone)\n\n\nasync def main(client):\n async with client:\n me = await client.get_me()\n print('Working with', me.first_name, me.last_name)\n await client.start()\n client.add_event_handler(my_event_handler, events.NewMessage)\n await client.run_until_disconnected()\n\n\nasync def my_event_handler(event):\n me = await event.client.get_me()\n if event.is_channel:\n return\n if event.is_group:\n return\n print(event.document)\n message_text = event.text\n try:\n if event.document.mime_type == 'audio/ogg':\n filename = f\"media/{event.document.id}.ogg\"\n await event.download_media(file=filename)\n message_text = speech_to_text(f\"/home/fugguri/Документы/PROJECT/swm/{filename}\")\n except:\n pass\n phone = \"+\" + me.phone\n settings = db.get_data_for_client(phone)[5]\n\n try:\n users_message[event.chat_id]\n except:\n db.start_new_dialog_counter_update(phone)\n settings = db.get_data_for_client(phone)[5]\n messages = [{'role': \"system\", \"content\": settings},]\n users_message[event.chat_id] = messages\n\n if users_message[event.chat_id][0][\"content\"] != settings:\n messages = [\n {'role': \"system\", \"content\": settings},\n ]\n users_message[event.chat_id] = messages\n\n\n users_message[event.chat_id].append(\n {\"role\": \"user\", \"content\": message_text})\n sender = await event.get_sender()\n try:\n responce = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=users_message[event.chat_id]\n )\n answer = responce['choices'][0]['message']['content']\n\n users_message[event.chat_id].append(\n {\"role\": \"assistant\", \"content\": answer})\n await event.client.send_message(message=answer, entity=sender)\n try:\n gs.sheets_append_row(db.get_analytic_sheet_name(phone),\n sender.username,\n phone,\n message_text,\n answer)\n except Exception as ex :\n print(ex)\n except openai.error.InvalidRequestError:\n await event.client.send_message(message=\"Не понимаю.Слишком много информации\", entity=sender)\n except openai.error.RateLimitError as ex:\n print(ex)\n await asyncio.sleep(20)\n await my_event_handler(event)\n except ValueError:\n await event.client.send_message(message=\"Не понимаю.\\nПерефразируйте\", entity=sender)\n except Exception as ex:\n print(ex)\n\nif __name__ == \"__main__\":\n cl = [12,12,27044267,\"a7448d0befc9804176b9c917898d923a\",\"+79283529546\",True]\n 
asyncio.run(start_client(cl))","repo_name":"Fugguri/SWM","sub_path":"stt.py","file_name":"stt.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"8725190382","text":"import rest\nimport core\nimport view\nimport gplus\nimport errors\nfrom bson import ObjectId\nimport json\nfrom sessioninfo import get_session\nfrom urllib import urlencode\n\n\nclass ArrayDocCollection(rest.Collection):\n def _GET(self, docID, parents):\n return self.klass.find_obj_in_parent(parents.values()[0], docID)\n\nclass InterestCollection(ArrayDocCollection):\n '/papers/PAPER/likes/PERSON REST interface for AJAX calls'\n def check_permission(self, method, personID, *args, **kwargs):\n if method == 'GET': # permitted\n return False\n try:\n if personID != get_session()['person']._id:\n return view.report_error('TRAP set_interest by different user!', 403,\n \"You cannot change someone else's settings!\")\n except (KeyError,AttributeError):\n return view.report_error('TRAP set_interest, not logged in!', 401,\n 'You must log in to access this interface')\n def _POST(self, personID, topic, state, parents, topic2=''):\n 'add or remove topic from PaperInterest depending on state'\n topic = topic or topic2 # use whichever is non-empty\n topic = core.SIG.standardize_id(topic) # must follow hashtag rules\n personID = ObjectId(personID)\n state = int(state)\n if state: # make sure topic exists\n sig = core.SIG.find_or_insert(topic)\n interest = self.set_interest(personID, topic, state, parents)\n get_session()['person'].force_reload(True) # refresh user\n return interest\n def set_interest(self, personID, topic, state, parents):\n try:\n interest = self._GET(personID, parents)\n except KeyError:\n if state:\n person = core.Person(personID)\n docData = dict(author=personID, topics=[topic],\n authorName=person.name)\n return core.PaperInterest(docData=docData,\n parent=parents['paper'])\n else: # trying to rm something that doesn't exist\n raise\n if state:\n interest.add_topic(topic)\n else:\n interest.remove_topic(topic)\n return interest\n def post_json(self, interest, **kwargs):\n return json.dumps(dict(interest='very good'))\n def post_html(self, interest, **kwargs):\n 'display interest change by re-displaying the paper page'\n return view.redirect(interest.parent.get_value('local_url'))\n\n\nclass PaperCollection(rest.Collection):\n def _search(self, searchString, searchType):\n searchString = searchString.strip()\n if not searchString:\n s = view.report_error('empty searchString', 400,\n 'You did not provide a search string.')\n return rest.Response(s)\n # user may type \"Google Search:...\" into Google Search box\n if searchString.lower().startswith('arxiv:'):\n searchString = searchString[6:].strip()\n searchType = 'arxivID'\n if searchType == 'arxivID':\n return rest.Redirect('/arxiv/%s' % searchString.replace('/', '_'))\n elif searchType == 'arxiv':\n return rest.Redirect('/arxiv?' + urlencode(dict(searchString=searchString)))\n elif searchType == 'PMID':\n return rest.Redirect('/pubmed/%s' % searchString)\n elif searchType == 'pubmed':\n return rest.Redirect('/pubmed?' 
+ urlencode(dict(searchString=searchString)))\n elif searchType == 'ncbipubmed':\n return rest.Redirect('http://www.ncbi.nlm.nih.gov/sites/entrez?'\n + urlencode(dict(term=searchString,\n db='pubmed')))\n elif searchType == 'shortDOI':\n return rest.Redirect('/shortDOI/%s' % searchString)\n elif searchType == 'DOI':\n dpd = core.DoiPaperData(DOI=searchString, insertNew='findOrInsert')\n return rest.Redirect('/shortDOI/%s' % dpd.id)\n elif searchType == 'spnetPerson':\n return rest.Redirect('/people?' + urlencode(dict(searchString=searchString)))\n elif searchType == 'topic':\n return rest.Redirect('/topics?' + urlencode(dict(searchString=searchString)))\n elif searchType == 'comment':\n return rest.Redirect('/posts?' + urlencode(dict(searchAll=searchString)))\n else:\n raise KeyError('unknown searchType ' + searchType)\n \n \n\nclass ParentCollection(rest.Collection):\n def _GET(self, docID, parents=None):\n try: # use cached query results if present\n queryResults = get_session()['queryResults']\n except (AttributeError, KeyError):\n pass\n else:\n try: # use cached docData if found for this docID\n docData = queryResults.get_doc_data(docID,\n self.collectionArgs['uri'])\n except KeyError: # not in query results\n pass\n else:\n return self.klass(docData=docData,\n insertNew='findOrInsert').parent\n return self.klass(docID, insertNew='findOrInsert').parent\n def _search(self, searchID):\n return rest.Redirect('%s/%s' % (self.collectionArgs['uri'], \n searchID.replace('/', '_')))\n\nclass ArxivCollection(ParentCollection):\n def _POST(self, docID, showLatex=None):\n paper = self._GET(docID)\n if showLatex: # save on user session\n showLatex = int(showLatex)\n paper.update({'texDollars': showLatex and 1 or -1}, op='$inc')\n viewArgs = view.get_view_options()\n viewArgs.setdefault('showLatex', {})[paper] = showLatex\n return paper\n def post_html(self, paper, **kwargs):\n return self.get_html(paper, **kwargs)\n def _search(self, searchString=None, searchID=None, ipage=0,\n block_size=10, session=None):\n import arxiv\n ipage = int(ipage)\n block_size = int(block_size)\n if session is None:\n session = get_session()\n if searchID: # just get this ID\n return ParentCollection._search(self, searchID)\n if not searchString:\n s = view.report_error('empty searchString', 400,\n 'You did not provide a search string.')\n return rest.Response(s)\n elif arxiv.is_id_string(searchString): # just get this ID\n return ParentCollection._search(self, searchString)\n try: # get from existing query results\n queryResults = session['queryResults']\n if queryResults.get_page(ipage, self.collectionArgs['uri'],\n searchString=searchString):\n return queryResults\n except KeyError:\n pass # no stored queryResults, so construct it\n pbl = view.PaperBlockLoader(arxiv.search_arxiv,\n uri=self.collectionArgs['uri'])\n queryResults = view.MultiplePages(pbl, block_size, ipage,\n self.collectionArgs['uri'],\n 'arXiv.org Search Results',\n searchString=searchString)\n session['queryResults'] = queryResults # keep for this user\n return queryResults\n\nclass PubmedCollection(ParentCollection):\n def _search(self, searchString=None, searchID=None, ipage=0,\n block_size=20):\n import pubmed\n if not searchString:\n s = view.report_error('empty searchString', 400,\n 'You did not provide a search string.')\n return rest.Response(s)\n ipage = int(ipage)\n block_size = int(block_size)\n try: # get from existing query results\n queryResults = get_session()['queryResults']\n if queryResults.get_page(ipage, 
self.collectionArgs['uri'],\n searchString=searchString):\n return queryResults\n except KeyError:\n pass # no stored queryResults, so construct it\n try:\n ps = pubmed.PubmedSearch(searchString, block_size)\n pbl = view.PaperBlockLoader(ps, uri=self.collectionArgs['uri'])\n queryResults = view.MultiplePages(pbl, block_size, ipage,\n self.collectionArgs['uri'],\n 'Pubmed Search Results',\n searchString=searchString)\n except (errors.BackendFailure,KeyError):\n s = view.report_error('eutils error: ' + searchString, 502,\n '''Unfortunately, the NCBI eutils server\nfailed to perform the requested query. \nTo run the same search on\nNCBI Pubmed, please click here. When you find a paper\nof interest, you can copy its PMID (Pubmed ID) and\npaste it in the search box on this page.''' \n % urlencode(dict(searchType='ncbipubmed',\n searchString=searchString)))\n return rest.Response(s)\n get_session()['queryResults'] = queryResults # keep for this user\n return queryResults\n \n\n\nclass PersonCollection(rest.Collection):\n def _GET(self, docID, getUpdates=False, timeframe=None, **kwargs):\n user = get_session().get('person', None)\n if user and docID == user._id:\n person = user # use cached Person object so we can mark it for refresh\n else:\n person = rest.Collection._GET(self, docID, **kwargs)\n if getUpdates:\n try:\n gpd = person.gplus\n except AttributeError:\n pass\n else: # get list of new posts\n if timeframe == 'all': # get last 10 years\n l = gpd.update_posts(3650, recentEvents=view.recentEventsDeque)\n else:\n l = gpd.update_posts(recentEvents=view.recentEventsDeque)\n if l: # need to update our object representation to see them\n person = rest.Collection._GET(self, docID, **kwargs)\n return person\n def _search(self, searchString):\n if not searchString:\n raise KeyError('empty query')\n searchString = '(?i)' + searchString # default: case-insensitive\n l = list(self.klass.find_obj({'name': {'$regex': searchString}}))\n if not l:\n raise KeyError('no matches')\n return l\n\nclass PersonAuthBase(rest.Collection):\n 'only allow logged-in user to POST his own settings'\n def check_permission(self, method, *args, **kwargs):\n if method == 'GET': # permitted\n return False\n user = get_session().get('person', None)\n if not user:\n return view.report_error('TRAP set_interest, not logged in!', 401,\n 'You must log in to access this interface')\n person = kwargs['parents'].values()[0]\n if person != user:\n return view.report_error('TRAP set_interest by different user!', 403,\n \"You cannot change someone else's settings!\")\n\nclass ReadingList(PersonAuthBase):\n '/people/PERSON/reading/PAPER REST interface for AJAX calls'\n def _POST(self, paperID, state, parents):\n person = parents.values()[0]\n paperID = ObjectId(paperID)\n included = paperID in person._dbDocDict.get('readingList', ())\n state = (int(state) or False) and True # convert to boolean\n if state == included: # matches current state, so nothing to do\n return 0\n elif state: # add to reading list\n person.array_append('readingList', paperID)\n result = 1\n else:\n person.array_del('readingList', paperID)\n result = -1\n person.force_reload(True) # refresh user\n return result\n def post_json(self, status, **kwargs):\n return json.dumps(dict(status=status))\n\nclass PersonTopics(PersonAuthBase):\n '/people/PERSON/topics/TOPIC REST interface for AJAX calls'\n def _POST(self, topic, field, state, parents):\n person = parents.values()[0]\n try:\n tOpt = core.TopicOptions.find_obj_in_parent(person, topic)\n except KeyError:\n tOpt 
= core.TopicOptions(docData={'topic':topic, field:state}, \n parent=person)\n else:\n tOpt.update({field:state})\n person.force_reload(True) # refresh user\n return 1\n def post_json(self, status, **kwargs):\n return json.dumps(dict(status=status))\n\nclass PersonSubscriptions(PersonAuthBase):\n '/people/PERSON/subscriptions/PERSON REST interface for AJAX calls'\n def _POST(self, author, field, state, parents):\n person = parents.values()[0]\n author = ObjectId(author)\n try:\n sub = core.Subscription.find_obj_in_parent(person, author)\n except KeyError:\n sub = core.Subscription(docData={'author':author, field:state}, \n parent=person)\n else:\n sub.update({field:state})\n person.force_reload(True) # refresh user\n return 1\n def post_json(self, status, **kwargs):\n return json.dumps(dict(status=status))\n\nclass TopicCollection(rest.Collection):\n def _search(self, searchString=None, stem=None):\n if stem:\n return self.stem_search(stem)\n if not searchString:\n raise KeyError('empty query')\n searchString = '(?i)' + searchString # default: case-insensitive\n l = list(self.klass.find_obj({'_id': {'$regex': searchString}}))\n if not l:\n raise KeyError('no matches')\n return l\n def stem_search(self, stem): # return list of topics beginning with stem\n if not stem:\n return []\n return list(self.klass.find({'_id': {'$regex': '^' + stem}}))\n def search_json(self, data, **kwargs):\n return json.dumps(data)\n\nclass PostCollection(rest.Collection):\n def _search(self, searchAll=None):\n if not searchAll:\n raise KeyError('empty query')\n searchAll = '(?i)' + searchAll # default: case-insensitive\n l = list(core.Post.find_obj({'posts.text': {'$regex': searchAll}}))\n l += list(core.Reply.find_obj({'replies.text': {'$regex': searchAll}}))\n if not l:\n raise KeyError('no matches')\n return l\n\n \ndef get_collections(templateDir='_templates'):\n gplusClientID = gplus.get_keys()['client_id'] # most templates need this\n templateEnv = view.get_template_env(templateDir)\n view.report_error.bind_template(templateEnv, 'error.html') # error page\n\n # access Papers using our object ID\n papers = PaperCollection('paper', core.Paper, templateEnv, templateDir,\n gplusClientID=gplusClientID)\n # using arxivID\n arxivPapers = ArxivCollection('paper', core.ArxivPaperData, templateEnv,\n templateDir, gplusClientID=gplusClientID,\n collectionArgs=dict(uri='/arxiv'))\n # using shortDOI\n doiPapers = ParentCollection('paper', core.DoiPaperData, templateEnv,\n templateDir, gplusClientID=gplusClientID,\n collectionArgs=dict(uri='/shortDOI'))\n # using pubmedID\n pubmedPapers = PubmedCollection('paper', core.PubmedPaperData,\n templateEnv, templateDir,\n gplusClientID=gplusClientID,\n collectionArgs=dict(uri='/pubmed'))\n\n ## recs = ArrayDocCollection('rec', core.Recommendation,\n ## templateEnv, templateDir,\n ## gplusClientID=gplusClientID)\n ## papers.recs = recs # bind as subcollection\n\n likes = InterestCollection('like', core.PaperInterest, templateEnv,\n templateDir, gplusClientID=gplusClientID)\n papers.likes = likes # bind as subcollection\n\n people = PersonCollection('person', core.Person, templateEnv, templateDir,\n gplusClientID=gplusClientID)\n readingList = ReadingList('reading', core.Paper, templateEnv, templateDir,\n gplusClientID=gplusClientID)\n people.reading = readingList\n personTopics = PersonTopics('topics', core.SIG, templateEnv, templateDir,\n gplusClientID=gplusClientID)\n people.topics = personTopics\n personSubs = PersonSubscriptions('subscriptions', core.Subscription, \n 
templateEnv, templateDir,\n gplusClientID=gplusClientID)\n people.subscriptions = personSubs\n topics = TopicCollection('topic', core.SIG, templateEnv, templateDir,\n gplusClientID=gplusClientID)\n\n posts = PostCollection('post', core.Post, templateEnv, templateDir,\n gplusClientID=gplusClientID)\n replies = PostCollection('reply', core.Reply, templateEnv, templateDir,\n gplusClientID=gplusClientID)\n\n # load homepage template\n homePage = view.TemplateView(templateEnv.get_template('index.html'),\n gplusClientID=gplusClientID)\n\n # what collections to bind on the server root\n return dict(papers=papers,\n arxiv=arxivPapers,\n shortDOI=doiPapers,\n pubmed=pubmedPapers,\n people=people,\n topics=topics,\n posts=posts,\n replies=replies,\n index=homePage)\n","repo_name":"cjlee112/spnet","sub_path":"spnet/apptree.py","file_name":"apptree.py","file_ext":"py","file_size_in_byte":18014,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"27"} +{"seq_id":"5892001867","text":"__author__ = 'Niklas Rosenstein '\n__version__ = '1.0.0'\n\nimport c4d\nimport nr.pvrq2\n\ntry:\n import c4d.modules.takesystem as takesystem\nexcept ImportError:\n takesystem = None\n\n\nclass TakeJob(nr.pvrq2.RenderJob):\n\n def __init__(self, doc, take):\n super(TakeJob, self).__init__()\n self.doc = doc\n self.take = take\n self.scene_name = doc.GetDocumentName()\n self.take_name = take.GetName()\n\n def get_job_details(self):\n details = super(TakeJob, self).get_job_details()\n details.update({'scene_name': self.scene_name, 'take': self.take_name})\n return details\n\n @property\n def name(self):\n return self.take.GetName()\n\n def get_scene(self):\n take_data = self.doc.GetTakeData()\n take_data.SetCurrentTake(self.take)\n return self.doc\n\n\ndef main():\n if not takesystem:\n c4d.gui.MessageDialog('takes available in Cinema 4D R17+')\n return\n\n global doc\n doc = doc.GetClone(c4d.COPYFLAGS_0)\n\n take_data = doc.GetTakeData()\n takes = take_data.GetTakeSelection(True)\n if not takes:\n c4d.gui.MessageDialog('no takes selected')\n return\n\n name = '{0} Takes'.format(doc.GetDocumentName())\n folder = nr.pvrq2.Folder(name)\n for take in takes:\n folder.append(TakeJob(doc, take))\n nr.pvrq2.root.append(folder)\n c4d.EventAdd()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nrosenstein-c4d/c4d-pvrenderqueue","sub_path":"scripts/takes.py","file_name":"takes.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"26422178703","text":"import pgeocode\nfrom sqlalchemy import Column, Integer, String, Boolean,Date, ForeignKey\nfrom fastapi.encoders import jsonable_encoder\n\nimport requests\napi_key = \"x\"\ncountrycode = \"DE\"\nzip_code= 47226\nadress=\"Bertastrasse 19\"\n\n#url = f\"https://thezipcodes.com/api/v1/search?zipCode={zip_code}&countryCode={countrycode}&apiKey={api_key}\"\n\n#response = requests.get(url)\n#json_data = response.json()\n#print(json_data['location'][0]['longitude'],json_data['location'][0]['latitude'])\n\ndef geozip(countrycode:str,zipcode:int):\n url = f\"https://thezipcodes.com/api/v1/search?zipCode={zip_code}&countryCode={countrycode}&apiKey={api_key}\"\n response = requests.get(url)\n json_data = response.json()\n \n return(json_data['location'][0]['longitude'],json_data['location'][0]['latitude'])\naa = geozip(countrycode,zip_code)\nbb = 
float(aa[0])\nprint(bb)\n","repo_name":"Ocalak/Rental-House-Portal-FastAPI-React-Postgres-AWS-","sub_path":"backend/apis/ff.py","file_name":"ff.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5878105328","text":"from bs4 import BeautifulSoup\nimport requests\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\n\n# beautiful soup---getting titles using dates----\nURL = \"https://www.billboard.com/charts/hot-100/\"\n\nuserInput = input(\"Which year do you want to travel to? Type the date in this format YYYY-MM-DD:\")\n\nresponse = requests.get(URL + userInput)\n\nmusicPage = response.text\n\nsoup = BeautifulSoup(musicPage, \"html.parser\")\n\nsongs = soup.select(\".a-no-trucate\")\n\ntopHundred = [song.getText().strip() for song in songs]\n\n# print(topHundred)\n\n##spotify urls\n\nCLIENT_APP_ID = \"CLIENT APP ID\"\nCLIENT_APP_SECRET_KEY = \"CLIENT APP SECRET KEY\"\nREDIRECT_URI = \"REDIRECT URI\"\n\nsp = spotipy.Spotify(auth_manager=SpotifyOAuth(\n client_id=CLIENT_APP_ID,\n client_secret=CLIENT_APP_SECRET_KEY,\n redirect_uri=REDIRECT_URI,\n scope=\"playlist-modify-private\",\n))\n\nresults = sp.current_user()\nuserId = results['id']\n\n# search Spotify with the plain song titles, not the BeautifulSoup tag objects\nuris = [sp.search(title)['tracks']['items'][0]['uri'] for title in topHundred]\n\n\n#playlist\n\nplaylist = sp.user_playlist_create(user=userId,public=False,name=f\"{userInput} Billboard-100\")\n\nsp.playlist_add_items(playlist_id=playlist['id'], items=uris)","repo_name":"Oluwatobiloba777/100-days-of-code","sub_path":"DAY 46/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13525551870","text":"def get_last_position(pos_file):\n pos = list()\n with open(pos_file, \"r\") as f:\n contains = f.readlines()\n #lines = contains.splitlines()\n\n last_line = contains[-1]\n\n positions = last_line.split(\",\")\n pos_x = float(positions[0])\n pos_y = float(positions[1])\n pos_z = 0\n pos.append(pos_x)\n pos.append(pos_y)\n pos.append(pos_z)\n return pos\n\nif __name__ == \"__main__\":\n # file = \"/home/wifi/mininet-wifi/examples/position-car9-mn-telemetry.txt\"\n # pos = get_last_position(file)\n #\n # print(\"Last position is : \")\n #\n # print(pos)\n\n distances = []  # list of {'name', 'distance'} dicts\n d = dict()\n d['name'] = 'a'\n d['distance'] = 20.5\n distances.append(d)\n d = dict()\n d['name'] = 'b'\n d['distance'] = 1\n distances.append(d)\n d = dict()\n d['name'] = 'c'\n d['distance'] = -1\n distances.append(d)\n d = dict()\n d['name'] = 'd'\n d['distance'] = 0\n distances.append(d)\n d = dict()\n d['name'] = 'e'\n d['distance'] = 40.5\n distances.append(d)\n print(\"distances before sorting\")\n print(distances)\n new_distances = sorted(distances, key=lambda i:i['distance'])\n\n print(\"New distances after sorting : \")\n print(new_distances)","repo_name":"tsylla/5grail-emu5gnet","sub_path":"paper_testbed_code/scratch_1.py","file_name":"scratch_1.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"27"} +{"seq_id":"6654888458","text":"import hashlib\nimport os\nimport fcntl\nimport subprocess as sp\nfrom datetime import datetime\nfrom ctypes import c_int16, c_void_p, c_int, c_char_p, POINTER, byref, cast\n\nimport soundfile\nfrom numpy import array, int16\nfrom mingus.midi.pyfluidsynth import cfunc\nfrom mingus.midi.pyfluidsynth import new_fluid_settings, 
fluid_settings_setstr\nfrom mingus.midi.pyfluidsynth import fluid_synth_sfload, fluid_settings_setint\nfrom mingus.midi.pyfluidsynth import delete_fluid_synth, delete_fluid_settings\nfrom mingus.midi.pyfluidsynth import new_fluid_synth\n\n\nnew_fluid_player = cfunc('new_fluid_player', c_void_p, ('synth', c_void_p, 1))\n\nfluid_player_add = cfunc('fluid_player_add', c_int, ('player', c_void_p, 1),\n ('midifile', c_char_p, 1))\n\nfluid_player_play = cfunc('fluid_player_play', c_int, ('player', c_void_p, 1))\n\nfluid_player_join = cfunc('fluid_player_join', c_int, ('player', c_void_p, 1))\n\ndelete_fluid_player = cfunc('delete_fluid_player', c_int,\n ('player', c_void_p, 1))\n\nnew_fluid_file_renderer = cfunc('new_fluid_file_renderer', c_void_p,\n ('synth', c_void_p, 1))\n\ndelete_fluid_file_renderer = cfunc('delete_fluid_file_renderer', None,\n ('dev', c_void_p, 1))\n\nfluid_player_get_status = cfunc('fluid_player_get_status', c_int,\n ('player', c_void_p, 1))\n\nfluid_file_renderer_process_block = cfunc('fluid_file_renderer_process_block',\n c_int, ('dev', c_void_p, 1))\n\nfluid_synth_write_float = cfunc('fluid_synth_write_float', c_int,\n ('synth', c_void_p, 1),\n ('len', c_int, 1),\n ('lout', c_void_p, 1),\n ('loff', c_int, 1),\n ('lincr', c_int, 1),\n ('rout', c_void_p, 1),\n ('roff', c_int, 1),\n ('rincr', c_int, 1))\n\nfluid_synth_write_s16 = cfunc('fluid_synth_write_s16', c_int,\n ('synth', c_void_p, 1),\n ('len', c_int, 1),\n ('lout', c_void_p, 1),\n ('loff', c_int, 1),\n ('lincr', c_int, 1),\n ('rout', c_void_p, 1),\n ('roff', c_int, 1),\n ('rincr', c_int, 1))\n\n\nfluid_settings_getint = cfunc('fluid_settings_getint', c_int,\n ('settings', c_void_p, 1),\n ('name', c_char_p, 1),\n ('val', POINTER(c_int), 1))\n\n\n# class FluidFileRenderer(Structure):\n# _fields_ = [\n# ('a', c_uint8),\n# ('b', c_uint8),\n# ('c', c_uint32),\n# ('d', POINTER(c_uint8)),\n# \tSNDFILE* ('sndfile', ),\n# \tfloat* ('buf', ),\n# \tint ('period_size', ),\n# \tint ('buf_size', ),\n# ]\n\n# fluid_synth_write_float(fluid_synth_t* synth, int len, void* lout,\n# int loff, int lincr, void* rout, int roff, int rincr)\n# fluid_synth_write_float(dev->synth, dev->period_size, dev->buf, 0, 2,\n# dev->buf, 1, 2);\n\n# get period_size from 'audio.period-size'\n# buffer size = period_size\n# Two buffers = left and right\n\n\nFFMPEG_BIN = \"/usr/bin/avconv\"\n\n\ndef motherfucker(settings, synth, player):\n FLUID_PLAYER_PLAYING = 1\n period_size = c_int(0)\n fluid_settings_getint(settings, 'audio.period-size', byref(period_size))\n period_size = period_size.value\n # start ffmpeg outside the try block so a failed Popen cannot leave\n # proc unbound in the except handler below\n proc = sp.Popen([FFMPEG_BIN, '-y', \"-f\", 's16le', '-ar', \"44100\",\n '-ac', '2', '-i', '-', '-vn', '-f', 'mp3', '-ac', '2',\n 'pipe:1'],\n stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)\n try:\n fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)\n buff = (c_int16 * (period_size * 2))()\n while (fluid_player_get_status(player) == FLUID_PLAYER_PLAYING):\n r = fluid_synth_write_s16(\n synth,\n period_size,\n cast(buff, POINTER(c_int16)),\n 0,\n 2,\n cast(buff, POINTER(c_int16)),\n 1,\n 2\n )\n proc.stdin.write(buff)\n try:\n yield proc.stdout.read()\n except IOError:\n pass\n if (r != 0):\n print(\"Oh, that's embarrassing...\")\n break\n proc.stdin.close()\n # drain any remaining encoder output until ffmpeg exits\n while proc.poll() is None:\n try:\n yield proc.stdout.read()\n except IOError:\n pass\n except Exception:\n proc.kill()\n\n\ndef fast_render_loop(settings, synth, player):\n FLUID_PLAYER_PLAYING = 1\n\n renderer = 
new_fluid_file_renderer(synth)\n if not renderer:\n return\n\n while (fluid_player_get_status(player) == FLUID_PLAYER_PLAYING):\n if (fluid_file_renderer_process_block(renderer) != 0):\n break\n\n delete_fluid_file_renderer(renderer)\n\n\ndef play(midifile, sffile, output_filename):\n settings = new_fluid_settings()\n fluid_settings_setstr(settings, 'audio.driver', 'alsa')\n fluid_settings_setstr(settings, 'synth.verbose', 'no')\n fluid_settings_setstr(settings, 'midi.driver', 'alsa_seq')\n fluid_settings_setstr(settings, 'audio.file.name', output_filename)\n fluid_settings_setstr(settings, \"player.timing-source\", \"sample\")\n fluid_settings_setint(settings, \"synth.parallel-render\", 1)\n\n synth = new_fluid_synth(settings)\n player = new_fluid_player(synth)\n\n fluid_synth_sfload(synth, sffile, 1)\n\n fluid_player_add(player, midifile)\n fluid_player_play(player)\n\n fast_render_loop(settings, synth, player)\n\n delete_fluid_player(player)\n delete_fluid_synth(synth)\n delete_fluid_settings(settings)\n\n\ndef stream(midifile, sffile):\n settings = new_fluid_settings()\n fluid_settings_setstr(settings, 'audio.driver', 'alsa')\n fluid_settings_setstr(settings, 'synth.verbose', 'no')\n fluid_settings_setstr(settings, 'midi.driver', 'alsa_seq')\n fluid_settings_setstr(settings, 'audio.file.type', 'raw')\n fluid_settings_setstr(settings, \"player.timing-source\", \"sample\")\n fluid_settings_setint(settings, \"synth.parallel-render\", 1)\n\n synth = new_fluid_synth(settings)\n player = new_fluid_player(synth)\n\n fluid_synth_sfload(synth, sffile, 1)\n\n fluid_player_add(player, midifile)\n fluid_player_play(player)\n\n for piece in motherfucker(settings, synth, player):\n yield piece\n\n delete_fluid_player(player)\n delete_fluid_synth(synth)\n delete_fluid_settings(settings)\n\n\ndef get_hash():\n hasher = hashlib.sha1()\n hasher.update(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))\n return hasher.hexdigest()\n","repo_name":"mmarchini/minstrel-django","sub_path":"minstrel/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39369603212","text":"from django.shortcuts import render\nfrom cp import models, forms\n\nfrom django.views.generic.edit import CreateView, DeleteView, UpdateView\nfrom django.http import HttpResponseRedirect\nimport datetime\n\n\n\ndef quest_board (request, context = {}):\n adventures=models.Adventure.objects.all()\n if (request.session['npc'] == False):\n context['player_adventures']=models.Adventure.objects.filter(characters__pk=request.session['pk_user'])\n context['open_adventures']=models.Adventure.objects.filter(status=1)\n context['selected']=\"quest_board\"\n return render(request, 'cp/player/quest_board.html', context)\n else:\n context['open_adventures']=models.Adventure.objects.filter(status=1)\n context['started_adventures']=models.Adventure.objects.filter(status=0)\n context['finished_adventures']=models.Adventure.objects.filter(status=-1)\n context['selected']=\"quest_board\"\n return render(request, 'cp/npc/quest_board.html', context)\n\n\n\n\ndef quest_enroll (request, adventure_id):\n if (request.session['npc']==True):\n return quest_board(request)\n else:\n\n p=models.Player.objects.get(pk=request.session['pk_user'])\n a=models.Adventure.objects.get(pk=adventure_id)\n j=models.Journal.objects.get(player=p)\n j.adventures.add(a)\n j.save()\n a.characters.add(models.Character.objects.get(player=p))\n a.party_size = 
a.party_size - 1\n a.save()\n return quest_board(request)\n\n\ndef quest_start (request, adventure_id):\n if (request.session['npc']==True):\n a=models.Adventure.objects.get(pk=adventure_id)\n a.status=0\n a.save()\n return quest_board(request)\n else:\n return quest_board(request)\n\n\n\ndef quest_finish (request, adventure_id):\n if (request.session['npc']==True):\n a=models.Adventure.objects.get(pk=adventure_id)\n a.status=-1\n a.save()\n return quest_board(request)\n else:\n return quest_board(request)\n\n\n\n\n\n\ndef quest_create (request):\n if (request.session['npc']==False):\n return quest_board(request)\n else:\n form=forms.QuestForm(request.POST)\n n=models.Player.objects.get(pk=request.session['pk_user'])\n\n\n context = {\n 'player' : n,\n 'form' : form,\n }\n if form.is_valid():\n s=form.cleaned_data['party_size']\n d=form.cleaned_data['description']\n nex=form.cleaned_data['date_next']\n m=form.cleaned_data['name']\n a=models.Adventure.objects.create(\n creator=n,\n name=m,\n party_size=s,\n date_created=datetime.datetime.today(),\n status=1,\n description=models.Description.objects.create(text=d),\n date_next=nex\n )\n return quest_board(request)\n else:\n return render(request, 'cp/npc/quest_create.html', context)\n\n\n#/quest%id\ndef quest_details (request, adventure_id):\n a=models.Adventure.objects.get(pk=adventure_id)\n context = { 'adventure' : a }\n if (request.session['npc']==True):\n return render(request, 'cp/npc/quest_details.html', context)\n else:\n\n return render(request, 'cp/player/quest_details.html', context)\n\n\n\n#add edit quest and remove quest\n\n\n\n\n#edit quest quest_edit%id\n#class QuestUpdate (UpdateView):\n","repo_name":"lkzm/rektar","sub_path":"cp/views/quest.py","file_name":"quest.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"15496485471","text":"import os\nimport cv2\n\nCLASSES_DICT = {\n 'Adh Dense': 0, 'Adh Filmy': 1, 'Sup Black': 2, 'Sup White': 3,\n 'Sup Red': 4, 'Sup Subtle': 5, 'Ov. Endometrioma': 6, 'Ov. 
Chocolate Fluid': 7, 'Deep Endometriosis': 8\n}\nimage_path = '/data/projects/datasets/LesionDatasetImages'\nlabel_path = '/data/projects/datasets/LesionLabelsYOLO'\nNUM_TO_VISUALIZE = 20\ndetect_path = '/data/projects/yolov5/runs/detect/exp101/'\n\n\ndef find_key(num):\n for k, v in CLASSES_DICT.items():\n if int(v) == int(num):\n return k\n\n\ndef visualize_sample(image, annot_file):\n img_width = len(image[0])\n img_height = len(image)\n with open(annot_file) as f:\n box = [0] * 4\n for line in f:\n string = line.split(' ')\n label = find_key(string[0])\n width = float(string[3])\n height = float(string[4][:-1])\n xcenter = float(string[1])\n ycenter = float(string[2])\n box[0] = (xcenter - width / 2) * img_width\n box[1] = (ycenter - height / 2) * img_height\n box[2] = (xcenter + width / 2) * img_width\n box[3] = (ycenter + height / 2) * img_height\n cv2.rectangle(\n image,\n (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),\n (255, 0, 0), 4\n )\n cv2.putText(\n image, label, (int(box[0]), int(box[1] - 5)),\n cv2.FONT_HERSHEY_SIMPLEX, .7, (255, 0, 0), 2\n )\n # image_name is the module-level name set in the loop below\n cv2.imwrite(detect_path + image_name, image)\n\n\nimages = os.listdir(detect_path)\n\nfor i in range(len(images)):\n image_name = images[i]\n visualize_sample(cv2.imread(os.path.join(detect_path, image_name)),\n os.path.join(label_path, image_name[:-4] + '.txt'))\n","repo_name":"samannrz/endoScripts","sub_path":"save_yolo_GT_image.py","file_name":"save_yolo_GT_image.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74396958471","text":"from distutils.core import setup\nfrom setuptools import find_packages\n\nwith open(\"README.md\", 'rb') as f:\n long_description = f.read()\n\nsetup(name='interfaceTest', # the package name\n version='1.0.0', # version number\n description='test', # description\n long_description=long_description, # long description\n long_description_content_type='text/markdown',\n author='The-Ruffian',\n author_email='',\n url='https://github.com/the-ruffian/interfaceTest_python',\n download_url='https://github.com/the-ruffian/interfaceTest_python',\n install_requires=['requests', 'PyMySQL', 'pytest', 'pytest-assume', 'allure-pytest', 'xlrd'], # third-party dependencies\n license='MIT License',\n keywords=['bugpz', 'the-ruffian', 'interfaceTest'],\n packages=find_packages(),\n platforms=[\"all\"], # platforms\n classifiers=[\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Natural Language :: Chinese (Simplified)',\n 'Programming Language :: Python :: 3.0',\n 'Topic :: Software Development :: Libraries'\n ],\n )\n","repo_name":"GavinHaydy/interfaceTest_python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20357804868","text":"'''\nThis script converts an ONNX model into a TensorRT engine, runs inference on the engine, and prints the model's predictions.\nThe steps are:\n 1. Import the required Python modules (tensorrt, pycuda.autoinit, pandas, etc.).\n 2. Parse the ONNX model into a TensorRT network definition with OnnxParser.\n 3. Create the TensorRT Builder, Engine and Context and set the build parameters.\n 4. Load the given dataset and pass it to the TensorRT engine.\n 5. Allocate input/output buffers and run inference on the GPU to get predictions.\n 6. Print the predictions.\n'''\nimport tensorrt as trt\nimport pycuda.autoinit\nimport pycuda.driver as cuda\nimport numpy as np\nimport pandas as pd\n\n\n# Load the ONNX model (the parser must be constructed with a network definition)\nonnx_file_path = \"./model/regression_model.onnx\"\ntrt_logger = trt.Logger(trt.Logger.WARNING)\ntrt_builder = trt.Builder(trt_logger)\ntrt_network = trt_builder.create_network()\ntrt_parser = trt.OnnxParser(trt_network, trt_logger)\nwith open(onnx_file_path, \"rb\") as f:\n trt_parser.parse(f.read())\n\n# Configure the builder and create the engine and execution context\ntrt_builder.max_workspace_size = 1 << 28\ntrt_builder.max_batch_size = 1\ntrt_builder.fp16_mode = True\ntrt_engine = trt_builder.build_cuda_engine(trt_network)\ntrt_context = trt_engine.create_execution_context()\n\n# Load the dataset\ndata = pd.read_csv(\"./data/infer_file.csv\")\ninputs = data.iloc[:, 3:-1].values\ninputs = inputs.astype(\"float32\")\n\n# Allocate page-locked host buffers, device buffers and a CUDA stream\nh_input = cuda.pagelocked_empty(trt.volume(trt_engine.get_binding_shape(0)), dtype=np.float32)\nh_output = cuda.pagelocked_empty(trt.volume(trt_engine.get_binding_shape(1)), dtype=np.float32)\nd_input = cuda.mem_alloc(h_input.nbytes)\nd_output = cuda.mem_alloc(h_output.nbytes)\nstream = cuda.Stream()\n\n# Copy the input data to the device\ncuda.memcpy_htod_async(d_input, inputs.reshape(-1), stream)\n\n# Run TensorRT inference\ntrt_context.execute_async_v2(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)\ncuda.memcpy_dtoh_async(h_output, d_output, stream)\n\n# Synchronize the stream with the CPU\nstream.synchronize()\n\n# Print the results\noutput_data = h_output.reshape(trt_engine.get_binding_shape(1))\nprint(\"Predictions:\")\nprint(output_data)","repo_name":"BZsun/regression_code","sub_path":"dnn/onnx2rt.py","file_name":"onnx2rt.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5878105328","text":"from appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport subprocess\nimport json\nimport sys\n\nuserName = sys.argv[3]\naccessKey = sys.argv[4]\n\nappPath = sys.argv[5]\nuploadApp = subprocess.check_output(['curl', '-s', '-u', userName+':'+accessKey, '-X', 'POST', 'https://api-cloud.browserstack.com/app-automate/upload', '-F', 'file=@'+appPath])\nuploadApp = uploadApp.decode('utf-8')\nappID = json.loads(uploadApp)\nappID = appID[\"app_url\"]\n\nfilePath = sys.argv[6]\npostVideo = subprocess.check_output(['curl', '-s', '-u', userName+':'+accessKey, '-X', 'POST', 'https://api-cloud.browserstack.com/app-automate/upload-media', '-F', 'file=@'+filePath])\nmediaLink = postVideo.decode('utf-8')\nres = json.loads(mediaLink)\nmediaLink = res[\"media_url\"]\n\ndesired_caps = {\n \"build\": \"Python Android\",\n \"device\": \"Google Pixel 3\",\n \"app\": appID,\n 'browserstack.appium_version': '1.9.1',\n \"browserstack.uploadMedia\": [mediaLink]\n}\n\nusername = sys.argv[1]\npassword = sys.argv[2]\ncaption = sys.argv[7]\n\ndriver = webdriver.Remote(\"https://\" + userName + \":\" + accessKey + \"@hub-cloud.browserstack.com/wd/hub\", desired_caps)\n\ntime.sleep(5)\n\nlogInButton = 
WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ID, \"com.instagram.android:id/log_in_button\"))\n)\nlogInButton.click()\ntime.sleep(2)\n\nuserID = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ID, \"com.instagram.android:id/login_username\"))\n)\nuserID.send_keys(username)\n\npasswordBox = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ID, \"com.instagram.android:id/password\"))\n)\npasswordBox.send_keys(password)\ntime.sleep(2)\n\nlogIn = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ID, \"com.instagram.android:id/button_text\"))\n)\nlogIn.click()\ntime.sleep(5)\n\nuploadMedia = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.XPATH, \"//android.widget.Button[@content-desc='Camera']\"))\n)\nuploadMedia.click()\ntime.sleep(3)\n\nstoragePermission = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.XPATH, \"//android.widget.Button[@text='Allow']\"))\n)\nstoragePermission.click()\ntime.sleep(5)\n\nchooseFile = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.XPATH, \"//android.widget.CheckBox[@index='0']\"))\n)\nchooseFile.click()\ntime.sleep(5)\n\ncropToggle = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ACCESSIBILITY_ID, \"Toggle square\"))\n)\ncropToggle.click()\ntime.sleep(3)\n\nselectMedia = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ACCESSIBILITY_ID, \"Next\"))\n)\nselectMedia.click()\ntime.sleep(3)\n\nnextStep = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ACCESSIBILITY_ID, \"Next\"))\n)\nnextStep.click()\ntime.sleep(2)\n\ncaptionBox = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ID, \"com.instagram.android:id/caption_text_view\"))\n)\ncaptionBox.send_keys(caption)\ntime.sleep(5)\n\nshare = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ACCESSIBILITY_ID, \"Next\"))\n)\nshare.click()\ntime.sleep(60)\n\ngoToProfile = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ID, \"com.instagram.android:id/profile_tab\"))\n)\ngoToProfile.click()\ntime.sleep(3)\n\nselectRecentPost = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.XPATH, \"//androidx.recyclerview.widget.RecyclerView/android.widget.LinearLayout[@index='1']/android.widget.ImageView[@index='0']\"))\n)\nselectRecentPost.click()\ntime.sleep(3)\n\nmoreFeedOptions = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.ID, \"com.instagram.android:id/feed_more_button_stub\"))\n)\nmoreFeedOptions.click()\ntime.sleep(3)\n\ncopyLink = WebDriverWait(driver, 30).until(\n EC.element_to_be_clickable((MobileBy.XPATH, \"//android.widget.TextView[@text='Copy Link']\"))\n)\ncopyLink.click()\ntime.sleep(15)\n\npostLink = driver.get_clipboard_text()\nprint(\"Instagram Link: \" + postLink)\n\nappHash = appID.replace(\"bs://\",\"\")\nsubprocess.call(['curl', '-s', '-u', userName+':'+accessKey, '-X', 'DELETE', 'https://api-cloud.browserstack.com/app-automate/app/delete/'+appHash])\n\nmediaHash = mediaLink.replace(\"media://\",\"\")\nsubprocess.call(['curl', '-s', '-u', userName+':'+accessKey, '-X', 'DELETE', 
'https://api-cloud.browserstack.com/app-automate/custom_media/delete/'+mediaHash])\n\ndriver.quit()","repo_name":"oduwsdl/wdill","sub_path":"instagramWithBrowserStack.py","file_name":"instagramWithBrowserStack.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"32324808049","text":"from flask import Flask, request, url_for\n\nimport os\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.models import model_from_json\nimport numpy as np\nimport json\nimport sys\nimport pandas as pd\nimport tensorflow as tf\n\napp = Flask(__name__)\n\nimport EmotionExtractor\n\n#em = EmotionExtractor.EmotionExtractor('baseline.npy', 'baseline_mean_sd.pickle', Conv=False)\n#em = EmotionExtractor.EmotionExtractor('baseline.npy', 'baseline_mean_sd.pickle', Conv=True)\n\nem = EmotionExtractor.EmotionExtractor('baseline_context5_conv_simple2.weights', 'mean_std.csv', Conv=False)\n\ngraph = tf.get_default_graph()\n\n@app.route('/alive', methods= ['GET'])\ndef alive():\n print('Hello world!')\n return \"ALIVE\"\n\n\"\"\" endpoint for target verification Loader.io \"\"\"\n@app.route('/loaderio-c2146f3353da3bd1786e4233a8de669a/', methods=['GET'])\ndef loaderio():\n return 'loaderio-c2146f3353da3bd1786e4233a8de669a'\n\n@app.route('/loaderio-c59890271e009cafd5ad3fd5c5a3f542/', methods=['GET'])\ndef loaderio2():\n return 'loaderio-c59890271e009cafd5ad3fd5c5a3f542'\n\n\n@app.route('/annotate', methods=['POST'])\ndef annotate():\n if request.method == 'POST':\n\n mydata = request.data\n Stringcodio = mydata.decode('utf_8').replace(\"[\", \"\").replace(\"]\",\"\").split(\",\")\n values = [float(val) for val in Stringcodio]\n valpred = np.reshape(np.array(values), (5,136))\n prediction = None\n global graph\n with graph.as_default():\n prediction = em.predict_emotion(valpred)\n\n jsonpred = pd.Series(prediction).to_json(orient='values')\n\n return jsonpred\n\n@app.route('/annotate2', methods=['POST'])\ndef annotate2():\n if request.method == 'POST':\n\n mydata = request.data\n Stringcodio = mydata.replace(\"[\", \"\").replace(\"]\",\"\").split(\",\")\n values = [float(val) for val in Stringcodio]\n valpred = np.reshape(np.array(values), (119,34))\n prediction = em.predict_emotion(valpred)\n\n jsonpred = pd.Series(prediction).to_json(orient='values')\n\n return jsonpred\n\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=int(\"50001\"), debug=True)\n\n","repo_name":"SB-BISS/EMODASH","sub_path":"PythonScripts/FlaskNeuralAnnotator_tf.py","file_name":"FlaskNeuralAnnotator_tf.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31524385100","text":"#!/usr/bin/env python2.7\n# -*- coding: utf8 -*-\n\n\"\"\"\nFile: contributions_parser.py\nAuthor: Andrew Rose\nLast Maintained: Andrew Rose\nLast Updated: 08/09/2017\n\nDescription:\n - imports contributions for NY from followthemoney.org\n\nTables affected:\n - Organizations\n - Contribution\n\"\"\"\nimport datetime as dt\nfrom Models.Contribution import Contribution\n\nclass ContributionParser(object):\n def __init__(self, state, api):\n if dt.date.today().weekday() == 6:\n self.comprehensive_flag = 1\n else:\n self.comprehensive_flag = 0\n\n self.state = state\n self.api = api\n\n def clean_name(self, name):\n \"\"\"\n Scrapes a candidate's name from their FollowTheMoney profile\n :param name: The candidate's entity ID\n :return: A 
tuple containing the candidate's first and last names\n \"\"\"\n name = name.split(' - ')[0]\n name = name.strip()\n name = name.split(',')\n\n first = None\n last = None\n\n if len(name) >= 2:\n first = name[1].strip()\n last = name[0].strip()\n\n if len(first.split(' ')) == 1:\n first = first.strip()\n\n else:\n first = first.split(' ')[0]\n\n if len(last.split(' ')) > 1:\n last = last.split(' ')\n if len(last[0]) == 1:\n last = last[1]\n else:\n last = last[0]\n\n first = first.strip()\n last = last.strip()\n\n return first, last\n\n def get_records(self, eid):\n \"\"\"\n Gets a JSON-formatted list of contribution records for a candidate\n from FollowTheMoney's API\n :param eid: The candidate to get contribution records for\n :return: A JSON-formatted list of contribution records\n \"\"\"\n records, max_pages = self.api.get_records_and_max_pages_json(eid, 0)\n for i in range(1, max_pages):\n # each page call returns a (records, max_pages) tuple, so keep\n # only the record list when extending\n records += self.api.get_records_and_max_pages_json(eid, i)[0]\n\n return records\n\n def get_updated_records(self, min_date, max_date):\n \"\"\"\n Returns all contributions updated within a timeframe bounded by min_date and max_date\n :param min_date: The lower bound on the date updated\n :param max_date: The upper bound on the date updated\n :return: A list of contribution records from FollowTheMoney, in JSON format\n \"\"\"\n\n records, max_pages = self.api.get_updated_records_and_max_pages_json(min_date, max_date, 0)\n for i in range(1, max_pages):\n records += self.api.get_updated_records_and_max_pages_json(min_date, max_date, i)[0]\n return records\n\n def get_eid_list(self, year):\n \"\"\"\n Returns a list of FollowTheMoney entity IDs for candidates that participated in a certain election\n :param year: The year of the election to get candidates from\n :return: A list of FollowTheMoney entity IDs\n \"\"\"\n records, max_pages = self.api.get_candidates_records_and_max_pages_json(year, 0)\n for i in range(1, max_pages):\n records += self.api.get_candidates_records_and_max_pages_json(year, i)[0]\n\n eid_list = list()\n for record in records:\n eid_list.append(record['Candidate_Entity']['id'])\n\n return eid_list\n\n def format_contribution_record(self, record):\n \"\"\"\n Formats a FollowTheMoney contribution record into a Contribution model object\n :param record: A contribution record from FollowTheMoney, in JSON format\n :return: A Contribution model object\n \"\"\"\n name = record['Candidate']['Candidate']\n\n first, last = self.clean_name(name)\n date = record['Date']['Date']\n contributor_type = record['Type_of_Contributor']['Type_of_Contributor']\n contributor = record['Contributor']['Contributor']\n amount = record['Amount']['Amount']\n sector = record['Broad_Sector']['Broad_Sector']\n\n # some donations apparently don't have dates\n if str(date) == '' or str(date) == '0000-00-00' or date < '1970-01-01' or date > str(dt.datetime.now().date()):\n date = None\n record_year = None\n else:\n date = str(date) + \" 00:00:00\"\n record_year = date.split(\"-\")[0]\n\n donor_name = None\n donor_org = None\n\n if contributor_type == \"Individual\" or \"FRIENDS\" in contributor or contributor_type == \"Other\":\n if ',' in contributor:\n temp_name = contributor.split(',')\n contributor = temp_name[1] + \" \" + temp_name[0]\n contributor = contributor.strip()\n donor_name = contributor\n elif contributor_type == \"Non-Individual\":\n donor_name = donor_org = contributor\n\n contribution = Contribution(first_name=first, last_name=last, donor_name=donor_name,\n donor_org=donor_org, sector=sector, 
amount=amount, state=self.state,\n date=date, year=record_year)\n\n return contribution\n\n def parse_all_contributions(self, year):\n \"\"\"\n Gets all contributions received by candidates in the given election year\n :param year: The election year to get contribution records from\n :return: A generator of Contribution model objects\n \"\"\"\n for eid in self.get_eid_list(year):\n for record in self.get_records(eid):\n if type(record) is list:\n for subrecord in record:\n yield self.format_contribution_record(subrecord)\n elif type(record) is not dict:\n continue\n else:\n yield self.format_contribution_record(record)\n\n def parse_recent_contributions(self):\n \"\"\"\n Gets contributions that have been recently updated on FollowTheMoney\n :return: A list of Contribution model objects\n \"\"\"\n contribution_list = list()\n\n min_date = dt.datetime.today() - dt.timedelta(weeks=1)\n max_date = dt.datetime.today()\n\n for record in self.get_updated_records(min_date, max_date):\n contribution_list.append(self.format_contribution_record(record))\n\n return contribution_list\n\n def get_contribution_list(self, years):\n \"\"\"\n Builds a list of Contribution model objects.\n Gets either all contributions for the given election years or\n all contributions that have been updated in the past week, depending\n on when the contribution script is run\n :param years: The election years to get contribution records for\n :return: An iterable of Contribution model objects\n \"\"\"\n if self.comprehensive_flag == 1:\n # chain the contributions for every requested year instead of\n # returning after the first iteration\n return (c for year in years\n for c in self.parse_all_contributions(year))\n else:\n return self.parse_recent_contributions()\n","repo_name":"digitaldemocracy/dd-Data3.0","sub_path":"CurrentScripts/OpenStatesParsers/contributions_parser.py","file_name":"contributions_parser.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25134782183","text":"from datetime import datetime\nimport json, os, copy\nimport boto3\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Alignment, NamedStyle, PatternFill, Font, numbers\nfrom openpyxl.styles.borders import Border, Side\nfrom openpyxl.utils import get_column_letter\nfrom openpyxl.worksheet.pagebreak import Break\nfrom openpyxl.worksheet.page import PrintPageSetup\nfrom openpyxl.drawing.image import Image\n\n\nclass GlobalVar:\n border_thin = Border(\n left=Side(style='thin'),\n right=Side(style='thin'),\n top=Side(style='thin'),\n bottom=Side(style='thin'))\n \n border_thick = Border(\n left=Side(style='thick'),\n right=Side(style='thick'),\n top=Side(style='thick'),\n bottom=Side(style='thick'))\n\n # thick outer, thin inner\n border_top_left = Border(\n left=Side(style='thick'),\n right=Side(style='thin'),\n top=Side(style='thick'),\n bottom=Side(style='thin'))\n\n border_top = Border(\n left=Side(style='thin'),\n right=Side(style='thin'),\n top=Side(style='thick'),\n bottom=Side(style='thin'))\n\n border_top_right = Border(\n left=Side(style='thin'),\n right=Side(style='thick'),\n top=Side(style='thick'),\n bottom=Side(style='thin'))\n\n border_right = Border(\n left=Side(style='thin'),\n right=Side(style='thick'),\n top=Side(style='thin'),\n bottom=Side(style='thin'))\n\n border_bottom_right = Border(\n left=Side(style='thin'),\n right=Side(style='thick'),\n top=Side(style='thin'),\n 
bottom=Side(style='thick'))\n\n border_bottom = Border(\n left=Side(style='thin'),\n right=Side(style='thin'),\n top=Side(style='thin'),\n bottom=Side(style='thick'))\n\n border_bottom_left = Border(\n left=Side(style='thick'),\n right=Side(style='thin'),\n top=Side(style='thin'),\n bottom=Side(style='thick'))\n\n border_left = Border(\n left=Side(style='thick'),\n right=Side(style='thin'),\n top=Side(style='thin'),\n bottom=Side(style='thin'))\n \n border_left_closed = Border(\n left=Side(style='thick'),\n right=Side(style='thin'),\n top=Side(style='thick'),\n bottom=Side(style='thick'))\n \n border_right_closed = Border(\n left=Side(style='thin'),\n right=Side(style='thick'),\n top=Side(style='thick'),\n bottom=Side(style='thick'))\n\n border_top_bottom = Border(\n left=Side(style='thin'),\n right=Side(style='thin'),\n top=Side(style='thick'),\n bottom=Side(style='thick'))\n \n\n border_top_closed = Border(\n left=Side(style='thick'),\n right=Side(style='thick'),\n top=Side(style='thick'),\n bottom=Side(style='thin'))\n \n border_bottom_closed = Border(\n left=Side(style='thick'),\n right=Side(style='thick'),\n top=Side(style='thin'),\n bottom=Side(style='thick'))\n\n border_left_right = Border(\n left=Side(style='thick'),\n right=Side(style='thick'),\n top=Side(style='thin'),\n bottom=Side(style='thin'))\n\n # THICK outer; none inner\n border_thick_top_left = Border(\n left=Side(style='thick'),\n right=Side(style=None),\n top=Side(style='thick'),\n bottom=Side(style=None))\n\n border_thick_top = Border(\n left=Side(style=None),\n right=Side(style=None),\n top=Side(style='thick'),\n bottom=Side(style=None))\n\n border_thick_top_right = Border(\n left=Side(style=None),\n right=Side(style='thick'),\n top=Side(style='thick'),\n bottom=Side(style=None))\n\n border_thick_right = Border(\n left=Side(style=None),\n right=Side(style='thick'),\n top=Side(style=None),\n bottom=Side(style=None))\n\n border_thick_bottom_right = Border(\n left=Side(style=None),\n right=Side(style='thick'),\n top=Side(style=None),\n bottom=Side(style='thick'))\n\n border_thick_bottom = Border(\n left=Side(style=None),\n right=Side(style=None),\n top=Side(style=None),\n bottom=Side(style='thick'))\n\n border_thick_bottom_left = Border(\n left=Side(style='thick'),\n right=Side(style=None),\n top=Side(style=None),\n bottom=Side(style='thick'))\n\n border_thick_left = Border(\n left=Side(style='thick'),\n right=Side(style=None),\n top=Side(style=None),\n bottom=Side(style=None))\n \n border_thick_left_closed = Border(\n left=Side(style='thick'),\n right=Side(style=None),\n top=Side(style='thick'),\n bottom=Side(style='thick'))\n \n border_thick_right_closed = Border(\n left=Side(style=None),\n right=Side(style='thick'),\n top=Side(style='thick'),\n bottom=Side(style='thick'))\n \n border_thick_top_bottom = Border(\n left=Side(style=None),\n right=Side(style=None),\n top=Side(style='thick'),\n bottom=Side(style='thick'))\n \n\n border_thick_top_closed = Border(\n left=Side(style='thick'),\n right=Side(style='thick'),\n top=Side(style='thick'),\n bottom=Side(style=None))\n \n border_thick_bottom_closed = Border(\n left=Side(style='thick'),\n right=Side(style='thick'),\n top=Side(style=None),\n bottom=Side(style='thick'))\n\n border_thick_left_right = Border(\n left=Side(style='thick'),\n right=Side(style='thick'),\n top=Side(style=None),\n bottom=Side(style=None))\n\n # thin outer; none inner\n border_thin_top_left = Border(\n left=Side(style='thin'),\n right=Side(style=None),\n top=Side(style='thin'),\n 
bottom=Side(style=None))\n\n border_thin_top = Border(\n left=Side(style=None),\n right=Side(style=None),\n top=Side(style='thin'),\n bottom=Side(style=None))\n\n border_thin_top_right = Border(\n left=Side(style=None),\n right=Side(style='thin'),\n top=Side(style='thin'),\n bottom=Side(style=None))\n\n border_thin_right = Border(\n left=Side(style=None),\n right=Side(style='thin'),\n top=Side(style=None),\n bottom=Side(style=None))\n\n border_thin_bottom_right = Border(\n left=Side(style=None),\n right=Side(style='thin'),\n top=Side(style=None),\n bottom=Side(style='thin'))\n\n border_thin_bottom = Border(\n left=Side(style=None),\n right=Side(style=None),\n top=Side(style=None),\n bottom=Side(style='thin'))\n\n border_thin_bottom_left = Border(\n left=Side(style='thin'),\n right=Side(style=None),\n top=Side(style=None),\n bottom=Side(style='thin'))\n\n border_thin_left = Border(\n left=Side(style='thin'),\n right=Side(style=None),\n top=Side(style=None),\n bottom=Side(style=None))\n \n border_thin_left_closed = Border(\n left=Side(style='thin'),\n right=Side(style=None),\n top=Side(style='thin'),\n bottom=Side(style='thin'))\n \n border_thin_right_closed = Border(\n left=Side(style=None),\n right=Side(style='thin'),\n top=Side(style='thin'),\n bottom=Side(style='thin'))\n \n border_thin_top_bottom = Border(\n left=Side(style=None),\n right=Side(style=None),\n top=Side(style='thin'),\n bottom=Side(style='thin'))\n\n border_thin_top_closed = Border(\n left=Side(style='thin'),\n right=Side(style='thin'),\n top=Side(style='thin'),\n bottom=Side(style=None))\n \n border_thin_bottom_closed = Border(\n left=Side(style='thin'),\n right=Side(style='thin'),\n top=Side(style=None),\n bottom=Side(style='thin'))\n\n border_thin_left_right = Border(\n left=Side(style='thin'),\n right=Side(style='thin'),\n top=Side(style=None),\n bottom=Side(style=None))\n\ndef create_workbook(workbook_content=None, filename=None, add_footer_index=True, convert_str_to_number=False):\n '''\n INPUTS:\n workbook_content: a list that stores all data to be written into a workbook\n each list item is a dictionary corresponding to a worksheet\n A worksheet item is composed by data below:\n key='ws_name'; value: name of the worksheet\n key='ws_content'; value: 2D list corresponding to worksheet cells\n key='page_orientation'; value: 0-portrait; 1-landscape\n key='paper_size'; value (Integer): \n 1 = PAPERSIZE_LETTER\n 2 = PAPERSIZE_LETTER_SMALL\n 3 = PAPERSIZE_TABLOID\n 4 = PAPERSIZE_LEDGER\n 5 = PAPERSIZE_LEGAL\n 6 = PAPERSIZE_STATEMENT\n 7 = PAPERSIZE_EXECUTIVE\n 8 = PAPERSIZE_A3\n 9 = PAPERSIZE_A4\n 10 = PAPERSIZE_A4_SMALL\n 11 = PAPERSIZE_A5\n\n key='list_img': list of tuples [(anchor_cell, img_path)], e.g. 
[('A1', path_to_image, width (optional), height (optional))]\n\n key='cell_range_style': a dictionary that specifies the formatting parameters\n 'range_merge': list of tuples; each tuple is composed of:\n start_row (int), start_column (int),\n end_row (int), end_column (int), horizontal_align (str, optional)\n merged cell will be aligned: vertical centered, horizontal per spec\n\n 'range_color': list of tuples for color range (row_start, col_start, row_end, col_end, rgb_color_code)\n\n 'range_font_bold': list of tuples (row_start, col_start, row_end, col_end)\n\n 'range_font_size': List of tuples for specific font size (row_start, col_start, row_end, col_end, font_size)\n\n 'range_border': list of cell ranges for border (row_min, column_min, row_max, column_max, style_layout (int))\n style_layout = 0:\n Thin borders will be added for all cells in range;\n bold borders will be added to the edging sides\n style_layout = 1: thin border all, outer and inner\n style_layout = 2: thick outer border only, no inner border\n style_layout = 3: thin outer border only, no inner border\n\n\n\n 'range_align': list of tuples (row_start, col_start, row_end, col_end, alignment_label)\n 'range_row_group': list of tuples (row_start, row_end, is_hidden); [is_hidden=False by default]\n 'column_width': list of tuples (col_index_based_1, width_in_pt)\n 'row_height': list of tuples (row_index_based_1, height_in_pt)\n 'range_unwrap': list of tuples (row_start, col_start, row_end, col_end); index is one based\n 'range_print_area': list of tuples (row_start, col_start, row_end, col_end)\n \n\n 'footer': list of string, added as notes below table\n\n filename: name of file with extension .xlsx\n '''\n\n if workbook_content is None:\n print(f'::: No content is identified for workbook')\n return None\n\n workbook = Workbook()\n\n # iterate thru worksheet data\n for ws in workbook_content:\n try:\n ws_name = ws['ws_name']\n worksheet = workbook.create_sheet()\n worksheet.title = ws_name\n ws_cell_value = ws.get('ws_content')\n\n # set value for each cell\n for i, row in enumerate(ws_cell_value):\n for j, cell_value in enumerate(row):\n worksheet.cell(row=i + 1, column=j + 1, value=str(cell_value))\n worksheet.cell(row=i + 1, column=j + 1).alignment = Alignment(wrap_text=True)\n\n if convert_str_to_number:\n if _is_digit(cell_value):\n if '.' 
in str(cell_value):\n worksheet.cell(row=i + 1, column=j + 1).value = float(str(cell_value))\n worksheet.cell(row=i + 1, column=j + 1).number_format = '0.00'\n else:\n worksheet.cell(row=i + 1, column=j + 1).value = int(str(cell_value))\n worksheet.cell(row=i+1, column=j+1).number_format = numbers.FORMAT_NUMBER\n\n # formating cells as required: merge/border/color/column-width\n cell_range_style = ws.get('cell_range_style')\n\n if cell_range_style:\n # unwrap text\n if 'range_unwrap' in cell_range_style:\n _set_cells_unwrap(worksheet=worksheet, list_cell_range=cell_range_style.get('range_unwrap'))\n\n # step 1 - align\n if 'range_align' in cell_range_style:\n _set_format_align(worksheet=worksheet, list_cell_range=cell_range_style['range_align'])\n\n # Step 2 - color\n if 'range_color' in cell_range_style:\n _set_format_color(worksheet=worksheet, list_cell_range=cell_range_style['range_color'])\n\n # Step 3 - font and bold\n if 'range_font_size' in cell_range_style:\n _set_format_font_size(worksheet=worksheet, list_cell_range=cell_range_style['range_font_size'])\n\n if 'range_font_bold' in cell_range_style:\n # will retain font size\n _set_format_font_bold(worksheet=worksheet, list_cell_range=cell_range_style['range_font_bold'])\n\n if 'range_merge' in cell_range_style:\n _set_format_merge(worksheet=worksheet, list_cell_range=cell_range_style['range_merge'])\n\n if 'range_border' in cell_range_style:\n _set_format_border(worksheet=worksheet, list_cell_range=cell_range_style['range_border'])\n\n if 'range_row_group' in cell_range_style:\n for g in cell_range_style.get('range_row_group'):\n try:\n is_hidden = g[2]\n except:\n is_hidden = False\n\n worksheet.row_dimensions.group(g[0], g[1], hidden=is_hidden)\n\n if 'column_width' in cell_range_style:\n for col_width in cell_range_style['column_width']:\n worksheet.column_dimensions[get_column_letter(col_width[0])].width = col_width[1]\n if 'row_height' in cell_range_style:\n for row_ht in cell_range_style['row_height']:\n worksheet.row_dimensions[row_ht[0]].height = row_ht[1]\n \n if 'range_print_area' in cell_range_style:\n _set_printing_area(worksheet=worksheet, list_cell_range=cell_range_style.get('range_print_area'), \n paper_size=ws.get('paper_size'), orientation=ws.get('page_orientation'))\n\n # add footer\n list_footer = ws.get('footer')\n row_current = len(ws_cell_value) + 1 # add a blank line between table and footer\n if list_footer:\n for k, footer in enumerate(list_footer):\n if add_footer_index:\n footer_row = ' {:d}. 
{:s}'.format(k+1, footer)\n else:\n footer_row = footer\n\n worksheet.cell(row=row_current+k+1, column=1, value=str(footer_row))\n \n # insert images\n list_img = ws.get('list_img')\n if list_img:\n _insert_images(worksheet=worksheet, list_img=list_img)\n\n \n except ValueError as ve:\n print(f'>>> Error: value error detected when creating excel worksheets')\n print(ve.__doc__)\n print(ve.__dict__)\n\n except Exception as e:\n print(e.__doc__)\n print(f'>>> Error when creating excel worksheets: {e.__doc__}')\n\n # remove default sheet\n worksheet_default = workbook.get_sheet_by_name('Sheet')\n workbook.remove_sheet(worksheet_default)\n # rename the first default tab and set as coversheet\n #_add_coversheet(workbook=workbook)\n workbook.save(filename)\n workbook.close()\n\n \n # return the file name without path info\n return filename\n\n\ndef _set_format_merge(worksheet=None, list_cell_range=None):\n if worksheet is None or list_cell_range is None:\n return\n\n for cell_range in list_cell_range:\n start_row = cell_range[0]\n start_col = cell_range[1]\n end_row = cell_range[2]\n end_col = cell_range[3]\n worksheet.merge_cells(start_row=start_row, start_column=start_col,\n end_row=end_row, end_column=end_col)\n\n try:\n horizontal_align = cell_range[4]\n except:\n horizontal_align = 'left'\n\n worksheet.cell(row=start_row, column=start_col).alignment = Alignment(wrapText=True, horizontal=horizontal_align, vertical='center')\n\n\ndef _set_format_border(worksheet=None, list_cell_range=None):\n \"\"\"\n style_layout: fifth item of a cell range\n 0 - thick outer border, thin inner border\n 1 - thin border all, outer and inner\n 2 - thick outer border only, no inner border\n 3 - thin outer border only, no inner border\n \"\"\"\n\n if worksheet is None or list_cell_range is None:\n return\n\n \n\n for cell_range in list_cell_range:\n row_start = cell_range[0]\n col_start = cell_range[1]\n row_end = cell_range[2]\n col_end = cell_range[3]\n\n if row_start != row_end and col_start != col_end:\n __set_border_2d_rows_cols(worksheet=worksheet, cell_range_border=cell_range)\n elif row_start == row_end and col_start != col_end:\n __set_border_one_row(worksheet=worksheet, cell_range_border=cell_range)\n elif row_start != row_end and col_start == col_end:\n __set_border_one_col(worksheet=worksheet, cell_range_border=cell_range)\n elif row_start == row_end and col_start == col_end:\n __set_border_one_cell(worksheet=worksheet, cell_range_border=cell_range)\n else:\n return\n\n \ndef __set_border_one_cell(worksheet, cell_range_border):\n row = cell_range_border[0]\n col = cell_range_border[1]\n if len(cell_range_border) > 4:\n style_layout = cell_range_border[4]\n else:\n style_layout = 0\n \n cell = worksheet.cell(row=row, column=col)\n if style_layout == 0 or style_layout == 2:\n cell.border = GlobalVar.border_thick\n\n else:\n cell.border = GlobalVar.border_thin\n\ndef __set_border_one_row(worksheet, cell_range_border):\n row = cell_range_border[0]\n col_start = cell_range_border[1]\n col_end = cell_range_border[3]\n if len(cell_range_border) > 4:\n style_layout = cell_range_border[4]\n else:\n style_layout = 0\n \n if style_layout == 0 or style_layout == 1:\n # thin border for all cells\n for col in range(col_start, col_end+1):\n cell = worksheet.cell(row=row, column=col)\n cell.border = GlobalVar.border_thin\n if style_layout == 0:\n # thick outer\n for col in range(col_start, col_end + 1):\n cell = worksheet.cell(row=row, column=col)\n if col == col_start:\n cell.border = GlobalVar.border_left_closed\n 
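The `GlobalVar` border constants referenced throughout these helpers (`border_thin`, `border_thick`, `border_left_closed`, ...) are defined elsewhere in the repo. A minimal sketch of how such constants could be built with openpyxl's `Border`/`Side`; the names are inferred from usage here, not taken from the actual source:

```python
from openpyxl.styles import Border, Side

class GlobalVar:
    # Hypothetical reconstruction; the real module may differ.
    _thin = Side(style='thin')
    _thick = Side(style='thick')
    border_thin = Border(left=_thin, right=_thin, top=_thin, bottom=_thin)
    border_thick = Border(left=_thick, right=_thick, top=_thick, bottom=_thick)
    # Left end of a one-row range: thick on the three outside edges,
    # thin divider toward the interior of the range.
    border_left_closed = Border(left=_thick, top=_thick, bottom=_thick, right=_thin)
    border_right_closed = Border(right=_thick, top=_thick, bottom=_thick, left=_thin)
    border_top_bottom = Border(top=_thick, bottom=_thick, left=_thin, right=_thin)
```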
elif col == col_end:\n cell.border = GlobalVar.border_right_closed\n else:\n cell.border = GlobalVar.border_top_bottom\n elif style_layout == 2:\n # think outer, empty inner\n for col in range(col_start, col_end + 1):\n cell = worksheet.cell(row=row, column=col)\n if col == col_start:\n cell.border = GlobalVar.border_thick_left_closed\n elif col == col_end:\n cell.border = GlobalVar.border_thick_right_closed\n else:\n cell.border = GlobalVar.border_thick_top_bottom\n elif style_layout == 3:\n # thin outer, empty inner\n for col in range(col_start, col_end + 1):\n cell = worksheet.cell(row=row, column=col)\n if col == col_start:\n cell.border = GlobalVar.border_thin_left_closed\n elif col == col_end:\n cell.border = GlobalVar.border_thin_right_closed\n else:\n cell.border = GlobalVar.border_thin_top_bottom\n\n\ndef __set_border_one_col(worksheet, cell_range_border):\n row_start = cell_range_border[0]\n col = cell_range_border[1]\n row_end = cell_range_border[2]\n \n if len(cell_range_border) > 4:\n style_layout = cell_range_border[4]\n else:\n style_layout = 0\n\n if style_layout == 0 or style_layout == 1:\n # thin border for all cells\n for row in range(row_start, row_end+1):\n cell = worksheet.cell(row=row, column=col)\n cell.border = GlobalVar.border_thin\n if style_layout == 0:\n # thick outer\n for row in range(row_start, row_end + 1):\n cell = worksheet.cell(row=row, column=col)\n if row == row_start:\n cell.border = GlobalVar.border_top_closed\n elif row == row_end:\n cell.border = GlobalVar.border_bottom_closed\n else:\n cell.border = GlobalVar.border_left_right\n elif style_layout == 2:\n # think outer, empty inner\n for row in range(row_start, row_end + 1):\n cell = worksheet.cell(row=row, column=col)\n if row == row_start:\n cell.border = GlobalVar.border_thick_top_closed\n elif row == row_end:\n cell.border = GlobalVar.border_thick_bottom_closed\n else:\n cell.border = GlobalVar.border_thick_left_right\n elif style_layout == 3:\n # thin outer, empty inner\n for row in range(row_start, row_end + 1):\n cell = worksheet.cell(row=row, column=col)\n if row == row_start:\n cell.border = GlobalVar.border_thin_top_closed\n elif row == row_end:\n cell.border = GlobalVar.border_thin_bottom_closed\n else:\n cell.border = GlobalVar.border_thin_left_right\n \n\ndef __set_border_2d_rows_cols(worksheet, cell_range_border):\n\n row_start = cell_range_border[0]\n col_start = cell_range_border[1]\n row_end = cell_range_border[2]\n col_end = cell_range_border[3]\n\n if len(cell_range_border) >4:\n # style_layout specified\n style_layout = cell_range_border[4]\n else:\n style_layout = 0\n\n \n if style_layout == 0 or style_layout == 1:\n # think border for all cells\n for row in range(row_start, row_end+1):\n for col in range(col_start, col_end+1):\n cell = worksheet.cell(row=row, column=col)\n cell.border = GlobalVar.border_thin\n\n if style_layout == 0:\n __set_border_2d_rows_cols_outer(worksheet=worksheet, row_start=row_start, row_end=row_end, \n col_start=col_start, col_end=col_end, style_layout=style_layout)\n elif style_layout == 2 or style_layout == 3:\n __set_border_2d_rows_cols_outer(worksheet=worksheet, row_start=row_start, row_end=row_end, \n col_start=col_start, col_end=col_end, style_layout=style_layout)\n\ndef __set_border_2d_rows_cols_outer(worksheet, row_start, row_end, col_start, col_end, style_layout):\n \"\"\"\n Set outer border\n \"\"\"\n if style_layout == 0:\n # thick outer, thin inner\n for i in range(row_start, row_end + 1):\n for j in range(col_start, col_end + 1):\n cell = 
worksheet.cell(row=i, column=j)\n if i == row_start and j == col_start:\n # upper left corner\n cell.border = GlobalVar.border_top_left\n elif i == row_start and j < col_end:\n cell.border = GlobalVar.border_top\n elif i == row_start and j == col_end:\n cell.border = GlobalVar.border_top_right\n elif i == row_end and j == col_start:\n cell.border = GlobalVar.border_bottom_left\n elif i == row_end and j < col_end:\n cell.border = GlobalVar.border_bottom\n elif i == row_end and j == col_end:\n cell.border = GlobalVar.border_bottom_right\n elif j == col_start and i < row_end:\n cell.border = GlobalVar.border_left\n elif j == col_end and i < row_end:\n cell.border = GlobalVar.border_right\n \n elif style_layout == 2:\n # thick outer, empty inner\n for i in range(row_start, row_end + 1):\n for j in range(col_start, col_end + 1):\n cell = worksheet.cell(row=i, column=j)\n if i == row_start and j == col_start:\n # upper left corner\n cell.border = GlobalVar.border_thick_top_left\n elif i == row_start and j < col_end:\n cell.border = GlobalVar.border_thick_top\n elif i == row_start and j == col_end:\n cell.border = GlobalVar.border_thick_top_right\n elif i == row_end and j == col_start:\n cell.border = GlobalVar.border_thick_bottom_left\n elif i == row_end and j < col_end:\n cell.border = GlobalVar.border_thick_bottom\n elif i == row_end and j == col_end:\n cell.border = GlobalVar.border_thick_bottom_right\n elif j == col_start and i < row_end:\n cell.border = GlobalVar.border_thick_left\n elif j == col_end and i < row_end:\n cell.border = GlobalVar.border_thick_right\n elif style_layout == 3:\n # thin outer, empty inner\n for i in range(row_start, row_end + 1):\n for j in range(col_start, col_end + 1):\n cell = worksheet.cell(row=i, column=j)\n if i == row_start and j == col_start:\n # upper left corner\n cell.border = GlobalVar.border_thin_top_left\n elif i == row_start and j < col_end:\n cell.border = GlobalVar.border_thin_top\n elif i == row_start and j == col_end:\n cell.border = GlobalVar.border_thin_top_right\n elif i == row_end and j == col_start:\n cell.border = GlobalVar.border_thin_bottom_left\n elif i == row_end and j < col_end:\n cell.border = GlobalVar.border_thin_bottom\n elif i == row_end and j == col_end:\n cell.border = GlobalVar.border_thin_bottom_right\n elif j == col_start and i < row_end:\n cell.border = GlobalVar.border_thin_left\n elif j == col_end and i < row_end:\n cell.border = GlobalVar.border_thin_right\n\ndef _set_format_color(worksheet=None, list_cell_range=None):\n\n if worksheet is None or list_cell_range is None:\n return\n\n #print(f'list_color_range={list_cell_range}')\n for cell_range in list_cell_range:\n color_rgb = cell_range[4]\n pattern_fill = PatternFill('solid', fgColor=color_rgb)\n\n for i in range(cell_range[0], cell_range[2]+1):\n for j in range(cell_range[1], cell_range[3]+1):\n cell = worksheet.cell(row=i, column=j)\n cell.fill = pattern_fill\n\n #rows = worksheet.iter_rows(min_row=cell_range[0], max_row=cell_range[2], max_col=cell_range[3])\n #for row in rows:\n # for cell in row:\n # cell.fill = pattern_fill\n\n\ndef _set_cells_unwrap(worksheet=None, list_cell_range=None):\n if worksheet is None or list_cell_range is None:\n return\n\n for cell_range in list_cell_range:\n row_start = cell_range[0]\n col_start = cell_range[1]\n row_end = cell_range[2]\n col_end = cell_range[3]\n for idx_row in range(row_end - row_start + 1): # including start and end\n for idx_col in range(col_end - col_start + 1):\n worksheet.cell(row=row_start + idx_row, 
column=col_start + idx_col). \\\n alignment = Alignment(wrapText=False)\n\n\ndef _set_format_align(worksheet=None, list_cell_range=None):\n if worksheet is None or list_cell_range is None:\n return\n for cell_range in list_cell_range:\n # cell_ranger: row_start, col_start, row_end, col_end, alignment_label\n row_start = cell_range[0]\n col_start = cell_range[1]\n row_end = cell_range[2]\n col_end = cell_range[3]\n align = cell_range[4]\n for idx_row in range(row_end - row_start + 1): # including start and end\n for idx_col in range(col_end - col_start + 1):\n worksheet.cell(row=row_start + idx_row, column=col_start + idx_col). \\\n alignment = Alignment(horizontal=align, vertical='center')\n\n\ndef _set_format_font_size(worksheet=None, list_cell_range=None):\n if worksheet is None or list_cell_range is None:\n return\n\n for cell_range in list_cell_range:\n font_style = Font(size=cell_range[4])\n for i in range(cell_range[0], cell_range[2]+1):\n for j in range(cell_range[1], cell_range[3]+1):\n worksheet.cell(row=i, column=j).font = font_style\n\n\ndef _set_format_font_bold(worksheet=None, list_cell_range=None):\n if worksheet is None or list_cell_range is None:\n return\n\n for cell_range in list_cell_range:\n # cell_ranger: row_start, col_start, row_end, col_end\n row_start = cell_range[0]\n col_start = cell_range[1]\n row_end = cell_range[2]\n col_end = cell_range[3]\n for idx_row in range(row_end - row_start + 1): # including start and end\n for idx_col in range(col_end - col_start + 1):\n cell = worksheet.cell(row=row_start + idx_row, column=col_start + idx_col)\n # retain font size\n font_size_ori = cell.font.size\n font_style = Font(size=font_size_ori, bold=True)\n worksheet.cell(row=row_start + idx_row, column=col_start + idx_col).font = font_style\n\n\ndef _set_printing_area(worksheet=None, list_cell_range=None, orientation=None, paper_size=None):\n list_range_print = []\n for cell_range in list_cell_range:\n first_row = cell_range[0]\n first_col = cell_range[1]\n last_row = cell_range[2]\n last_col = cell_range[3]\n print_area = f'{get_column_letter(first_col)}{first_row}:{get_column_letter(last_col)}{last_row}'\n list_range_print.append(print_area)\n \n # add the printing area specs\n worksheet.print_area = list_range_print\n\n # scale fit to one page width\n #worksheet.page_setup.fitToWidth = True\n \n p = PrintPageSetup(worksheet=worksheet, fitToWidth=1, scale=30)\n if paper_size:\n p.paperSize = paper_size # 3 = Tabloid\n \n if orientation:\n p.orientation = orientation\n\n p.autoPageBreaks = False\n p.fitToWidth = 1\n p.scale = None\n \n worksheet.page_setup = p\n print('Completed setting priting area.')\n\n\ndef _insert_images(worksheet, list_img):\n for img in list_img:\n obj_img = Image(img[0])\n obj_img.anchor = img[1]\n if len(img) > 2:\n obj_img.width = img[2]\n if len(img) > 3:\n obj_img.height = img[3]\n obj_img.top = 5\n obj_img.left = 5\n worksheet.add_image(obj_img)\n\n\ndef _is_digit(data):\n if type(data) is not str:\n data_new = copy.copy(str(data))\n else:\n data_new = copy.copy(data)\n\n if data_new.startswith('-'):\n data_new = data_new[1:]\n\n is_digit = data_new.replace('.', '', 1).isdigit()\n\n return is_digit","repo_name":"yehinkang/CSA-clearances","sub_path":"commons/report_xlsx_general.py","file_name":"report_xlsx_general.py","file_ext":"py","file_size_in_byte":32376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4963115092","text":"import rabbitpy\nimport logging\nimport datetime\nimport 
uuid\nfrom time import time\n\n\nlogging.basicConfig(level= logging.DEBUG)\n\n\"\"\"\n'amqp://admin:mypass@rabbitmq:5672/%2f'\njames\n\"\"\"\n\nclass Producer:\n\n def __init__(self, rabbit_mq_path, current_queue_name):\n self.rabbit_mq_path = rabbit_mq_path\n self.current_queue_name = current_queue_name\n\n def send_message(self, uuid, consumer_name, question, forward= False, answer= None):\n\n with rabbitpy.Connection(self.rabbit_mq_path) as conn:\n\n # Use the channel as a context manager\n with conn.channel() as channel:\n\n # Create the exchange\n exchange = rabbitpy.Exchange(channel, '%s_exchange' % consumer_name)\n exchange.declare()\n\n # Create the queue\n queue = rabbitpy.Queue(channel, consumer_name)\n queue.declare()\n\n # Bind the queue\n queue.bind(exchange, consumer_name)\n\n\n body = {\n 'uuid' : uuid,\n 'question' : question,\n 'answer' : answer,\n 'forward' : forward,\n 'from' : self.current_queue_name,\n 'to' : consumer_name,\n 'timestamp' : time(),\n }\n\n # Create the msg by passing channel, message and properties (as a dict)\n message = rabbitpy.Message(channel, body)\n\n # Publish the message\n message.publish(exchange, consumer_name)\n logging.info('AFTER message publish: %s' % consumer_name)\n\n return uuid","repo_name":"OscarGibson/docker-messenger-test","sub_path":"james-service/app/message_broker/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11108664880","text":"# from pygments import highlight\n# from pygments.lexers import get_lexer_by_name\n# from pygments.formatters import HtmlFormatter\n# code = 'print \"Hello World\"'\n# lexer = get_lexer_by_name(\"python\", stripall=True)\n# formatter = HtmlFormatter(linenos=True, cssclass=\"source\")\n# result = highlight(code, lexer, formatter)\n#print(result)\nimport markdown\nimport yaml \nimport yamlordereddictloader\nimport sys\nimport re\n\n# -*- coding: utf-8 -*-\n\"\"\"\n The Pygments Markdown Preprocessor\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n This fragment is a Markdown_ preprocessor that renders source code\n to HTML via Pygments. To use it, invoke Markdown like so::\n\n import markdown\n\n html = markdown.markdown(someText, extensions=[CodeBlockExtension()])\n\n This uses CSS classes by default, so use\n ``pygmentize -S -f html > pygments.css``\n to create a stylesheet to be added to the website.\n\n You can then highlight source code in your markdown markup::\n\n [sourcecode:lexer]\n some code\n [/sourcecode]\n\n .. 
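For context, this is roughly how the `Producer` above might be driven from calling code. The broker URL and queue names below are placeholders, not values taken from this repo:

```python
# Hypothetical caller for the Producer class above.
import uuid as uuid_lib

producer = Producer('amqp://guest:guest@localhost:5672/%2f', 'james')
msg_id = producer.send_message(
    uuid=str(uuid_lib.uuid4()),
    consumer_name='oscar',   # exchange/queue the message is published to
    question='ping?',
)
print('published message', msg_id)
```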
_Markdown: https://pypi.python.org/pypi/Markdown\n\n :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\n# Options\n# ~~~~~~~\n\n# Set to True if you want inline CSS styles instead of classes\nINLINESTYLES = False\n\n\nimport re\n\nfrom markdown.preprocessors import Preprocessor\nfrom markdown.extensions import Extension\n\nfrom pygments import highlight\nfrom pygments.formatters import HtmlFormatter\nfrom pygments.lexers import get_lexer_by_name, TextLexer\nimport yaml\n\nclass CodeBlockPreprocessor(Preprocessor):\n\n pattern = re.compile(r'\\[sourcecode:(.+?)\\](.+?)\\[/sourcecode\\]', re.S)\n\n formatter = HtmlFormatter(noclasses=INLINESTYLES)\n\n def run(self, lines):\n def repl(m):\n try:\n lexer = get_lexer_by_name(m.group(1))\n #lexer = get_lexer_by_name(\"python\", stripall=True)\n except ValueError:\n lexer = TextLexer()\n code = highlight(m.group(2), lexer, self.formatter)\n code = code.replace('\\n\\n', '\\n \\n').replace('\\n', '
<br />')\n return '\n\n<div class=\"code\">%s</div>
\\n\\n' % code\n joined_lines = \"\\n\".join(lines)\n joined_lines = self.pattern.sub(repl, joined_lines)\n return joined_lines.split(\"\\n\")\n\nclass CodeBlockExtension(Extension):\n def extendMarkdown(self, md, md_globals):\n #deprecated , use register instead\n md.preprocessors.add('CodeBlockPreprocessor', CodeBlockPreprocessor(), '_begin')\ndef add_line_breaks( string,width=100):\n char_count = 0\n index = 0;\n last_space = 0\n comment_line = False\n py_comment_line = False\n ignore_line = False;\n line_breaker = \"\\n\\t\"\n for char in string:\n if char ==\"<\":\n if string[index+1] == 'a':\n ignore_line = True\n if char =='#':\n py_comment_line = True\n elif char == '/':\n if string[index+1] == '/':\n comment_line = True\n elif char == '\\n':\n char_count = 0 \n comment_line = False\n py_comment_line = False\n ignore_line = False\n elif char == ' ':\n last_space = index\n else:\n char_count +=1\n if char_count >= width:\n if ignore_line:\n string = string\n else:\n string= string[0:last_space] + line_breaker+ comment_line*'//'+py_comment_line*\"#\" + string[last_space:]\n char_count = 0\n index +=1\n return string \n\ndef process(highlight_string):\n \n # someText= ''' \n # [sourcecode:python]\n # def foo(stuff):\n # print(\"hello world\")\n # print(\"hi\")\n # [/sourcecode]'''\n highlight_string = add_line_breaks(width=55, string = highlight_string)\n html = markdown.markdown(highlight_string, extensions=[CodeBlockExtension()])\n print(html)\n return html\ndef writeYaml(data, name=\"public/rendered-sections.yaml\"):\n yaml.dump(\n data,\n open(name, 'w'),\n Dumper=yamlordereddictloader.Dumper,\n default_flow_style=False,explicit_start=True)\n\ndef googleReplace(string):\n\n #import pdb; pdb.set_trace();\n google_string = r'Google Search \\1'\n result = re.sub(r\"google\\((.*)\\)\", google_string, string)\n \n print(\"final\"+ result)\n return result\n\ndef readYaml():\n file = sys.argv[1]\n data = yaml.load(open(file),Loader=yamlordereddictloader.Loader)\n #import pdb;pdb.set_trace()\n #data['languages'][1]['sections'][1].keys()\n languages = data['languages']\n for language in languages:\n if language =='global':\n continue\n sections= language['sections']\n for section in sections:\n for key, value in section.items():\n if type(value) == type([]):\n value = [googleReplace(value[0])]\n else:\n value = googleReplace(value)\n if key ==\"example_reading\":\n pass \n #section[key] = value\n #continue;\n if key == \"instructions\":\n section[key] = value\n continue # no formatting on the instructions, that is all manual\n if key == \"title\":\n continue # no formatting on the title, that is all manual\n if type(value) == type([]):\n section[key] =[process(value[0])]\n else:\n section[key] = process(value)\n writeYaml(data)\n\nreadYaml() \n\n\n\n","repo_name":"fornof/rosetty","sub_path":"Converter.py","file_name":"Converter.py","file_ext":"py","file_size_in_byte":5678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71547358793","text":"import cv2\nimport face_recognition\nimport numpy as np\n\n\nclass PersonLocationDetector:\n def __init__(self):\n super().__init__()\n self.proto_path = 'models/deploy.prototxt'\n self.model_path = 'models/MobileNetSSD_deploy.caffemodel'\n self.net = cv2.dnn.readNetFromCaffe(self.proto_path, self.model_path)\n self.classNames = {15: 'person'}\n\n def predict(self, frame, ssd_thr=0.6):\n frame_resized = cv2.resize(frame, (300, 300))\n blob = cv2.dnn.blobFromImage(frame_resized, 0.007843, 
(300, 300),\n (127.5, 127.5, 127.5), True)\n self.net.setInput(blob)\n # Prediction of network\n detections = self.net.forward()\n # Size of frame resize (300x300)\n rows = frame_resized.shape[0]\n cols = frame_resized.shape[1]\n output = []\n\n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2] # Confidence of prediction\n if confidence > ssd_thr: # Filter prediction\n class_id = int(detections[0, 0, i, 1]) # Class label\n\n # Object location\n xLeftBottom = int(detections[0, 0, i, 3] * cols)\n yLeftBottom = int(detections[0, 0, i, 4] * rows)\n xRightTop = int(detections[0, 0, i, 5] * cols)\n yRightTop = int(detections[0, 0, i, 6] * rows)\n\n # Factor for scale to original size of frame\n heightFactor = frame.shape[0] / 300.0\n widthFactor = frame.shape[1] / 300.0\n # Scale object detection to frame\n xLeftBottom = int(widthFactor * xLeftBottom)\n yLeftBottom = int(heightFactor * yLeftBottom)\n xRightTop = int(widthFactor * xRightTop)\n yRightTop = int(heightFactor * yRightTop)\n if class_id in self.classNames:\n if (xLeftBottom <= 0) or (xRightTop <= 0) or (\n yLeftBottom <= 0) or (yRightTop <= 0): # 負座標\n continue\n output.append([(xLeftBottom, yLeftBottom),\n (xRightTop, yRightTop)])\n return output\n\n\nclass FaceLocationDetector:\n def predict(self, img):\n locations = face_recognition.face_locations(img)\n \"\"\"\n fr 回傳格式:(top, right, bottom, left)\n 故修改成 (start_x, start_y, end_x, end_y)\n \"\"\"\n locations = [[(loc[3], loc[0]), (loc[1], loc[2])] for loc in locations]\n return locations\n\n\nclass SsdFaceLocationDetector:\n def __init__(self):\n super().__init__()\n self.proto_path = 'models/fr_deploy.prototxt.txt'\n self.model_path = 'res10_300x300_ssd_iter_140000.caffemodel'\n self.net = cv2.dnn.readNetFromCaffe(self.proto_path, self.model_path)\n\n def predict(self, img, ssd_thr=0.6):\n face_location_list = list()\n h, w = img.shape[:2]\n blob = cv2.dnn.blobFromImage(\n cv2.resize(img, (300, 300)), 1.0, (300, 300),\n (104.0, 177.0, 123.0))\n self.net.setInput(blob)\n detections = self.net.forward()\n for i in range(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence < ssd_thr:\n continue\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n if (startX <= 0) or (startY <= 0) or (endX <= 0) or (endY <= 0):\n continue\n if (w < startX) or (h < startY) or (w < endX) or (h < endY):\n continue\n face_location_list.append((startX, startY, endX, endY))\n\n return face_location_list\n\n\nclass FaceVectorEncoder():\n def predict(self, face_img):\n h, w, _ = face_img.shape\n return face_recognition.face_encodings(face_img, [(0, w, h, 0)])[0]\n","repo_name":"jason9075/Dataset-Helper","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"72822570631","text":"\"\"\"Baseclass for system fixup.\"\"\"\nfrom abc import ABC, abstractmethod, abstractproperty\nimport logging\nfrom typing import List, Optional\n\nfrom ...coresys import CoreSys, CoreSysAttributes\nfrom ...exceptions import ResolutionFixupError\nfrom ..const import ContextType, IssueType, SuggestionType\nfrom ..data import Issue, Suggestion\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\n\nclass FixupBase(ABC, CoreSysAttributes):\n \"\"\"Baseclass for fixup.\"\"\"\n\n def __init__(self, coresys: CoreSys) -> None:\n \"\"\"Initialize the fixup class.\"\"\"\n 
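A sketch of how the detectors above could be wired to a video source; it assumes the caffe model files referenced in `__init__` are present on disk, and the camera index is arbitrary:

```python
# Hypothetical driver for PersonLocationDetector.
import cv2

detector = PersonLocationDetector()
cap = cv2.VideoCapture(0)
ok, frame = cap.read()
if ok:
    # predict() returns [(top_left, bottom_right), ...] corner pairs
    for top_left, bottom_right in detector.predict(frame, ssd_thr=0.6):
        cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
    cv2.imwrite('annotated.jpg', frame)
cap.release()
```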
self.coresys = coresys\n\n async def __call__(self) -> None:\n \"\"\"Execute the evaluation.\"\"\"\n # Get suggestion to fix\n fixing_suggestion: Optional[Suggestion] = None\n for suggestion in self.sys_resolution.suggestions:\n if suggestion.type != self.suggestion or suggestion.context != self.context:\n continue\n fixing_suggestion = suggestion\n break\n\n # No suggestion\n if fixing_suggestion is None:\n return\n\n # Process fixup\n _LOGGER.debug(\"Run fixup for %s/%s\", self.suggestion, self.context)\n try:\n await self.process_fixup(reference=fixing_suggestion.reference)\n except ResolutionFixupError:\n return\n\n self.sys_resolution.dismiss_suggestion(fixing_suggestion)\n\n # Cleanup issue\n for issue_type in self.issues:\n issue = Issue(issue_type, self.context, fixing_suggestion.reference)\n if issue not in self.sys_resolution.issues:\n continue\n self.sys_resolution.dismiss_issue(issue)\n\n @abstractmethod\n async def process_fixup(self, reference: Optional[str] = None) -> None:\n \"\"\"Run processing of fixup.\"\"\"\n\n @property\n @abstractproperty\n def suggestion(self) -> SuggestionType:\n \"\"\"Return a SuggestionType enum.\"\"\"\n\n @property\n @abstractproperty\n def context(self) -> ContextType:\n \"\"\"Return a ContextType enum.\"\"\"\n\n @property\n def issues(self) -> List[IssueType]:\n \"\"\"Return a IssueType enum list.\"\"\"\n return []\n\n @property\n def auto(self) -> bool:\n \"\"\"Return if a fixup can be apply as auto fix.\"\"\"\n return False\n","repo_name":"OpenPeerPower/supervisor","sub_path":"supervisor/resolution/fixups/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"18533280268","text":"import datetime\nimport os\nimport json\nimport requests\nfrom flask import Flask, render_template, redirect, url_for\nfrom flask_bootstrap import Bootstrap\nfrom flask import Response\nfrom flask import request\n#from forms import SignupForm, noaaDataForm\n#from models import Signups\n#from database import db_session\n#from utils import *\n#from noaaApi import NOAAData\n#from google import *\nimport pandas as pd\nimport numpy as np\nimport dataprocess as csvData\nfrom datetime import datetime\n\n\ndef create_app():\n app = Flask(__name__)\n Bootstrap(app)\n\n return app\n\n# app = Flask(__name__)\napp = create_app()\n\n#fileName = \"api_credentials.json\"\n\n#credentials = loadCreds(fileName)\n#print(credentials)\n\n\napp.secret_key = os.environ['APP_SECRET_KEY']\ngoogle_api_key = os.environ['GOOGLE_API_KEY']\nnoaa_api_key = os.environ['NOAA_API_KEY']\n\n@app.route(\"/\")\ndef index():\n state = request.args.get('state')\n # result = csvData.getStateRating(\"MARYLAND\")\n currentMonth = datetime.now().month\n print(\"CURRENTMONTH: \", currentMonth)\n result = csvData.getPrediction(state, currentMonth)\n data = {\n 'results' : state,\n 'number' : 0\n }\n data['number'] = int(result)\n js = json.dumps(data)\n # js = json.dumps(result)\n resp = Response(js, status=200, mimetype='application/json')\n #resp.headers['Link'] = 'http://placeholder.com'\n return resp\n\n@app.route(\"/events\")\ndef eventLocations():\n state = request.args.get('state')\n result = csvData.getStateEvents(state)\n data = {\n 'location' : state,\n 'results' : result\n }\n # data['number'] = int(result)\n js = json.dumps(data)\n # js = json.dumps(result)\n resp = Response(js, status=200, mimetype='application/json')\n #resp.headers['Link'] = 'http://placeholder.com'\n 
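The routes above assemble responses by hand with `json.dumps` plus `Response`; Flask's standard `jsonify` helper performs the serialization and sets the `application/json` mimetype in one call. A sketch using a hypothetical extra route:

```python
from flask import jsonify

@app.route("/events_v2")  # illustrative route name, not part of the original app
def event_locations_v2():
    state = request.args.get('state')
    result = csvData.getStateEvents(state)
    # jsonify serializes and sets mimetype='application/json' in one step
    return jsonify(location=state, results=result)
```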
return resp\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return (\"Bad reques\")\n#return render_template('404.html'), 404\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5091, debug=True, use_reloader=False)\n","repo_name":"adunn2/Project_GILDI","sub_path":"dataApp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41953127551","text":"from mftma.utils.activation_extractor import extractor\r\nfrom collections import defaultdict\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\ndef project( activation, max_dim=5000):\r\n for layer, data, in activation.items():\r\n X = [d.reshape(d.shape[0], -1).T for d in data]\r\n # Get the number of features in the flattened data\r\n N = X[0].shape[0]\r\n # If N is greater than 5000, do the random projection to 5000 features\r\n if N > max_dim:\r\n print(\"Projecting {}\".format(layer))\r\n M = np.random.randn(max_dim, N)\r\n M /= np.sqrt(np.sum(M * M, axis=1, keepdims=True))\r\n X = [np.matmul(M, d) for d in X]\r\n activation[layer] = X\r\n return activation\r\n\r\nclass mftma_extractor(object):\r\n def __init__(self,model=None, exm_per_class=50, nclass=50, data=None,max_dim=5000):\r\n self.extractor=extractor\r\n self.exm__per_class=exm_per_class\r\n self.nclass=nclass\r\n self.data=data\r\n self.max_dim=max_dim\r\n self.project=project\r\n\r\n # there should be a section for hierarchical data used\r\n\r\ndef make_manifold_data(dataset, sampled_classes, examples_per_class, max_class=None, seed=0,randomize=True):\r\n '''\r\n Samples manifold data for use in later analysis\r\n\r\n Args:\r\n dataset: PyTorch style dataset, or iterable that contains (input, label) pairs\r\n sampled_classes: Number of classes to sample from (must be less than or equal to\r\n the number of classes in dataset)\r\n examples_per_class: Number of examples per class to draw (there should be at least\r\n this many examples per class in the dataset)\r\n max_class (optional): Maximum class to sample from. 
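The `project` helper above reduces flattened activations with a Gaussian random projection whose rows are normalized to unit length. A standalone illustration of the same arithmetic on arbitrary shapes:

```python
# Johnson-Lindenstrauss style random projection, mirroring project() above.
import numpy as np

N, max_dim, n_samples = 8000, 5000, 10
X = np.random.randn(N, n_samples)                   # features x samples
M = np.random.randn(max_dim, N)                     # random directions
M /= np.sqrt(np.sum(M * M, axis=1, keepdims=True))  # unit-norm rows
X_proj = np.matmul(M, X)
print(X_proj.shape)                                 # (5000, 10)
```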
Defaults to sampled_classes if unspecified\r\n seed (optional): Random seed used for drawing samples\r\n randomize= True/False, if false the function starts from the first index and samples sequentially until all groups are filled,\r\n otherwise it will randomly sample from the dataset.\r\n\r\n Returns:\r\n data: Iterable containing manifold input data\r\n '''\r\n if max_class is None:\r\n max_class = sampled_classes\r\n assert sampled_classes <= max_class, 'Not enough classes in the dataset'\r\n assert examples_per_class * max_class <= len(dataset), 'Not enough examples per class in dataset'\r\n\r\n # Set the seed\r\n #np.random.seed(seed)\r\n # Storage for samples\r\n sampled_data = defaultdict(list)\r\n # Sample the labels\r\n sampled_labels = np.sort(np.random.choice(list(range(max_class)), size=sampled_classes, replace=False))\r\n # Shuffle the order to iterate through the dataset\r\n idx = [i for i in range(len(dataset))]\r\n if randomize:\r\n np.random.shuffle(idx)\r\n print(idx[0:10])\r\n # Iterate through the dataset until enough samples are drawn\r\n for i in idx:\r\n sample, label = dataset[i]\r\n label = int(label)\r\n if label in sampled_labels and len(sampled_data[label]) < examples_per_class:\r\n sampled_data[label].append(sample)\r\n # Check if enough samples have been drawn\r\n complete = True\r\n for s in sampled_labels:\r\n if len(sampled_data[s]) < examples_per_class:\r\n complete = False\r\n if complete:\r\n break\r\n # Check that enough samples have been found\r\n assert complete, 'Could not find enough examples for the sampled classes'\r\n # Combine the samples into batches\r\n data = []\r\n for s in sorted(sampled_data.keys()):\r\n print(f'{s}\\t')\r\n data.append(torch.stack(sampled_data[s]))\r\n return data\r\n\r\n","repo_name":"eghbalhosseini/neural_manifolds","sub_path":"utils/extractor_utils.py","file_name":"extractor_utils.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33641363801","text":"# coding: utf-8\n\n\"\"\"\n Netilion API Documentation\n\n Welcome to the Netilion API Documentation, which provides interactive access and documentation to our REST API. Please visit our developer portal for further instructions and information: https://developer.netilion.endress.com/ # noqa: E501\n\n OpenAPI spec version: 01.00.00\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass NestedIDEmail(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'email': 'str'\n }\n\n attribute_map = {\n 'id': 'id',\n 'email': 'email'\n }\n\n def __init__(self, id=None, email=None): # noqa: E501\n \"\"\"NestedIDEmail - a model defined in Swagger\"\"\" # noqa: E501\n self._id = None\n self._email = None\n self.discriminator = None\n if id is not None:\n self.id = id\n if email is not None:\n self.email = email\n\n @property\n def id(self):\n \"\"\"Gets the id of this NestedIDEmail. # noqa: E501\n\n ID of the nested resources # noqa: E501\n\n :return: The id of this NestedIDEmail. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this NestedIDEmail.\n\n ID of the nested resources # noqa: E501\n\n :param id: The id of this NestedIDEmail. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def email(self):\n \"\"\"Gets the email of this NestedIDEmail. # noqa: E501\n\n email to the nested resource # noqa: E501\n\n :return: The email of this NestedIDEmail. # noqa: E501\n :rtype: str\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, email):\n \"\"\"Sets the email of this NestedIDEmail.\n\n email to the nested resource # noqa: E501\n\n :param email: The email of this NestedIDEmail. # noqa: E501\n :type: str\n \"\"\"\n\n self._email = email\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(NestedIDEmail, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, NestedIDEmail):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"endresshauser-lp/netilion-api-py","sub_path":"src/netilion_api/models/nested_id_email.py","file_name":"nested_id_email.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"33215087660","text":"import numpy as np\n\n\ndef custom_min_sup(A: (np.ndarray, np.generic), prev_min: float, visited_nodes: list):\n \"\"\"\n :param A: list L of shortest path current states\n :param prev_min: previous_minimum registered\n :param visited_nodes: list of visited nodes\n :return: int: index of vertex u with minimal A(u) not in visited_nodes\n \"\"\"\n greater = []\n for i in range(len(A)):\n if i not in visited_nodes:\n if A[i] == prev_min: # if index no registered in visited nodes and equal to previous minimum\n return i\n if A[i] > prev_min:\n greater.append((A[i], i)) # append (value, index)\n if greater:\n greater.sort() # sort by value\n return greater[0][1] # return the minimum and first in list\n return -1\n\n\ndef Dijkstra(W):\n \"\"\"\n Return the shortest path between all the vertices by running the dijkstra algorithm several times\n :param W: is the matrix of edges costs\n :return: L matrix of all shortest path between each node\n \"\"\"\n # check if list is already an numpy array, if not it convert it\n if not isinstance(W, (np.ndarray, np.generic)):\n W = np.array(W)\n\n W_shape_x, W_shape_y = W.shape # get shape in var for optimisation (not call it in loop for example)\n # check if matrix is square with a shape of n x n\n if W_shape_x != W_shape_y:\n raise 
Exception('Matrix of costs must be a n x n square matrix')\n\n # check if matrix is symmetrical\n if not np.allclose(W, W.T):\n raise Exception('Matrix must be symmetrical')\n\n # Start of Dijkstra algorithm\n L = np.full(W.shape, float('inf')) # setup matrix L of all shortest path as infinite and same shape as W\n for i in range(W_shape_x):\n L[i][i] = 0 # setup all shortest path between a node and itself to 0\n\n for elem in L: # repeat dijkstra for each line of L matrix\n S = [] # list of visited node as empty list\n u = np.where(np.amin(elem) == elem)[0][0] # first minimum in the line of the L matrix equivalent to L(a) := 0\n len_elem = len(elem) # micro optimisation to not call len(elem) in loop\n while len_elem != len(S):\n if S:\n u = custom_min_sup(elem, elem[u], S) # take a vertex u with minimal L(u)\n S.append(u) # append vertex to visited node list\n for i in range(len_elem): # for vertex not in S\n if i not in S:\n if elem[u] + W[u, i] < elem[i]: # if L(u) + w(u,v) < L(v)\n elem[i] = elem[u] + W[u, i] # L(v) := L(u) + w(u,v)\n return L\n","repo_name":"RainMaker1707/LINFO1114_Groupe17","sub_path":"scripts/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"73642119753","text":"\"\"\" test_preprocessing\n\n\"\"\"\n#Standard Library\nimport unittest, os\n\n#Third Party\nimport numpy as np\nfrom numpy.testing import *\n\n#First Party\nfrom preprocessing import Preprocessing\n\n#parse_training_data, create_datasets, \\\n# clean_up_sentence, bow, additional_features\n\nclass Preprocessing_Test_Cases(unittest.TestCase):\n def test_parse_training_data(self):\n data_path = os.path.join(\"data/\", \"data_intents.json\")\n words, classes, documents = Preprocessing.parse_training_data(data_path)\n\n # Types\n self.assertEqual(list, type(words), \"Incorrect type of words\")\n self.assertEqual(list, type(classes), \"Incorrect type of words\")\n self.assertEqual(list, type(documents), \"Incorrect type of documents\")\n\n for word in words:\n self.assertEqual(str, type(word), \"Incorrect type of word in words\")\n for class_ in classes:\n self.assertEqual(str, type(class_),\n \"Incorrect type of class in classes\")\n for doc in words:\n self.assertEqual(str, type(doc),\n \"Incorrect type of doc in documents\")\n\n # Number of vals\n self.assertEqual(105, len(words), \"Incorrect num of words\")\n self.assertEqual(9, len(classes), \"Incorrect num of classes\")\n self.assertEqual(127, len(documents), \"Incorrect num of documents\")\n\n\n def test_create_datasets(self):\n \"\"\"\n tests for traing data creation?\n \"\"\"\n\n data_path = os.path.join(\"data/\", \"data_intents.json\")\n words, classes, documents = Preprocessing.parse_training_data(data_path)\n (X_train, y_train), (X_test, y_test) = Preprocessing.create_datasets(words, classes,\n documents)\n\n # Types\n self.assertEqual(np.ndarray, type(X_train), \"Incorrect type of X_train\")\n self.assertEqual(np.ndarray, type(y_train), \"Incorrect type of y_train\")\n self.assertEqual(np.ndarray, type(X_test), \"Incorrect type of X_test\")\n self.assertEqual(np.ndarray, type(y_test), \"Incorrect type of y_test\")\n\n # Size\n self.assertEqual(114, len(X_train), \"Incorrect len of X_train\")\n self.assertEqual(114, len(y_train), \"Incorrect len of y_train\")\n self.assertEqual(13, len(X_test), \"Incorrect len of X_test\")\n self.assertEqual(13, len(X_test), \"Incorrect len of y_test\")\n\n # Values\n for 
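A quick worked run of the `Dijkstra` routine above on a small symmetric cost matrix (values chosen only for illustration):

```python
import numpy as np

W = np.array([[0., 1., 4.],
              [1., 0., 2.],
              [4., 2., 0.]])
L = Dijkstra(W)
print(L)
# The direct 0->2 edge (cost 4) loses to 0->1->2 (1 + 2 = 3):
# [[0. 1. 3.]
#  [1. 0. 2.]
#  [3. 2. 0.]]
```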
label_vec in y_train:\n self.assertEqual(set([0,1]), set(label_vec),\n \"False val in train label vector\")\n for label_vec in y_test:\n self.assertEqual(set([0,1]), set(label_vec),\n \"False val in test label vector\")\n\n def test_create_additional_features(self):\n \"\"\"\n make sure computer calculated values match hand calculated values\n \"\"\"\n\n data_path = os.path.join(\"data/\", \"data_intents.json\")\n words, classes, documents = Preprocessing.parse_training_data(data_path)\n\n #Create Features\n featuresDF = Preprocessing.additional_features(words, classes, documents)\n\n #Size\n self.assertEqual((127, 12), featuresDF.shape, \"Incorrect shape of FeaturesDF\")\n \n #Word Count of the documents \n #assert\n #Character Count of the documents \n #Average Word Density of the documents\n #Puncutation Count in the Complete Essay\n #Upper Case Count in the Complete Essay \n #Title Word Count in the Complete Essay \n #Frequency distribution of Part of Speech Tags:(Noun Count, Verb Count, Adjective Count, Adverb Count, Pronoun Count)\n\n def test_clean_up_sentence(self):\n mock_sentence = \"This is a mock sentence\"\n w_list = Preprocessing.clean_up_sentence(mock_sentence)\n\n self.assertEqual(3, len(w_list),\n \"Incorrect number of words in word list\")\n\n\n def test_bow(self):\n mock_sentence = \"Bye, bye\"\n mock_words = [\"bye\", \"donkey\"]\n bow_list = Preprocessing.bow(mock_sentence, mock_words)\n\n assert_array_equal(bow_list, np.array([2, 0]),\n \"False BoW implementation\")\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Homefly/OND3WO_Challenge","sub_path":"test/test_preprocessing.py","file_name":"test_preprocessing.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"29930656751","text":"# https://yeqiuquan.blogspot.com/2017/03/lintcode-597-subtree-with-maximum.html\n#\n# Description\n# Given a binary tree, find the subtree with maximum average. 
Return the root of the subtree.\n#\n# Notice\n# LintCode will print the subtree which root is your return node.\n# It's guaranteed that there is only one subtree with maximum average.\n#\n#\n# Example\n# Given a binary tree:\n#\n# 1\n# / \\\n# -5 11\n# / \\ / \\\n# 1 2 4 -2\n#\n# return the node 11.\n#\n# Approach\n# Problems of this kind can all be solved the same way:\n# keep a ResultType variable `result` that stores the info of the node with the maximum average.\n# Then traverse the whole tree with divide and conquer:\n# one recursive call computes the average of the left subtree, another that of the right subtree; from those two we compute the average of the current tree and decide whether to update `result`.\n# Once the whole tree has been traversed, `result` holds the info of the subtree with the maximum average.\n\n\n# ..maximum average subtree, where the tree is an N-ary tree.\n# For every node, compute the mean value over the node and all of its descendants, and return the node with the largest mean.\nimport sys\n\n\n# Definition for a Node.\nclass Node:\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\nclass Solution:\n def mas(self, root):\n if root is None:\n return None\n\n self.max_average = -sys.maxsize - 1\n self.node = None\n self.find_max_average(root)\n\n return self.node\n\n def find_max_average(self, root):\n if len(root.children) == 0:\n return 1, root.val\n\n count, sum = 0, 0\n for child in root.children:\n c_count, c_sum = self.find_max_average(child)\n count += c_count\n sum += c_sum\n\n if sum / count > self.max_average:\n self.max_average = sum / count\n self.node = root\n\n # return (count, sum) in the same order as the leaf case above,\n # matching the c_count, c_sum unpacking in the recursive call\n return count, sum\n\n\n\n\nbb = Solution()\n## case 1:\n# leaf1 = [Node(3, []), Node(4, [])]\n# leaf2 = [Node(6, []), Node(7, [])]\n# leaf3 = [Node(1, []), Node(10, [])]\n# c1 = Node(1, leaf1)\n# c2 = Node(2, leaf2)\n# c3 = Node(3, leaf3)\n# root = Node(4, [c1, c2, c3])\n\n\n## case2\nroot = Node(4,[])\nresult = bb.mas(root)\n\nprint(result.val)\n","repo_name":"jinwei15/java-PythonSyntax-Leetcode","sub_path":"LeetCode/src/0SubtreewithMaximumAverage.py","file_name":"0SubtreewithMaximumAverage.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40926803786","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\nfrom matplotlib.ticker import AutoMinorLocator, FixedLocator, MultipleLocator\nimport pandas as pd\n\ndef map_common(ax1,gl_loc=[True,True,False,True],gl_lon_info=range(-180,180,60),gl_dlat=30):\n\n # ax1.coastlines(color='silver',linewidth=1.)\n ax1.coastlines(resolution='10m', color='darkgray',linewidth=1.)\n ax1.stock_img()\n\n gl = ax1.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=0.6, color='gray', alpha=0.5, linestyle='--')\n\n gl.ylabels_left = gl_loc[0]\n gl.ylabels_right = gl_loc[1]\n 
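A side note on the gridline flags being set here: newer cartopy releases renamed the `ylabels_left`/`xlabels_top`-style attributes to `left_labels`/`top_labels`. A version-tolerant setter, assuming only these four flags matter:

```python
def set_gridline_labels(gl, left, right, top, bottom):
    # cartopy >= 0.18 uses left_labels/right_labels/top_labels/bottom_labels
    if hasattr(gl, 'left_labels'):
        gl.left_labels, gl.right_labels = left, right
        gl.top_labels, gl.bottom_labels = top, bottom
    else:  # older attribute names, as used in map_common above
        gl.ylabels_left, gl.ylabels_right = left, right
        gl.xlabels_top, gl.xlabels_bottom = top, bottom
```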
gl.xlabels_top = gl_loc[2]\n gl.xlabels_bottom = gl_loc[3]\n\n gl.xlocator = FixedLocator(gl_lon_info)\n gl.ylocator = MultipleLocator(gl_dlat)\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n gl.xlabel_style = {'size': 11, 'color': 'k'}\n gl.ylabel_style = {'size': 11, 'color': 'k'}\n\n \ngps_data=pd.read_csv('/Users/jung-ok/work1/ANA11/processed/DaDiS/GPS_1min.csv', index_col=[0], parse_dates=True) \n\n\n# latdata=gps_data['Latitude']\n# londata=gps_data['Longitude']\n\n\n\n\n# lon_boundary=np.arange(-240,-60,1.)\n# lat_boundary=np.arange(15,75,1.)\n# data=np.ones([lat_boundary.shape[0]-1,lon_boundary.shape[0]-1]) ## Data dimension is 1 less than boundaries\n# data=data*lat_boundary[:-1,None]\n\nlon_offset=-135 ##\nlatdata=gps_data['Latitude']\nlondata=gps_data['Longitude']\n\n\nfig=plt.figure()\n# fig.set_size_inches(7.5,5) ## (xsize, ysize)\nfig.set_size_inches(10,8) ## (xsize, ysize)\nax1=fig.add_subplot(111,projection=ccrs.PlateCarree(central_longitude=lon_offset))\nax1.set_extent([-250,-20,-80,50],crs=ccrs.PlateCarree())\n# ax1.plot(lon2d, lat2d, ls='-', c='orange')\n\nplt.plot(londata, latdata, ls='', marker='o', ms=0.5, c='orange', transform=ccrs.PlateCarree())\n\n\n\n# ax1.set_title('Lon_Offset=-90')\n# map_common(ax1,gl_lon_info=[-180,-120,-60,120,],gl_dlat=15)\n# map_common(ax1,gl_lon_info=[-180,-120,-50,-60, 110, 120,],gl_dlat=15)\nmap_common(ax1,gl_lon_info=[-180,-120, -60, 120,],gl_dlat=30)\n\n\n\n\nfnout='./exer05.png'\nplt.savefig(fnout,bbox_inches='tight',dpi=150)\nplt.show()\n","repo_name":"payton1004/ARA12B","sub_path":"code/exer05.py","file_name":"exer05.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73682035272","text":"presents = []\nwith open('2.in') as f:\n for x in f.read().splitlines():\n presents.append([int(y) for y in x.split('x')])\n\npaper_sqft = 0\nribbon_ft = 0\nfor p in presents:\n s = sorted(p)\n paper_sqft += 2*p[0]*p[1] + 2*p[1]*p[2] + 2*p[0]*p[2] + s[0]*s[1]\n ribbon_ft += p[0]*p[1]*p[2] + s[0]+s[0]+s[1]+s[1]\nprint(paper_sqft)\nprint(ribbon_ft)\n","repo_name":"ajdranse/adventOfCode","sub_path":"2015/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"74117139591","text":"import csv\nimport os\nfrom .link_scraper import search_category_links_for, search_modality_links_for, search_composed_modality_links_for\nfrom functions import get_driver\n\n\ndef search_and_save_game_links_for(game_name, game_url):\n category_links = {}\n modality_links = {}\n composed_modality_links = {}\n\n identified_composed = {}\n\n driver = get_driver()\n\n try:\n print('Coletando as URLs (Aguarde alguns segundos)')\n search_category_links_for(game_name, game_url, driver, category_links)\n search_modality_links_for(category_links, driver, modality_links, identified_composed)\n search_composed_modality_links_for(identified_composed, driver, composed_modality_links)\n\n all_links = {**modality_links, **composed_modality_links}\n\n destination_folder = f'game_links/{game_name}'\n\n if not os.path.exists(destination_folder):\n os.makedirs(destination_folder)\n\n with open(f'{destination_folder}/links.csv', 'w') as w:\n for key in all_links:\n w.write(f'{key},{all_links[key]}\\n')\n finally:\n print('Fechando browser')\n 
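The wrapping-paper and ribbon formulas in the puzzle solution above can be sanity-checked against the classic 2x3x4 example box (58 square feet of paper, 34 feet of ribbon):

```python
p = [2, 3, 4]
s = sorted(p)
# paper: surface area plus slack equal to the smallest face (2*3 = 6)
assert 2*p[0]*p[1] + 2*p[1]*p[2] + 2*p[0]*p[2] + s[0]*s[1] == 58
# ribbon: volume for the bow plus the smallest perimeter (2+2+3+3 = 10)
assert p[0]*p[1]*p[2] + s[0]+s[0]+s[1]+s[1] == 34
```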
driver.quit()\n\n\n","repo_name":"DellGarcia/speedrun-scraper","sub_path":"functions/search_and_save_game_links.py","file_name":"search_and_save_game_links.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"4681337678","text":"class Solution:\n # @param {integer} x\n # @return {integer}\n def mySqrt(self, x):\n if x == 0:\n return 0\n k = 1.0\n while abs(k*k-x) >= 1:\n k = (k+x/k)/2\n return int(k)","repo_name":"gzc/leetcode","sub_path":"python/061-070/Sqrt(x).py","file_name":"Sqrt(x).py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"27"} +{"seq_id":"28900642306","text":"import random\n\nwords = ['cadena', 'capataz', 'piedra', 'cargador', 'pinza', 'arbol', 'auditorio', 'balon', 'mochila', 'trampa', 'lapiz', 'cadera', 'cabeza', 'audifonos', 'cubo', 'mueble', 'computadora', 'carro', 'camara', 'pantalla', 'hoja']\n\nguesses_cap = 5\n\nprint('''\n\nWELCOME TO THE GAME \"WHAT'S THE WORD?\"\n\nCAN YOU GUESS THE WORD BEFORE YOU RUN OUT OF LIVES?\n\n| |\nv This is the word list, can you guess the right one? v\n\n cadena | capataz | piedra \n cargador | pinza | arbol \n auditorio | balon | mochila \n trampa | lapiz | cadera \n cabeza | audifonos | cubo\n mueble | computadora | carro \n camara | pantalla | hoja\n\n''')\n\ndef main(guesses_cap, words):\n # random.choice avoids the IndexError that words[random.randint(0, len(words))] could raise\n random_word = random.choice(words)\n while guesses_cap > 0:\n user = input('Guess the word: ').lower()\n if user == random_word:\n print('YOU GUESSED THE WORD!')\n break # stop asking once the word has been found\n else:\n guesses_cap -= 1\n print('Oops! That word is not correct.')\n if guesses_cap == 0:\n print('Game over! :(')\n print('You have {} lives left.'.format(guesses_cap))\n\n\nif __name__ == '__main__':\n main(guesses_cap, words)","repo_name":"Daniel-Ortiz1210/whats_the_word","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"1381245593","text":"'''\tAuthor: Luciano Mejia\n\tDate: 9/4/14\n\tAssignment #2 Part 1\n\tCalculates the population of the U.S. 
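Tracing the Newton iteration in `mySqrt` above for x = 10 shows how quickly it settles: k goes 1.0 -> 5.5 -> ~3.66 -> ~3.20, at which point |k*k - x| < 1 and the loop stops:

```python
x, k = 10, 1.0
while abs(k*k - x) >= 1:
    k = (k + x/k) / 2   # Newton step for f(k) = k*k - x
print(int(k))           # 3, matching int(10 ** 0.5)
```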
in one year.\n'''\n\n# Variables\ncur_pop = 318892103\nsec_in_year = 31536000 # ((60*60)*24)*365\n\nnum_of_births = 31536000/8 # 3,942,000 births/year\nnum_of_deaths = 31536000/13 # About 2425846 deaths/year\nnum_of_immigrants = 31536000/40 # 788,400 immigrants/year\n\n# Calculating the projected population: First the population increase (births/immigrants) is added to the current population, then the number of deaths is subtracted from that sum\nprojected_pop = (cur_pop + num_of_births + num_of_immigrants) - num_of_deaths # Uses the previously found number of births/deaths/immigrants\n\nprint (\"The population will be %.f\" % projected_pop) # Population of 321,196,657","repo_name":"shybuffalo/CS-1300","sub_path":"Python/Homeworks/Assignment #2/Part 1/Census.py","file_name":"Census.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5150484539","text":"#!/usr/bin/python3\n\nimport requests\n\nfrom requests.auth import HTTPBasicAuth\n# this is for supplying http basic authentication \ncred=HTTPBasicAuth('root','cisco')\n\nh={'Accept':'application/json'}\n#headers={'Accept':'text/html'}\n# defining data from that api in JSON format \nurl=\"http://172.16.6.131/level/15/exec/-/sh/ip/int/br/CR\"\n\n# Now connection to restconf -OR -- http protocol \noutput=requests.get(url,headers=h,auth=cred)\nprint(output)\n# only giving HTTP response code \nprint(output.text)\n# giving HTML based response \n","repo_name":"redashu/codes","sub_path":"http_get_ios.py","file_name":"http_get_ios.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23601312794","text":"plik = open('Dane_2205/liczby.txt').readlines()\n\nilosc = 0\npierwsza = -1\nfor wiersz in plik:\n wiersz = wiersz.strip()\n if wiersz[0] == wiersz[len(wiersz) -1]:\n if pierwsza == -1:\n pierwsza = wiersz\n ilosc += 1\n\nprint('Zadanie 4.1')\nprint(ilosc, pierwsza)\n","repo_name":"Chr1skyy/Matura-Informatyka","sub_path":"2022_05/41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"pl","doc_type":"code","stars":17,"dataset":"github-code","pt":"27"} +{"seq_id":"19769816598","text":"import serial, time\n\nser = serial.Serial()\nser.port = \"COM4\"\nser.baudrate = 115200 \nser.bytesize = serial.EIGHTBITS #number of bits per bytes\nser.parity = serial.PARITY_NONE #set parity check\nser.stopbits = serial.STOPBITS_ONE #number of stop bits\nser.timeout = 0.5 #non-block read 0.5s\n\ntry:\n ser.open()\nexcept Exception as ex:\n print (\"open serial port error \" + str(ex))\n exit()\n \n''' \nser.write(b\"<>\\n\") #tell server we are ready to recieve\nreadline = lambda : iter(lambda:ser.read(1),\"\\n\")\nwith open(\"somefile.txt\",\"wb\") as outfile:\n while True:\n line = \"\".join(readline())\n if line == \"<>\":\n break #done so stop accumulating lines\n\n'''\npath = 'outputfile.txt'\nf = open(path,'w')\n\nwhile True: \n response = \"\".join(ser.readline().decode('utf-8'))\n if response != '':\n if response == \"<>\":\n print(response)\n break\n f.write(response)\n \nf.close()\nser.close()","repo_name":"alvinooimy/calibration-software_function","sub_path":"receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20479043246","text":"#!/usr/bin/python3\n\nimport zmq # ZeroMQ\nimport 
time\nimport datetime\nimport sys\nimport logging\nimport csv\nimport os\nimport glob\nimport pandas as pd\nsys.path.append(\"..\")\nimport config\nfrom DataProvider.lib.lsm6ds33 import LSM6DS33\nfrom DataProvider.lib.lis3mdl import LIS3MDL\nfrom DataProvider.lib.MedianFilter import MedianFilter\n\ndef setupLog():\n if not os.path.isdir(config.LOG_FOLDER):\n os.makedirs(config.LOG_FOLDER) \n\n now = datetime.datetime.now()\n logging.basicConfig(\n filename=config.LOG_FILE_PREFIX \n + now.strftime(\"%d%m%Y_%H%M%S%f\"))\n\ndef setupPub(pubAddr: str) -> zmq.Socket:\n context = zmq.Context()\n publisher = context.socket(zmq.PUB)\n publisher.bind(pubAddr)\n\n return publisher\n\ndef pubData(publisher: zmq.Socket, topic: str):\n # Create IMU objects\n accGyro = LSM6DS33()\n accGyro.enableLSM()\n mag = LIS3MDL()\n mag.enableLIS()\n\n if config.USE_MEDIAN_FILTER:\n # Median Filters\n axF = MedianFilter(config.MF_WINDOW_SIZE)\n ayF = MedianFilter(config.MF_WINDOW_SIZE)\n azF = MedianFilter(config.MF_WINDOW_SIZE)\n gxF = MedianFilter(config.MF_WINDOW_SIZE)\n gyF = MedianFilter(config.MF_WINDOW_SIZE)\n gzF = MedianFilter(config.MF_WINDOW_SIZE)\n mxF = MedianFilter(config.MF_WINDOW_SIZE)\n myF = MedianFilter(config.MF_WINDOW_SIZE)\n mzF = MedianFilter(config.MF_WINDOW_SIZE)\n\n while True:\n try:\n # Read IMU values\n ax, ay, az = accGyro.getAccelerometerRaw()\n gx, gy, gz = accGyro.getGyroscopeRaw()\n mx, my, mz = mag.getMagnetometerRaw()\n\n if config.USE_MEDIAN_FILTER:\n # Go through median filters\n ax = int(axF.filt(ax))\n ay = int(ayF.filt(ay))\n az = int(azF.filt(az))\n gx = int(gxF.filt(gx))\n gy = int(gyF.filt(gy))\n gz = int(gzF.filt(gz))\n mx = int(mxF.filt(mx))\n my = int(myF.filt(my))\n mz = int(mzF.filt(mz))\n\n # Publish onto topic\n publisher.send_string(\"%s %i %i %i %i %i %i %i %i %i\" % (topic, ax, ay, az, gx, gy ,gz, mx, my, mz))\n print(\"'%s': %i %i %i %i %i %i %i %i %i\" % (topic, ax, ay, az, gx, gy ,gz, mx, my, mz))\n\n time.sleep(0.020) # 50hz\n except KeyboardInterrupt:\n break\n\ndef pubMock(publisher: zmq.Socket, topic: str, filePath: str):\n #set working directory\n os.chdir(config.MOCK_DATA_FOLDER)\n \n #find all csv files in the folder\n all_filenames = []\n dir_files = os.listdir()\n for f in config.MOCK_DATA_PATHS:\n if f in dir_files:\n all_filenames.append(f)\n print(all_filenames)\n\n #combine all files in the list\n ## Note: Xavier the dataset files all have header so will need to account for that. 
I removed the header in the\n ## test data in mock_data to test first\n combined_csv = pd.concat([pd.read_csv(f, header=None) for f in all_filenames ])\n #export to csv\n if os.path.isfile(\"combined.csv\"):\n os.remove(\"combined.csv\")\n combined_csv.to_csv(\"combined.csv\", index=False, encoding='utf-8-sig', header=None)\n stream = open(\"combined.csv\", newline='')\n csvFile = csv.reader(stream, delimiter=',')\n\n if config.USE_MEDIAN_FILTER:\n # Median Filters\n axF = MedianFilter(config.MF_WINDOW_SIZE)\n ayF = MedianFilter(config.MF_WINDOW_SIZE)\n azF = MedianFilter(config.MF_WINDOW_SIZE)\n gxF = MedianFilter(config.MF_WINDOW_SIZE)\n gyF = MedianFilter(config.MF_WINDOW_SIZE)\n gzF = MedianFilter(config.MF_WINDOW_SIZE)\n mxF = MedianFilter(config.MF_WINDOW_SIZE)\n myF = MedianFilter(config.MF_WINDOW_SIZE)\n mzF = MedianFilter(config.MF_WINDOW_SIZE)\n\n while True:\n try:\n # Read IMU values\n r = next(csvFile)\n ax = float(r[1])\n ay = float(r[2])\n az = float(r[3])\n gx = float(r[4])\n gy = float(r[5])\n gz = float(r[6])\n mx = float(r[7])\n my = float(r[8])\n mz = float(r[9])\n\n if config.USE_MEDIAN_FILTER:\n # Go through median filters\n ax = int(axF.filt(ax))\n ay = int(ayF.filt(ay))\n az = int(azF.filt(az))\n gx = int(gxF.filt(gx))\n gy = int(gyF.filt(gy))\n gz = int(gzF.filt(gz))\n mx = int(mxF.filt(mx))\n my = int(myF.filt(my))\n mz = int(mzF.filt(mz))\n\n # Publish onto topic\n publisher.send_string(\"%s %i %i %i %i %i %i %i %i %i\" % (topic, ax, ay, az, gx, gy ,gz, mx, my, mz))\n print(\"'%s': %i %i %i %i %i %i %i %i %i\" % (topic, ax, ay, az, gx, gy ,gz, mx, my, mz))\n\n time.sleep(0.020) # 50hz\n except (KeyboardInterrupt, StopIteration) as e:\n break\n\n # Clean up\n os.remove(\"combined.csv\")\n\nif __name__ == \"__main__\":\n setupLog()\n publisher = setupPub(config.DATA_SOCK)\n if config.USE_MOCK_DATA:\n logging.info(\"Using MOCK data\")\n pubMock(publisher, config.LOCAL_IMU_TOPIC, config.MOCK_DATA_PATHS)\n else:\n logging.info(\"Using REAL data\")\n pubData(publisher, config.LOCAL_IMU_TOPIC)\n\n # Clean up\n context = zmq.Context.instance()\n context.destroy()\n","repo_name":"lekoook/Fog","sub_path":"DataProvider/DataPublisher.py","file_name":"DataPublisher.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13369913858","text":"import py5\nfrom math import cos, pi, sin, sqrt, radians\nfrom grammar import grammar\nfrom collections import namedtuple\n\n\"\"\"\nmpeano.py by Martin Prout\nLSystem rules from The Euclidean Traveling Salesman Problem.... 
by MG Norman & P Moscato.\nFeatures a scaling adjustment and turtle reversing; uses trigonometry rather than Processing affine\ntransforms to calculate the line, and uses a grammar module to create the production string.\n\"\"\"\n\nw = 600\nh = 600\n\nLSystem = namedtuple('MPeano', 'start, rules, turn_angle_deg')\n\nmpeano = LSystem(\n    start = 'XFF2-AFF2-XFF2-AFF',\n    rules = dict(\n        F = '',\n        Y = 'FFY',\n        X = '+!X!FF-BQFI-!X!FF+',\n        A = 'BQFI',\n        B = 'AFF'\n    ),\n    turn_angle_deg = 45\n    )\n\ndef settings():\n    py5.size(w, h)\n\ndef setup():\n    sketch_title('MPeano')\n\n    production = grammar.generate(mpeano, 6)\n    py5.background(0, 0, 255)\n    py5.stroke(255, 255, 0)\n    py5.stroke_weight(3)\n    render(production)\n\ndef render(production):\n    \"\"\"\n    Render evaluates the production string and calls draw_line\n    \"\"\"\n    delta = radians(mpeano.turn_angle_deg)\n    distance = 15\n    turtle = {'x': w / 10, 'y': h / 10, 'angle': -delta}\n    repeat = 1\n    for val in production:\n        if val == \"F\":\n            turtle = draw_line(turtle, distance)\n        elif val == \"+\":\n            turtle['angle'] += delta * repeat\n            repeat = 1\n        elif val == \"-\":\n            turtle['angle'] -= delta * repeat\n            repeat = 1\n        elif val == \"I\":\n            distance *= 1/sqrt(2)\n        elif val == \"Q\":\n            distance *= sqrt(2)\n        elif val == \"!\":\n            delta = -delta\n        elif val == '2':\n            repeat = 2\n        else:\n            pass\n\ndef draw_line(turtle, length):\n    \"\"\"\n    Draw line utility uses the Processing 'line' function to draw lines\n    \"\"\"\n    turtlecopy = turtle.copy()\n    turtlecopy['x'] = turtle['x'] + length * cos(turtle['angle'])\n    turtlecopy['y'] = turtle['y'] - length * sin(turtle['angle'])\n    py5.line(turtle['x'], turtle['y'], turtlecopy['x'], turtlecopy['y'])\n    return turtlecopy\n\ndef sketch_title(title):\n    py5.get_surface().set_title(title)\n\npy5.run_sketch()\n","repo_name":"monkstone/py5-examples","sub_path":"module_mode/lsystems/mpeano.py","file_name":"mpeano.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"20600251874","text":"import os.path\r\nimport sqlite3\r\nimport six\r\nfrom six.moves import urllib_parse\r\nimport shutil\r\nfrom kodi_six import xbmc, xbmcaddon, xbmcplugin, xbmcgui, xbmcvfs\r\nimport os\r\nimport sys\r\n\r\n__scriptname__ = \"Cumination\"\r\n__author__ = \"Cumination\"\r\n__scriptid__ = \"plugin.video.cumination\"\r\n\r\naddon_handle = int(sys.argv[1])\r\naddon_sys = sys.argv[0]\r\naddon = xbmcaddon.Addon()\r\nTRANSLATEPATH = xbmcvfs.translatePath if six.PY3 else xbmc.translatePath\r\n\r\nrootDir = addon.getAddonInfo('path')\r\nif rootDir[-1] == ';':\r\n    rootDir = rootDir[0:-1]\r\nrootDir = TRANSLATEPATH(rootDir)\r\nresDir = os.path.join(rootDir, 'resources')\r\nimgDir = os.path.join(resDir, 'images')\r\naboutDir = os.path.join(resDir, 'about')\r\nprofileDir = addon.getAddonInfo('profile')\r\nprofileDir = TRANSLATEPATH(profileDir)\r\ncookiePath = os.path.join(profileDir, 'cookies.lwp')\r\nif addon.getSetting('custom_favorites') == 'true':\r\n    fav_path = addon.getSetting('favorites_path')\r\n    if fav_path == '':\r\n        fav_path = profileDir\r\n    favoritesdb = os.path.join(fav_path, 'favorites.db')\r\nelse:\r\n    favoritesdb = os.path.join(profileDir, 'favorites.db')\r\ncustomSitesDir = os.path.join(profileDir, 'custom_sites')\r\ntempDir = os.path.join(profileDir, 'temp')\r\n\r\ncuminationicon = TRANSLATEPATH(os.path.join(rootDir, 'icon.png'))\r\nchangelog = TRANSLATEPATH(os.path.join(rootDir, 'changelog.txt'))\r\n\r\nif not os.path.exists(profileDir):\r\n    
os.makedirs(profileDir)\r\n\r\nif not os.path.exists(customSitesDir):\r\n os.makedirs(customSitesDir)\r\n\r\nif not os.path.exists(tempDir):\r\n os.makedirs(tempDir)\r\n\r\nKODIVER = float(xbmcaddon.Addon('xbmc.addon').getAddonInfo('version')[:4])\r\n\r\n\r\ndef cum_image(filename, custom=False):\r\n if filename.startswith('http'):\r\n return filename\r\n else:\r\n img = os.path.join(customSitesDir if custom else imgDir, filename)\r\n return img\r\n\r\n\r\ndef eod(handle=addon_handle, cache=True):\r\n if addon.getSetting('customview') == 'true':\r\n skin = xbmc.getSkinDir().lower()\r\n viewtype = 55 if 'estuary' in skin else 50\r\n setview = addon.getSetting('setview')\r\n if ';' in setview:\r\n currentskin, viewno = setview.split(';')\r\n if currentskin == skin:\r\n viewtype = viewno\r\n xbmc.executebuiltin(\"Container.SetViewMode(%s)\" % str(viewtype))\r\n xbmcplugin.endOfDirectory(handle, cacheToDisc=cache)\r\n\r\n\r\ndef addImgLink(name, url, mode):\r\n u = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&mode=\" + str(mode)\r\n + \"&name=\" + urllib_parse.quote_plus(name))\r\n liz = xbmcgui.ListItem(name)\r\n if KODIVER < 19.8:\r\n liz.setInfo(type='pictures', infoLabels={'title': name})\r\n liz.setArt({'thumb': url, 'icon': url, 'poster': url})\r\n ok = xbmcplugin.addDirectoryItem(handle=addon_handle, url=u, listitem=liz, isFolder=False)\r\n return ok\r\n\r\n\r\ndef addDownLink(name, url, mode, iconimage, desc='', stream=None, fav='add', noDownload=False, contextm=None, fanart=None, duration='', quality=''):\r\n contextMenuItems = []\r\n favtext = \"Remove from\" if fav == 'del' else \"Add to\" # fav == 'add' or 'del'\r\n dname = desc == name\r\n u = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&mode=\" + str(mode)\r\n + \"&name=\" + urllib_parse.quote_plus(name))\r\n dwnld = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&mode=\" + str(mode)\r\n + \"&download=\" + str(1)\r\n + \"&name=\" + urllib_parse.quote_plus(name))\r\n favorite = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&fav=\" + fav\r\n + \"&favmode=\" + str(mode)\r\n + \"&mode=\" + str('favorites.Favorites')\r\n + \"&img=\" + urllib_parse.quote_plus(iconimage)\r\n + \"&name=\" + urllib_parse.quote_plus(name)\r\n + \"&duration=\" + duration\r\n + \"&quality=\" + quality)\r\n ok = True\r\n if not iconimage:\r\n iconimage = cuminationicon\r\n if duration:\r\n if addon.getSetting('duration_in_name') == 'true':\r\n duration = \" [COLOR deeppink]\" + duration + \"[/COLOR]\"\r\n name = name + duration if six.PY3 else (name.decode('utf-8') + duration).encode('utf-8')\r\n else:\r\n secs = None\r\n try:\r\n duration = duration.upper().replace('H', ':').replace('M', ':').replace('S', '').replace('EC', '').replace(' ', '').replace('IN', '0').replace('::', ':').strip()\r\n if ':' in duration:\r\n if duration.endswith(':'):\r\n duration += '0'\r\n secs = sum(int(x) * 60 ** i for i, x in enumerate(reversed(duration.split(':'))))\r\n elif duration.isdigit():\r\n secs = int(duration)\r\n if secs is None and len(duration) > 0:\r\n xbmc.log(\"@@@@Cumination: Duration format error: \" + str(duration), xbmc.LOGERROR)\r\n except:\r\n xbmc.log(\"@@@@Cumination: Duration format error: \" + str(duration), xbmc.LOGERROR)\r\n width = None\r\n if quality:\r\n if addon.getSetting('quality_in_name') == 'true':\r\n quality = \" [COLOR orange]\" + quality + \"[/COLOR]\"\r\n name = name + quality if six.PY3 else (name.decode('utf-8') + quality).encode('utf-8')\r\n 
else:\r\n width, height = get_resolution(quality)\r\n if dname:\r\n desc = name\r\n liz = xbmcgui.ListItem(name)\r\n if KODIVER > 19.8:\r\n vtag = liz.getVideoInfoTag()\r\n vtag.setTitle(name)\r\n if duration and addon.getSetting('duration_in_name') != 'true':\r\n vtag.setDuration(secs)\r\n if desc:\r\n vtag.setPlot(desc)\r\n vtag.setPlotOutline(desc)\r\n if width:\r\n vtag.addVideoStream(xbmc.VideoStreamDetail(width=width, height=height, codec='h264'))\r\n else:\r\n vtag.addVideoStream(xbmc.VideoStreamDetail(codec='h264'))\r\n else:\r\n if duration and addon.getSetting('duration_in_name') != 'true':\r\n liz.setInfo(type=\"Video\", infoLabels={\"Duration\": secs})\r\n if desc:\r\n liz.setInfo(type=\"Video\", infoLabels={\"Title\": name, \"plot\": desc, \"plotoutline\": desc})\r\n else:\r\n liz.setInfo(type=\"Video\", infoLabels={\"Title\": name})\r\n if width:\r\n video_streaminfo = {'codec': 'h264', 'width': width, 'height': height}\r\n else:\r\n video_streaminfo = {'codec': 'h264'}\r\n liz.addStreamInfo('video', video_streaminfo)\r\n\r\n liz.setArt({'thumb': iconimage, 'icon': \"DefaultVideo.png\", 'poster': iconimage})\r\n if not fanart:\r\n fanart = os.path.join(rootDir, 'fanart.jpg')\r\n if addon.getSetting('posterfanart') == 'true':\r\n fanart = iconimage\r\n liz.setArt({'fanart': fanart})\r\n if stream:\r\n liz.setProperty('IsPlayable', 'true')\r\n\r\n if contextm:\r\n if isinstance(contextm, list):\r\n for i in contextm:\r\n if isinstance(i, tuple):\r\n contextMenuItems.append(i)\r\n else:\r\n if isinstance(contextm, tuple):\r\n contextMenuItems.append(contextm)\r\n favorder = addon.getSetting(\"favorder\") or 'date added'\r\n if fav == 'del' and favorder == 'date added':\r\n favorite_move_to_top = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&fav=\" + 'move_to_top'\r\n + \"&favmode=\" + str(mode)\r\n + \"&mode=\" + str('favorites.Favorites')\r\n + \"&img=\" + urllib_parse.quote_plus(iconimage)\r\n + \"&name=\" + urllib_parse.quote_plus(name)\r\n + \"&duration=\" + urllib_parse.quote_plus(duration)\r\n + \"&quality=\" + urllib_parse.quote_plus(quality))\r\n contextMenuItems.append(('[COLOR hotpink]Move favorite to Top[/COLOR]', 'RunPlugin(' + favorite_move_to_top + ')'))\r\n favorite_move_up = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&fav=\" + 'move_up'\r\n + \"&favmode=\" + str(mode)\r\n + \"&mode=\" + str('favorites.Favorites')\r\n + \"&img=\" + urllib_parse.quote_plus(iconimage)\r\n + \"&name=\" + urllib_parse.quote_plus(name)\r\n + \"&duration=\" + urllib_parse.quote_plus(duration)\r\n + \"&quality=\" + urllib_parse.quote_plus(quality))\r\n contextMenuItems.append(('[COLOR hotpink]Move favorite Up[/COLOR]', 'RunPlugin(' + favorite_move_up + ')'))\r\n favorite_move_down = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&fav=\" + 'move_down'\r\n + \"&favmode=\" + str(mode)\r\n + \"&mode=\" + str('favorites.Favorites')\r\n + \"&img=\" + urllib_parse.quote_plus(iconimage)\r\n + \"&name=\" + urllib_parse.quote_plus(name)\r\n + \"&duration=\" + urllib_parse.quote_plus(duration)\r\n + \"&quality=\" + urllib_parse.quote_plus(quality))\r\n contextMenuItems.append(('[COLOR hotpink]Move favorite Down[/COLOR]', 'RunPlugin(' + favorite_move_down + ')'))\r\n favorite_move_to_bottom = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&fav=\" + 'move_to_bottom'\r\n + \"&favmode=\" + str(mode)\r\n + \"&mode=\" + str('favorites.Favorites')\r\n + \"&img=\" + urllib_parse.quote_plus(iconimage)\r\n + \"&name=\" 
+ urllib_parse.quote_plus(name)\r\n                + \"&duration=\" + urllib_parse.quote_plus(duration)\r\n                + \"&quality=\" + urllib_parse.quote_plus(quality))\r\n        contextMenuItems.append(('[COLOR hotpink]Move favorite to Bottom[/COLOR]', 'RunPlugin(' + favorite_move_to_bottom + ')'))\r\n    contextMenuItems.append(('[COLOR hotpink]' + favtext + ' favorites[/COLOR]', 'RunPlugin(' + favorite + ')'))\r\n    if not noDownload:\r\n        contextMenuItems.append(('[COLOR hotpink]Download Video[/COLOR]', 'RunPlugin(' + dwnld + ')'))\r\n    settings_url = (sys.argv[0]\r\n                    + \"?mode=\" + str('utils.openSettings'))\r\n    contextMenuItems.append(\r\n        ('[COLOR hotpink]Addon settings[/COLOR]', 'RunPlugin(' + settings_url + ')'))\r\n    setview = (sys.argv[0]\r\n               + \"?mode=\" + str('utils.setview'))\r\n    contextMenuItems.append(\r\n        ('[COLOR hotpink]Set this view as default[/COLOR]', 'RunPlugin(' + setview + ')'))\r\n    liz.addContextMenuItems(contextMenuItems, replaceItems=False)\r\n    ok = xbmcplugin.addDirectoryItem(handle=addon_handle, url=u, listitem=liz, isFolder=False)\r\n    return ok\r\n\r\n\r\ndef get_resolution(quality):\r\n    resolution = (None, None)\r\n    try:\r\n        quality = str(quality).upper()\r\n\r\n        if quality.endswith('P'):\r\n            quality = quality[:-1]\r\n        if quality.isdigit():\r\n            resolution = (int(quality) * 16 // 9, int(quality))\r\n        resolutions = {'SD': (640, 480), 'FULLHD': (1920, 1080), 'FHD': (1920, 1080), '2K': (2560, 1440), '4K': (3840, 2160), 'UHD': (3840, 2160), 'HD': (1280, 720), '8K': (7680, 4320)}\r\n        for x in resolutions.keys():\r\n            if x in quality:\r\n                quality = x\r\n                break\r\n\r\n        if quality in resolutions.keys():\r\n            resolution = resolutions[quality]\r\n        if len(quality) > 0 and resolution == (None, None):\r\n            xbmc.log(\"@@@@Cumination: Quality format error: \" + str(quality), xbmc.LOGERROR)\r\n    except:\r\n        xbmc.log(\"@@@@Cumination: Quality format error: \" + str(quality), xbmc.LOGERROR)\r\n    return resolution\r\n\r\n\r\ndef addDir(name, url, mode, iconimage=None, page=None, channel=None, section=None, keyword='', Folder=True, about=None,\r\n           custom=False, list_avail=True, listitem_id=None, custom_list=False, contextm=None, desc=''):\r\n    u = (sys.argv[0]\r\n         + \"?url=\" + urllib_parse.quote_plus(url)\r\n         + \"&mode=\" + str(mode)\r\n         + \"&page=\" + str(page)\r\n         + \"&channel=\" + str(channel)\r\n         + \"&section=\" + str(section)\r\n         + \"&keyword=\" + urllib_parse.quote_plus(keyword)\r\n         + \"&name=\" + urllib_parse.quote_plus(name))\r\n    ok = True\r\n    if not iconimage:\r\n        iconimage = cuminationicon\r\n    liz = xbmcgui.ListItem(name)\r\n    fanart = os.path.join(rootDir, 'fanart.jpg')\r\n    art = {'thumb': iconimage, 'icon': \"DefaultFolder.png\", 'fanart': fanart}\r\n    if addon.getSetting('posterfanart') == 'true':\r\n        fanart = iconimage\r\n        art.update({'poster': iconimage})\r\n    liz.setArt(art)\r\n    if KODIVER > 19.8:\r\n        vtag = liz.getVideoInfoTag()\r\n        vtag.setTitle(name)\r\n        if desc:\r\n            vtag.setPlot(desc)\r\n            vtag.setPlotOutline(desc)\r\n    else:\r\n        liz.setInfo(type=\"Video\", infoLabels={\"Title\": name})\r\n        if desc:\r\n            liz.setInfo(type=\"Video\", infoLabels={\"Title\": name, \"plot\": desc, \"plotoutline\": desc})\r\n    contextMenuItems = []\r\n    if contextm:\r\n        if isinstance(contextm, list):\r\n            for i in contextm:\r\n                if isinstance(i, tuple):\r\n                    contextMenuItems.append(i)\r\n        else:\r\n            if isinstance(contextm, tuple):\r\n                contextMenuItems.append(contextm)\r\n    if about:\r\n        about_url = (sys.argv[0]\r\n                     + \"?mode=\" + str('main.about_site')\r\n                     + \"&img=\" + urllib_parse.quote_plus(iconimage)\r\n                     + \"&name=\" + 
urllib_parse.quote_plus(name)\r\n + \"&about=\" + str(about)\r\n + \"&custom=\" + str(custom))\r\n contextMenuItems.append(\r\n ('[COLOR hotpink]About site[/COLOR]', 'RunPlugin(' + about_url + ')'))\r\n if len(keyword) >= 1:\r\n keyw = (sys.argv[0]\r\n + \"?mode=\" + str('utils.delKeyword')\r\n + \"&keyword=\" + urllib_parse.quote_plus(keyword))\r\n keywedit = (sys.argv[0]\r\n + \"?mode=\" + str('utils.newSearch')\r\n + \"&keyword=\" + urllib_parse.quote_plus(keyword))\r\n contextMenuItems.append(('[COLOR hotpink]Remove keyword[/COLOR]', 'RunPlugin(' + keyw + ')'))\r\n contextMenuItems.append(('[COLOR hotpink]Edit keyword[/COLOR]', 'RunPlugin(' + keywedit + ')'))\r\n if list_avail:\r\n list_item_name = 'Add item to ...'\r\n list_url = (sys.argv[0]\r\n + \"?url=\" + urllib_parse.quote_plus(url)\r\n + \"&favmode=\" + str(mode)\r\n + \"&mode=\" + str('favorites.add_listitem')\r\n + \"&img=\" + urllib_parse.quote_plus(iconimage)\r\n + \"&name=\" + urllib_parse.quote_plus(name))\r\n contextMenuItems.append(('[COLOR hotpink]%s[/COLOR]' % list_item_name, 'RunPlugin(' + list_url + ')'))\r\n if listitem_id:\r\n move_listitem_url = (sys.argv[0]\r\n + \"?mode=\" + str('favorites.move_listitem')\r\n + \"&listitem_id=\" + str(listitem_id))\r\n contextMenuItems.append(('[COLOR hotpink]Move item to ...[/COLOR]', 'RunPlugin(' + move_listitem_url + ')'))\r\n listitem_url = (sys.argv[0]\r\n + \"?mode=\" + str('favorites.remove_listitem')\r\n + \"&listitem_id=\" + str(listitem_id))\r\n contextMenuItems.append(('[COLOR hotpink]Remove from list[/COLOR]', 'RunPlugin(' + listitem_url + ')'))\r\n moveupitem_url = (sys.argv[0]\r\n + \"?mode=\" + str('favorites.moveup_listitem')\r\n + \"&listitem_id=\" + str(listitem_id))\r\n contextMenuItems.append(('[COLOR hotpink]Move item Up[/COLOR]', 'RunPlugin(' + moveupitem_url + ')'))\r\n movedownitem_url = (sys.argv[0]\r\n + \"?mode=\" + str('favorites.movedown_listitem')\r\n + \"&listitem_id=\" + str(listitem_id))\r\n contextMenuItems.append(('[COLOR hotpink]Move item Down[/COLOR]', 'RunPlugin(' + movedownitem_url + ')'))\r\n\r\n if custom_list:\r\n editlist_url = (sys.argv[0]\r\n + \"?mode=\" + str('favorites.edit_list')\r\n + \"&rowid=\" + str(url))\r\n contextMenuItems.append(('[COLOR hotpink]Edit name[/COLOR]', 'RunPlugin(' + editlist_url + ')'))\r\n dellist_url = (sys.argv[0]\r\n + \"?mode=\" + str('favorites.remove_list')\r\n + \"&rowid=\" + str(url))\r\n contextMenuItems.append(('[COLOR hotpink]Remove list[/COLOR]', 'RunPlugin(' + dellist_url + ')'))\r\n moveuplist_url = (sys.argv[0]\r\n + \"?mode=\" + str('favorites.moveup_list')\r\n + \"&rowid=\" + str(url))\r\n contextMenuItems.append(('[COLOR hotpink]Move list Up[/COLOR]', 'RunPlugin(' + moveuplist_url + ')'))\r\n movedownlist_url = (sys.argv[0]\r\n + \"?mode=\" + str('favorites.movedown_list')\r\n + \"&rowid=\" + str(url))\r\n contextMenuItems.append(('[COLOR hotpink]Move list Down[/COLOR]', 'RunPlugin(' + movedownlist_url + ')'))\r\n\r\n settings_url = (sys.argv[0]\r\n + \"?mode=\" + str('utils.openSettings'))\r\n contextMenuItems.append(\r\n ('[COLOR hotpink]Addon settings[/COLOR]', 'RunPlugin(' + settings_url + ')'))\r\n setview = (sys.argv[0]\r\n + \"?mode=\" + str('utils.setview'))\r\n contextMenuItems.append(\r\n ('[COLOR hotpink]Set this view as default[/COLOR]', 'RunPlugin(' + setview + ')'))\r\n liz.addContextMenuItems(contextMenuItems, replaceItems=False)\r\n ok = xbmcplugin.addDirectoryItem(handle=addon_handle, url=u, listitem=liz, isFolder=Folder)\r\n return ok\r\n\r\n\r\ndef searchDir(url, mode, 
page=None, alphabet=None):\r\n    if not alphabet:\r\n        addDir('[COLOR hotpink]One time search[/COLOR]', url, 'utils.oneSearch', cum_image('cum-search.png'), page=page, channel=mode, Folder=False)\r\n        addDir('[COLOR hotpink]Add Keyword[/COLOR]', url, 'utils.newSearch', cum_image('cum-search.png'), '', mode, Folder=False)\r\n        addDir('[COLOR hotpink]Alphabetical[/COLOR]', url, 'utils.alphabeticalSearch', cum_image('cum-search.png'), '', mode)\r\n        if addon.getSetting('keywords_sorted') == 'true':\r\n            addDir('[COLOR hotpink]Unsorted Keywords[/COLOR]', url, 'utils.setUnsorted', cum_image('cum-search.png'), '', mode, Folder=False)\r\n        else:\r\n            addDir('[COLOR hotpink]Sorted Keywords[/COLOR]', url, 'utils.setSorted', cum_image('cum-search.png'), '', mode, Folder=False)\r\n    conn = sqlite3.connect(favoritesdb)\r\n    c = conn.cursor()\r\n\r\n    try:\r\n        if alphabet:\r\n            c.execute(\"SELECT * FROM keywords WHERE keyword LIKE ? ORDER BY keyword ASC\", (alphabet.lower() + '%', ))\r\n        else:\r\n            if addon.getSetting('keywords_sorted') == 'true':\r\n                c.execute(\"SELECT * FROM keywords ORDER BY keyword\")\r\n            else:\r\n                c.execute(\"SELECT * FROM keywords ORDER BY rowid DESC\")\r\n        for (keyword,) in c.fetchall():\r\n            keyword = keyword if six.PY3 else keyword.encode('utf8')\r\n            keyword = urllib_parse.unquote_plus(keyword)\r\n            name = '[COLOR deeppink]' + keyword + '[/COLOR]'\r\n            addDir(name, url, mode, cum_image('cum-search.png'), page=page, keyword=keyword)\r\n    except:\r\n        pass\r\n    conn.close()\r\n    eod()\r\n\r\n\r\ndef keys():\r\n    ret = {}\r\n    conn = sqlite3.connect(favoritesdb)\r\n    c = conn.cursor()\r\n    try:\r\n        c.execute(\"\"\"SELECT substr(upper(keyword),1,1) AS letter, count(keyword) AS count FROM keywords\r\n                     GROUP BY substr(upper(keyword),1,1)\r\n                     ORDER BY keyword\"\"\")\r\n        for (letter, count) in c.fetchall():\r\n            ret[letter] = count\r\n    except:\r\n        pass\r\n    conn.close()\r\n    return ret\r\n\r\n\r\ndef clean_temp():\r\n    shutil.rmtree(tempDir)\r\n    os.makedirs(tempDir)\r\n","repo_name":"dobbelina/repository.dobbelina","sub_path":"plugin.video.cumination/resources/lib/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":20372,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"27"} +{"seq_id":"33378335789","text":"\"\"\" Tool for implementing a simple cache\n\nTool returns 0 if the cache is valid, returns non-zero if the cache is invalid.\n\ncheck_cache.py || ( && update_cache.py )\n\"\"\"\nimport argparse\nimport hashlib\nimport pathlib\nimport os.path\nimport sys\n\n\ndef main():\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument('cache_input')\n    parser.add_argument('cache_value')\n    parser.add_argument('cache_outputs', nargs='+')\n\n    args = parser.parse_args()\n\n    if not os.path.exists(args.cache_input):\n        sys.exit(-1)\n\n    if not os.path.exists(args.cache_value):\n        sys.exit(-1)\n\n    for out in args.cache_outputs:\n        if not os.path.exists(out):\n            sys.exit(-1)\n\n    with open(args.cache_input, 'rb') as f:\n        m = hashlib.sha1()\n        m.update(f.read())\n        h = m.hexdigest()\n\n    with open(args.cache_value) as f:\n        if f.read().strip() != h:\n            sys.exit(-1)\n\n    # Update file timestamps\n    pathlib.Path(args.cache_value).touch(exist_ok=True)\n    for out in args.cache_outputs:\n        pathlib.Path(out).touch(exist_ok=True)\n\n\n
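# Exit codes (see the module docstring): 0 = cache valid, non-zero = cache invalid,\n# so the shell pattern above runs the update step only when this check fails.\nif __name__ == \"__main__\":\n    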
main()\n","repo_name":"f4pga/f4pga-arch-defs","sub_path":"utils/check_cache.py","file_name":"check_cache.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":228,"dataset":"github-code","pt":"27"} +{"seq_id":"467065315","text":"from pyspark.sql import SparkSession\nimport logging\nimport logging.config\n\n# Load logging configuration file\nlogging.config.fileConfig(fname='/opt/airflow/dags/agg_fhv_monthly/python/util/logging_to_file.conf')\nlogger = logging.getLogger(__name__)\n\ndef get_spark_object(envn, appName):\n    try:\n        logger.info(f'The env: {envn} is used')\n        if envn == 'TEST':\n            master = 'local'\n        else:\n            master = 'yarn'\n\n        spark = SparkSession \\\n            .builder \\\n            .master(master) \\\n            .appName(appName) \\\n            .getOrCreate()\n        # to enable Hive support, add .config(\"spark.sql.catalogImplementation\", \"hive\") before .getOrCreate()\n\n    except NameError as exp:\n        logger.error(\"NameError in the method - get_spark_object(). Please check the Stack Trace. \" + str(exp), exc_info=True)\n        raise\n    except Exception as exp:\n        logger.error(\"Error in the method - get_spark_object(). Please check the Stack Trace. \" + str(exp), exc_info=True)\n    else:\n        logger.info(\"Spark object is created\")\n\n    return spark","repo_name":"fernaldy77/tlc_pipeline_analytics","sub_path":"dags/agg_fhv_monthly/python/util/create_objects.py","file_name":"create_objects.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12027499030","text":"# Simplified Timsort: insertion-sort runs of length <= k, then merge them recursively\ndef tim_sort(arr, k):\n    def insertion_sort(arr, start, end):\n        for i in range(start + 1, end + 1):\n            key = arr[i]\n            j = i - 1\n            while j >= start and arr[j] > key:\n                arr[j + 1] = arr[j]\n                j -= 1\n            arr[j + 1] = key\n\n    def merge(arr, start, mid, end):\n        if arr[mid] <= arr[mid + 1]:\n            return  # the two halves are already in order, nothing to merge\n\n        i = start\n        j = mid + 1\n        aux = arr[start:end+1]\n\n        for idx in range(start, end + 1):  # 'idx' avoids shadowing the run-length parameter k\n            if i > mid:\n                arr[idx] = aux[j-start]\n                j += 1\n            elif j > end:\n                arr[idx] = aux[i-start]\n                i += 1\n            elif aux[i-start] <= aux[j-start]:\n                arr[idx] = aux[i-start]\n                i += 1\n            else:\n                arr[idx] = aux[j-start]\n                j += 1\n\n    def tim_sort_util(arr, start, end, k):\n        if end - start + 1 <= k:\n            insertion_sort(arr, start, end)\n            return\n\n        mid = start + (end - start) // 2\n        tim_sort_util(arr, start, mid, k)\n        tim_sort_util(arr, mid + 1, end, k)\n        merge(arr, start, mid, end)\n\n    tim_sort_util(arr, 0, len(arr) - 1, k)\n","repo_name":"prot95/cse830","sub_path":"timsort.py","file_name":"timsort.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20698950849","text":"import random\r\nfrom time import sleep\r\nimport praw\r\nimport json\r\nimport sys\r\n\r\n# follow the big pig around and curse at him\r\n\r\nfrom praw.models import *\r\n\r\ndef main(argv):\r\n    with open(\"src/main/resource/clients.json\", \"r\", encoding = \"UTF-8\") as f:\r\n        data = json.load(f)\r\n    reddit = praw.Reddit(**(data[\"bots\"][\"iws2000\"]))\r\n    replies = data[\"replies\"][\"iws2000\"]\r\n    replied = set()\r\n    while True:\r\n        bigpig = reddit.redditor(\"HalleluYahmygod\")\r\n        \r\n        for submission in bigpig.submissions.new(limit=10):\r\n            if submission.id not in replied:\r\n                rply = submission.reply(random.choice(replies))\r\n                replied.add(submission.id)\r\n                print(rply.body)\r\n        \r\n        for comment in bigpig.comments.new(limit=10):\r\n            if comment.id not in replied:\r\n                rply = comment.reply(random.choice(replies))\r\n                replied.add(comment.id)\r\n                print(rply.body)\r\n        
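# pause 10 seconds between polling rounds\r\n        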
sleep(10)\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv)","repo_name":"vulzrov/moderationBots","sub_path":"src/main/py/missile.py","file_name":"missile.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73043807112","text":"\"\"\"\nTo launch all the tasks, create tmux sessions and run across tasks, i.e.\n\npython csi.py --task two_moons\npython csi.py --task gaussian_mixture\n\"\"\"\n\nimport time\nimport torch\nimport sbibm\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport seaborn as sns\nimport seaborn_image as isns\nimport pickle\nimport os\n\nfrom scipy.ndimage.measurements import label\nfrom sklearn.cluster import KMeans\nfrom scipy import spatial\nimport itertools\nimport argparse\n\nplt.rcParams['mathtext.fontset'] = 'stix'\nplt.rcParams['font.family'] = 'STIXGeneral'\n\nSMALL_SIZE = 12\nMEDIUM_SIZE = 14\nBIGGER_SIZE = 18\n\nplt.rc('font', size=BIGGER_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=BIGGER_SIZE) # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\nproj_dim = 4\n\nclass CSI:\n def __init__(self, prior, simulator, encoder, N, k, mins=None, maxs=None, desired_coverage = 0.95):\n self.prior = prior\n self.simulator = simulator\n self.encoder = encoder\n self.N = N\n self.k = k\n self.conformal_quantile = self.get_conformal_quantile(self.prior, self.simulator, self.encoder, desired_coverage=desired_coverage)\n self.trajectories = None\n self.test_samples = None\n\n self.mins = mins\n self.maxs = maxs\n if self.mins is None:\n self.mins = self.prior.support.base_constraint.lower_bound.cpu().numpy()\n if self.maxs is None:\n self.maxs = self.prior.support.base_constraint.upper_bound.cpu().numpy()\n self.mins = self.mins[:proj_dim]\n self.maxs = self.maxs[:proj_dim]\n\n def get_conformal_quantile(self, prior, simulator, encoder, desired_coverage):\n sims = 10_000\n calibration_theta = prior.sample((sims,))\n calibration_x = simulator(calibration_theta)\n calibration_theta = calibration_theta[:,:proj_dim]\n\n # cal_scores = 1 / encoder.log_prob(calibration_theta.to(device), calibration_x.to(device)).detach().cpu().exp().numpy()\n\n theta_cal_hat = encoder.sample(self.k, calibration_x).detach().cpu().numpy()\n theta_cal_tiled = np.transpose(np.tile(calibration_theta.detach().cpu().numpy(), (self.k, 1, 1)), (1, 0, 2))\n theta_cal_diff = theta_cal_hat - theta_cal_tiled\n theta_cal_norms = np.linalg.norm(theta_cal_diff, axis=-1)\n theta_cal_scores = np.min(theta_cal_norms, axis=-1)\n\n return np.quantile(theta_cal_scores, q = desired_coverage)\n \n def gen_test_samples(self, text_x):\n # not really a fan of this, but this caches the generated samples, since we wish to have a fixed region\n self.test_samples = self.encoder.sample(self.k, text_x).detach().cpu().numpy()[0]\n\n def _get_grid(self, K = 200):\n # K -> discretization of the grid (assumed same for each dimension)\n d = len(self.mins) # dimensionality of theta\n ranges = [np.arange(self.mins[i], self.maxs[i], (self.maxs[i] - self.mins[i]) / K) for i in range(d)]\n return np.array(np.meshgrid(*ranges)).T.astype(np.float32)\n\n 
def _get_conformal_region(self, test_x, thetas):\n thetas_flat = thetas.reshape(-1, thetas.shape[-1])\n theta_tiled = np.transpose(np.tile(thetas_flat, (self.k, 1, 1)), (1, 0, 2))\n theta_diff = self.test_samples - theta_tiled\n theta_norm = np.linalg.norm(theta_diff, axis=-1)\n theta_score = np.min(theta_norm, axis=-1)\n\n # test_x_tiled = np.tile(test_x, (thetas_flat.shape[0], 1)).astype(np.float32)\n # probs = self.encoder.log_prob(thetas_flat, test_x_tiled).detach().cpu().exp().numpy()\n \n flat_region = (theta_score < self.conformal_quantile).astype(int)\n return flat_region.reshape(thetas.shape[:-1])\n\n def _get_connected_components(self, region):\n active_region = np.where(region == 1)\n active_locs = set(zip(*active_region))\n\n components = []\n while len(active_locs) > 0:\n component = set()\n to_branch = {active_locs.pop()}\n while len(to_branch) > 0:\n current_loc = to_branch.pop()\n component.add(current_loc)\n current_loc_arr = np.array(current_loc) # necessary to change component values\n for dim in range(len(current_loc_arr)):\n for displacement in [-1, 1]:\n displacement_loc = np.zeros(len(current_loc_arr)).astype(int)\n displacement_loc[dim] = displacement\n candidate_loc = tuple(current_loc_arr + displacement_loc)\n if candidate_loc in active_locs:\n active_locs.remove(candidate_loc)\n to_branch.add(candidate_loc)\n components.append(np.array(list(component)))\n return components\n\n def _get_rps_cc(self, regions_samples):\n total_covered = np.sum([len(region_samples) for region_samples in regions_samples])\n total_rps = 0\n rps = []\n\n for region_idx, region_samples in enumerate(regions_samples):\n component_prop = len(region_samples) / total_covered\n if region_idx == len(regions_samples) - 1:\n n = self.N - total_rps\n else:\n n = int(np.round(component_prop * self.N))\n total_rps += n\n\n # TODO: should we ensure each connected component is > 1 in the \"exact answer\"? 
feels arbitrary but maybe desireable?\n if n > 0:\n kmeans = KMeans(n_clusters=n, random_state=0, n_init=\"auto\").fit(region_samples)\n rps.append(kmeans.cluster_centers_)\n return np.vstack(rps)\n\n def get_exact_rps(self, test_x):\n K = 60\n theta_grid = self._get_grid(K=K)\n region = self._get_conformal_region(test_x, theta_grid)\n self.explicit_region = theta_grid[np.where(region == 1)] # cache explicit region\n\n connected_components = self._get_connected_components(region)\n region_samples = [theta_grid[tuple(connected_component.T)] for connected_component in connected_components]\n return self._get_rps_cc(region_samples)\n\n def _get_diffused_trajs(self, test_x, T):\n y_hat = self.encoder.sample(self.N, test_x)[0].detach().cpu().numpy()\n test_x_tiled = np.tile(test_x, (y_hat.shape[0], 1)).astype(np.float32)\n trajectories = []\n eta = 0.01\n\n for _ in range(T):\n proposed_y_hat = y_hat.copy() + eta * np.random.randn(y_hat.shape[0], y_hat.shape[1]).astype(np.float32)\n proposed_probs = self.encoder.log_prob(proposed_y_hat, test_x_tiled).detach().cpu().exp().numpy()\n\n in_region = (1 / proposed_probs) < self.conformal_quantile\n y_hat[in_region] = proposed_y_hat[in_region]\n trajectories.append(y_hat.copy())\n return np.array(trajectories)\n\n def get_approx_rps_diffuse(self, test_x, T=5_000, cache_trajs=False):\n \"\"\"\n NOTE: this form of uniform sampling is now deprecated in favor of Muller's sampling\n\n T : time steps of repulsive simulation\n cache_trajs: HACK -- this arg doesn't really make any sense in real use cases, but it caches the trajectories\n between calls of this function -- it should ONLY be used for repeated calls to this for visualization\n \"\"\"\n if cache_trajs and self.trajectories is not None:\n trajectories = self.trajectories[:T]\n else:\n trajectories = self._get_diffused_trajs(test_x, T)\n self.trajectories = trajectories\n\n remaining_traj_idxs = set(range(self.N))\n final_trajs = []\n ns = []\n dist_thresh = 0.01\n while len(remaining_traj_idxs) > 0:\n root_traj_idx = remaining_traj_idxs.pop()\n connected_trajs = [trajectories[:,root_traj_idx,:]]\n connected_traj_idxs = set()\n \n tree = spatial.KDTree(trajectories[:,root_traj_idx,:])\n for traj_idx in remaining_traj_idxs:\n min_dists, _ = tree.query(trajectories[:,traj_idx,:])\n closest_encounter = min(min_dists)\n if closest_encounter < dist_thresh:\n connected_trajs.append(trajectories[:,traj_idx,:])\n connected_traj_idxs.add(traj_idx)\n \n for traj_idx in connected_traj_idxs:\n if traj_idx in remaining_traj_idxs:\n remaining_traj_idxs.remove(traj_idx)\n ns.append(len(connected_trajs))\n final_trajs.append(np.vstack(connected_trajs))\n\n rps = []\n for traj_idx, final_traj in enumerate(final_trajs):\n kmeans = KMeans(n_clusters=ns[traj_idx], random_state=0, n_init=\"auto\").fit(final_traj)\n rps.append(kmeans.cluster_centers_)\n return np.vstack(rps)\n \n def _mullers_sample_from_ball(self, center, r, N, d):\n u = np.random.normal(0, 1, (N, d))\n norm = np.linalg.norm(u, axis=1)\n radius = np.random.uniform(0, 1, N) ** (1/d)\n \n u = r * radius.reshape(-1, 1) * u / norm.reshape(-1, 1)\n u = u + center\n return u\n\n def get_approx_rps(self, N):\n edges = []\n while len(edges) == 0:\n d = self.test_samples.shape[-1]\n \n samples = np.zeros((self.k, N, d))\n voronoi_assignments = np.zeros((self.k, N))\n\n # fraction of samples in each ball in the associated voronoi cell\n samples = np.apply_along_axis(self._mullers_sample_from_ball, axis=1, arr=self.test_samples, r=self.conformal_quantile, N=N, 
d=d)\n samples = samples.reshape(self.k, N, 1, -1)\n dist = np.linalg.norm(samples - self.test_samples, axis=-1)\n voronoi_assignments = np.argmin(dist, axis=-1).flatten()\n sampled_assignments = np.array([[k] * N for k in range(self.k)]).flatten()\n \n flat_samples = samples.reshape(-1, *samples.shape[-2:])\n samples = np.vstack(flat_samples[np.where(voronoi_assignments == sampled_assignments),0,:])\n \n # voronoi_regions_samples = [flat_samples[voronoi_assignments == voronoi_idx] for voronoi_idx in range(self.k)]\n # min_voronoi_samples = np.min([len(voronoi_region_samples) for voronoi_region_samples in voronoi_regions_samples])\n # samples = np.vstack([voronoi_region_samples[:min_voronoi_samples,0,:] for voronoi_region_samples in voronoi_regions_samples])\n \n # create graph\n kdt = spatial.KDTree(samples)\n edges = kdt.query_pairs(self.conformal_quantile)\n G = nx.from_edgelist(edges)\n\n connected_components = list(nx.connected_components(G)) \n region_samples = [samples[np.array(list(connected_component))] \n for connected_component in connected_components]\n return self._get_rps_cc(region_samples)\n \n def viz_rps(self, test_x, exact_rps, approx_rps, fn):\n K = 100\n theta_grid = self._get_grid(K=K)\n region = self._get_conformal_region(test_x, theta_grid)\n region = region.reshape((K, K))\n \n plt.imshow(region, extent=[self.mins[0], self.maxs[0], self.mins[1], self.maxs[1]], origin=\"lower\")\n sns.scatterplot(x=approx_rps[:,1], y=approx_rps[:,0], s=10, palette=\"deep\", label=r\"$\\widehat{\\Xi}$\")\n sns.scatterplot(x=exact_rps[:,1], y=exact_rps[:,0], s=10, palette=\"deep\", label=r\"$\\Xi$\")\n\n plt.xticks([])\n plt.yticks([])\n plt.xlim(self.mins[0], self.maxs[0])\n plt.ylim(self.mins[1], self.maxs[1])\n\n plt.savefig(fn)\n plt.clf()\n\n def _list_assoc(self, L1, L2, reverse=False):\n tree = spatial.KDTree(L2)\n assoc = []\n for I1, point in enumerate(L1):\n _, I2 = tree.query(point,k=1)\n if reverse:\n assoc.append((I2, I1))\n else:\n assoc.append((I1, I2))\n return assoc\n\n def _get_opt_correspondence(self, exact_rps, approx_rps):\n # some complication to ensure 1-1 correspondence\n exact_to_approx_assoc_A = self._list_assoc(exact_rps, approx_rps, reverse=False)\n exact_to_approx_assoc_B = self._list_assoc(approx_rps, exact_rps, reverse=True)\n exact_to_approx_assoc = list(set.intersection(set(exact_to_approx_assoc_A), set(exact_to_approx_assoc_B)))\n \n # exhaustively enumerate and match remainder: N here should be small, so N! 
should be manageable\n        unmatched_exact = list(range(len(exact_rps)))\n        unmatched_approx = list(range(len(approx_rps)))\n        [(unmatched_exact.remove(assoc[0]), unmatched_approx.remove(assoc[1])) for assoc in exact_to_approx_assoc]\n        \n        if len(unmatched_exact) > 0:\n            approx_permutations = list(itertools.permutations(unmatched_approx))\n            dists = []\n            for approx_permutation in approx_permutations:\n                correspondence_dist = exact_rps[unmatched_exact] - approx_rps[approx_permutation,:]\n                dists.append(np.sum(correspondence_dist ** 2))\n            unmatched_opt = np.argmin(dists)\n\n            exact_to_approx_assoc += list(zip(unmatched_exact, approx_permutations[unmatched_opt]))\n        exact_to_approx_assoc = np.array(sorted(exact_to_approx_assoc)).astype(np.int32)\n        return exact_to_approx_assoc\n\n    def get_dist(self, exact_rps, approx_rps):\n        exact_to_approx_assoc = self._get_opt_correspondence(exact_rps, approx_rps)\n        opt_correspondence_dist = exact_rps[exact_to_approx_assoc[:,0]] - approx_rps[exact_to_approx_assoc[:,1]]\n        return np.sum(opt_correspondence_dist ** 2)\n    \n    def get_rps_obj(self, rps):\n        theta_tiled = np.transpose(np.tile(self.explicit_region, (rps.shape[0], 1, 1)), (1, 0, 2))\n        theta_diff = rps - theta_tiled\n        theta_norm = np.linalg.norm(theta_diff, axis=-1)\n        theta_score = np.min(theta_norm, axis=-1)\n        return np.mean(theta_score)\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--task\")\n    args = parser.parse_args()\n    task_name = args.task\n\n    device = \"cpu\"\n    task = sbibm.get_task(task_name)\n    prior = task.get_prior_dist()\n    simulator = task.get_simulator()\n\n    cached_fn = os.path.join(\"projected_results\", f\"{task_name}.nf\")\n    with open(cached_fn, \"rb\") as f:\n        encoder = pickle.load(f)\n    encoder.to(device)\n\n    # sample_idx = 0\n    # while sample_idx < 10:\n    for sample_idx in range(10):\n        test_sim = 1\n        test_theta = prior.sample((test_sim,))\n        test_x = simulator(test_theta)\n        test_theta = test_theta[:,:proj_dim]\n\n        with open(os.path.join(\"minmax\", f\"{task_name}.pkl\"), \"rb\") as f:\n            mins, maxs = pickle.load(f)\n\n        Ns = np.arange(5, 501, 5)\n        csi = CSI(prior=prior, simulator=simulator, encoder=encoder, N=5, k=10, mins=mins, maxs=maxs, desired_coverage=0.95)\n        os.makedirs(\"results\", exist_ok=True)\n\n        csi.gen_test_samples(test_x)\n\n        print(\"Computing exact RPs...\")\n        exact_rps = csi.get_exact_rps(test_x)\n        exact_obj = csi.get_rps_obj(exact_rps)\n        \n        # if sample_idx == 0:\n        #     print(\"Performing visualization...\")\n        #     approx_rps = csi.get_approx_rps(N=20)\n        #     csi.viz_rps(test_x, exact_rps, approx_rps, os.path.join(\"results\", f\"{task_name}_rps.png\"))\n\n        print(\"Computing approximate RPs...\")\n        optimality_gaps = []\n        for N in Ns:\n            print(f\"Computing {N}...\")\n            approx_rps = csi.get_approx_rps(N=N)\n            optimality_gaps.append(csi.get_rps_obj(approx_rps) - exact_obj)\n\n        with open(os.path.join(\"results\", \"dists\", f\"{task_name}_{sample_idx}.pkl\"), \"wb\") as f:\n            pickle.dump((Ns, optimality_gaps), f)\n        # sample_idx += 1","repo_name":"yashpatel5400/csi","sub_path":"sbi/csi.py","file_name":"csi.py","file_ext":"py","file_size_in_byte":16254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12586112499","text":"# Factorial of a Large Number\n\ndef factorialWithoutRecursion(n):\n    fact = 1\n    for i in range(1, n+1):\n        fact *= i\n    return fact  # the original version forgot to return the accumulated product\n\ndef factorialRecursion(n):\n    if n == 0:\n        return 1\n    \n    return n * factorialRecursion(n-1)\n\n
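# Note: Python ints have arbitrary precision, so neither version overflows for large n;\n# the recursive one is still bounded by the interpreter's recursion limit (about 1000 by default).\nnum = 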
5\nprint(factorialRecursion(num))         # 120\nprint(factorialWithoutRecursion(num))  # 120\n","repo_name":"vikasprashar99/CodingProblems","sub_path":"factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74752137031","text":"def main(request, response):\n    cookie = request.cookies.first(\"COOKIE_NAME\", None)\n\n    response_headers = [(\"Content-Type\", \"text/javascript\"),\n                        (\"Access-Control-Allow-Credentials\", \"true\")]\n\n    origin = request.headers.get(\"Origin\", None)\n    if origin:\n        response_headers.append((\"Access-Control-Allow-Origin\", origin))\n\n    cookie_value = ''\n    if cookie:\n        cookie_value = cookie.value\n    return (200, response_headers,\n            \"if ('DedicatedWorkerGlobalScope' in self &&\" +\n            \" self instanceof DedicatedWorkerGlobalScope) {\" +\n            \"  postMessage('\"+cookie_value+\"');\" +\n            \"} else if (\" +\n            \"  'SharedWorkerGlobalScope' in self &&\" +\n            \"  self instanceof SharedWorkerGlobalScope) {\" +\n            \"  onconnect = e => e.ports[0].postMessage('\"+cookie_value+\"');\" +\n            \"}\")\n","repo_name":"mozilla-spidermonkey/rust-frontend","sub_path":"testing/web-platform/tests/workers/modules/resources/postmessage-credentials.py","file_name":"postmessage-credentials.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"27"} +{"seq_id":"74786763270","text":"#this code plays the first part of \"Mary Had a Little Lamb\"\n\nimport time\nfrom adafruit_circuitplayground import cp\n\n#make music\ndef note(name): #convert a note name like 'c5' to a frequency in Hz\n    octave = int(name[-1])\n    PITCHES = \"c,c#,d,d#,e,f,f#,g,g#,a,a#,b\".split(\",\")\n    pitch = PITCHES.index(name[:-1].lower())\n    return 440 * 2 ** ((octave - 4) + (pitch - 9) / 12.)\n\nsequence = [\n    (\"c5\", 1), (\"d5\", 1), (\"e5\", 1), (\"d5\", 1), (\"c5\", 1), (\"c5\", 1),\n    (\"c5\", 1),(None, 1), (\"d5\", 1), (\"d5\", 1), (\"d5\", 1),(None, 1), (\"c5\", 1),(\"g5\", 1),\n    (\"g5\", 1)\n    ]\n\nfor (notename, eighths) in sequence:\n    length = eighths * 0.5\n    if notename:\n        cp.play_tone(note(notename), length)\n    else:\n        time.sleep(length)\n","repo_name":"jta0030/CodeWorks","sub_path":"Examples/marry_lamb.py","file_name":"marry_lamb.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40413081117","text":"from util import get_data\n\n\nd = get_data(\"\\n\")\n\nprint(d)\n\nadd = 0\nfor data in d:\n    if not data:\n        continue\n    for i in range(len(data[:-4])):\n        if len(set(data[i:i+4])) == 4:\n            add += i + 4\n            break\n\nprint(add)","repo_name":"TobisMa/AOC22","sub_path":"d06/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"26194217244","text":"#import packages\nimport sys\nimport math\nimport numpy as np\nimport udacourse2 #my library for this project!\nimport pandas as pd\nfrom time import time\n\n#SQLAlchemy toolkit\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import pool\nfrom sqlalchemy import inspect\n\n#Machine Learning preparing/preprocessing toolkits\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n\n#Machine Learning Feature Extraction tools\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import 
TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n#Machine Learning Classifiers\nfrom sklearn.svm import LinearSVC\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\n\n#Machine Learning Classifiers extra tools\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.pipeline import Pipeline\n\n#pickling tool\nimport pickle\n\n#only a dummy function, as the data is pre-tokenized; it lives at module level so\n#the pickled pipeline can be re-loaded (see the \"pipeline leakage\" note below)\ndef dummy(doc):\n    return doc\n\n#########1#########2#########3#########4#########5#########6#########7#########8\ndef load_data(data_file, #='sqlite:///Messages.db',\n              remove_cols=False,\n              pre_tokenize=False,\n              condense=True,\n              verbose=False):\n    '''This function takes a path for a SQLite table and returns processed data\n    for training a Machine Learning Classifier\n    Inputs:\n      - data_file (mandatory) - full path for SQLite table - text string\n      - remove_cols (optional) - if you want to remove (un)trainable labels\n        columns (default=False)\n      - pre_tokenize (optional) - if you want to keep the preprocessing tokenization\n        column, that was made for removal of empty messages from X-training.\n        Observation: keeping this column makes the system faster, but it can\n        cause instability on Classifier training on Flask due to \"pipeline\n        leakage\" (default=False)\n      - condense (optional) - if you want to remove duplicate tokens under\n        one document (default=True)\n        See: https://rebeccabilbro.github.io/module-main-has-no-attribute/\n      - verbose (optional) - if you want some verbosity during the running \n        (default=False)\n    Outputs:\n      - X - tokenized text X-training - Pandas Series\n      - y - y-multilabels 0|1 - Pandas Dataframe'''\n    if verbose:\n        print('###load_data function started')\n    start = time()\n\n    #1.read in file\n    #importing SQLite to Pandas - load data from database\n    engine = create_engine(data_file, poolclass=pool.NullPool) #, echo=True)\n    #retrieving table names from my DB\n    inspector = inspect(engine)\n    if verbose:\n        print('existing tables in my SQLite database:', inspector.get_table_names())\n    connection = engine.connect()\n    df = pd.read_sql('SELECT * FROM Messages', con=connection)\n    connection.close()\n    df.name = 'df'\n    \n    #2.clean data\n    #2.1.Eliminate rows with all-blank labels\n    if verbose:\n        print('all labels are blank in {} rows'.format(df[df['if_blank'] == 1].shape[0]))\n    df = df[df['if_blank'] == 0]\n    if verbose:\n        print('remaining rows:', df.shape[0])\n    #Verifying if removal was complete\n    if df[df['if_blank'] == 1].shape[0] == 0:\n        if verbose:\n            print('removal complete!')\n    else:\n        raise Exception('something went wrong with {} rows to remove'.format(df[df['if_blank'] == 1].shape[0]))\n    \n    #2.2.Premature Tokenization Strategy (pre-tokenizer)\n    #Pre-Tokenizer + not removing provisory tokenized column\n    #inserting a tokenized column\n    try:\n        df = df.drop('tokenized', axis=1)\n    except KeyError:\n        if verbose:\n            print('*tokenized column does not exist, creating it')\n        df.insert(1, 'tokenized', np.nan)\n\n    #tokenizing over the provisory\n    df['tokenized'] = df.apply(lambda x: udacourse2.fn_tokenize_fast(x['message']), axis=1) #condense=condense\n    \n    #removing NaN over provisory (if it still exists)\n    df = df[df['tokenized'].notnull()]\n    empty_tokens = df[df['tokenized'].apply(lambda x: len(x)) == 0].shape[0]\n    if verbose:\n        print('found {} rows with no tokens'.format(empty_tokens))\n    df = df[df['tokenized'].apply(lambda x: len(x)) > 0]\n    empty_tokens = df[df['tokenized'].apply(lambda x: len(x)) == 0].shape[0]\n    if verbose:\n        
print('*after removal, found {} rows with no tokens'.format(empty_tokens))\n\n    if pre_tokenize:\n        #I will drop the original 'message' column\n        try:\n            df = df.drop('message', axis=1)\n            if verbose:\n                print('*message column dropped')\n        except KeyError:\n            if verbose:\n                print('*message column not dropped')\n    else:\n        #I will drop the already processed 'tokenized' column \n        try:\n            df = df.drop('tokenized', axis=1)\n            if verbose:\n                print('*tokenized column dropped')\n        except KeyError:\n            if verbose:\n                print('*tokenized column not dropped')\n    \n    if verbose:\n        print('now I have {} rows to train'.format(df.shape[0]))\n\n    #2.3.Database Data Consistency Check/Fix\n    #correction for aid_related\n    df = udacourse2.fn_group_check(dataset=df,\n                                   subset='aid',\n                                   correct=True, \n                                   shrink=False, \n                                   shorten=False, \n                                   verbose=verbose)\n    #correction for weather_related\n    df = udacourse2.fn_group_check(dataset=df,\n                                   subset='wtr',\n                                   correct=True, \n                                   shrink=False, \n                                   shorten=False, \n                                   verbose=verbose)\n    #correction for infrastructure_related\n    df = udacourse2.fn_group_check(dataset=df,\n                                   subset='ifr',\n                                   correct=True, \n                                   shrink=False, \n                                   shorten=False, \n                                   verbose=verbose)\n    #correction for related (considering that the earlier ones were already corrected)\n    df = udacourse2.fn_group_check(dataset=df,\n                                   subset='main',\n                                   correct=True, \n                                   shrink=False, \n                                   shorten=False,\n                                   verbose=verbose)\n    \n    #load to database <- I don't know what this step is for!\n    \n    #3.Define features and label arrays (break the data)\n    #3.1.X is the Training Text Column\n    if pre_tokenize:\n        if verbose:\n            print('X-training will be the tokenized column')\n        X = df['tokenized']\n    else:\n        if verbose:\n            print('X-training will be the message column')\n        X = df['message']\n    \n    #3.2.y is the Classification labels\n    #I REMOVED \"related\" column from my labels, as it is impossible to train it!\n    if remove_cols: #for removing untrainable columns\n        y = df[df.columns[5:]] #for removal of \"related\" column\n        remove_lst = []\n\n        for column in y.columns:\n            col = y[column]\n            if (col == 0).all():\n                if verbose:\n                    print('*{} -> only zeroes (un)training column!'.format(column))\n                remove_lst.append(column)\n            elif (col == 1).all():\n                if verbose:\n                    print('*{} -> only ones (un)training column!'.format(column))\n                remove_lst.append(column)\n            else:\n                if verbose:\n                    print('*{} -> column OK'.format(column))\n                pass\n        if verbose:\n            print('remove columns:', remove_lst)\n        y = y.drop(remove_lst, axis=1)\n        if verbose:\n            print('(un)trainable label columns removed')\n    else:\n        y = df[df.columns[4:]]\n        if y.shape[1] == 36:\n            if verbose:\n                print('y dataset has 36 labels')\n        else:\n            raise Exception('something went wrong, dataset has {} labels instead of 36'.format(y.shape[1]))\n    \n    spent = time() - start\n    if verbose:\n        print('*dataset broken into X-Training Text Column and Y-Multilabels') \n        print('process time:{:.0f} seconds'.format(spent))\n    return X, y\n\n#########1#########2#########3#########4#########5#########6#########7#########8\ndef build_model(data_file, #='sqlite:///Messages.db',\n                tree_type=True,\n                C=2.0,\n                learning_rate=0.5,\n                n_estimators=80,\n                remove_cols=False,\n                grid_search=False,\n                pre_tokenize=False,\n                condense=True,\n                test_size=0.25,\n                verbose=False):\n    '''This function builds the Classifier Pipeline, for future fitting\n    Inputs:\n      - data_file (mandatory) - full path for the source SQLite file\n      - tree_type (optional) - Classifier will be from the Tree family (Adaboost);\n        if you want to use the alternative Classifier (LSVM), set it as False.\n        Default: True\n
      - C (optional) - C parameter for the LinearSVC Classifier (default=2.0)\n      - learning_rate (optional) - parameter for the Adaboost Classifier\n        (default=0.5)\n      - n_estimators (optional) - parameter for the Adaboost Classifier\n        (default=80) \n      - remove_cols: ONLY used for Grid Search! (default=False)\n      - grid_search (optional) - performs Grid Search over Adaboost before \n        training for the best parameters. Please use it wisely, as it costs \n        a lot of processing time! (default=False)\n        Observation: Grid Search was not implemented in this code for the \n        alternative Classifier (LSVM), so this parameter will not work when \n        using (tree_type=False)\n      - pre_tokenize (optional) - if you want to keep the preprocessing tokenization\n        column, that was made for removal of empty messages from X-training.\n        Observation: keeping this column makes the system faster, but it can\n        cause instability on Classifier training on Flask due to \"pipeline\n        leakage\" (default=False)\n        See: https://rebeccabilbro.github.io/module-main-has-no-attribute/\n      - condense (optional) - if you want to remove duplicate tokens under\n        one document (default=True) note: not activated due to technical issues\n      - test_size: ONLY used for Grid Search! (default=0.25)\n      - verbose (optional) - if you want some verbosity during the running \n        (default=False)\n    Output:\n      - model_pipeline for your Classifier (untrained)\n    Future improvement:\n      - other parameters for Grid Search\n      - Grid Search for the alternative Classifier\n    '''\n    if verbose:\n        print('###build_model function started')\n    start = time()\n    \n    #0.grid search for Adaboost Classifier\n    if grid_search:\n        if verbose:\n            print('###Grid Search function started over Adaboost')\n            print('WARNING: parameter optimization requires heavy processing and some repetitive steps!')\n        vect_a = CountVectorizer(tokenizer=udacourse2.fn_tokenize_fast)\n        transf_a = TfidfTransformer()\n        class_a = MultiOutputClassifier(AdaBoostClassifier(random_state=42))\n\n        pipeline_adab = Pipeline([('vect', vect_a),\n                                  ('tfidf', transf_a),\n                                  ('clf', class_a)])\n\n        param_dict = {'clf__estimator__learning_rate': [0.5, 1.0],\n                      'clf__estimator__n_estimators': [50, 80]}\n\n        estimator = GridSearchCV(estimator=pipeline_adab, \n                                 param_grid=param_dict,\n                                 n_jobs=-1) #set for using maximum CPU cores\n\n        X, y = load_data(data_file=data_file,\n                         remove_cols=remove_cols,\n                         pre_tokenize=pre_tokenize,\n                         condense=condense,\n                         verbose=verbose)\n\n        X_train, X_test, y_train, y_test = train_test_split(X, \n                                                            y, \n                                                            test_size=test_size, \n                                                            random_state=42)\n        pipeline_adab.fit(X_train, y_train)\n        estimator_fit = train(X=X_train, \n                              y=y_train, \n                              model=estimator,\n                              verbose=verbose)\n        #get the best parameters\n        learning_rate = estimator.best_estimator_[2].estimator.learning_rate\n        n_estimators = estimator.best_estimator_[2].estimator.n_estimators\n        if verbose:\n            print('Grid Search finished - new params: learning_rate={}, n_estimators={}'.format(learning_rate, n_estimators))\n    \n    #1.text processing and model pipeline\n    #(text processing was made at an earlier step, in the load_data function) \n    if tree_type:\n        if verbose:\n            print('Tree-type Classifier (Adaboost-default) pipeline is on the way')\n            print('*note: parameter C is NOT used in this family of Classifiers, so do not call it!')\n        \n        if pre_tokenize:\n            if verbose:\n                print('creating pre-tokenized Adaboost pipeline')\n            \n            vec_adab = CountVectorizer(tokenizer=dummy, preprocessor=dummy)\n            trf_adab = TfidfTransformer()\n            clf_adab = MultiOutputClassifier(AdaBoostClassifier(learning_rate=learning_rate,\n
                                                                n_estimators=n_estimators,\n                                                                random_state=42))\n            model_pipeline = Pipeline([('vect', vec_adab),\n                                       ('tfidf', trf_adab),\n                                       ('clf', clf_adab)])\n        else:\n            if verbose:\n                print('creating conventional Adaboost pipeline')\n            vec_adab = CountVectorizer(tokenizer=udacourse2.fn_tokenize_fast)\n            trf_adab = TfidfTransformer()\n            clf_adab = MultiOutputClassifier(AdaBoostClassifier(learning_rate=learning_rate,\n                                                                n_estimators=n_estimators,\n                                                                random_state=42))\n            model_pipeline = Pipeline([('vect', vec_adab),\n                                       ('tfidf', trf_adab),\n                                       ('clf', clf_adab)])\n    \n    else: #alternative LSVM Classifier\n        if verbose:\n            print('Support Vector Machine (Linear-alternative) pipeline is on the way')\n            print('*note: parameters learning_rate and n_estimators are NOT used in this family of Classifiers')\n        \n        if pre_tokenize:\n            if verbose:\n                print('creating pre-tokenized LSVM pipeline')\n            \n            vec_lsvm = TfidfVectorizer(analyzer='word', \n                                       tokenizer=dummy, \n                                       preprocessor=dummy,\n                                       token_pattern=None,\n                                       ngram_range=(1, 3))\n            \n            cls_lsvm = OneVsRestClassifier(LinearSVC(C=C, \n                                                     random_state=42))\n            \n            model_pipeline = Pipeline([('vect', vec_lsvm),\n                                       ('clf', cls_lsvm)])\n        else: \n            if verbose:\n                print('creating conventional LSVM pipeline')\n            \n            vec_lsvm = TfidfVectorizer(analyzer='word', \n                                       tokenizer=udacourse2.fn_tokenize_fast, \n                                       token_pattern=None,\n                                       ngram_range=(1, 3))\n            \n            cls_lsvm = OneVsRestClassifier(LinearSVC(C=C, \n                                                     random_state=42))\n            \n            model_pipeline = Pipeline([('vect', vec_lsvm),\n                                       ('clf', cls_lsvm)])\n    \n    #define parameters for GridSearchCV (parameters already defined)\n    #create gridsearch object and return as final model pipeline (made at pipeline preparation)\n    #obs: for better performance, I pre-tokenized my data. Grid Search was run on Jupyter,\n    #     and the best parameters for LSVM were adjusted, just to save processing time during code execution.\n    spent = time() - start\n    if verbose:\n        print('*Classifier pipeline was created')\n        print('process time:{:.0f} seconds'.format(spent))\n    return model_pipeline\n\n#########1#########2#########3#########4#########5#########6#########7#########8\ndef train(X, \n          y, \n          model,\n          test_size=0.25,\n          best_10=True,\n          verbose=False):\n    '''This function trains your already created Classifier Pipeline\n    Inputs:\n      - X (mandatory) - tokenized data for training - Pandas Series\n      - y (mandatory) - Multilabels 0|1 - Pandas Dataset\n      - test_size (optional) - test size for data split (default=0.25)\n      - best_10 (optional) - if metrics will be best_10 or all \n        (default=True - best_10) \n      - verbose (optional) - if you want some verbosity during the running \n        (default=False)\n    Output:\n      - trained model'''\n    if verbose:\n        print('###train function started')\n    start = time()\n\n    #1.Train test split\n    #Split makes randomization, so the random_state parameter was set\n    X_train, X_test, y_train, y_test = train_test_split(X, \n                                                        y, \n                                                        test_size=test_size, \n                                                        random_state=42)\n    if (X_train.shape[0] + X_test.shape[0]) == X.shape[0]:\n        if verbose:\n            print('data split into train and test seems OK')\n    else:\n        raise Exception('something went wrong when splitting the data')\n    \n    #2.fit the model\n    model.fit(X_train, y_train)\n    \n    # output model test results\n    y_pred = model.predict(X_test)\n    if verbose:\n        metrics = udacourse2.fn_scores_report2(y_test, \n                                               y_pred,\n                                               best_10=best_10,\n                                               data_ret=True,\n                                               verbose=True)\n    else:\n        metrics = udacourse2.fn_scores_report2(y_test, \n                                               y_pred,\n                                               best_10=best_10,\n                                               data_ret=True,\n                                               verbose=False)\n    for metric in metrics:\n        if metric < 0.6:\n            if verbose:\n                print('*metrics 
\n#########1#########2#########3#########4#########5#########6#########7#########8\ndef export_model(model,\n file_name, #='classifier.pkl',\n verbose=False):\n '''This function writes your already trained Classifier to a pickle binary\n file.\n Inputs:\n - model (mandatory) - your already trained Classifier - Python Object\n - file_name (mandatory) - the name of the file to be created\n - verbose (optional) - if you want some verbosity during the running\n (default=False)\n Output: returns True if everything runs OK\n '''\n if verbose:\n print('###export_model function started')\n start = time()\n\n #1.Export model as a pickle file\n with open(file_name, 'wb') as pk_writer:\n pickle.dump(model, pk_writer)\n\n #reading the file\n #with open('classifier.pkl', 'rb') as pk_reader:\n # model = pickle.load(pk_reader)\n \n spent = time() - start\n if verbose:\n print('*trained Classifier was exported')\n print('process time:{:.0f} seconds'.format(spent))\n \n return True\n\n#########1#########2#########3#########4#########5#########6#########7#########8\ndef run_pipeline(data_file, #='sqlite:///Messages.db',\n file_name, #='classifier.pkl',\n remove_cols=False,\n tree_type=True,\n C=2.0,\n learning_rate=0.5,\n n_estimators=80,\n test_size=0.25,\n best_10=True,\n pre_tokenize=False,\n condense=True,\n grid_search=False,\n verbose=False):\n '''This function is a caller: it calls the load, build, train and save modules\n Inputs:\n - data_file (mandatory) - complete path to the SQLite datafile to be\n processed\n - file_name (mandatory) - name for pickling the Classifier\n - remove_cols (optional) - if you want to remove (un)trainable label\n columns (default=False)\n - tree_type (optional) - Classifier will be from the Tree family (Adaboost);\n if you want to use the alternative Classifier (LSVM), set it as False.\n Default: True\n - C (optional) - parameter for the LinearSVC Classifier (default=2.0)\n - learning_rate (optional) - parameter for the Adaboost Classifier\n (default=0.5)\n - n_estimators (optional) - parameter for the Adaboost Classifier\n (default=80)\n - test_size (optional) - test size for data split (default=0.25)\n - best_10 (optional) - if metrics will be best_10 or all\n (default=True - best_10)\n - pre_tokenize (optional) - if you want to keep the preprocessing tokenization\n column, that was made for removal of empty messages from X-training.\n Observation: keeping this column makes the system faster, but it can\n cause instability on Classifier training on Flask due to \"pipeline\n leakage\" (default=False)\n - condense (optional) - if you want to remove duplicate tokens under\n one document (default=True)\n See: https://rebeccabilbro.github.io/module-main-has-no-attribute/\n - grid_search (optional) performs Grid Search over Adaboost before\n training for the best parameters. Please use it wisely, as it costs\n a lot of processing time! 
(default=False).\n Observation: Grid Search was not implemented in this code for the\n alternative Classifier (LSVM), so this parameter will not work when\n using tree_type=False\n - verbose (optional) - if you want some verbosity during the running\n (default=False)\n Output: returns True if everything runs OK\n '''\n if verbose:\n print('###run_pipeline function started')\n start = time()\n\n #1.Run ETL pipeline\n X, y = load_data(data_file,\n remove_cols=remove_cols,\n pre_tokenize=pre_tokenize,\n condense=condense,\n verbose=verbose)\n \n #2.Build model pipeline\n #(the grid_search flag is simply forwarded to build_model)\n model = build_model(C=C,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n tree_type=tree_type,\n pre_tokenize=pre_tokenize,\n remove_cols=remove_cols,\n condense=condense,\n grid_search=grid_search,\n test_size=test_size,\n data_file=data_file,\n verbose=verbose)\n\n #3.Train model pipeline\n model = train(X,\n y,\n model,\n test_size=test_size,\n best_10=best_10,\n verbose=verbose)\n \n # save the model\n export_model(model,\n file_name=file_name,\n verbose=verbose)\n \n spent = time() - start\n if verbose:\n print('process time:{:.0f} seconds'.format(spent))\n return True\n\n
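#Minimal usage sketch (the paths mirror the commented defaults above):\n#run_pipeline(data_file='sqlite:///Messages.db', file_name='classifier.pkl',\n# verbose=True)\n\n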
#########1#########2#########3#########4#########5#########6#########7#########8\ndef main(data_file, # = 'sqlite:///Messages.db',\n file_name, # = 'classifier.pkl',\n remove_cols = False,\n tree_type = True,\n C = 2.0,\n learning_rate = 0.5,\n n_estimators = 80,\n test_size = 0.25,\n best_10 = True,\n pre_tokenize = False,\n condense = True,\n grid_search = False,\n verbose = False):\n '''This is the main Machine Learning Pipeline function. It calls the other\n ones, in the correct order.\n Example: python train_classifier.py\n Basic parameters:\n - data_file (mandatory) - just indicate the complete path after the command\n Example: python train_classifier.py ../data/Database.db\n - file_name (mandatory) - you need to indicate both data_file and file_name\n Example: python train_classifier.py ../data/Database.db other.pkl\n Extra parameters:\n here you need to indicate both data_file and classifier; in order to use\n them you can use only one, or more, in any order\n -a -> run metrics over ALL labels (not recommended!)\n default=False - run metrics over the 10 main labels only\n -c -> C parameter for your Classifier (default=2.0)\n -e -> do NOT remove duplicate (condense) tokens in a document. Sometimes it\n makes it easier for the Classifier to fit the best parameters, sometimes not\n (default=True - remove duplicates)\n -g -> perform Grid Search over Adaboost before training for the best\n parameters. Please use it wisely, as it costs a lot of processing\n time!\n -l -> learning rate for the Adaboost Classifier. It has a tradeoff with\n n_estimators, so consider tuning both parameters (default=0.5)\n -n -> maximum number of estimators for Adaboost (default=80)\n -p -> pre_tokenize - keep the preprocessing tokenization column, to save\n processing time. Observation: keeping this column makes the system\n faster, but may cause instability on Classifier training on Flask\n due to \"pipeline leakage\" (not recommended) (default=False)\n -r -> remove columns - if you want to remove (un)trainable columns from\n your y-labels dataset (default=False)\n -s -> change the Classifier from Adaboost (tree-type) to LSVM\n (support vector machine-type)\n -t -> test size for splitting your data (default=0.25)\n -v -> verbose - if you want some verbosity during the running\n (default=False)\n Example: python train_classifier data.db other.pkl -c=0.5 -t=0.2 -r -v\n '''\n run_pipeline(data_file=data_file,\n file_name=file_name,\n remove_cols=remove_cols,\n tree_type=tree_type,\n C=C,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n test_size=test_size,\n best_10=best_10,\n pre_tokenize=pre_tokenize,\n condense=condense,\n grid_search=grid_search,\n verbose=verbose)\n \n#########1#########2#########3#########4#########5#########6#########7#########8 \nif __name__ == '__main__':\n\n #first, try to get the system arguments\n args = sys.argv[1:]\n print('args:', args)\n\n #second, try to charge with the two main arguments\n #if len(args) == 0: #python\n # main()\n #elif len(args) == 1: #python sqlite:///Messages.db\n # main(data_file=args[0])\n if len(args) < 2:\n raise Exception('at least two arguments needed')\n elif len(args) == 2: #python sqlite:///Messages.db classifier.pkl\n main(data_file=args[0],\n file_name=args[1])\n else: #default parameters\n remove_cols = False\n tree_type = True\n C = 2.0\n learning_rate = 0.5\n n_estimators = 80\n test_size = 0.25\n best_10 = True\n pre_tokenize = False\n grid_search = False\n condense = True\n verbose = False\n \n remain_args = args[2:] #eliminate the two main args\n for arg in remain_args:\n comm = arg[:2] #get the command part\n if comm == '-a':\n best_10 = False\n elif comm == '-c':\n C = float(arg[3:]) #cast: sys.argv values are strings\n elif comm == '-e':\n condense = False\n elif comm == '-g':\n grid_search = True\n elif comm == '-l':\n learning_rate = float(arg[3:])\n elif comm == '-n':\n n_estimators = int(arg[3:])\n elif comm == '-p':\n pre_tokenize = True\n elif comm == '-r':\n remove_cols = True\n elif comm == '-s':\n tree_type = False\n elif comm == '-t':\n test_size = float(arg[3:])\n elif comm == '-v':\n verbose = True\n else:\n raise Exception('invalid argument')\n \n main(data_file=args[0],\n file_name=args[1],\n remove_cols=remove_cols,\n tree_type=tree_type,\n C=C,\n learning_rate=learning_rate,\n n_estimators=n_estimators,\n test_size=test_size,\n best_10=best_10,\n pre_tokenize=pre_tokenize,\n condense=condense,\n grid_search=grid_search,\n verbose=verbose)","repo_name":"epasseto/Second_Project","sub_path":"train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":30884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73374154311","text":"def fizzbuzz(x, y, n):\n \"\"\" Prints fizz for multiples of x and buzz for multiples of y in range\n n.
 Prints fizzbuzz when i is a multiple of both x and y.\n Otherwise prints the number.\n \"\"\"\n for i in range(1, n):\n if i % x == 0 and i % y == 0: # works even when x and y share factors\n print(\"fizzbuzz\", end=\", \")\n elif i % x == 0:\n print(\"fizz\", end=\", \")\n elif i % y == 0:\n print(\"buzz\", end=\", \")\n else:\n print(i, end=\", \")\n print()\n\nfizzbuzz(3, 4, 20)\nfizzbuzz(3, 5, 20)\nfizzbuzz(2, 5, 20)\n","repo_name":"bennett39/algorithms","sub_path":"fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5787312351","text":"#here i imported some modules\n\nimport pyttsx3\nimport datetime\nimport speech_recognition as sr\nimport wikipedia\nimport webbrowser\nimport os\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nimport python_weather\nimport asyncio\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nNEWS_API_KEY = 'YOUR_NEWS_API_KEY'\n\n\napps = [\"youtube\",\"facebook\",\"twitter\",\"aware international\",\"musa tahawar\",\"netflix\",\"chrome\",\"google\"]\n\n#note: dict keys must be distinct - duplicate placeholder keys would collapse into one entry\nemail_dict = {\"USER_NAME_1\":\"YOUR_MAIL\",\n \"USER_NAME_2\":\"YOUR_MAIL\",\n \"USER_NAME_3\":\"YOUR_MAIL\",\n \"USER_NAME_4\":\"YOUR_MAIL\"\n }\n\nengine = pyttsx3.init(\"sapi5\")\nvoices = engine.getProperty(\"voices\")\n\nengine.setProperty(\"voice\", voices[0].id)\n\nprint(\"We are still working on this\")\n\n\n# Function to play a song on YouTube using Selenium\n\ndef playSongOnYouTube(query):\n # Initialize Chrome WebDriver\n options = webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\") # Maximize the Chrome window\n driver = webdriver.Chrome(options=options)\n\n # Open YouTube and search for the query\n # (find_element_by_* was removed in Selenium 4; use find_element(By..., ...))\n driver.get(\"https://www.youtube.com\")\n search_box = driver.find_element(By.NAME, \"search_query\")\n search_box.send_keys(query)\n search_box.send_keys(Keys.RETURN)\n\n # Wait for a moment to load search results\n time.sleep(2)\n\n # Click on the first video in the search results\n video_link = driver.find_element(By.ID, \"video-title\")\n video_link.click()\n\n # Close the WebDriver when the video starts playing\n time.sleep(5) # Adjust this time based on your internet speed\n driver.quit()\n\ndef speak(audio):\n '''this function is used for the ai to speak'''\n engine.say(audio)\n engine.runAndWait()\n\ndef wishme():\n '''this function will greet you'''\n hour = int(datetime.datetime.now().hour)\n\n\n if hour >= 0 and hour < 12:\n speak(\"good morning\")\n\n elif hour >= 12 and hour < 18:\n speak(\"good afternoon\")\n\n else:\n speak(\"good evening\")\n speak(\"I am Jarvis Assistant, please tell me how may I help you\")\n\ndef takeCommand():\n '''this function will take a command from you'''\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening\")\n r.pause_threshold = 0.5\n audio = r.listen(source)\n\n try:\n print(\"Recognizing\")\n query = r.recognize_google(audio, language='en-in')\n print(f\"User said: {query} \\n\")\n\n except Exception as e:\n print(\"Say that again please\")\n return \"None\"\n\n return query\ndef sendEmail(to, content):\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.login(\"YOUR_EMAIL\", \"YOUR_PASSWORD\")\n server.sendmail(\"YOUR_MAIL\", to, content)\n server.close()\n\n
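# Usage sketch (the address below is only a placeholder):\n# sendEmail(\"friend@example.com\", \"Hello from Jarvis\")\n\n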
def search_google(query):\n search_url = f\"https://www.google.com/search?q={query}\"\n webbrowser.open(search_url)\n\ndef get_google_answer(query):\n search_google(query)\n response = requests.get(f\"https://www.google.com/search?q={query}\")\n soup = BeautifulSoup(response.text, \"html.parser\")\n answer_div = soup.find(\"div\", class_=\"BNeawe iBp4i AP7Wnd\")\n\n if answer_div:\n answer = answer_div.get_text()\n return answer\n\n else:\n return \"I couldn't find an answer for your query.\"\ndef open_website(query):\n search_google(query)\n response = requests.get(f\"https://www.google.com/search?q={query}\")\n\ndef get_weather_info(city):\n # Replace 'YOUR_API_KEY' with your actual API key\n # (note: recent python_weather versions expose an asynchronous client, so this\n # synchronous usage may need adapting to asyncio)\n weather = python_weather.Client(format=python_weather.Metric, apikey='YOUR_API_KEY')\n\n\n try:\n response = weather.find(city)\n current_weather = response.current\n temperature = current_weather.temperature\n description = current_weather.sky_text\n\n weather_info = f\"The current weather in {city} is {description} with a temperature of {temperature} degrees Celsius.\"\n speak(weather_info)\n except Exception as e:\n speak(\"Sorry, I couldn't retrieve the weather information at the moment.\")\n\ndef get_and_read_news():\n try:\n news_url = f\"https://newsapi.org/v2/top-headlines?country=us&apiKey={NEWS_API_KEY}\"\n response = requests.get(news_url)\n news_data = response.json()\n\n if news_data['status'] == 'ok':\n articles = news_data['articles']\n for index, article in enumerate(articles):\n title = article['title']\n description = article['description']\n source = article['source']['name']\n news_info = f\"News {index + 1}: {title}. {description}. Source: {source}.\"\n speak(news_info)\n\n else:\n speak(\"Sorry, I couldn't retrieve the news at the moment.\")\n\n except Exception as e:\n speak(\"Sorry, I couldn't retrieve the news at the moment.\")\n\nif __name__ == \"__main__\":\n wishme()\n\n while True:\n query = takeCommand().lower()\n \n if \"wikipedia\" in query:\n speak(\"Searching Wikipedia\")\n query = query.replace(\"wikipedia\", \"\") # strip the keyword before searching\n results = wikipedia.summary(query, sentences=2)\n speak(\"According to wikipedia\")\n print(results)\n speak(results)\n\n elif \"hi\" in query or \"hello\" in query:\n speak(\"Hello, how may I assist you?\")\n print(\"Hello, how may I assist you?\")\n elif \"tell me the news\" in query:\n speak(\"Sure, here are the latest news headlines.\")\n get_and_read_news()\n elif \"who are you\" in query:\n speak(\"My name is Kinzond, I am an AI assistant, I am here to help you\")\n print(\"My name is Kinzond, I am an AI assistant, I am here to help you\")\n\n elif \"what is your name\" in query:\n speak(\"My name is Kinzond\")\n print(\"My name is Kinzond\")\n\n elif \"play cupid\" in query:\n speak(\"Playing Cupid\")\n print(\"Playing Cupid\")\n webbrowser.open(\"https://youtu.be/Qc7_zRjH808?si=YIpH-D4vjS7ZaNBP\")\n\n elif \"play a song\" in query: # lower case, so it can match the lowered query\n speak(\"What song do you want to listen to?\")\n search_query = takeCommand()\n search_url = f\"https://www.youtube.com/results?search_query={search_query}\"\n speak(f\"Playing {search_query}\")\n webbrowser.open(search_url)\n\n elif \"open youtube\" in query:\n speak(\"Opening Youtube\")\n webbrowser.open(\"youtube.com\")\n\n elif \"open facebook\" in query:\n speak(\"Opening Facebook\")\n webbrowser.open(\"facebook.com\")\n\n elif \"open aware international\" in query:\n speak(\"Opening Aware international\")\n webbrowser.open(\"awareinternational.net\")\n\n elif \"open instagram\" in query:\n
speak(\"Opening Instagram\")\n webbrowser.open(\"instagram.com\")\n\n elif \"open twitter \" in query:\n speak(\"Opening Twitter\")\n webbrowser.open(\"twitter.com\")\n\n elif \"open google\" in query:\n speak(\"Opening Google\")\n webbrowser.open(\"google.com\")\n\n elif \"open bing\" in query:\n speak(\"Opening Bing\")\n webbrowser.open(\"bing.com\")\n\n elif \"open Musa Tahawar\" in query:\n speak(\"Opening Musa Tahawar Website\")\n webbrowser.open(\"musatahawar.epizy.com\")\n\n elif \"open sales Dash\" in query:\n speak(\"Opening Salesdash\")\n webbrowser.open(\"https://salesdashcrm.com/\")\n\n elif \"open netflix\" in query:\n speak(\"Opening Netflix\")\n webbrowser.open(\"netflix.com\")\n\n elif \"play a song\" in query:\n speak(\"What song would you like to listen to on YouTube?\")\n search_query = takeCommand()\n speak(f\"Playing {search_query} on YouTube\")\n playSongOnYouTube(search_query)\n\n elif \"the time\" in query:\n strTime = datetime.datetime.now().strftime(\"%H:%M\")\n speak(f\"Musa the time is {strTime}\")\n\n elif \"search on youtube\" in query:\n speak(\"What would you like to search for on YouTube?\")\n search_query = takeCommand()\n search_url = f\"https://www.youtube.com/results?search_query={search_query}\"\n webbrowser.open(search_url)\n elif \"opem vscode\" in query:\n code_path = \"Local Disk (C) - Shortcut.lnk\"\n os.startfile(code_path)\n elif \"opem chrome\" in query:\n code_path = \"Local Disk (C) - Shortcut.lnk\"\n speak(\"Opening Chrome\")\n os.startfile(code_path)\n\n elif \"search on google\" in query:\n speak(\"What would you like to search for on Google?\")\n search_query = takeCommand()\n answer = get_google_answer(search_query)\n speak(answer)\n\n elif \"tell me the weather\" in query:\n speak(\"Sure, in which city?\")\n city = takeCommand()\n get_weather_info(city)\n\n elif \"Email to Musa\" in query:\n\n try:\n speak(\"What should i say?\")\n content = takeCommand()\n to = \"jjsa\"\n sendEmail(to, content)\n speak(\"Email has been Sent!\")\n\n except Exception as e:\n print(e)\n speak(\"Sorry my friend musa, i am not able to send the email\")\n\n","repo_name":"MusaTahawar/Jarvis-Ai","sub_path":"Jarvis-Ai.py","file_name":"Jarvis-Ai.py","file_ext":"py","file_size_in_byte":9605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26111322207","text":"from bs4 import BeautifulSoup\nimport os\nimport requests\nfrom pymongo import MongoClient\nimport datetime\nimport time\nimport random\n\nclass mzitu():\n def __init__(self):\n client = MongoClient() #与MongDB建立连接(这是默认连接本地MongDB数据库)\n db = client['meinvxiezhenji'] #选择或创建一个数据库\n self.meizitu_collection = db['meizitu'] #在meizixiezhenji这个数据库中,选择一个集合\n self.title = '' #用来保存页面主题\n self.url = '' ##用来保存页面地址\n self.img_urls = [] ##初始化一个 列表 用来保存图片地址\n self.href=''\n self.user_agent_list = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\",\n 
\"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\"\n ] #这是hearders 库\n self.iplist = [\n \"123.163.96.88\",\n \"163.204.246.48\",\n \"183.129.244.16\",\n \"120.79.203.1\",\n \"114.113.222.131\",\n \"180.118.135.18\",\n \"120.25.203.182\",\n \"163.204.245.203\",\n \"59.37.33.62\",\n \"43.248.123.237\",\n \"175.44.156.198\",\n \"120.236.178.117\",\n \"183.158.202.222\",\n \"221.1.200.242\",\n \"61.176.223.7\"\n ] # 这是IP池\n def all_url(self, url): #查找全站图片的URL\n html = self.request(url,href=None) #请求访问网页\n all_a = BeautifulSoup(html.text, 'lxml').find('div', class_='all').find_all('a') #解析网页并找到图片地址\n for a in all_a:\n title = a.get_text()\n old = '早期图片' #简单清洗一个叫早期图片的没用的URL\n if title == old:\n continue\n self.title = title ##将主题保存到self.title中\n print(u'开始保存:', title)\n path = str(title).replace(\"?\", '_')\n self.mkdir(path)\n href = a['href']\n self.url = href ##将页面地址保存到self.url中\n if self.meizitu_collection.find_one({'主题页面': href}): ##判断这个主题是否已经在数据库中、不在就运行else下的内容,在则忽略。\n print(u'这个页面已经爬取过了')\n else:\n self.html(href)\n\n def html(self, href):\n html = self.request(href)\n max_span = BeautifulSoup(html.text, 'lxml').find('div', class_='pagenavi').find_all('span')[-2].get_text()\n page_num = 0 ##这个当作计数器用 (用来判断图片是否下载完毕)\n self.img_urls=[] ##每个文件夹保存后清零\n for page in range(1, int(max_span) + 1):\n page_num = page_num + 1 ##每for循环一次就+1 (当page_num等于max_span的时候,就证明我们的在下载最后一张图片了)\n page_url = href + '/' + str(page)\n self.img(page_url,href ,max_span, page_num) ##调用img函数\n\n def img(self, page_url,href,max_span, page_num): ##添加上面传递的参数\n img_html = self.request(page_url,href=href)\n img_url = BeautifulSoup(img_html.text, 'lxml').find('div', class_='main-image').find('img')['src']\n self.img_urls.append(img_url)\n if int(max_span) == page_num: ##我们传递下来的两个参数用上了 当max_span和Page_num相等时,就是最后一张图片了,最后一次下载图片并保存到数据库中。\n self.save(img_url,href)\n post = { ##这是构造一个字典,里面有啥都是中文,很好理解吧!\n '标题': self.title,\n '主题页面': self.url,\n '图片地址': self.img_urls,\n '获取时间': datetime.datetime.now()\n }\n self.meizitu_collection.save(post) ##将post中的内容写入数据库。\n print(u'插入数据库成功')\n else: ##max_span 不等于 page_num执行这下面\n self.save(img_url,href)\n def save(self, img_url,href):\n name = img_url[-9:-4]\n print(u'开始保存:', img_url)\n img = self.request(img_url,href=href)\n print(img)\n f = 
 def save(self, img_url,href):\n name = img_url[-9:-4]\n print(u'Saving image:', img_url)\n img = self.request(img_url,href=href)\n print(img)\n f = open(name + '.jpg', 'ab')\n f.write(img.content)\n f.close()\n time.sleep(0.5)\n\n def mkdir(self, path): ## create the album folder and switch into it\n path = path.strip()\n isExists = os.path.exists(os.path.join(r\"D:\\mzitu\", path))\n if not isExists:\n print(u'Created a folder named', path)\n os.makedirs(os.path.join(r\"D:\\mzitu\", path))\n os.chdir(os.path.join(r\"D:\\mzitu\", path)) ## switch into the folder\n return True\n else:\n print(u'A folder named', path, u'already exists!')\n os.chdir(os.path.join(r\"D:\\mzitu\", path))\n return False\n\n def request(self,url,href=None,proxy=None,timeout=3,num_retries=6):\n UA = random.choice(self.user_agent_list)\n headers = {'User-Agent': UA}\n headers['referer'] = href\n try:\n return requests.get(url, headers=headers,proxies=proxy, timeout=timeout)\n except:\n print(u'Falling back to a proxy')\n time.sleep(10)\n IP = ''.join(str(random.choice(self.iplist)).strip()) ## pick a random proxy IP\n proxy = {'http': IP}\n try:\n return requests.get(url, headers=headers, proxies=proxy, timeout=timeout)\n except:\n if num_retries > 0:\n time.sleep(10)\n IP = ''.join(str(random.choice(self.iplist)).strip())\n proxy = {'http': IP}\n print(u'Switching proxy; will retry in 10s,', num_retries, u'attempts left')\n print(u'Current proxy:', proxy)\n num_retries -= 1\n return self.request(url,href=href, proxy=proxy,timeout=3,num_retries=num_retries)\n else:\n print(u'Proxies failed too! Dropping the proxy')\n return requests.get(url, headers=headers, timeout=timeout) ## last resort: plain request without a proxy\n\nMzitu = mzitu() ## instantiate the crawler\nMzitu.all_url('http://www.mzitu.com/all') ## entry point: start crawling from the full album index\n# -*- coding:utf-8 -*-\n","repo_name":"wanglegedong/wddpythonscrapy","sub_path":"妹子图爬虫3.0优化数据库保存.py","file_name":"妹子图爬虫3.0优化数据库保存.py","file_ext":"py","file_size_in_byte":8326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31586069306","text":"\"\"\"\ncode to compare 2 models with precision, recall and F1 score over a set of images.\n\"\"\"\nfrom tensorflow.keras.optimizers import Adam\nimport tensorflow as tf\nfrom skimage.io import imread\nimport os\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport random\nimport cv2\n\nmodelBest = tf.keras.models.load_model(\"/Users/javier/Desktop/UNet/vegetation/final_weights/modelo_best.hdf5\", compile=False)\nmodelModelo11 = tf.keras.models.load_model(\"/Users/javier/Desktop/UNet/vegetation/final_weights/modelo11_1100_200.hdf5\", compile=False)\n\n\nSIZE =256\n\npath_images = \"/Users/javier/Desktop/UNet/vegetation/images/imagesBlueNoTrees/\"\npath_masks = \"/Users/javier/Desktop/UNet/vegetation/masks/masksNoTrees/\"\n\nseed=22\n\nF1_best_list = []\nF1_model11_list = []\nprecision_best_list = []\nprecision_model11_list = []\nrecall_best_list = []\nrecall_model11_list = []\n\nmalas_pred_best = dict()\nmalas_pred_modelo11 = dict()\n\ni = 0\n\nimages = os.listdir(path_images)\nmasks = os.listdir(path_masks)\nrandom.shuffle(images)\nprint(images)\n\nthreshold = 0.1\n\n\nworseImages_bestModelBest = dict()\nworseImages_bestModel11 = dict()\nbestThresholdBest = 0.1\nbestThresholdModel11 = 0.1\n\n\ndef PrecRecF1(mask, predictions):\n TP_best = np.sum(np.logical_and(mask, predictions[:,:,0]))\n TP_model11 = np.sum(np.logical_and(mask, predictions[:,:,1]))\n\n FN_best = np.sum(np.logical_and(mask, (~predictions[:,:,0].astype(bool)).astype(int)))\n FN_model11 = np.sum(np.logical_and(mask, (~predictions[:,:,1].astype(bool)).astype(int)))\n\n FP_best = np.sum(np.logical_and((~mask.astype(bool)).astype(int), predictions[:,:,0]))\n FP_model11 = np.sum(np.logical_and((~mask.astype(bool)).astype(int), predictions[:,:,1]))\n\n precision_best = TP_best / (TP_best + 
FP_best)\n precision_model11 = TP_model11 / (TP_model11 + FP_model11)\n\n recall_best = TP_best / (TP_best + FN_best)\n recall_model11 = TP_model11 / (TP_model11 + FN_model11)\n\n F1_best = 2 * (precision_best * recall_best) / (precision_best + recall_best)\n F1_model11 = 2 * (precision_model11 * recall_model11) / (precision_model11 + recall_model11)\n\n return precision_best, precision_model11, recall_best, recall_model11, F1_best, F1_model11\n\ndef calc_predictions(image):\n\n image_norm = np.expand_dims(tf.keras.utils.normalize(np.array(image), axis=1, order = 2),2)\n image_norm = image_norm[:,:,0][:,:,None]\n image_input = np.expand_dims(image_norm,0)*10\n\n predictions = np.zeros((SIZE,SIZE,2))\n\n predictions[:,:,0] = (modelBest.predict(image_input)[0,:,:,0] > 0.15).astype(np.uint8)\n predictions[:,:,1] = (modelModelo11.predict(image_input)[0,:,:,0] > 0.15).astype(np.uint8)\n\n return predictions\n\nfor image in images:\n #ensuring that the file is png type\n if image.split('.')[1] != \"png\":\n continue\n image2 = cv2.imread(path_images + image)[:,:,0]\n image2 = cv2.resize(image2,(SIZE,SIZE), cv2.INTER_LINEAR)\n\n #computing predictions\n predictions = calc_predictions(image2)\n\n #creating pictures to be painted\n painted = cv2.imread(path_images+image)[:,:,0]\n painted = cv2.resize(painted, (SIZE,SIZE), cv2.INTER_LINEAR)\n painted = cv2.cvtColor(painted,cv2.COLOR_GRAY2BGR)\n\n painted2 = painted.copy()\n\n #painting pictures\n painted[predictions[:,:,0]>0] = [0,0,255]\n painted2[predictions[:,:,1]>0] = [255,0,0]\n\n mask = imread(path_masks + image)[:,:,0]\n mask = Image.fromarray(mask)\n mask = mask.resize((SIZE,SIZE))\n mask = np.array(mask)\n\n #computing precision, recall, and F1 score\n precision_best, precision_model11, recall_best, recall_model11, F1_best, F1_model11 = PrecRecF1(mask, predictions)\n\n if F1_best < 0.2:\n malas_pred_best[image] = [precision_best, recall_best, F1_best]\n\n if F1_model11 < 0.2:\n malas_pred_modelo11[image] = [precision_model11, recall_model11, F1_model11]\n\n F1_best_list.append(F1_best)\n F1_model11_list.append(F1_model11)\n precision_best_list.append(precision_best)\n precision_model11_list.append(precision_model11)\n recall_best_list.append(recall_best)\n recall_model11_list.append(recall_model11)\n\n mean_F1_best = np.mean(np.array(F1_best_list))\n mean_F1_model11 = np.mean(np.array(F1_model11_list))\n mean_precision_best = np.mean(np.array(precision_best_list))\n mean_precision_model11 = np.mean(np.array(precision_model11_list))\n mean_recall_best = np.mean(np.array(recall_best_list))\n mean_recall_model11 = np.mean(np.array(recall_model11_list))\n\n print(f\"\"\"\\n model best; \n F1: {F1_best:.2f},\n Precision: {precision_best:.2f},\n Recall: {recall_best:.2f} \n on image {image} \\n\"\"\")\n\n print(f\"\"\"mean values model Best; \n F1: {mean_F1_best:.2f},\n Precision: {mean_precision_best:.2f},\n Recall: {mean_recall_best:.2f} \n on image {image} \\n\"\"\") \n\n print(f\"\"\"model 11; \n F1: {F1_model11:.2f},\n Precision: {precision_model11:.2f},\n Recall: {recall_model11:.2f} \n on image {image} \\n\"\"\")\n \n print(f\"\"\"mean values model 11; \n F1: {mean_F1_model11:.2f},\n Precision: {mean_precision_model11:.2f},\n Recall: {mean_recall_model11:.2f} \n on image {image} \\n\"\"\")\n\n #computing intersections to show visually the performance\n intersectionBest = np.logical_and(mask, predictions[:,:,0])\n intersectionModelo11 = np.logical_and(mask, predictions[:,:,1])\n\n #showing some examples\n if i <= 5:\n \n 
 plt.figure(figsize=(16, 8))\n plt.subplot(331)\n plt.title(image)\n plt.imshow(image2, cmap = 'gray')\n plt.subplot(332)\n plt.title(image)\n plt.imshow(painted)\n plt.subplot(333)\n plt.title(image)\n plt.imshow(painted2)\n plt.subplot(334)\n plt.title(image)\n plt.imshow(mask, cmap = 'gray')\n plt.subplot(335)\n plt.title('model ' + \"Best\")\n plt.imshow(predictions[:,:,0], cmap = 'gray')\n plt.subplot(336)\n plt.title('model ' + \"11\")\n plt.imshow(predictions[:,:,1], cmap = 'gray')\n plt.subplot(338)\n plt.title('intersection model ' + \"Best\")\n plt.imshow(intersectionBest, cmap = 'gray')\n plt.subplot(339)\n plt.title('intersection model ' + \"11\")\n plt.imshow(intersectionModelo11, cmap = 'gray')\n plt.show()\n i += 1\n\nprint(\"bad predictions (model Best): \")\nprint(malas_pred_best)\nprint(\"bad predictions (model 11): \")\nprint(malas_pred_modelo11)\n","repo_name":"xbalaguer/UNet_project","sub_path":"analysis/comparing_models_P_R_F1.py","file_name":"comparing_models_P_R_F1.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38353261952","text":"import numpy as np\r\n\r\npath1 = r'E:\\pcx\\VT-UNet\\runs_twostage\\logs_base\\model_1\\val.txt'#ET:0.802, TC:0.83995, WT:0.88314, max_avg_dice:0.8416966666666666\r\npath2 = r'E:\\pcx\\VT-UNet\\runs_onestage\\logs_base\\model_1\\val.txt'#ET:0.79367, TC:0.83191, WT:0.8846, max_avg_dice:0.8367266666666667\r\npath3 = r'E:\\pcx\\VT-UNet\\runs_TransBTS\\logs_base\\model_1\\val.txt'#ET:0.76287, TC:0.79252, WT:0.86678, max_avg_dice:0.8073899999999999\r\npath4 = r'E:\\pcx\\VT-UNet\\runs_twostage_zsocre\\logs_base\\model_1\\val.txt'#ET:0.80247, TC:0.83505, WT:0.89364, max_avg_dice:0.8437199999999999\r\npath5 = r'E:\\pcx\\VT-UNet\\runs_twostage_wdice\\logs_base\\model_1\\val.txt'#ET:0.79962, TC:0.83886, WT:0.88621, max_avg_dice:0.8415633333333333\r\npath6 = r'E:\\pcx\\VT-UNet\\runs_twostage_zsocre_warmup\\logs_base\\model_1\\val.txt'#ET:0.80926, TC:0.83471, WT:0.89004, max_avg_dice:0.8446699999999999\r\npath7 = r'E:\\pcx\\CU-Trans\\runs\\logs_base\\model_1\\val.txt'\r\n\r\nmax_ET_dice = 0\r\nmax_WT_dice = 0\r\nmax_TC_dice = 0\r\nmax_avg_dice = 0\r\nmax_epoch = 0\r\nfor line in open(path7):\r\n EPOCH = line.split(' :Val')[0]\r\n EPOCH = np.array(EPOCH.split('Epoch ')[1], dtype=float) # plain float: np.float was removed in NumPy 1.24\r\n ET = np.array(line.split('ET : ')[1][0:7], dtype=float)\r\n WT = np.array(line.split('WT : ')[1][0:7], dtype=float)\r\n TC = np.array(line.split('TC : ')[1][0:7], dtype=float)\r\n avg_dice = (ET + WT + TC) / 3\r\n if max_avg_dice < avg_dice:\r\n max_avg_dice = avg_dice\r\n max_ET_dice = ET\r\n max_WT_dice = WT\r\n max_TC_dice = TC\r\n max_epoch = EPOCH\r\n\r\nprint(f\"max epoch:{max_epoch}, ET:{max_ET_dice}, TC:{max_TC_dice}, WT:{max_WT_dice}, max_avg_dice:{max_avg_dice}\")\r\n\r\n\r\n\r\n","repo_name":"chaineypung/BTS-UGT","sub_path":"cal_dice.py","file_name":"cal_dice.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18636204949","text":"from cycgkit.cgtypes import vec3, mat4\nfrom uuid import uuid1\nimport math\n\nfrom .face3 import Face3\n\n# Based on Geometry.js from https://github.com/mrdoob/three.js\n\n\nclass Geometry:\n def __init__(self):\n\n self.uuid = uuid1()\n\n self.name = ''\n self.type = 'Geometry'\n\n self.vertices = []\n self.colors = [] # one-to-one vertex colors, used in Points and Line\n\n self.faces = []\n\n 
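# one [uvA, uvB, uvC] triple per face; populated by setFaceUVS below\n 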
self.faceVertexUvs = []\n\n self.lineDistances = []\n self.hasTangents = False\n self.groups = []\n\n # def computeLineDistances(self):\n #\n # d = 0\n # vertices = self.vertices\n #\n # for ( i = 0, il = vertices.length i < il i ++ ):\n #\n # if ( i > 0 ):\n #\n # d += vertices[ i ].distanceTo( vertices[ i - 1 ] )\n #\n # }\n #\n # self.lineDistances[ i ] = d\n #\n # }\n #\n #\n #\n # def computeBoundingBox(self):\n #\n # if ( self.boundingBox == None ):\n #\n # self.boundingBox = new THREE.Box3()\n #\n # }\n #\n # self.boundingBox.setFromPoints( self.vertices )\n #\n #\n #\n # def computeBoundingSphere(self):\n #\n # if ( self.boundingSphere == None ):\n #\n # self.boundingSphere = new THREE.Sphere()\n #\n # }\n #\n # self.boundingSphere.setFromPoints( self.vertices )\n #\n #\n\n def mergeVertices(self):\n verticesMap = {} # Hashmap for looking up vertices by position coordinates (and making sure they are unique)\n unique = []\n changes = {}\n\n precisionPoints = 4 # number of decimal points, eg. 4 for epsilon of 0.0001\n precision = math.pow(10, precisionPoints)\n\n for i in range(len(self.vertices)):\n v = self.vertices[i]\n key = '{}_{}_{}'.format(round(v.x * precision), round(v.y * precision), round(v.z * precision))\n\n if verticesMap.get(key) is None:\n verticesMap[key] = i\n unique.append(self.vertices[i])\n changes[i] = len(unique) - 1\n else:\n # console.log('Duplicate vertex found. ', i, ' could be using ', verticesMap[key])\n changes[i] = changes[verticesMap[key]]\n\n # if faces are completely degenerate after merging vertices, we\n # have to remove them from the geometry.\n faceIndicesToRemove = []\n\n for i in range(len(self.faces)):\n\n face = self.faces[i]\n\n face.a = changes[face.a]\n face.b = changes[face.b]\n face.c = changes[face.c]\n\n indices = [face.a, face.b, face.c]\n\n dupIndex = - 1\n\n # if any duplicate vertices are found in a Face3\n # we have to remove the face as nothing can be saved\n for n in range(3):\n if indices[n] == indices[(n + 1) % 3]:\n dupIndex = n\n faceIndicesToRemove.append(i)\n break\n # iterate down to and including index 0 (the stop value -1 is excluded)\n for i in range(len(faceIndicesToRemove) - 1, -1, -1):\n # for ( i = faceIndicesToRemove.length - 1 i >= 0 i -- ):\n idx = faceIndicesToRemove[i]\n\n self.faces.pop(idx)\n\n # for j in range(len(self.faceVertexUvs)):\n self.faceVertexUvs.pop(idx)\n\n # Use unique set of vertices\n\n diff = len(self.vertices) - len(unique)\n self.vertices = unique\n return diff\n\n def addGroup(self, start, count):\n\n self.groups.append({'start': start, 'count': count})\n\n return self\n\n def setIndex(self, indices):\n for v in indices:\n self.faces.append(Face3(v.x, v.y, v.z))\n\n def setFaceUVS(self, uvs):\n for f in self.faces:\n self.faceVertexUvs.append([uvs[int(f.a)], uvs[int(f.b)], uvs[int(f.c)]])\n","repo_name":"jr-garcia/Engendro3D","sub_path":"e3d/model_management/pygeom/Geometry.py","file_name":"Geometry.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"72102126153","text":"import cv2\nimport math\nimport numpy as np\nfrom DataModel.enums import Pattern, Color\n\n\nclass PatternRecognizer:\n\n def __init__(self, common_operator):\n self.common_operator = common_operator\n self.canny_threshold_1 = None\n self.canny_threshold_2 = None\n self.minimum_line_length = None\n self.hough_lines_threshold = None\n self.max_line_gap = None\n self.angle_epsilon = None\n self.percentage_of_line_type_to_qualify_as_pattern = None\n\n def find_pattern(self, image):\n \"\"\"\n Find 
pattern and its color in the given image\n :param image: image where only pattern is visible, image must be in BGR color space\n :return: tuple in form of (pattern, color_pattern), where\n pattern is pattern id defined in class Pattern from enums.py,\n pattern_color is color id defined in class Color from enums.py\n \"\"\"\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, self.canny_threshold_1, self.canny_threshold_2)\n lines = cv2.HoughLinesP(image=edges, rho=1, theta=np.pi / 180, threshold=self.hough_lines_threshold, lines=np.array([]),\n minLineLength=self.minimum_line_length, maxLineGap=self.max_line_gap)\n\n if lines is None:\n return Pattern.NONE, Color.NONE\n\n pattern_color = self._find_patterns_color(cv2.cvtColor(image, cv2.COLOR_BGR2HSV))\n pattern = self._assume_pattern(lines)\n return pattern, pattern_color\n\n def _assume_pattern(self, lines):\n \"\"\"\n Given ndarray containing lines of pattern assume its pattern\n :param lines: ndarray containing line of pattern\n :return: pattern id defined in class Pattern from enums.py\n \"\"\"\n\n number_of_lines, _, _ = lines.shape\n angles = []\n for i in range(number_of_lines):\n begin = (lines[i][0][0], lines[i][0][1])\n end = (lines[i][0][2], lines[i][0][3])\n angles.append(self._line_angle((begin, end)))\n\n number_of_horizontal_lines = sum(i >= 180 - self.angle_epsilon or i <= self.angle_epsilon for i in angles)\n number_of_vertical_lines = sum(90 - self.angle_epsilon <= i <= 90 + self.angle_epsilon for i in angles)\n number_of_left_inclined_lines = sum(90 + self.angle_epsilon < i < 180 - self.angle_epsilon for i in angles)\n number_of_right_inclined_lines = sum(self.angle_epsilon < i < 90 - self.angle_epsilon for i in angles)\n\n if float(number_of_horizontal_lines) / len(angles) > self.percentage_of_line_type_to_qualify_as_pattern:\n return Pattern.HORIZONTAL_LINES\n if float(number_of_vertical_lines) / len(angles) > self.percentage_of_line_type_to_qualify_as_pattern:\n return Pattern.VERTICAL_LINES\n if float(number_of_left_inclined_lines) / len(angles) > self.percentage_of_line_type_to_qualify_as_pattern:\n return Pattern.LEFT_INCLINED_LINES\n if float(number_of_right_inclined_lines) / len(angles) > self.percentage_of_line_type_to_qualify_as_pattern:\n return Pattern.RIGHT_INCLINED_LINES\n if float(number_of_vertical_lines) / len(angles) >= self.percentage_of_line_type_to_qualify_as_pattern / 2 and \\\n float(number_of_horizontal_lines) / len(angles) >= self.percentage_of_line_type_to_qualify_as_pattern / 2:\n return Pattern.GRID\n if float(number_of_left_inclined_lines) / len(angles) >= self.percentage_of_line_type_to_qualify_as_pattern / 2 and \\\n float(number_of_right_inclined_lines) / len(angles) >= self.percentage_of_line_type_to_qualify_as_pattern / 2:\n return Pattern.INCLINED_GRID\n return Pattern.NONE\n\n @staticmethod\n def _line_angle(line):\n \"\"\"\n Calculates the agle of line\n :param line: ndarray representing line\n :return: angle of line in degrees\n \"\"\"\n\n a = line[0]\n b = line[1]\n if a[1] < b[1]:\n temp = a\n a = b\n b = temp\n c = (a[0], b[1])\n horizontal_length = b[0] - c[0]\n vertical_length = a[1] - c[1]\n line_length = math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))\n sin_alpha = vertical_length / line_length\n cos_alpha = horizontal_length / line_length\n alpha = math.degrees(math.atan2(sin_alpha, cos_alpha))\n if alpha == 180:\n alpha = 0\n return alpha\n\n def _find_patterns_color(self, image):\n \"\"\"\n Find color of pattern in image\n :param image: image 
with only the pattern present\n :return: color id defined in class Color from enums.py\n \"\"\"\n non_black_pixels = image[np.where((image != [0, 0, 0]).all(axis=2))]\n if len(non_black_pixels) == 0: # '==' rather than 'is': identity checks on ints are unreliable\n return Color.NONE\n avg_color = (0., 0., 0.)\n for pixel in non_black_pixels:\n avg_color += pixel # the tuple is promoted to an ndarray on the first addition\n avg_color /= len(non_black_pixels)\n return self.common_operator.color_from_bounds(avg_color)\n","repo_name":"kamil-nowowiejski/ObjectsDetection","sub_path":"ImageProcessing/PatternRecognizer.py","file_name":"PatternRecognizer.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"25496000761","text":"import json\n\n\ndef return_cv(a, v):\n if type(a) is not list:\n lst = a, v\n a = list(lst)\n else:\n a.append(v)\n return a\n\n\ndef line_to_dict(lines):\n a = {}\n for line in lines:\n if line == '\\n':\n continue\n elif line[0:6] == 'HTTP/2':\n a.update({'protocol': 'HTTP/2', 'status_code': line.split()[1]})\n elif line[0:6] == 'HTTP/1':\n a.update({'protocol': line.split()[0], 'status_code': line.split()[1],\n 'status_message': ' '.join(line.split()[2:])})\n elif 'POST' in line[0:4] or 'GET' in line[0:4]:\n a.update({'method': line.split()[0], 'uri': line.split()[1],\n 'protocol': line.split()[2]})\n else:\n # maxsplit=1 keeps header values that themselves contain ': ' intact\n if len(line.split(': ', 1)) > 1:\n key, value = line.split(': ', 1)\n else:\n key, value = line.split(': ', 1) + [' ']\n if value.endswith(\"\\n\"):\n value = value[:-1]\n if a.get(key):\n a[key] = return_cv(a[key], value)\n else:\n a.update({key: value})\n return a\n\n\ndef http_headers_to_json(path_to_http_headers, path_to_result_json):\n with open(path_to_http_headers) as f:\n json_result = line_to_dict(f)\n\n with open(path_to_result_json, 'w') as f:\n json.dump(json_result, f)","repo_name":"vaeskcode/pylabs","sub_path":"task_http_headers.py","file_name":"task_http_headers.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30976883918","text":"import os\nimport pathlib\nimport typing\nimport glob\nimport click\nimport subprocess\nfrom tqdm.cli import tqdm\n\nfrom .settings import EsreverSettings\nfrom .data import DATA_DIR\n\nclass GlobPath(click.ParamType):\n\n name = \"Path or glob pattern representing multiple files.\"\n\n CONVERTED_SINGLE_TYPE = typing.List[pathlib.Path]\n CONVERTED_MULTIPLE_TYPE = typing.Tuple[CONVERTED_SINGLE_TYPE]\n\n def convert(\n self, value: typing.Any, param: typing.Optional[click.Parameter], ctx: typing.Optional[click.Context]\n ) -> typing.List[pathlib.Path]:\n glob_value = glob.glob(str(value))\n return [pathlib.Path(glob_match) for glob_match in glob_value]\n\n @staticmethod\n def combine(values: typing.Tuple[typing.List[pathlib.Path]]) -> typing.Set[pathlib.Path]:\n result_set: typing.Set[pathlib.Path] = set()\n for value in values:\n if isinstance(value, pathlib.Path):\n result_set.add(value)\n elif isinstance(value, list):\n for sub_value in value:\n if isinstance(sub_value, pathlib.Path):\n result_set.add(sub_value)\n return result_set\n\n\nconfig = click.option(\n \"config\",\n \"--config\",\n \"-c\")\noutput_dir = click.option(\n \"output_dir\",\n \"--output-dir\",\n \"-o\",\n type=click.Path(\n dir_okay=True,\n file_okay=False))\ninput_files = click.argument(\n \"input_files\",\n type=GlobPath(),\n required=True,\n nargs=-1,\n)\n\n\ndef decompile(settings: EsreverSettings, input_file: pathlib.Path, output_file: 
pathlib.Path):\n\n # echo time $GHIDRA_PATH/support/analyzeHeadless . tmp_ghidra_project -import $1 -postscript $DECOMPILE_SCRIPT_PATH/Decompile.java $2\n subprocess.run(\n [\n settings.ghidra_headless_path,\n \".\",\n \"tmp_ghidra_project\",\n \"-scriptPath\",\n DATA_DIR,\n \"-import\",\n str(input_file.absolute()),\n \"-postscript\",\n f\"{DATA_DIR}/Decompile.java\",\n str(output_file.absolute())\n ]\n )\n\n\n\n@click.group(\"esrever\")\ndef cli_app():\n ...\n\n@cli_app.command(\"decompile\")\n@config\n@input_files\ndef decompile_command(\n input_files: typing.List[pathlib.Path],\n config: typing.Optional[str] = None,\n output_dir: typing.Optional[pathlib.Path] = None):\n \"\"\"Reverse a set of binary files with Ghidra.\"\"\"\n config = config or EsreverSettings()\n input_files = GlobPath.combine(input_files)\n output_dir = output_dir or pathlib.Path(os.getcwd()).joinpath(\"out\")\n if not output_dir.exists():\n output_dir.mkdir()\n for input_file in tqdm(input_files):\n output_file = output_dir.joinpath(f\"{input_file.stem}.c\")\n decompile(settings=config, input_file=input_file, output_file=output_file)\n\n","repo_name":"infosec-garage/esrever","sub_path":"esrever/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"71578429193","text":"import time\n\nfrom collections import defaultdict\nfrom os import PathLike\nfrom typing import Collection, Optional, Tuple, List, Mapping, Any, Sequence, Union\nfrom keras.optimizers import Optimizer\nfrom keras.losses import Loss\nfrom engines.exact_engine import ExactEngine\nfrom engines.tensor_ops import *\nfrom engines.builtins import register_tensor_predicates\nfrom problog.logic import term2list, Term, Var, Constant, InstantiationError, Or, list2term\nfrom problog.program import PrologString, PrologFile, SimpleProgram, LogicProgram\nfrom embeddings import TermEmbedder\nfrom engines import Engine\nfrom network import Network\nfrom query import Query\nfrom semiring import Result\nfrom semiring.tensor_semiring import TensorSemiring\nfrom solver import Solver\nfrom utils import check_path\nfrom utils.logger import Logger\n\n\nclass Model(object):\n def __init__(\n self,\n program_string: str,\n networks: List[Network] = None,\n pcf_functions: List[Network] = None,\n embeddings: Optional[TermEmbedder] = None,\n load: bool = True,\n gumbel_temperature: float = 2.,\n gumbel_alpha: float = 1e-4,\n nb_samples: int = 250\n ):\n \"\"\"\n :param program_string: A string representing a DeepProbLog program or the path to a file containing a program.\n :param networks: A collection of networks that will be used to evaluate the neural predicates.\n :param embeddings: A TermEmbedder used to embed Terms in the program.\n :param load: If true, then it will attempt to load the program from 'program_string',\n else, it will consider program_string to be the program itself.\n \"\"\"\n super(Model, self).__init__()\n if load:\n self.program: LogicProgram = PrologFile(program_string)\n else:\n self.program: LogicProgram = PrologString(program_string)\n\n if networks is None:\n networks = []\n self.networks = dict()\n for network in networks:\n self.add_network(network, det=True)\n\n if pcf_functions is None:\n pcf_functions = []\n self.pcf_functions = dict()\n for pcf_function in pcf_functions:\n self.add_pcf(pcf_function)\n\n self.nb_samples = nb_samples\n self.gumbel_temperature = gumbel_temperature\n self.gumbel_alpha = gumbel_alpha\n 
self.add_builtins()\n\n self.embeddings = embeddings\n self.tensor_sources = dict()\n self.tensor_sources[\"inputs\"] = dict()\n self.tensor_sources[\"mem_inputs\"] = dict()\n self.optimizer = None\n self.loss = None\n self.graph = None\n self.memgraph = None\n self.logger = Logger()\n self.counter = 0\n self.set_engine(ExactEngine(self))\n\n self.breaker_input = tf.keras.Input(shape=())\n self.add_tensor_source(\"breaker\", {0: self.breaker_input})\n\n def get_embedding(self, term: Term):\n return self.embeddings.get_embedding(term)\n\n def get_trainable_variables(self):\n return self.graph.trainable_variables\n\n def set_engine(self, engine: Engine, **kwargs):\n \"\"\"\n Initializes the solver of this model with the given engine and additional arguments.\n :param engine: The engine that will be used to ground queries in this model.\n :param kwargs: Additional arguments passed to the solver.\n :return:\n \"\"\"\n self.solver = Solver(self, engine, **kwargs)\n register_tensor_predicates(engine)\n\n\n def set_optimizer(self, optimiser: Optimizer):\n self.optimizer = optimiser\n\n def set_loss(self, loss: Loss):\n self.loss = loss\n\n def reset_logger(self):\n self.logger = Logger()\n\n def add_network(self, net, det=False):\n self.networks[net.name] = net\n net.model = self\n net.det = det\n\n def add_pcf(self, pcf):\n self.pcf_functions[pcf.name] = pcf\n pcf.model = self\n\n def add_builtins(self):\n self.add_network(Network(Is(), \"tf_eq\"), det=True)\n self.add_network(Network(ContinuousOperation(operator=\"add\"), \"add\"), det=True)\n self.add_network(Network(ContinuousOperation(operator=\"sub\"), \"subtract\"), det=True)\n self.add_network(Network(ContinuousOperation(operator=\"mul\"), \"mul\"), det=True)\n self.add_network(Network(ContinuousOperation(operator=\"div\"), \"div\"), det=True)\n self.add_network(Network(ContinuousOperation(operator=\"rounddiv\"), \"rounddiv\"), det=True)\n self.add_network(Network(Sampler(self, sample_nb=self.nb_samples, temperature=self.gumbel_temperature,\n alpha=self.gumbel_alpha), \"sampler_internal\"), det=True)\n\n if \"equals\" not in self.networks.keys():\n self.add_network(Network(Equals(), \"equals\"), det=True)\n if \"smaller_than\" not in self.networks.keys():\n self.add_network(Network(SmallerThan(), \"smaller_than\"), det=True)\n if \"unification\" not in self.networks.keys():\n self.add_network(Network(SoftUnification(), \"unification\"), det=True)\n\n def evaluate_nn(self, to_evaluate: List[Tuple[Term, Term]]):\n \"\"\"\n :param to_evaluate: List of neural predicates to evaluate\n :return: A dictionary with the elements of to_evaluate as keys, and the output of the NN as values.\n \"\"\"\n result = dict()\n evaluations = defaultdict(list)\n # Group inputs per net to send in batch\n for net_name, inputs in to_evaluate:\n net = self.networks[str(net_name)]\n if net.det:\n tensor_name = Term(\"nn\", net_name, inputs)\n if tensor_name not in self.solver.engine.tensor_store:\n evaluations[net_name].append(inputs)\n else:\n if inputs in net.cache:\n result[(net_name, inputs)] = net.cache[inputs]\n del net.cache[inputs]\n else:\n evaluations[net_name].append(inputs)\n for net in evaluations:\n network = self.networks[str(net)]\n out = network([term2list(x, False) for x in evaluations[net]])\n for i, k in enumerate(evaluations[net]):\n if network.det:\n tensor_name = Term(\"nn\", net, k)\n self.solver.engine.tensor_store.store(out[i], tensor_name)\n else:\n result[(net, k)] = out[i]\n return result\n\n def _solve(self, query: Query, 
generate=False) -> List[Result]:\n if generate:\n self.breaker_input = tf.constant(1.)\n self.add_tensor_source(\"breaker\", {0: self.breaker_input})\n generation = self.solver.solve([query])\n self.breaker_input = tf.keras.Input(shape=())\n self.add_tensor_source(\"breaker\", {0: self.breaker_input})\n return generation\n else:\n return self.solver.solve([query])\n\n def _compile(self, query: Query, mem=False) -> None:\n \"\"\"\n Builds the computation graph for the given query.\n :param query: The query to be solved and built.\n :param input: A list of Keras tensors that will be used as input to the computation graph.\n :param mem: If true, the graph of the query will be stored separately to be used as a memory query for\n continual learning or curriculum learning purposes.\n :return:\n \"\"\"\n if mem:\n input = [v for k, v in self.tensor_sources[\"mem_inputs\"].items()]\n else:\n input = [v for k, v in self.tensor_sources[\"inputs\"].items()]\n input.append(self.breaker_input)\n start_time = time.time()\n outputs = list(self._solve(query)[0].result.values())\n graph = tf.keras.Model(inputs=input, outputs=outputs[0])\n print(f\"Build time: {np.round(time.time() - start_time, 4)}s\")\n if mem:\n self.memgraph = graph\n else:\n self.graph = graph\n self.counter = 0\n\n def _build_query(self, name: str, inputs, substitution, mem=False) -> Query:\n translated_input = []\n tensor_idx = {}\n for id, i in enumerate(inputs):\n if type(i) == Constant or type(i) == Var or type(i) == Term:\n translated_input.append(i)\n elif tf.keras.backend.is_keras_tensor(i) or tf.is_tensor(i):\n if mem:\n translated_input.append(Term(\"tensor\", Term(\"mem_inputs\", Constant(id))))\n else:\n translated_input.append(Term(\"tensor\", Term(\"inputs\", Constant(id))))\n tensor_idx[id] = i\n else:\n raise Exception(f\"Invalid type of input {i}\")\n if mem:\n self.add_tensor_source(\"mem_inputs\", tensor_idx)\n else:\n self.add_tensor_source(\"inputs\", tensor_idx)\n return Query(Term(name, *translated_input), substitution=substitution)\n\n def solve_query(self, query: str, inputs=None, substitution=None, generate=False) -> List[Result]:\n if inputs is None:\n inputs = []\n query = self._build_query(query, inputs, substitution)\n return self._solve(query, generate=generate)\n\n def compile_query(self, query: str, inputs=None, substitution=None, mem=False):\n if inputs is None:\n inputs = []\n query = self._build_query(query, inputs, substitution, mem)\n self._compile(query, mem=mem)\n\n def call(self, input, training=False, mem=False):\n input.append(tf.constant(1.))\n if mem:\n graph = self.memgraph\n else:\n graph = self.graph\n wmc = graph.call(input)\n if wmc.shape[-1] > 1:\n wmc = tf.reduce_mean(wmc, axis=-1)\n return wmc\n\n def save_state(self, filename: Union[str, PathLike]):\n \"\"\"\n Save the compiled Tensorflow graph of a query to the given filename.\n :param filename: The filename to save the model to.\n :param complete: If true, save neural networks with information needed to resume training.\n :return:\n \"\"\"\n check_path(filename)\n self.graph.save_weights(filename)\n\n\n def load_state(self, filename: Union[str, PathLike]):\n \"\"\"\n Load the weights of a Tensorflow graph of a query from the given filename.\n :param filename: The filename to restore the model from.\n :return:\n \"\"\"\n check_path(filename)\n self.graph.load_weights(filename)\n\n def grad(self, inputs, targets, training=False, mem_data=None):\n with tf.GradientTape() as tape:\n y_pred = self.call(inputs, training)\n probability = 
tf.squeeze(y_pred)\n loss_value = self.loss(targets, probability)\n\n # Include memory in a simple, continual learning replay fashion\n if mem_data is not None: \n rd_id = np.random.randint(0, len(mem_data))\n mem_inputs, mem_targets = mem_data[rd_id][:-1], mem_data[rd_id][-1]\n mem_y_pred = self.call(mem_inputs, training, mem=True)\n loss_value += 5 * self.loss(mem_targets, mem_y_pred)\n return loss_value, tape.gradient(loss_value, self.graph.trainable_variables)\n\n def train(self, data, epochs, log_its=100, mem_data=None, val_data=None, eval_fns=None, fn_args=None, training=False):\n \"\"\"\n Trains all weights present in the model graph.\n \"\"\"\n if mem_data is not None:\n param_list = self.graph.trainable_variables\n param_list.extend(self.memgraph.trainable_variables)\n else:\n param_list = self.graph.trainable_variables\n\n for epoch in range(epochs):\n print(\"Epoch {}\".format(epoch + 1))\n accumulated_loss = 0\n acc_eval_time = 0\n prev_iter_time = time.time()\n for el in data:\n x, y = el[:-1], el[-1]\n prev_eval_time = time.time()\n loss_val, grads = self.grad(x, y, training=training, mem_data=mem_data)\n self.optimizer.apply_gradients(zip(grads, param_list))\n acc_eval_time += time.time() - prev_eval_time\n accumulated_loss += loss_val.numpy()\n\n self.counter += 1\n if self.counter % log_its == 0:\n update_time = time.time() - prev_iter_time\n if val_data == None:\n print(\n \"Iteration: \",\n self.counter,\n \"\\ts:%.4f\" % update_time,\n \"\\tAverage Loss: \",\n accumulated_loss / log_its\n )\n self.log(self.counter, accumulated_loss, acc_eval_time, update_time, log_iter=log_its)\n else:\n val_loss = 0\n val_counter = 0\n for val_el in val_data:\n x2, y2 = val_el[:-1], val_el[-1]\n val_y = self.call(x2, training)\n val_loss += self.loss(y2, val_y).numpy()\n val_counter += 1\n evals = []\n if eval_fns is not None:\n for id, fn in enumerate(eval_fns):\n eval = fn(*fn_args[id])\n self.logger.log(f\"val_accs{id}\", self.counter, eval)\n evals.append(eval)\n print(\n \"Iteration: \",\n self.counter,\n \"\\ts:%.4f\" % update_time,\n \"\\tAverage Loss: \",\n accumulated_loss / log_its,\n \"\\tValidation Loss: \",\n val_loss / val_counter,\n \"\\tValidation Eval: \",\n evals\n )\n self.log(self.counter, accumulated_loss, acc_eval_time, update_time, log_iter=log_its)\n accumulated_loss = 0\n prev_iter_time = time.time()\n\n\n def register_foreign(self, *args, **kwargs):\n self.solver.engine.register_foreign(self.solver.program, *args, **kwargs)\n\n def __str__(self):\n return \"\\n\".join(str(line) for line in self.program)\n\n def get_tensor(self, term: Term):\n \"\"\"\n :param term: A term of the form tensor(_).\n If the tensor is of the form tensor(a(*args)), then it will look into tensor source a.\n :return: Returns the stored tensor identifier by the term.\n \"\"\"\n tensor_list = []\n if type(term) == list:\n for i in term:\n tensor_list.append(self.get_tensor_helper(i))\n return tensor_list\n else:\n return self.get_tensor_helper(term)\n\n def get_tensor_helper(self, term: Term) -> tf.Tensor:\n if type(term) == int:\n return term\n if len(term.args) > 0 and term.args[0].functor in self.tensor_sources:\n if len(term.args) > 0 and term.args[0].functor in self.tensor_sources:\n if type(term.args[0].args) is tuple:\n return self.tensor_sources[term.args[0].functor][term.args[0].args[0].value]\n else:\n return self.tensor_sources[term.args[0].functor][term.args[0].args]\n return self.solver.get_tensor(term)\n\n def store_tensor(self, tensor: tf.Tensor) -> Term:\n \"\"\"\n Stores 
a tensor in the tensor store and returns an identifier.\n :param tensor: The tensor to store.\n :return: The Term that is the identifier by which this tensor can be uniquely identified in the logic.\n \"\"\"\n return Term(\"tensor\", Constant(self.solver.engine.tensor_store.store(tensor)))\n\n def add_tensor_source(\n self, name: str, source: Mapping[Any, tf.Tensor]\n ):\n \"\"\"\n Adds a named tensor source to the model.\n :param name: The name of the added tensor source.\n :param source: The tensor source to add.\n :return:\n \"\"\"\n self.tensor_sources[name] = source\n\n def get_parameters(self):\n return self.graph.variables\n\n def log(\n self, counter, acc_loss, eval_timing, it_timing, snapshot_iter=None,\n log_iter=100, verbose=1, **kwargs\n ):\n if (\n \"snapshot_name\" in kwargs\n and snapshot_iter is not None\n and counter % snapshot_iter == 0\n ):\n filename = \"{}_iter_{}.mdl\".format(kwargs[\"snapshot_name\"], counter)\n print(\"Writing snapshot to \" + filename)\n self.save_state(filename)\n if verbose and counter % log_iter == 0:\n self.logger.log(\"time\", counter, it_timing)\n self.logger.log(\"loss\", counter, acc_loss / log_iter)\n self.logger.log(\"eval_time\", counter, eval_timing / log_iter)\n","repo_name":"ML-KULeuven/deepseaproblog","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":17085,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"25298355705","text":"import sys\n\n\ninput = sys.stdin.readline\nn = int(input())\ncandidate1 = int(input())\nvote_list = []\nif n > 1:\n for _ in range(n-1):\n vote_list.append(int(input()))\n vote_list.sort(reverse=True)\n\n start_votes = candidate1\n while candidate1 <= vote_list[0]:\n candidate1 += 1\n vote_list[0] -= 1\n vote_list.sort(reverse=True)\n\n print(candidate1-start_votes)\n\nelse:\n print(0)","repo_name":"jo9392/ProblemSolving","sub_path":"greedy_search/boj_1417.py","file_name":"boj_1417.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17617784316","text":"from project.animal import Animal\nfrom project.worker import Worker\n\n\nclass Zoo:\n\n def __init__(self, name: str, budget: int, animal_capacity: int, workers_capacity: int):\n self.name = name\n self.__budget = budget\n self.__animal_capacity = animal_capacity\n self.__workers_capacity = workers_capacity\n self.animals = list()\n self.workers = list()\n\n def add_animal(self, animal: Animal, price: int):\n if self.__budget >= price and self.__animal_capacity >= len(self.animals) + 1:\n self.__budget -= price\n self.animals.append(animal)\n return f\"{animal.name} the {animal.__class__.__name__} added to the zoo\"\n elif self.__budget < price and self.__animal_capacity >= len(self.animals) + 1:\n return \"Not enough budget\"\n return \"Not enough space for animal\"\n\n def hire_worker(self, worker: Worker):\n if self.__workers_capacity > len(self.workers):\n self.workers.append(worker)\n return f\"{worker.name} the {worker.__class__.__name__} hired successfully\"\n return \"Not enough space for worker\"\n\n def fire_worker(self, worker_name):\n for pos, x in enumerate(self.workers):\n if x.name == worker_name:\n del self.workers[pos]\n return f\"{worker_name} fired successfully\"\n return f\"There is no {worker_name} in the zoo\"\n\n def pay_workers(self):\n sum_salaries = sum(x.salary for x in self.workers)\n if self.__budget >= sum_salaries:\n self.__budget -= 
sum_salaries\n return f\"You payed your workers. They are happy. Budget left: {self.__budget}\"\n return \"You have no budget to pay your workers. They are unhappy\"\n\n def tend_animals(self):\n total_costs = sum(animal.money_for_care for animal in self.animals)\n if self.__budget >= total_costs:\n self.__budget -= total_costs\n return f\"You tended all the animals. They are happy. Budget left: {self.__budget}\"\n return \"You have no budget to tend the animals. They are unhappy.\"\n\n def profit(self, amount):\n self.__budget += amount\n\n def animals_status(self):\n info = {\"Cheetah\": [], \"Tiger\": [], \"Lion\": []}\n for x in self.animals:\n info[x.__class__.__name__].append(str(x))\n result = [f\"You have {len(self.animals)} animals\",\n f\"----- {len(info['Lion'])} Lions:\", *info['Lion'],\n f\"----- {len(info['Tiger'])} Tigers:\", *info['Tiger'],\n f\"----- {len(info['Cheetah'])} Cheetahs:\", *info['Cheetah']]\n return \"\\n\".join(result)\n\n def workers_status(self):\n info = {\"Keeper\": [], \"Vet\": [], \"Caretaker\": []}\n for x in self.workers:\n info[x.__class__.__name__].append(str(x))\n result = [f\"You have {len(info['Keeper']) + len(info['Vet']) + len(info['Caretaker'])} workers\",\n f\"----- {len(info['Keeper'])} Keepers:\", *info['Keeper'],\n f\"----- {len(info['Caretaker'])} Caretakers:\", *info['Caretaker'],\n f\"----- {len(info['Vet'])} Vets:\", *info['Vet']]\n return \"\\n\".join(result)\n","repo_name":"DianVK/softuni_python_advanced","sub_path":"Python OOP - Exercises/Encapsulation - Exercise/wild_cat_zoo/project/zoo.py","file_name":"zoo.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"42241156610","text":"import math\nimport argparse\n\n\ndef shor_algorithm(n: int):\n # a = 1 can never yield a non-trivial factor, so start at 2; modular\n # exponentiation avoids the float overflow of math.pow for larger n.\n for a in range(2, n):\n r = 2\n while r < n:\n if pow(a, r, n) == 1:\n num1 = pow(a, r // 2) - 1\n num2 = pow(a, r // 2) + 1\n p = math.gcd(num1, n)\n q = math.gcd(num2, n)\n if p != n and q != n:\n return [p, q]\n r = r + 2\n return []\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Simulate Shor\\'s algorithm for N.')\n parser.add_argument('n', type=int, help='The integer to factor')\n args = parser.parse_args()\n factors = shor_algorithm(args.n)\n if not factors:\n print(\"No non-trivial factors found for \" + str(args.n))\n return\n p, q = factors\n print(\"Factor P is: \" + str(p) + \" and \" + \"Factor Q is: \" + str(q))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"qifanyyy/JupyterNotebook","sub_path":"new_algs/Number+theoretic+algorithms/Shor's+algorithm/shor_algorithm.py","file_name":"shor_algorithm.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"72927305993","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef plotData(X, y):\n \"\"\"\n plots the data points and gives the figure axes labels of\n population and profit.\n \"\"\"\n\n# ====================== YOUR CODE HERE ======================\n# Instructions: Plot the training data into a figure using the\n# \"figure\" and \"plot\" commands. Set the axes labels using\n# the \"xlabel\" and \"ylabel\" commands. Assume the\n# population and revenue data have been passed in\n# as the x and y arguments of this function.\n#\n# Hint: You can use the 'ro' option with plt.plot to have the markers\n# appear as red circles. 
Furthermore, you can make the\n# markers larger by using plt.plot(..., 'ro', markersize=10);\n \n plt.figure()\n plt.xlabel(\"population\")\n plt.ylabel(\"revenue\")\n plt.plot(X, y, 'ro', markersize=10)\n plt.show()\n \n# ============================================================\n","repo_name":"Teusner/Machine_Learning","sub_path":"Week_1/plotData.py","file_name":"plotData.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10467409230","text":"import numpy as np\n\ndef readMatrix(file):\n fd = open(file, 'r')\n hdr = fd.readline()\n rows, cols = [int(s) for s in fd.readline().strip().split()]\n tokens = fd.readline().strip().split()\n matrix = np.zeros((rows, cols))\n Y = []\n for i, line in enumerate(fd):\n nums = [int(x) for x in line.strip().split()]\n Y.append(nums[0])\n kv = np.array(nums[1:])\n k = np.cumsum(kv[:-1:2])\n v = kv[1::2]\n matrix[i, k] = v\n return matrix, tokens, np.array(Y)\n\ndef nb_train(matrix, category):\n state = {}\n N = matrix.shape[1]\n ###################\n spam = matrix[category == 1, :]\n nonspam = matrix[category == 0, :]\n\n phi_y1 = (np.sum(spam, axis = 0) + 1) / (spam.sum() + N)\n phi_y0 = (np.sum(nonspam, axis = 0) + 1) / (nonspam.sum() + N)\n phi_y = spam.shape[0] / matrix.shape[0]\n\n state['phi_y1'] = phi_y1 # prob that jth token appears given y=1\n state['phi_y0'] = phi_y0 # prob that jth token appears given y=0\n state['phi_y'] = phi_y\n\n ###################\n return state\n\ndef nb_test(matrix, state):\n output = np.zeros(matrix.shape[0])\n ###################\n\n phi_y1 = state['phi_y1']\n phi_y0 = state['phi_y0']\n phi_y = state['phi_y']\n\n log_probSpam = np.sum(np.log(phi_y1)*matrix, axis = 1) + np.log(phi_y)\n log_probNon = np.sum(np.log(phi_y0)*matrix, axis = 1) + np.log(1 - phi_y)\n\n output[log_probSpam >= log_probNon] = 1\n ###################\n return output\n\ndef evaluate(output, label):\n error = (output != label).sum() * 1. 
/ len(output)\n print('Error: %1.4f' % error)\n\ndef main():\n trainMatrix, tokenlist, trainCategory = readMatrix('data\\MATRIX.TRAIN')\n testMatrix, tokenlist, testCategory = readMatrix('data\\MATRIX.TEST')\n\n state = nb_train(trainMatrix, trainCategory)\n output = nb_test(testMatrix, state)\n\n evaluate(output, testCategory)\n return\n\nif __name__ == '__main__':\n main()\n","repo_name":"nmduonggg/ML-CS229","sub_path":"Problem Set 2/src/nb.py","file_name":"nb.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"4884443434","text":"import random\nimport math\n\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision import transforms\nimport torch\n\nimport numpy as np\nimport cv2\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nimport imgaug.parameters as iap\nfrom imgaug.augmentables import Keypoint, KeypointsOnImage\n\nfrom pytorch_tools.image.cropper import Cropper\n\n\nclass LandmarkDataset(Dataset):\n def __init__(self, datasets: list, is_train: bool, average_landmark: np.ndarray = None):\n super(LandmarkDataset, self).__init__()\n\n self.target_size = 112\n self.target_face_box_size_range = (self.target_size * 0.30, self.target_size * 0.75)\n self.target_average_face_box_size = 56\n\n self.transforms = transforms.Compose([\n transforms.ToTensor()\n ])\n\n self.fixed_label = 0\n\n self.is_train = is_train\n\n self.datasets = []\n for dataset in datasets:\n if is_train is True:\n dataset.set_train_mode()\n else:\n dataset.set_validation_mode()\n\n count = dataset.count(self.fixed_label)\n\n if count > 0:\n self.datasets.append(dataset)\n\n self.count = 0\n\n self.index_map = None\n\n self.prepare_index_map()\n\n self.landmarks = None\n self.average_landmark = None\n self.face_boxes = None\n\n self.prepare_landmark_cache(average_landmark)\n\n self.val_cache = {}\n\n def prepare_index_map(self):\n self.index_map = []\n\n if self.is_train is True:\n count_array = []\n\n for dataset in self.datasets:\n dataset.set_train_mode()\n count_array.append(dataset.count(self.fixed_label))\n\n self.count = sum(count_array)\n\n for dataset_index, dataset in enumerate(self.datasets):\n fixed_label = 0\n for i in range(dataset.count(fixed_label)):\n self.index_map.append((fixed_label, dataset_index, i))\n else:\n count_array = []\n\n for dataset in self.datasets:\n dataset.set_validation_mode()\n count_array.append(dataset.count(self.fixed_label))\n\n self.count = sum(count_array)\n\n for dataset_index, dataset in enumerate(self.datasets):\n fixed_label = 0\n for i in range(dataset.count(fixed_label)):\n self.index_map.append((fixed_label, dataset_index, i))\n\n def prepare_landmark_cache(self, average_landmark: np.ndarray = None):\n landmarks = []\n face_boxes = []\n\n if average_landmark is None:\n _average_landmark = np.zeros((68, 2))\n\n for index in range(len(self)):\n label, dataset_index, data_index = self.index_map[index]\n if self.is_train is True:\n self.datasets[dataset_index].set_train_mode()\n else:\n self.datasets[dataset_index].set_validation_mode()\n\n _, annotation_filename = self.datasets[dataset_index].get_filename(label, data_index)\n annotation = self.datasets[dataset_index].parse_annotation(annotation_filename)\n\n landmark = np.array(annotation)\n landmarks.append(landmark)\n\n face_box = landmark.min(axis=0).tolist() + landmark.max(axis=0).tolist()\n face_boxes.append(face_box)\n\n if average_landmark is None:\n landmark_center = landmark.mean(axis=0)\n 
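# Running sum of mean-centred landmark shapes: divided by len(self) just\n # after this loop, it becomes the dataset-average face shape used below.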
normalized_landmark = landmark - landmark_center\n _average_landmark += normalized_landmark\n\n if average_landmark is None:\n _average_landmark /= len(self)\n\n self.landmarks = landmarks\n self.face_boxes = face_boxes\n\n if average_landmark is None:\n average_landmark = _average_landmark\n\n average_face_box = average_landmark.min(axis=0).tolist() + average_landmark.max(axis=0).tolist()\n box_width = average_face_box[2] - average_face_box[0]\n box_height = average_face_box[3] - average_face_box[1]\n box_length = max(box_width, box_height)\n average_landmark_scale = self.target_average_face_box_size / box_length\n\n average_landmark -= average_landmark.mean(axis=0)\n average_landmark *= average_landmark_scale\n average_landmark += self.target_size / 2.0\n\n self.average_landmark = average_landmark\n\n def __getitem__(self, index: int):\n if self.is_train is True:\n label, dataset_index, data_index = self.index_map[index]\n\n self.datasets[dataset_index].set_train_mode()\n image, _ = self.datasets[dataset_index].get_datum(label, data_index)\n annotation = [self.face_boxes[index], self.landmarks[index]]\n\n image, target, pupil_distance = self.create_input_and_target(image, annotation)\n\n target = target.flatten()\n\n image = self.transforms(image)\n target = torch.tensor(target)\n else:\n if index in self.val_cache:\n image, target, pupil_distance = self.val_cache[index]\n else:\n label, dataset_index, data_index = self.index_map[index]\n\n self.datasets[dataset_index].set_validation_mode()\n image, _ = self.datasets[dataset_index].get_datum(label, data_index)\n annotation = [self.face_boxes[index], self.landmarks[index]]\n\n image, target, pupil_distance = self.create_input_and_target(image, annotation, random_seed=index)\n\n target = target.flatten()\n\n image = self.transforms(image)\n target = torch.tensor(target)\n\n # store the prepared sample so validation items are only built once\n self.val_cache[index] = (image, target, pupil_distance)\n\n return image, (target, pupil_distance)\n\n def create_input_and_target(self, image, annotation: list, random_seed: int = None):\n random.seed(random_seed)\n\n face_box = np.array(annotation[0], dtype=np.float32)\n landmark = np.array(annotation[1], dtype=np.float32)\n\n box_width = face_box[2] - face_box[0]\n box_height = face_box[3] - face_box[1]\n box_length = max(box_width, box_height)\n\n if self.is_train is True:\n scale = random.uniform(self.target_face_box_size_range[0] / box_length,\n self.target_face_box_size_range[1] / box_length)\n else:\n scale = self.target_average_face_box_size / box_length\n\n width, height = image.shape[1], image.shape[0]\n resized_width = int(round(width * scale))\n resized_height = int(round(height * scale))\n width_scale = resized_width / width\n height_scale = resized_height / height\n\n if self.is_train is False:\n image = cv2.resize(image, (resized_width, resized_height))\n\n landmark[:, 0] *= width_scale\n landmark[:, 1] *= height_scale\n face_box[0::2] *= width_scale\n face_box[1::2] *= height_scale\n box_center = [face_box[0::2].mean(), face_box[1::2].mean()]\n\n crop_x = int(round(box_center[0] - 56))\n crop_y = int(round(box_center[1] - 56))\n\n if self.is_train is True:\n rotation_margin = int(math.ceil(((math.sqrt(2.0) * 3 * box_length) - box_length) / 2.0))\n crop_x = int(face_box[0] - rotation_margin)\n crop_y = int(face_box[1] - rotation_margin)\n\n image = Cropper.crop(image, crop_x, crop_y,\n int(box_length) + (2 * rotation_margin), int(box_length) + (2 * rotation_margin))\n\n landmark[:, 0] -= crop_x\n landmark[:, 1] -= crop_y\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = np.expand_dims(image, axis=2)\n\n 
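# The imgaug pipeline built below first applies a random scale, translation\n # and rotation and center-pads/crops to the fixed 112x112 input, then\n # randomly perturbs brightness and contrast and may flip; the keypoints go\n # through the same transforms so the landmark targets stay aligned.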
augmentations = iaa.Sequential([\n iaa.Sequential([\n iaa.Affine(scale, translate_px={\"x\": (-10, 10), \"y\": (-10, 10)}, rotate=(-20, 20)),\n iaa.CenterPadToFixedSize(height=self.target_size, width=self.target_size),\n iaa.CenterCropToFixedSize(height=self.target_size, width=self.target_size),\n ]),\n iaa.Sequential([iaa.Sometimes(0.5, iaa.Add((-40, 40))),\n iaa.Sometimes(0.5, iaa.SomeOf(1, [iaa.GammaContrast((0.5, 2.0)),\n iaa.LinearContrast((0.4, 1.6))])),\n iaa.Sometimes(0.5, iaa.Fliplr(0.5))], random_order=True)\n\n ])\n\n keypoints = KeypointsOnImage([Keypoint(x=l[0], y=l[1]) for l in landmark], shape=image.shape)\n image, keypoints = augmentations(image=image, keypoints=keypoints)\n\n if keypoints.keypoints[0].x > keypoints.keypoints[16].x:\n landmark_flip_mapping_indices = [16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,\n 26, 25, 24, 23, 22, 21, 20, 19, 18, 17,\n 27, 28, 29, 30,\n 35, 34, 33, 32, 31,\n 45, 44, 43, 42, 47, 46, 39, 38, 37, 36, 41, 40,\n 54, 53, 52, 51, 50, 49, 48,\n 59, 58, 57, 56, 55,\n 64, 63, 62, 61, 60, 67, 66, 65\n ]\n\n for target_i, source_i in enumerate(landmark_flip_mapping_indices):\n landmark[target_i, 0] = keypoints.keypoints[source_i].x\n landmark[target_i, 1] = keypoints.keypoints[source_i].y\n else:\n for target_i in range(68):\n landmark[target_i, 0] = keypoints.keypoints[target_i].x\n landmark[target_i, 1] = keypoints.keypoints[target_i].y\n else:\n image = Cropper.crop(image, crop_x, crop_y, self.target_size, self.target_size)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n image = np.expand_dims(image, axis=2)\n\n landmark[:, 0] -= crop_x\n landmark[:, 1] -= crop_y\n\n left_pupil = landmark[36:42, :].mean(axis=0)\n right_pupil = landmark[42:48, :].mean(axis=0)\n\n pupil_distance = np.linalg.norm((left_pupil - right_pupil))\n\n return image, landmark, pupil_distance\n\n def __len__(self):\n return self.count\n","repo_name":"just-hjkwon/DeepAlignmentNetwork","sub_path":"landmark_dataset.py","file_name":"landmark_dataset.py","file_ext":"py","file_size_in_byte":10508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42246418250","text":"import json\n\nimport divik._inspect.color as color\n\nDEFAULT_COLOR = 128\n\n\ndef as_rgb(selected_point, figure):\n try:\n cluster = int(json.loads(selected_point)['cluster'])\n except json.JSONDecodeError:\n return DEFAULT_COLOR, DEFAULT_COLOR, DEFAULT_COLOR\n except ValueError:\n return DEFAULT_COLOR, DEFAULT_COLOR, DEFAULT_COLOR\n colormap = color.make_colormap(figure['data'][0]['z'])\n selected_color = colormap[cluster][1]\n jsonified = selected_color.replace('rgb(', '[').replace(')', ']')\n return json.loads(jsonified)\n\n\ndef update_color_overrides(overrides, new_color, level, selected_point):\n if not overrides:\n overrides = '{}'\n\n try:\n cluster = str(int(json.loads(selected_point)['cluster']))\n except json.JSONDecodeError:\n return overrides\n except TypeError:\n return overrides\n\n overrides = json.loads(overrides)\n level = str(level)\n if level not in overrides:\n overrides[level] = {}\n overrides[level][cluster] = new_color\n return json.dumps(overrides)\n","repo_name":"qifanyyy/JupyterNotebook","sub_path":"new_algs/Sequence+algorithms/Selection+algorithm/recolor.py","file_name":"recolor.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"74323109832","text":"import pandas as pd\nimport vectorbtpro as vbt\n\nfrom 
Utilities.bars_utilities import BARSINCE_genie\n\n\ndef cache_func(close_data,\n rsi_timeframes, rsi_windows,\n sma_on_rsi_1_windows, sma_on_rsi_2_windows, sma_on_rsi_3_windows,\n T1_ema_timeframes, T1_ema_1_windows, T1_ema_2_windows,\n # T2_ema_timeframes, T2_ema_1_windows, T2_ema_2_windows,\n #\n take_profit_points, stop_loss_points\n ):\n #\n cached_data = {\n # Data\n 'Close': {},\n 'Resamplers': {},\n 'Empty_df_like': pd.DataFrame().reindex_like(close_data),\n }\n # if type(rsi_timeframes) == type(T1_ema_timeframes) == list:\n '''Pre-Resample Data'''\n timeframes_set = tuple(set(tuple(rsi_timeframes) + tuple(T1_ema_timeframes))) # + tuple(T2_ema_timeframes)))\n\n for timeframe in timeframes_set:\n # CLOSE\n cached_data['Close'][timeframe] = close_data.vbt.resample_apply(timeframe,\n vbt.nb.last_reduce_nb).dropna() if timeframe != '1 min' else close_data\n\n '''Pre-Prepare Resampler'''\n cached_data['Resamplers'][timeframe] = vbt.Resampler(\n cached_data['Close'][timeframe].index,\n close_data.index,\n source_freq=timeframe,\n target_freq=\"1m\") if timeframe != '1 min' else None\n\n return cached_data\n\n\ndef apply_function(close_data,\n rsi_timeframe, rsi_window,\n sma_on_rsi_1_window, sma_on_rsi_2_window, sma_on_rsi_3_window,\n T1_ema_timeframe, T1_ema_1_window, T1_ema_2_window,\n # T2_ema_timeframe, T2_ema_1_window, T2_ema_2_window,\n #\n take_profit_points, stop_loss_points,\n cached_data):\n \"\"\"Function for RLGL Strategy/Indicators\"\"\"\n\n '''RSI and SMA Indicators'''\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n rsi_indicator = vbt.RSI.run(cached_data['Close'][rsi_timeframe], window=rsi_window, ewm=False).rsi\n sma_on_rsi_1_indicator = vbt.MA.run(rsi_indicator, window=sma_on_rsi_1_window, ewm=False).ma\n sma_on_rsi_2_indicator = vbt.MA.run(rsi_indicator, window=sma_on_rsi_2_window, ewm=False).ma\n sma_on_rsi_3_indicator = vbt.MA.run(rsi_indicator, window=sma_on_rsi_3_window, ewm=False).ma\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n '''Trend I EMA Indicators'''\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n T1_ema_1_indicator = vbt.MA.run(cached_data['Close'][T1_ema_timeframe], window=T1_ema_1_window, ewm=True).ma\n T1_ema_2_indicator = vbt.MA.run(cached_data['Close'][T1_ema_timeframe], window=T1_ema_2_window, ewm=True).ma\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n # '''Trend II EMA Indicators'''\n # ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n # T2_ema_1_indicator = cached_indicator['EMA'][f'{T2_ema_timeframe}_{T2_ema_1_window}']\n # T2_ema_2_indicator = cached_indicator['EMA'][f'{T2_ema_timeframe}_{T2_ema_2_window}']\n # ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n '''Resample Indicators Back To 1 minute'''\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n # Fetch the resamplers from cached_data for a given timeframe\n rsi_timeframe_to_1min_Resampler = cached_data['Resamplers'][rsi_timeframe]\n T1_ema_timeframe_to_1min_Resampler = cached_data['Resamplers'][T1_ema_timeframe]\n # T2_ema_timeframe_to_1min_Resampler = cached_data['Resamplers'][T2_ema_timeframe]\n\n # Resample indicators to 1m\n rsi_indicator = rsi_indicator.vbt.resample_closing(\n rsi_timeframe_to_1min_Resampler) if 
rsi_timeframe_to_1min_Resampler else rsi_indicator\n sma_on_rsi_1_indicator = sma_on_rsi_1_indicator.vbt.resample_closing(\n rsi_timeframe_to_1min_Resampler) if rsi_timeframe_to_1min_Resampler else sma_on_rsi_1_indicator\n sma_on_rsi_2_indicator = sma_on_rsi_2_indicator.vbt.resample_closing(\n rsi_timeframe_to_1min_Resampler) if rsi_timeframe_to_1min_Resampler else sma_on_rsi_2_indicator\n sma_on_rsi_3_indicator = sma_on_rsi_3_indicator.vbt.resample_closing(\n rsi_timeframe_to_1min_Resampler) if rsi_timeframe_to_1min_Resampler else sma_on_rsi_3_indicator\n #\n T1_ema_1_indicator = T1_ema_1_indicator.vbt.resample_closing(\n T1_ema_timeframe_to_1min_Resampler) if T1_ema_timeframe_to_1min_Resampler else T1_ema_1_indicator\n T1_ema_2_indicator = T1_ema_2_indicator.vbt.resample_closing(\n T1_ema_timeframe_to_1min_Resampler) if T1_ema_timeframe_to_1min_Resampler else T1_ema_2_indicator\n # T2_ema_1_indicator = T2_ema_1_indicator.vbt.resample_closing(\n # T2_ema_timeframe_to_1min_Resampler) if T2_ema_timeframe_to_1min_Resampler else T2_ema_1_indicator\n # T2_ema_2_indicator = T2_ema_2_indicator.vbt.resample_closing(\n # T2_ema_timeframe_to_1min_Resampler) if T2_ema_timeframe_to_1min_Resampler else T2_ema_2_indicator\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n '''Long Entries Conditions'''\n # 1. TrendType == 'T1' or (ema_4h(13) > ema_4h(50))\n ... # fixme not adding this right now\n # 2. crossover( ema(13) , ema(50) )\n long_entry_condition_2 = T1_ema_1_indicator.vbt.crossed_above(T1_ema_2_indicator)\n\n # 3. barssince( crossover( rsi_4h(13).sma(2) , rsi_4h(13).sma(34) ) or crossover( rsi_4h(13).sma(2) , rsi_4h(13).sma(7) ) ) <\n # barssince( crossunder( rsi_4h(13).sma(2) , rsi_4h(13).sma(34) ) or crossunder( rsi_4h(13).sma(2) , rsi_4h(13).sma(7) ) )\n inside_long_condition_3a1 = sma_on_rsi_1_indicator.vbt.crossed_above(sma_on_rsi_3_indicator)\n inside_long_condition_3a2 = sma_on_rsi_1_indicator.vbt.crossed_above(sma_on_rsi_2_indicator)\n inside_long_condition_3a = inside_long_condition_3a1 | inside_long_condition_3a2.to_numpy()\n #\n inside_long_condition_3b1 = sma_on_rsi_1_indicator.vbt.crossed_below(sma_on_rsi_3_indicator)\n inside_long_condition_3b2 = sma_on_rsi_1_indicator.vbt.crossed_below(sma_on_rsi_2_indicator)\n inside_long_condition_3b = inside_long_condition_3b1 | inside_long_condition_3b2.to_numpy()\n #\n long_entry_condition_3 = BARSINCE_genie(inside_long_condition_3a).lt(BARSINCE_genie(inside_long_condition_3b))\n\n '''Short Entries Conditions'''\n # 1. TrendType == 'T1' or (ema_4h(13) < ema_4h(50))\n ... # fixme not adding this right now\n # 2. crossunder( ema(13) , ema(50) )\n short_entry_condition_2 = T1_ema_1_indicator.vbt.crossed_below(T1_ema_2_indicator)\n\n # 3. 
barssince( crossover( rsi_4h(13).sma(2) , rsi_4h(13).sma(34) ) or crossover( rsi_4h(13).sma(2) , rsi_4h(13).sma(7) ) ) >\n # barssince( crossunder( rsi_4h(13).sma(2) , rsi_4h(13).sma(34) ) or crossunder( rsi_4h(13).sma(2) , rsi_4h(13).sma(7) ) )\n inside_short_condition_3a1 = sma_on_rsi_1_indicator.vbt.crossed_above(sma_on_rsi_3_indicator)\n inside_short_condition_3a2 = sma_on_rsi_1_indicator.vbt.crossed_above(sma_on_rsi_2_indicator)\n inside_short_condition_3a = inside_short_condition_3a1 | inside_short_condition_3a2.to_numpy()\n #\n inside_short_condition_3b1 = sma_on_rsi_1_indicator.vbt.crossed_below(sma_on_rsi_3_indicator)\n inside_short_condition_3b2 = sma_on_rsi_1_indicator.vbt.crossed_below(sma_on_rsi_2_indicator)\n inside_short_condition_3b = inside_short_condition_3b1 | inside_short_condition_3b2.to_numpy()\n #\n short_entry_condition_3 = BARSINCE_genie(inside_short_condition_3a).gt(BARSINCE_genie(inside_short_condition_3b))\n\n '''Fill Rest of Parameters for Sim'''\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n # Used to fill signals and parameter dfs into the correct size (just a workaround for now, fast)\n empty_df_like = cached_data['Empty_df_like']\n #\n take_profit_points = empty_df_like.fillna(take_profit_points)\n stop_loss_points = empty_df_like.fillna(stop_loss_points)\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n '''Define Entries and Exits Signals'''\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n long_entries = (\n long_entry_condition_2\n & long_entry_condition_3.to_numpy()\n ).vbt.signals.fshift()\n long_exits = pd.DataFrame().reindex_like(long_entries).fillna(False)\n\n short_entries = (\n short_entry_condition_2\n & short_entry_condition_3.to_numpy()\n ).vbt.signals.fshift()\n short_exits = long_exits\n ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n return long_entries, long_exits, short_entries, short_exits, \\\n take_profit_points, stop_loss_points\n\n # T2_ema_1_indicator, T2_ema_2_indicator, \\\n\n\ndef RLGL_Strategy(open_data, low_data, high_data, close_data, parameter_data, ray_sim_n_cpus):\n \"\"\"Red Light Geen Light Strategy\"\"\"\n\n '''RSI and SMA Information'''\n rsi_timeframes = parameter_data[\"rsi_timeframes\"]\n rsi_windows = parameter_data[\"rsi_windows\"]\n #\n sma_on_rsi_1_windows = parameter_data[\"sma_on_rsi_1_windows\"]\n sma_on_rsi_2_windows = parameter_data[\"sma_on_rsi_2_windows\"]\n sma_on_rsi_3_windows = parameter_data[\"sma_on_rsi_3_windows\"]\n\n '''Trend I EMA Information'''\n T1_ema_timeframes = parameter_data[\"T1_ema_timeframes\"] # refers to timeframe of loaded chart.\n T1_ema_1_windows = parameter_data[\"T1_ema_1_windows\"]\n T1_ema_2_windows = parameter_data[\"T1_ema_2_windows\"]\n\n # '''Trend II EMA Information'''\n # T2_ema_timeframes = parameter_data[\"T2_ema_timeframes\"]\n # T2_ema_1_windows = parameter_data[\"T2_ema_1_windows\"]\n # T2_ema_2_windows = parameter_data[\"T2_ema_2_windows\"]\n\n '''TP and SL'''\n take_profit_points = parameter_data[\"take_profit_points\"]\n stop_loss_points = parameter_data[\"stop_loss_points\"]\n\n '''Compile Structure and Run Master Indicator'''\n Master_Indicator = vbt.IF(\n input_names=['close_data'],\n #\n param_names=['rsi_timeframes', 'rsi_windows',\n 'sma_on_rsi_1_windows', 'sma_on_rsi_2_windows', 'sma_on_rsi_3_windows',\n 'T1_ema_timeframes', 'T1_ema_1_windows', 
'T1_ema_2_windows',\n # 'T2_ema_timeframes', 'T2_ema_1_windows', 'T2_ema_2_windows',\n 'take_profit_points', 'stop_loss_points'],\n #\n output_names=['long_entries', 'long_exits', 'short_entries', 'short_exits',\n 'take_profit_points', 'stop_loss_points']\n ).with_apply_func(\n apply_func=apply_function,\n cache_func=cache_func,\n keep_pd=True,\n param_product=False,\n execute_kwargs=dict(\n engine='ray',\n init_kwargs={\n 'address': 'auto',\n 'num_cpus': ray_sim_n_cpus,\n # 'memory': 100 * 10 ** 9,\n # 'object_store_memory': 100 * 10 ** 9,\n },\n show_progress=True\n ),\n #\n rsi_timeframes='4h', rsi_windows=13,\n sma_on_rsi_1_windows=2, sma_on_rsi_2_windows=7, sma_on_rsi_3_windows=34,\n T1_ema_timeframes='1m', T1_ema_1_windows=13, T1_ema_2_windows=50,\n # T2_ema_timeframes='4h', T2_ema_1_windows=13, T2_ema_2_windows=50,\n take_profit_points=300, stop_loss_points=-300\n ).run(\n close_data,\n rsi_timeframes=rsi_timeframes, rsi_windows=rsi_windows,\n #\n sma_on_rsi_1_windows=sma_on_rsi_1_windows,\n sma_on_rsi_2_windows=sma_on_rsi_2_windows,\n sma_on_rsi_3_windows=sma_on_rsi_3_windows,\n #\n T1_ema_timeframes=T1_ema_timeframes,\n T1_ema_1_windows=T1_ema_1_windows, T1_ema_2_windows=T1_ema_2_windows,\n #\n # T2_ema_timeframes=T2_ema_timeframes,\n # T2_ema_1_windows=T2_ema_1_windows, T2_ema_2_windows=T2_ema_2_windows,\n take_profit_points=take_profit_points, stop_loss_points=stop_loss_points\n )\n\n '''Type C conditions'''\n strategy_specific_kwargs = dict(\n allow_multiple_trade_from_entries=False, # strategy_specific_kwargs['allow_multiple_trade_from_entries'],\n exit_on_opposite_direction_entry=True, # strategy_specific_kwargs['exit_on_opposite_direction_entry'],\n #\n progressive_bool=False, # Master_Indicator.progressive_bool,\n long_progressive_condition=False, # Master_Indicator.long_entry_condition_3.vbt.signals.fshift(),\n short_progressive_condition=False, # Master_Indicator.short_entry_condition_3.vbt.signals.fshift(),\n #\n breakeven_1_trigger_bool=False, # Master_Indicator.breakeven_1_trigger_bool,\n breakeven_1_trigger_points=0, # Master_Indicator.breakeven_1_trigger_points,\n breakeven_1_distance_points=0, # Master_Indicator.breakeven_1_distance_points,\n #\n breakeven_2_trigger_bool=False, # Master_Indicator.breakeven_2_trigger_bool,\n breakeven_2_trigger_points=0, # Master_Indicator.breakeven_2_trigger_points,\n breakeven_2_distance_points=0, # Master_Indicator.breakeven_2_distance_points,\n #\n take_profit_bool=True, # Master_Indicator.take_profit_bool,\n take_profit_points=Master_Indicator.take_profit_points,\n take_profit_point_parameters=take_profit_points,\n #\n stop_loss_bool=True, # Master_Indicator.stop_loss_bool,\n stop_loss_points=Master_Indicator.stop_loss_points,\n stop_loss_points_parameters=stop_loss_points,\n )\n\n # strategy_specific_kwargs = dict()\n return Master_Indicator.long_entries, Master_Indicator.long_exits, \\\n Master_Indicator.short_entries, Master_Indicator.short_exits, \\\n strategy_specific_kwargs\n","repo_name":"Bucanero06/mini_Genie","sub_path":"mini_genie_source/Strategies/RLGL_Strategy.py","file_name":"RLGL_Strategy.py","file_ext":"py","file_size_in_byte":14124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"29081566507","text":"import bisect\nfrom typing import List\n\n\nclass Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n\n res = [-1, -1]\n\n if target in nums:\n\n res[0] = bisect.bisect_left(nums, target)\n res[1] = bisect.bisect_right(nums, target) - 1\n\n return res\n 
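\n# A fully logarithmic sketch of the same search, added here for contrast;\n# the module-level name search_range is mine, not part of the original\n# submission. It avoids the O(n) 'target in nums' scan by deriving presence\n# from bisect_left directly.\ndef search_range(nums: List[int], target: int) -> List[int]:\n left = bisect.bisect_left(nums, target)\n if left == len(nums) or nums[left] != target:\n return [-1, -1]\n return [left, bisect.bisect_right(nums, target) - 1]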
","repo_name":"Shivangik01/leetcode","sub_path":"0034-find-first-and-last-position-of-element-in-sorted-array/0034-find-first-and-last-position-of-element-in-sorted-array.py","file_name":"0034-find-first-and-last-position-of-element-in-sorted-array.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74744920710","text":"# _*_ coding: utf-8 _*_\r\n__author__ = 'LennonChin'\r\n__date__ = '2017/09/12 下午 10:23'\r\n\r\nfrom django.conf.urls import url\r\n\r\nfrom .views import CourseListView, CourseDetailView, CourseInfoView, CourseCommentView, AddCommentView, VideoPlayView\r\n\r\nurlpatterns = [\r\n\r\n # course list\r\n url(r'^list/$', CourseListView.as_view(), name=\"list\"),\r\n url(r'^detail/(?P\\d+)/$', CourseDetailView.as_view(), name=\"detail\"),\r\n url(r'^info/(?P\\d+)/$', CourseInfoView.as_view(), name=\"info\"),\r\n url(r'^comment/(?P\\d+)/$', CourseCommentView.as_view(), name=\"comment\"),\r\n url(r'^add_comment/$', AddCommentView.as_view(), name=\"add_comment\"),\r\n url(r'^video/(?P\\d+)/$', VideoPlayView.as_view(), name=\"video_play\"),\r\n]","repo_name":"LennonChin/Django-Practices","sub_path":"MxOnline/apps/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"27"} +{"seq_id":"17511346477","text":"import pathlib\nimport re\n\nimport setuptools\n\n__packagename__ = \"gittrail\"\nROOT = pathlib.Path(__file__).parent\n\n\ndef get_version():\n VERSIONFILE = pathlib.Path(ROOT, __packagename__, \"__init__.py\")\n initfile_lines = open(VERSIONFILE).readlines()\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in initfile_lines:\n mo = re.search(VSRE, line, re.M)\n if mo:\n return mo.group(1)\n raise Exception(f\"Unable to find version string in {VERSIONFILE}.\")\n\n\n__version__ = get_version()\n\n\nsetuptools.setup(\n name=__packagename__,\n packages=setuptools.find_packages(),\n version=__version__,\n description=\"Context manager for enforcing links between data pipeline outputs and git history.\",\n license=\"AGPLv3\",\n long_description=open(pathlib.Path(ROOT, \"README.md\")).read(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/michaelosthege/gittrail\",\n author=\"Michael Osthege\",\n author_email=\"michael.osthege@outlook.com\",\n classifiers=[\n \"Programming Language :: Python\",\n \"Operating System :: OS Independent\",\n \"License :: OSI Approved :: GNU Affero General Public License v3\",\n \"Intended Audience :: Science/Research\",\n ],\n install_requires=[open(pathlib.Path(ROOT, \"requirements.txt\")).readlines()],\n python_requires=\">=3.6\",\n)\n","repo_name":"michaelosthege/gittrail","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"30377124126","text":"from django.db import models\n\n# Create your models here.\nclass User(models.Model):\n id = models.CharField(primary_key=True, blank=False, null=False, max_length=128)\n username = models.CharField(max_length=128, blank=False, null=False, db_index=True)\n area = models.CharField(max_length=128, blank=False, null=True, db_index=True)\n description = models.TextField(blank=True, null=True)\n gender = models.CharField(max_length=16, null=False, blank=False, db_index=True)\n url_token = 
models.CharField(max_length=128, blank=False, null=False)\n\nclass Question(models.Model):\n id = models.CharField(primary_key=True, blank=False, null=False, max_length=128)\n answer_count = models.IntegerField(default=0)\n # update_time = models.IntegerField(default=0)\n content = models.TextField(null=False, blank=False)\n\nclass Answer(models.Model):\n id = models.CharField(primary_key=True, blank=False, null=False, max_length=128)\n user = models.ForeignKey(to=User, on_delete=models.CASCADE)\n content = models.TextField(null=False, blank=False)\n question = models.ForeignKey(to=Question, on_delete=models.CASCADE)\n update_time = models.IntegerField(default=0)\n comment_count = models.IntegerField(default=0)\n\nclass Comment(models.Model):\n id = models.CharField(primary_key=True, blank=False, null=False, max_length=128)\n user = models.ForeignKey(to=User, on_delete=models.CASCADE)\n like = models.IntegerField(default=0)\n dislike = models.IntegerField(default=0)\n children = models.TextField(blank=True, null=True)\n answer = models.ForeignKey(to=Answer, on_delete=models.CASCADE)\n content = models.TextField(null=False, blank=False)\n\n","repo_name":"liuxukun2000/Zhihu_spider","sub_path":"database/zhihu/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"74072079750","text":"\"\"\" Neural network.\n\n@author: Even M. Nordhagen \n\"\"\"\n\n\nimport numpy as np\n\nclass Network:\n \"\"\" Initialize network, including weights and nodes\n \n Parameters\n ----------\n \n input_shape : ndtuple\n dimension on input. Can be 1d (works for fully connected \n layer only), 2d (image with only 1 channel) and 3d (image\n with multiple channels)\n init : obj\n how to initialize weights. Methods are found in initialize.py. This is \n global and can be overwritten by each specific layer.\n cost : obj\n cost function. Functions are found in cost.py\n activation : obj\n activation function. Functions are found in activation.py. This is \n global and can be overwritten by each specific layer.\n optimizer : obj\n optimizer function. Methods are found in optimizer.py. This is global \n and can be overwritten by each specific layer.\n bias : boolean\n include bias node yes / no. This is global and can be overwritten\n by each specific layer.\n \"\"\"\n\n from tensornet.cost import MSE\n from tensornet.activation import Sigmoid\n from tensornet.optimizer import ADAM\n from tensornet.initialize import Normal\n \n def __init__(self, input_shape, \n cost=MSE(), \n init=Normal(), \n activation=Sigmoid(), \n optimizer=ADAM(lr=0.01),\n bias=True):\n \n self.layers = []\n self.h = np.array([input_shape])\n self.weight = []\n self.a = [np.zeros(input_shape)]\n self.activation = activation\n self.init = init\n self.optimizer = optimizer\n self.cost = cost\n self.bias = bias\n \n def append(self, layer):\n self.layers.append(layer)\n self.weight.append(layer.weight)\n \n def dense(self, units, \n init=None, \n activation=None, \n optimizer=None,\n bias=None):\n \"\"\" Add dense layer.\n \n Parameters\n ----------\n \n units : int\n number of hidden units\n eta : float\n learning rate\n init : obj\n how to initialize weights. Methods are found in initialize.py\n activation : obj\n activation function. Functions are found in activation.py\n optimizer : obj\n optimizer function. 
Methods are found in optimizer.py\n bias : bool\n bias on (True) / off (False)\n \"\"\"\n if init is None:\n init = self.init\n if activation is None:\n activation = self.activation\n if optimizer is None:\n optimizer = self.optimizer\n if bias is None:\n bias = self.bias\n self.h = np.append(self.h, units)\n self.a.append(np.zeros(units))\n \n from tensornet.layer import DenseLayer\n layer = DenseLayer(self.h[-2], self.h[-1], init, activation, optimizer, bias)\n self.append(layer)\n \n def conv(self, kernel=(3,32,32), \n pad_size=(15,15), \n stride=(1,1), \n init=None,\n activation=None, \n optimizer=None,\n bias=None):\n \"\"\" Add convolutional layer.\n \n Parameters\n ----------\n \n kernel : 3dtuple of ints\n kernel size in vertical and horizontal direction\n pad_size : 2dtuple of ints\n zero padding in horizontal and vertical direction\n stride : 2dtuple of ints\n stride in horizontal and vertical direction \n eta : float\n learning rate\n init : obj\n how to initialize weights. Methods are found in initialize.py\n activation : obj\n activation function. Functions are found in activation.py\n optimizer : obj\n optimizer function. Methods are found in optimizer.py\n bias : bool\n bias on (True) / off (False)\n \"\"\"\n if init is None:\n init = self.init\n if activation is None:\n activation = self.activation\n if optimizer is None:\n optimizer = self.optimizer\n if bias is None:\n bias = self.bias\n \n from tensornet.layer import ConvLayer\n layer = ConvLayer(kernel, pad_size, stride, init, activation, optimizer, bias)\n self.append(layer)\n \n def pooling(self, kernel=(2,2), pad_size=(0,0), stride=(1,1), mode='max'):\n \"\"\" Add pooling layer.\n \n Parameters\n ----------\n kernel : 2dtuple of ints\n kernel size in vertical and horizontal direction. (2,2) by default\n pad_size : 2dtuple of ints\n pad size in vertical and horizontal direction. No padding by default.\n stride : 2dtuple of ints\n stride of pooling (height,width). By default the \n size of kernel (no overlap)\n mode : str\n mode of pooling. 
Max pooling ('max'), min pooling\n ('min') and mean pooling ('mean'/'avg') implemented\n \"\"\"\n \n from tensornet.layer import Pooling\n layer = Pooling(kernel, pad_size, stride, mode)\n self.layers.append(layer)\n self.weight.append(0)\n \n def __call__(self, input_data):\n \"\"\" Predicting output from network, given an input data set.\n \n Parameters\n ----------\n input_data : ndarray\n input data needs to match the input shape of model\n \"\"\"\n a = np.array(input_data)\n for layer in self.layers:\n a = layer(a)\n self.predicted = a\n return a\n \n @staticmethod\n def mse(predicted, targets):\n \"\"\" Mean-square error, given inputs and targets.\n \n Parameters\n ----------\n predicted : ndarray\n input data needs to match the input shape of model\n targets : ndarray\n number of targets need to match number of inputs\n size of target needs to match last layer of model\n \"\"\"\n # imported here because class-body imports are not visible in method scope\n from tensornet.cost import MSE\n mse_cost = MSE()\n return mse_cost(predicted, targets)\n \n @staticmethod\n def score(predicted, targets):\n \"\"\" Returns the R2-score.\n \"\"\"\n u = ((targets - predicted) ** 2).sum()\n v = ((targets - targets.mean()) ** 2).sum()\n return 1 - u/v\n \n def backprop(self, start, stop):\n \"\"\" Back-propagation processing based on some targets\n \"\"\"\n dcost = self.cost.derivate()[start:stop]\n for layer in reversed(list(self.layers)):\n dcost = layer.backward(dcost, start, stop)\n \n def update(self, step):\n \"\"\" Update weights.\n \n Parameters\n ----------\n step : int\n current step\n \"\"\"\n for i, layer in enumerate(self.layers):\n self.weight[i] = layer.update_weights(step)\n \n def train(self, input_data, targets, epochs=1000, mini_batches=10):\n \"\"\" Train the model.\n \n Parameters\n ----------\n input_data : ndarray\n input data needs to match the input shape of model\n targets : ndarray\n number of targets need to match number of inputs\n size of target needs to match last layer of model\n epochs : int\n number of training epochs\n mini_batches : int\n number of mini batches\n \"\"\"\n \n samples = len(input_data)\n samples_per_batch = int(samples/mini_batches)\n \n from tqdm import trange\n with trange(epochs, unit=' epochs') as epoch:\n for step in epoch:\n start = 0\n for batch in range(mini_batches):\n # Forward\n predicted = self(input_data)\n loss = self.cost(predicted, targets).sum()\n \n # Backward\n stop = start + samples_per_batch\n self.backprop(start, stop)\n self.update(batch*(1+step))\n epoch.set_description('Training ' + '.' 
* (step%4) + \\\n ' ' * (4-step%4))\n epoch.set_postfix(loss=loss)\n start = stop\n return loss\n \n \nif __name__ == \"__main__\":\n from tensornet.activation import LeakyReLU, ReLU, Sigmoid\n from tensornet.optimizer import ADAM, GradientDescent\n from tensornet.cost import MSE\n from tensornet.initialize import Normal\n \n # XOR GATE\n data = [[0, 0], [0, 1], [1, 0], [1, 1]]\n targets = [[0], [1], [1], [0]]\n \n model = Network((2), cost=MSE(), activation=LeakyReLU(a=0.2), optimizer=ADAM(lr=0.1), bias=False)\n model.dense(units=5, optimizer=GradientDescent(eta=0.1), activation=ReLU())\n model.dense(units=1)\n model.train(data, targets, epochs=1000)\n","repo_name":"evenmn/tensornet","sub_path":"tensornet/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31757564869","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 3 18:59:26 2023\r\n\r\n@author: anton.freyberg\r\n\"\"\"\r\n\r\nimport requests\r\nimport pandas as pd\r\n\r\n\r\n\r\ndynUrl = 'Dynatrace URL, e.g. https://xxx99999.live.dynatrace.com'\r\ndynToken = 'your API token with problems.read scope'\r\ntimeframe = '6h'\r\n\r\nheaders = {\r\n 'Authorization': 'Api-Token {}'.format(dynToken)\r\n}\r\n\r\n#header needed for put request\r\nheadersPOST = {\r\n 'Authorization': 'Api-Token {}'.format(dynToken),\r\n 'Content-Type': 'text/plain'\r\n}\r\n\r\n\r\n\r\ndef getProblems():\r\n query = {'from': 'now-'+timeframe,\r\n 'fields': 'evidenceDetails,impactAnalysis'\r\n }\r\n \r\n req = requests.get(\"{}/api/v2/problems\".format(dynUrl), params=query, headers=headers, verify=False)\r\n\r\n print(req.status_code)\r\n response = req.json()\r\n problems = response['problems']\r\n while \"nextPageKey\" in response:\r\n query = {\"nextPageKey\": response[\"nextPageKey\"]}\r\n req = requests.get(\"{}/api/v2/problems\".format(dynUrl), params=query, headers=headers, verify=False)\r\n response = req.json()\r\n print(req.status_code)\r\n problems = problems + response[\"problems\"]\r\n \r\n print(problems)\r\n return problems\r\n\r\n\r\ndef writeToFile(problems, filename):\r\n df = pd.json_normalize(problems)\r\n df.to_csv(filename, sep=';', index=False, quotechar=\"'\", encoding='utf-8')\r\n\r\nif __name__ == '__main__':\r\n problems = getProblems()\r\n filename = 'problemsDetails.csv'\r\n writeToFile(problems, filename)\r\n","repo_name":"anton-freyberg/export-problems","sub_path":"problemDetails.py","file_name":"problemDetails.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6192046977","text":"import sys\nimport math\nimport picos as pic\nimport networkx as nx\nimport itertools\nimport cvxopt\nimport matplotlib.pyplot as plt\nfrom networkx.drawing.nx_agraph import write_dot, graphviz_layout\n\nclass Carpool:\n def __init__(self, people):\n self.people = people\n self.G = nx.DiGraph()\n self.days = ['M', 'T', 'W', 'R', 'F']\n self.daysDict = {}\n for person in people:\n for day in person.days:\n self.daysDict[day] = self.daysDict.get(day, 0) + 1\n self.create_network()\n pos = graphviz_layout(self.G, prog='dot')\n edge_labels = nx.get_edge_attributes(self.G, 'capacity')\n nx.draw(self.G, pos)\n nx.draw_networkx_edge_labels(self.G, pos, 
edge_labels, font_size=8)\n nx.draw_networkx_labels(self.G, pos, font_size=10)\n \n plt.show()\n\n def create_network(self):\n '''Builds the carpool scheduling network and stores it in self.G:\n a source node feeds each person up to ceil(responsibility) driving\n slots, each person connects to the days they ride, and every day\n must deliver exactly one unit of flow to the sink.\n '''\n\n self.G.add_node(\"Source\")\n self.G.add_node(\"Sink\")\n \n\n for day in self.days:\n self.G.add_node(day)\n self.G.add_edge(day, \"Sink\", capacity=1, flow=0)\n \n\n for person in self.people:\n self.G.add_node(person)\n for day in person.days:\n self.G.add_edge(person, day, capacity=1, flow=0)\n capacity = math.ceil(self.getResponsibility(person))\n self.G.add_edge(\"Source\", person, capacity=capacity, flow=0)\n\n def network_flows(self):\n '''Runs maximum flow from Source to Sink, draws the resulting\n assignment, and returns True when every day receives a driver,\n i.e. the max-flow value equals the number of days.\n '''\n\n flow_value, flow_dict = nx.maximum_flow(self.G, \"Source\", \"Sink\")\n print(flow_dict)\n G2 = self.dictToNx(flow_dict)\n pos = graphviz_layout(G2, prog='dot')\n edge_labels = nx.get_edge_attributes(G2, 'flow')\n nx.draw(G2, pos)\n nx.draw_networkx_edge_labels(G2, pos, edge_labels, font_size=8)\n nx.draw_networkx_labels(G2, pos, font_size=10)\n plt.show()\n if flow_value != len(self.days):\n return False\n\n return True\n\n def getResponsibility(self, person):\n total = 0\n for day in person.days:\n total += 1 / self.daysDict[day]\n return total\n \n def dictToNx(self, flow_dict):\n G2 = nx.DiGraph()\n for key in flow_dict.keys():\n G2.add_node(key)\n for key in flow_dict.keys():\n for destKey in flow_dict[key]:\n if flow_dict[key][destKey] > 0:\n G2.add_edge(key, destKey, flow=flow_dict[key][destKey])\n return G2\n\n\nclass Person:\n def __init__(self, ID, days):\n self.ID = ID\n self.days = days\n \n def __str__(self):\n return \"Person \" + str(self.ID)\n\n\nif __name__ == '__main__':\n person1 = Person(1, [\"M\", \"T\", \"W\"])\n person2 = Person(2, [\"M\", \"W\"])\n person3 = Person(3, [\"M\", \"T\", \"W\", \"R\", \"F\"])\n person4 = Person(4, [\"T\", \"W\", \"R\", \"F\"])\n people = [person1, person2, person3, person4]\n\n car = Carpool(people)\n print(\"Solvable?: \" + str(car.network_flows()))","repo_name":"dberny/AdvancedAlgorithmsFinal","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21777622018","text":"from __future__ import absolute_import, division, print_function\n\n# Hidden Markov Model with Multivariate Gaussian Emission Distribution\n# each dimension is independent (covariances across dimensions are 0)\n\n__author__ = 'billhuang'\n\nimport numpy as np\nimport kmean\nimport numerical_utils as nu\nfrom scipy import stats\n\ndef random_initialization(Y_, K_):\n pi0_, A_ = init_transition(K_)\n D_ = Y_.shape[1]\n mu_ = 
np.random.normal(0, 1, size = (K_, D_))\n s_ = np.zeros((K_, D_, D_))\n for k in range(K_):\n s_[k,:] = np.eye(D_)\n return (pi0_, A_, mu_, s_)\n\ndef kmean_initialization(Y_, K_):\n pi0_, A_ = init_transition(K_)\n D_ = Y_.shape[1]\n label_ = kmean.kmean(Y_, K_)\n mu_ = np.zeros((K_, D_))\n s_ = np.zeros((K_, D_, D_))\n for k in range(K_):\n Yk_ = Y_[label_ == k]\n mu_[k,:] = np.mean(Yk_, axis = 0)\n s_[k,:] = np.eye(D_)\n return (pi0_, A_, mu_, s_)\n\ndef init_transition(K_):\n pi0_ = nu.log(np.random.dirichlet(np.ones(K_)))\n A_ = nu.log(np.random.dirichlet(np.ones(K_), size = K_))\n return (pi0_, A_)\n\ndef sync_B(Y_, mu_, s_):\n T_ = Y_.shape[0]\n K_ = mu_.shape[0]\n B_ = np.zeros((T_, K_))\n for k in range(K_):\n B_[:,k] = stats.multivariate_normal.logpdf(Y_, mu_[k,:], s_[k,:])\n return (B_)\n\ndef pass_message_forward(pi0_, A_, B_):\n T_, K_ = B_.shape\n M_ = np.zeros((T_, K_))\n M_[0,:] = pi0_ + B_[0,:]\n for t in range(1, T_):\n M_[t,:] = nu.log_matrix_multiply_vector(A_.T, M_[(t-1),:]) + B_[t,:]\n return (M_)\n\ndef pass_message_backward(A_, B_):\n R_ = np.zeros(B_.shape)\n R_[-1,:] = 0\n for t in range(B_.shape[0] - 2, -1, -1):\n R_[t,:] = nu.log_matrix_multiply_vector(A_, (B_[(t+1),:] + R_[(t+1),:]))\n return (R_)\n\ndef sync_Q(M_, R_):\n Qu_ = M_ + R_\n logQ_ = (Qu_.T - np.logaddexp.reduce(Qu_, axis = 1)).T\n Q_ = np.exp(logQ_)\n return (Q_)\n\ndef sync_N(M_, R_, A_, B_):\n T_, K_ = B_.shape\n xi_ = np.zeros((T_ - 1, K_, K_))\n for t in range(0, T_ - 1):\n xi_[t,:] = (A_.T + M_[t,:]).T + B_[(t+1),:] + R_[(t+1),:]\n xi_ = xi_ - np.logaddexp.reduce(M_[-1,:])\n N_ = np.sum(np.exp(xi_), axis = 0)\n return (N_)\n\ndef sync_A(N_):\n A_ = nu.normalize_across_row(N_)\n return (nu.log(A_))\n\ndef update_params(Y_, Q_, N_):\n T_, D_ = Y_.shape\n K_ = Q_.shape[1]\n q_ = np.sum(Q_, axis = 0)\n mu_ = np.dot(np.diag(1/q_), np.dot(Q_.T, Y_))\n s_ = np.zeros((K_, D_, D_))\n for k in range(K_):\n ym_ = Y_ - mu_[k,:]\n s_[k,:] = np.dot(ym_.T, np.dot(np.diag(Q_[:,k]), ym_)) / q_[k]\n return (mu_, s_)\n\ndef compute_lower_bound(pi0_, A_, B_, Q_, N_):\n lower_bound_ = np.sum(N_ * A_) + np.sum(Q_[0,:] * pi0_) + np.sum(Q_ * B_) \n return (lower_bound_)\n\n'''\ndef compute_lower_bound(M_):\n lower_bound_ = np.sum(np.logaddexp.reduce(M_, axis = 1))\n return (lower_bound_)\n'''\n\ndef E_step(Y_, pi0_, A_, mu_, s_):\n B_ = sync_B(Y_, mu_, s_)\n M_ = pass_message_forward(pi0_, A_, B_)\n R_ = pass_message_backward(A_, B_)\n Q_ = sync_Q(M_, R_)\n N_ = sync_N(M_, R_, A_, B_)\n #lower_bound_ = compute_lower_bound(M_)\n lower_bound_ = compute_lower_bound(pi0_, A_, B_, Q_, N_)\n return (Q_, N_, lower_bound_)\n\ndef M_step(Y_, Q_, N_):\n pi0_ = nu.log(Q_[0,:])\n A_ = sync_A(N_)\n mu_, s_ = update_params(Y_, Q_, N_)\n return (pi0_, A_, mu_, s_)\n\ndef HMM(Y_, K_, eps = np.power(0.1, 3),\n initializer = random_initialization):\n print('Start Inference...')\n pi0_, A_, mu_, s_ = initializer(Y_, K_)\n lower_bound = np.array([])\n continue_ = True\n while (continue_):\n print('*', end = '')\n Q_, N_, lower_bound_ = E_step(Y_, pi0_, A_, mu_, s_)\n lower_bound = np.append(lower_bound, lower_bound_)\n pi0_, A_, mu_, s_ = M_step(Y_, Q_, N_)\n if (lower_bound.size > 1):\n if ((np.exp(lower_bound[-1] - lower_bound[-2]) - 1) < eps):\n continue_ = False\n print(' done!')\n print('A')\n print(np.exp(A_))\n print('mu')\n print(mu_)\n print('s')\n print(s_)\n \n 
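\n# A minimal usage sketch for HMM() above, appended for illustration only:\n# the synthetic two-state data, the seed and the *_demo names are mine, and\n# running it assumes the repo's kmean / numerical_utils modules resolve.\nif __name__ == '__main__':\n np.random.seed(0)\n means_demo = np.array([[-2.0, 0.0], [2.0, 1.0]])\n states_demo = np.random.randint(0, 2, size=300)\n Y_demo = means_demo[states_demo] + np.random.normal(0, 1, (300, 2))\n HMM(Y_demo, 2)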
\n","repo_name":"BillHuang01/Expectation_Maximization","sub_path":"hidden_markov_model/HMMG.py","file_name":"HMMG.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5070274020","text":"from unittest import mock\n\nfrom neutronclient.v2_0 import client as neutronclient\n\nfrom heat.common import template_format\nfrom heat.engine.clients.os import neutron\nfrom heat.engine import scheduler\nfrom heat.tests import common\nfrom heat.tests import utils\n\n\nclass NeutronL2GatewayConnectionTest(common.HeatTestCase):\n test_template = '''\n heat_template_version: queens\n description: Template to test L2GatewayConnection Neutron resource\n resources:\n l2gw_conn:\n type: OS::Neutron::L2GatewayConnection\n properties:\n network_id: j29n3678-c012-p008-3975-93584a65a18a\n segmentation_id: 501\n l2_gateway_id: d3590f37-b072-4358-9719-71964d84a31c\n '''\n\n mock_create_req = {\n \"l2_gateway_connection\": {\n \"network_id\": \"j29n3678-c012-p008-3975-93584a65a18a\",\n \"segmentation_id\": \"501\",\n \"l2_gateway_id\": \"d3590f37-b072-4358-9719-71964d84a31c\"\n }}\n mock_create_reply = {\n \"l2_gateway_connection\": {\n \"id\": \"e491171c-3458-4d85-b3a3-68a7c4a1cacd\",\n \"tenant_id\": \"7ea656c7c9b8447494f33b0bc741d9e6\",\n \"network_id\": \"j29n3678-c012-p008-3975-93584a65a18a\",\n \"segmentation_id\": \"501\",\n \"l2_gateway_id\": \"d3590f37-b072-4358-9719-71964d84a31c\"\n }}\n\n def setUp(self):\n super(NeutronL2GatewayConnectionTest, self).setUp()\n self.mockclient = mock.MagicMock()\n self.patchobject(neutronclient, 'Client', return_value=self.mockclient)\n\n self.patchobject(neutron.NeutronClientPlugin, 'has_extension',\n return_value=True)\n\n def _create_l2_gateway_connection(self):\n # stack create\n self.mockclient.create_l2_gateway_connection.return_value = (\n self.mock_create_reply)\n self.mockclient.show_l2_gateway_connection.return_value = (\n self.mock_create_reply)\n orig_template = template_format.parse(self.test_template)\n self.stack = utils.parse_stack(orig_template)\n scheduler.TaskRunner(self.stack.create)()\n self.l2gwconn_resource = self.stack['l2gw_conn']\n\n def test_l2_gateway_connection_create(self):\n self._create_l2_gateway_connection()\n self.assertIsNone(self.l2gwconn_resource.validate())\n self.assertEqual((self.l2gwconn_resource.CREATE,\n self.l2gwconn_resource.COMPLETE),\n self.l2gwconn_resource.state)\n self.assertEqual('e491171c-3458-4d85-b3a3-68a7c4a1cacd',\n self.l2gwconn_resource.FnGetRefId())\n self.mockclient.create_l2_gateway_connection.assert_called_once_with(\n self.mock_create_req)\n\n def test_l2_gateway_connection_delete(self):\n self._create_l2_gateway_connection()\n self.stack.delete()\n self.mockclient.delete_l2_gateway_connection.assert_called_with(\n 'e491171c-3458-4d85-b3a3-68a7c4a1cacd')\n","repo_name":"openstack/heat","sub_path":"heat/tests/openstack/neutron/test_neutron_l2_gateway_connection.py","file_name":"test_neutron_l2_gateway_connection.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":385,"dataset":"github-code","pt":"27"} +{"seq_id":"43115637955","text":"import numpy as np\nimport matplotlib as mpl\nimport xarray as xr\n\nimport ipywidgets\nimport plotly.subplots\nimport plotly.graph_objs as go\nimport warnings\n\n\nclass Fit_DataView:\n '''MVC Data Viewer for 2D Data Fitters'''\n def __init__(self,subplot_kw,height=450,width=500):\n \n self.output = ipywidgets.Output()\n 
self.output.layout = ipywidgets.Layout(\n height='150px',\n overflow_y='auto',\n border = '1px solid black',\n padding= '25px',\n )\n \n self.widget = go.FigureWidget(\n plotly.subplots.make_subplots(**subplot_kw)\n )\n self.widget.update_layout(height=height,width=width,margin=dict(t=25,b=25))\n \n \n def update_legend(self,legend_kw):\n self.widget.update_layout( legend=legend_kw ) \n \n def add_image(self,img,row=1,col=1,lognorm=True,cmap='viridis',norm_min=1):\n self.widget.add_heatmap(z=img,row=row,col=col)\n \n def add_trace(self,x,y,name,row=1,col=1,mode='markers',color='blue'):\n \n self.widget.add_trace(\n go.Scatter(\n x=x, \n y=y, \n mode=mode, \n marker={'color':color},\n line={'color':color},\n name=name),\n row=row,col=col\n )\n \n def add_horizontal_line(self,y,x0=0,x1=128,row=1,col=1,line_kw=None):\n if line_kw is None:\n line_kw=dict(color='red',dash='dot',width=0.3)\n \n self.widget.add_shape(\n name='horizontal',\n xref='paper',\n yref='y',\n x0=x0, x1=x1, y0=y, y1=y,\n line=line_kw,\n row=row,\n col=col,\n )\n \n def add_vertical_line(self,x,y0=0,y1=128,row=1,col=1,line_kw=None):\n if line_kw is None:\n line_kw=dict(color='red',dash='dot',width=0.3)\n self.widget.add_shape(\n name='vertical',\n xref='x',\n yref='paper',\n x0=x, x1=x, y0=y0, y1=y1,\n line=line_kw,\n row=row,col=col,\n )\n \n def update_trace(self,name,x,y):\n for data in self.widget.data:\n if data.name == name:\n data.update(x=x,y=y)\n return\n warnings.warn(f'Data named \"{name}\" not found in this widget')\n \n \n def change_output(self,value):\n self.output.clear_output()\n with self.output:\n print(value)\n \n def run(self):\n vbox = ipywidgets.VBox(\n [self.widget,self.output],\n layout={ 'align_items':'center', 'justify_content':'center'},\n )\n return vbox\n ","repo_name":"martintb/typySANS","sub_path":"typySANS/MVC.py","file_name":"MVC.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32949980086","text":"import matplotlib.pyplot as plt\nimport pickle\nfrom os.path import abspath\n\n# loads a saved model from \ndef load_model(filename):\n if not filename.endswith('.pkl'):\n filename += '.pkl'\n\n filename = abspath(filename)\n\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\n# plot errors produced by neural network\ndef plot_errors(errors, lrs, title, n):\n plt.figure(figsize=(10, 10))\n plt.title(title)\n styles = ['-', '--', '-.', ':']\n for i, lr in enumerate(lrs):\n plt.plot(range(len(errors[lr][:n])), errors[lr][:n], label=f'lr = {lr}', linestyle=styles[i % 4])\n \n plt.ylabel('Error')\n plt.xlabel('Epochs')\n plt.legend(bbox_to_anchor=(1, 1))\n plt.show()","repo_name":"MikeynJerry/cs525","sub_path":"Project 1/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20607552617","text":"class Luminary(object):\n \"\"\"\n This class represents a particular celestial body. All properties that must be supplied\n to initialize a celestial body are passed in as parameters.\n \"\"\"\n def __init__(self, name, texturePath, modelPath, initPosition, scale, children, selfRotate, orbitRotate, textureToggle):\n \"\"\"\n Initializes all attributes required to create a celestial body.\n\n :param name: name of the celestial body\n :param texturePath: path to the texture file\n :param modelPath: path to the model object that defines the shape of the celestial body\n :param initPosition: initial position of the celestial body\n :param scale: size of the celestial body\n :param children: defines the children of this celestial body\n :param selfRotate: how fast the celestial body rotates about its own axis\n :param orbitRotate: how fast the celestial body moves along its orbit\n :param textureToggle: defines which celestial body textures should toggle and which should not\n \"\"\"\n self.orbitRotate = orbitRotate\n self.selfRotate = selfRotate\n self.children = children\n self.name = name\n self.initPosition = initPosition\n self.texturePath = texturePath\n self.textureToggle = textureToggle\n\n self.model = loader.loadModel(modelPath)\n self.model.setTexture(loader.loadTexture(texturePath), 1)\n self.model.setScale(scale)\n if (initPosition):\n self.model.setPos(initPosition, 0, 0)\n","repo_name":"spalatoo/SEW-SolarSystem","sub_path":"src/Luminary.py","file_name":"Luminary.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"30542575350","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nSQLALCHEMY_DATABASE_URL = \"postgresql://mykyta:1234@localhost:5432/kijiji_data\"\n\nengine = create_engine(\n SQLALCHEMY_DATABASE_URL,\n echo=True\n)\nSessionLocal = Session(autocommit=False, autoflush=False, bind=engine)\n\nBase = declarative_base()\n\n\n\n\n\n","repo_name":"ImVanillaHehe/test_task_for_dataox","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"35769229303","text":"# reading and writing csv file\nimport csv\nwith open('CSVData_1.csv') as f:\n f_csv = csv.reader(f)\n headers = next(f_csv)\n for row in f_csv:\n print(row)\n\nfrom collections import namedtuple\nwith open('Stock1.csv', encoding='utf-8-sig') as f:\n f_csv = csv.reader(f)\n headings = next(f_csv)\n print(headings)\n Row = namedtuple('Row', headings)\n for pos in f_csv:\n row = Row(*pos)\n print(\"\\t{:7} {} {:10} {:7} {:10.5} {:1.9}\".format(row.Symbol, row.Price, row.Date, row.Time, row.Change, row.Volume))\n","repo_name":"DavidsIT-Site/Python3","sub_path":"Ch 6 Data Encoding and processing/ch6_1.py","file_name":"ch6_1.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14475891298","text":"import numpy as np\r\nimport csv\r\n\r\ndef computepro(pro):\r\n filep = open('D:/Downloads/910ProjectData/preprocessingdata2/jobs.csv','r')\r\n preg = csv.reader(filep)\r\n abortion = 0\r\n normal = 0\r\n pro = str(pro)\r\n new = []\r\n count = 0\r\n for pitem in preg:\r\n if preg.line_num == 
1:\r\n # name0 = pitem\r\n continue\r\n if pitem[3] == pro:\r\n if pitem[2].isdigit():\r\n # print(pitem[2])\r\n num = float(pitem[2])\r\n new.append(num)\r\n print(pro+\"-------\")\r\n # new.append(count)\r\n # print(new)\r\n mean = np.mean(new)\r\n med = np.median(new)\r\n return mean,med\r\n\r\nlistall = []\r\n\r\n\r\ndef avg(num):\r\n i = computepro(num)\r\n print(i)\r\n listall.extend(i)\r\n # print(listall)\r\n\r\navg(2015)\r\navg(2011)\r\navg(2009)\r\navg(2006)\r\navg(2004)\r\navg(2000)\r\navg(1997)\r\navg(1993)\r\navg(1991)","repo_name":"Anderbone/AbortionInChina","sub_path":"scripts/final07_wages2.py","file_name":"final07_wages2.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2218693891","text":"\"\"\"\nAuto Select RetentionPolicy InfluxDB 0.10 proxy for grafana\nAuthors: Zollner Robert,\n Paul Kuiper\n\nFree use\nRequires: gevent, bottle, requests\n\"\"\"\n\nimport gevent\nfrom gevent import monkey\nfrom optparse import OptionParser\n\nmonkey.patch_all()\n\nimport sys\nimport requests\nfrom bottle import get, abort, run, request, response, redirect\nimport regex as re\n\nimport logging\nlogger = logging.getLogger(__name__)\nlogging.basicConfig()\nlogger.setLevel(logging.CRITICAL)\n\nrp_db_map = dict()\n\nCONFIG = {\n 'influxdb_http':'http://localhost:8086',\n\n 'bind_host': '0.0.0.0',\n 'bind_port': '3004',\n\n 'retention_policy_map' : {\n '0.1s': '\"default\"',\n '1s' : '\"default\"',\n '5s' : '\"default\"',\n '10s': '\"rp_10s\"',\n '30s': '\"rp_30s\"',\n '1m' : '\"rp_1m\"',\n '5m' : '\"rp_5m\"',\n '10m': '\"rp_30m\"',\n '30m': '\"rp_30m\"',\n '1h' : '\"rp_1h\"',\n '3h' : '\"rp_3h\"',\n '12h': '\"rp_12h\"',\n '1d' : '\"rp_24h\"',\n '7d' : '\"rp_24h\"',\n '30d': '\"rp_24h\"'\n }\n}\n\npattern = re.compile(r\"\"\"\n\n ^ # beginning of string\n select\\b # must start with select statement (followed by word boundary)\n \\s+ # 1 or more whitespaces\n (count|min|max|mean|sum|first|last) # an aggragate function group 0 (aggregate)\n \\( # time with opening bracket\n (.*) # the field name group 1 (field name)\n \\) # closing bracket\n \\s+ # 1 or more whitespaces\n \\bfrom\\b # the from statement should follow (with word boundaries)\n \\s+ # 1 or more whitespaces\n (.*) # the from content group 2 (measurement)\n \\s+ # 1 or more whitespaces\n \\bwhere\\b # the where statement is always present in a grafana query\n (.*) # the where content group 3 (where clause)\n \\bgroup\\sby\\b # match group by statement\n \\s+ # 1 or more whitespaces\n time\\( # time with opening bracket\n (\\d+) # minimal 1 digit (does not match 0.1s!) 
group 4 (number of time units)\n ([s|m|h|d|w]) # the group by unit group 5 (time unit)\n \) # closing bracket\n .* # rest of the request - don't care\n $ # end of string\n \"\"\", re.VERBOSE | re.I)\n\n\n@get('/')\ndef proxy_influx_query(path):\n \"\"\"\n Capture the query events coming from Grafana.\n Investigate the query and replace the measurement name with a Retention Policy measurement name if possible.\n Send out the (modified or unmodified) query to Influx and return the result\n \"\"\"\n\n forward_url = CONFIG['influxdb_http'] # The local influx host\n\n params = dict(request.query) # get all query parameters\n\n auth = request.auth\n logger.info(\"Original query: %s\", params['q'])\n\n try:\n params['q'] = modify_query(params, rp_db_map, auth)\n\n except Exception as e:\n logger.critical(\"Exception in proxy_influx_query():\")\n logger.exception(e)\n pass\n\n headers = request.headers\n cookies = request.cookies\n\n logger.debug(\"Request to the server: \")\n for k in headers.keys():\n logger.debug(\"headers: %s -> %s\", k, headers.raw(k))\n logger.debug(\"cookies: %s\", cookies)\n logger.debug(\"url: %s\", forward_url +'/'+ path)\n logger.info(\"Transformed query: %s\", params['q'])\n\n s = requests.Session()\n req = requests.Request('GET', forward_url +'/'+ path, params=params, headers=headers, cookies=cookies)\n prepped = req.prepare()\n r = s.send(prepped, stream=True)\n\n # Do not try to read the response with r.content or r.raw.data because it will close the file (r.raw)\n logger.debug(\"influx response code: %s\", r.status_code)\n\n if r.status_code == 200:\n for key, value in dict(r.headers).items():\n response.set_header(key, value)\n\n for key, value in dict(r.cookies).items():\n response.cookies[key] = value\n else:\n abort(r.status_code, r.content) # NOK, return error\n\n logger.debug(\"Return response headers: %s\", r.headers)\n logger.debug(\"Return response cookies: %s\", r.cookies)\n return r.raw\n\n\ndef modify_query(req, rp_db_map, auth):\n\n \"\"\"\n Grafana will zoom out with the following group by times:\n 0.1s, 1s, 5s, 10s, 30s, 1m, 5m, 10m, 30m, 1h, 3h, 12h, 1d, 7d, 30d, 1y\n \"\"\"\n\n qry = req['q']\n qry_db = req['db']\n\n logger.debug(\"modify_query() with db=%s and query: %s\", qry_db, qry)\n\n try:\n pattern_qry = pattern.search(qry)\n if pattern_qry is None:\n return qry\n\n items = pattern_qry.groups() # get the content of the different query parts\n\n q_gtime = ''.join((items[4],items[5]))\n logger.debug(\"Original group by time: %s\", q_gtime)\n if q_gtime not in CONFIG['retention_policy_map']:\n logger.warn(\"Group by time not found in the CONFIG['retention_policy_map']\")\n return qry\n \n q_table = items[2]\n logger.debug(\"Original measurement queried: %s\", q_table)\n if '.' in q_table:\n q_rp,_,q_table = items[2].partition('.')\n if q_rp in CONFIG['retention_policy_map'].values():\n logger.info('specific RP requested, ignoring detection: %s - %s', q_rp, q_table)\n return qry\n else:\n # This is a dotted series name\n q_table = items[2]\n \n \n logger.info(\"Dot found. After transform: Group by time: %s. RP: %s. Measurement: %s\", q_gtime, q_rp, q_table)\n \n new_rp = CONFIG['retention_policy_map'][q_gtime]\n logger.debug(\"New RP: %s\", new_rp)\n \n measurement = '.'.join((new_rp, q_table))\n new_qry = qry.replace(items[2], measurement)\n logger.debug(\"New query: %s\", new_qry)\n \n # Download the list of RPs for the current InfluxDB database\n if qry_db not in rp_db_map:\n influx_update_rp(rp_db_map, qry_db, auth)\n \n # Check if auto-calc RP is defined in InfluxDB database\n if new_rp.strip(\"\\\"\") not in rp_db_map.get(qry_db, []):\n logger.critical(\"RP [%s] is not defined in Influx database [%s]. skipping...\", new_rp, qry_db)\n logger.critical(\"RPs available: %s\", rp_db_map.get(qry_db, []))\n return qry\n \n logger.debug('original measurement :[%s] new measurement: [%s]', items[2], measurement)\n return new_qry\n \n except Exception as e:\n logger.critical(\"Exception in modify_query():\")\n logger.exception(e)\n return qry\n\n\ndef influx_update_rp(rp_map, r_db, auth):\n \n params = { 'q' : 'SHOW RETENTION POLICIES ON %s' % r_db,\n 'db' : r_db}\n\n try:\n r = requests.get(CONFIG['influxdb_http'] + '/query', params=params, auth=auth)\n\n except Exception as e:\n logger.critical(\"Exception in influx_update_rp():\")\n logger.exception(e)\n return # no response to inspect, so bail out\n\n if not r.ok:\n logger.warn(\"Error obtaining RPs from InfluxDB server: %s\", r.content)\n return\n\n try:\n rp_list = { rp[0] for rp in r.json()['results'][0]['series'][0]['values'] }\n rp_map[r_db] = rp_list\n logger.debug(\"RPs for database %s: %s\", r_db, rp_list)\n \n except Exception as e:\n logger.critical(\"Exception in influx_update_rp():\")\n logger.exception(e)\n pass\n \nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option(\"-v\", action=\"store_true\", dest=\"verbose\")\n parser.add_option(\"-d\", action=\"store_true\", dest=\"debug\")\n (options, args) = parser.parse_args(sys.argv)\n\n if options.verbose:\n logger.setLevel(logging.INFO)\n logger.info(\"Logger set to INFO level\")\n if options.debug:\n logger.setLevel(logging.DEBUG)\n logger.debug(\"Logger set to DEBUG level\")\n\n print(\"Starting proxy server\")\n run(host=CONFIG['bind_host'], port=CONFIG['bind_port'], server='gevent')\n","repo_name":"adrianlzt/influxdb-grafana-rp-proxy","sub_path":"influxdb_grafana_rp_proxy.py","file_name":"influxdb_grafana_rp_proxy.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"6364763808","text":"class Calculadora:\n def __init__(self, num1, num2):\n self.num1 = num1\n self.num2 = num2\n\n def suma(self):\n suma_total = self.num1 + self.num2\n return suma_total\n\n def resta(self):\n resta_total = self.num1 - self.num2\n return resta_total\n \n def producto(self):\n producto = self.num1 * self.num2\n return producto\n\n def divisor(self):\n dividido = self.num1 / self.num2\n return dividido\n\n\nprint('-*-*-*-*-*-*-*-*-*-*-*-')\na = int(input('Ingrese el primer valor: '))\nb = int(input('Ingrese el segundo valor: '))\nprint('-*-*-*-*-*-*-*-*-*-*-*-')\n\n\ncalc = Calculadora(a,b)\n\nprint('La suma de los valores es: ', calc.suma())\nprint('La resta de los valores es: ', calc.resta())\nprint('La multiplicación de los valores es: ', calc.producto())\nprint('La división de los valores es: ', calc.divisor()) 
","repo_name":"jairoandresm/curso_python","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3566432310","text":"'''\nWrite a short formula that computes the length sideC of the hypotenuse of a right triangle given two\nsides sideA = 6 and sideB = 8. Print the length of sideC on the screen.\n'''\nimport math\ndef ComputeHypotenuse(A = 6, B = 8 ):\n\tC = math.sqrt(A**2 + B**2)\n\treturn C\n\nif __name__ ==\"__main__\":\n\tprint (ComputeHypotenuse())\n\t\n","repo_name":"joantyry/PythonClassAtAimsRwanda","sub_path":"exq5.py","file_name":"exq5.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22497742906","text":"\"\"\" FormLayout\n\nLayout a series of (input) widgets in a form. Example:\n\n.. UIExample:: 200\n\n from flexx import ui\n\n class Example(ui.Widget):\n def init(self):\n with ui.FormLayout():\n self.b1 = ui.LineEdit(title='Name:')\n self.b2 = ui.LineEdit(title=\"Age:\")\n self.b3 = ui.LineEdit(title=\"Favorite color:\")\n ui.Widget(flex=1) # Spacing\n\nAlso see examples: :ref:`themed_form.py`.\n\n\"\"\"\n\nfrom pscript import window\n\nfrom . import Layout\nfrom .. import create_element\n\n\nclass FormLayout(Layout):\n \"\"\" A layout widget that vertically alligns its child widgets in a form.\n A label is placed to the left of each widget (based on the widget's title).\n\n The ``node`` of this widget is a\n `
`_,\n which lays out it's child widgets and their labels using\n `CSS grid `_.\n \"\"\"\n\n CSS = \"\"\"\n .flx-FormLayout {\n display: grid;\n grid-template-columns: auto 1fr;\n justify-content: stretch;\n align-content: stretch;\n justify-items: stretch;\n align-items: center;\n\n }\n .flx-FormLayout > .flx-title {\n text-align: right;\n padding-right: 5px;\n }\n \"\"\"\n\n def _create_dom(self):\n return window.document.createElement('div')\n\n def _render_dom(self):\n rows = []\n row_templates = []\n for widget in self.children:\n rows.extend([\n create_element('div', {'class': 'flx-title'}, widget.title),\n widget.outernode,\n ])\n flex = widget.flex[1]\n row_templates.append(flex + \"fr\" if flex > 0 else \"auto\")\n self.node.style['grid-template-rows'] = \" \".join(row_templates)\n return rows\n\n def _query_min_max_size(self):\n \"\"\" Overload to also take child limits into account.\n \"\"\"\n\n # Collect contributions of child widgets\n mima1 = [0, 1e9, 0, 0]\n for child in self.children:\n mima2 = child._size_limits\n mima1[0] = max(mima1[0], mima2[0])\n mima1[1] = min(mima1[1], mima2[1])\n mima1[2] += mima2[2]\n mima1[3] += mima2[3]\n\n # Dont forget padding and spacing\n extra_padding = 2\n extra_spacing = 2\n for i in range(4):\n mima1[i] += extra_padding\n mima1[2] += extra_spacing\n mima1[3] += extra_spacing\n\n # Own limits\n mima3 = super()._query_min_max_size()\n\n # Combine own limits with limits of children\n return [max(mima1[0], mima3[0]),\n min(mima1[1], mima3[1]),\n max(mima1[2], mima3[2]),\n min(mima1[3], mima3[3])]\n","repo_name":"flexxui/flexx","sub_path":"flexx/ui/layouts/_form.py","file_name":"_form.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":3169,"dataset":"github-code","pt":"27"} +{"seq_id":"34469192471","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2021/10/22 下午2:18\n@file: swap.py\n@author: zj\n@description: \n\"\"\"\n\nimport cv2\nimport random\n\nimport numpy as np\n\n\ndef crop_image(image, crop_nums):\n assert isinstance(image, np.ndarray)\n assert len(crop_nums) == 2\n\n high, width = image.shape[:2]\n crop_x = [int((width / crop_nums[0]) * i) for i in range(crop_nums[0] + 1)]\n crop_y = [int((high / crop_nums[1]) * i) for i in range(crop_nums[1] + 1)]\n\n im_list = []\n for j in range(len(crop_y) - 1):\n for i in range(len(crop_x) - 1):\n im_list.append(image[crop_y[j]:min(crop_y[j + 1], high), crop_x[i]:min(crop_x[i + 1], width)])\n return im_list\n\n\ndef swap(image, crop_nums):\n assert isinstance(image, np.ndarray)\n assert len(crop_nums) == 2\n src_width, src_height = image.shape[:2]\n\n image = image[10:(src_height - 10), 10:(src_width - 10)]\n swap_img_list = crop_image(image, crop_nums)\n\n tmp_x_list = []\n tmp_y_list = []\n count_x = 0\n count_y = 0\n k = 1\n # swap range\n neighbor = 2\n for i in range(crop_nums[1] * crop_nums[0]):\n tmp_x_list.append(swap_img_list[i])\n count_x += 1\n if len(tmp_x_list) >= k:\n tmp = tmp_x_list[count_x - neighbor:count_x]\n random.shuffle(tmp)\n tmp_x_list[count_x - neighbor:count_x] = tmp\n if count_x == crop_nums[0]:\n tmp_y_list.append(tmp_x_list)\n count_x = 0\n count_y += 1\n tmp_x_list = []\n if len(tmp_y_list) >= k:\n tmp2 = tmp_y_list[count_y - neighbor:count_y]\n random.shuffle(tmp2)\n tmp_y_list[count_y - neighbor:count_y] = tmp2\n random_im = []\n for line in tmp_y_list:\n random_im.extend(line)\n\n image_height, image_width = image.shape[:2]\n iw = int(image_width / crop_nums[0])\n ih = int(image_height / crop_nums[1])\n 
dst_image = np.zeros(image.shape).astype(np.uint8)\n x = 0\n y = 0\n for im in random_im:\n dst_image[y * ih:(y + 1) * ih, x * iw:(x + 1) * iw] = cv2.resize(im, (iw, ih))\n\n x += 1\n if x == crop_nums[0]:\n x = 0\n y += 1\n\n return cv2.resize(dst_image, (src_width, src_height))\n","repo_name":"ZJCV/DCL","sub_path":"dcl/data/transforms/swap.py","file_name":"swap.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"19294854138","text":"import os\nimport math\nimport torch\nimport numpy as np\nimport hydra\n\nfrom datasets import Dataset\nfrom copy import deepcopy\nfrom functools import partial\nfrom omegaconf import DictConfig, OmegaConf\nfrom typer import Typer\nfrom rich import print\n\n\nfrom tools.utils import Logger\nfrom tools.lm import get_enc_len_fn\nfrom params import AllParams\nfrom constants import Dataset as D, ExSel as ES, LLM, default_prompt_version\nfrom prompts.few_shot import FewShotPromptTemplate2\nfrom selector_utils import get_selector\nfrom eval import eval, dump_prompts\n\n# app = Typer()\n\ndef set_seeds(seed):\n import numpy as np\n import random\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed) # CPU random seed\n torch.cuda.manual_seed(seed) # GPU random seed\n\n@hydra.main(version_base=None, config_name=\"config\")\ndef main(P: AllParams):\n P: AllParams = OmegaConf.to_object(P)\n if P.exp.tiny:\n P.data.n_cands, P.data.n_test = 40, 20\n print(P)\n print(P.get_output_dir())\n os.makedirs(P.get_output_dir(), exist_ok=True)\n logger = Logger(outfile=P.get_logfile())\n try:\n run_main(P, logger)\n except Exception as e:\n import traceback\n logger.log(traceback.format_exc())\n logger.log(e)\n\ndef run_main(P: AllParams, logger: Logger):\n log = logger.log\n # train_ds, test_ds, candidates, fewshot_prompt_fn, templates = get_data(P, logger)\n EP, DP, LP, SP = P.shorthand\n train_ds, candidates, test_ds = DP.get_splits(EP.data_root, 'data', EP.seed)\n templates = DP.get_templates()\n fewshot_prompt_fn = partial(FewShotPromptTemplate2,\n input_variables=templates['example_template'].input_variables,\n example_separator='\\n\\n', **templates)\n prompt_template = get_prompt_template(\n P, train_ds, test_ds, candidates, fewshot_prompt_fn,\n templates, logger\n )\n if P.exp.only_prompts:\n dump_prompts(\n P, test_ds, prompt_template=prompt_template,\n logger=logger, outfile=P.get_promptsfile(), debug=P.exp.debug\n )\n else:\n torch.cuda.empty_cache()\n llm = P.get_lm()\n print('Instantiating LLMChain...')\n # agent = LLMChain(prompt=prompt_template, llm=llm, verbose=P.debug)\n eval(P, test_ds, llm, prompt_template, batch_size=P.exp.batch_size,\n logger=logger, outfile=P.get_resultsfile(), debug=P.exp.debug)\n\ndef get_max_output_length(\n dataset: Dataset, example_template, llm: LLM = None, enc_len_fn = None):\n enc_len_fn = enc_len_fn or get_enc_len_fn(llm)\n test_strings = [example_template.format(**ex, test=True) for ex in dataset]\n completed_strings = [example_template.format(**ex, test=False) for ex in dataset]\n test_str_lens = [enc_len_fn(s) for s in test_strings]\n completed_str_lens = [enc_len_fn(s) for s in completed_strings]\n output_lens = [c - t for t, c in zip(test_str_lens, completed_str_lens)]\n return max(output_lens)\n\ndef get_prompt_template(\n P: AllParams, train_ds: Dataset, test_ds: Dataset, candidates: Dataset,\n fewshot_prompt_fn, templates, logger: Logger\n):\n EP, DP, LP, SP = P.shorthand\n from constants import max_new_tokens_d, context_length_limit\n enc_len_fn = get_enc_len_fn(LP.lm_name)\n max_len = context_length_limit[LP.lm_name]\n subtract_gen_len = True\n if DP.dataset in max_new_tokens_d:\n max_len -= max_new_tokens_d[DP.dataset]\n subtract_gen_len = False\n else:\n max_output_len = get_max_output_length(train_ds, templates['example_template'], P.lm_name)\n max_len -= (max_output_len + 5)\n subtract_gen_len = False\n\n fewshot_prompt_fn = partial(fewshot_prompt_fn,\n max_len=max_len, enc_len_fn=enc_len_fn, subtract_gen_len=subtract_gen_len)\n\n if SP.n_shots == -1:\n P = deepcopy(P)\n SP.n_shots = 50\n\n if SP.selector_type == ES.RANDOM:\n fewshot_prompt = fewshot_prompt_fn(examples=list(train_ds.select(np.arange(SP.n_shots))))\n\n elif SP.selector_type in [\n ES.COSINE, ES.STRUCT, ES.BERTSCORE, ES.LF_COVERAGE\n ]:\n ex_selector = get_selector(P, candidates, templates['example_template'], test_ds, enc_len_fn, max_len, subtract_gen_len=subtract_gen_len)\n fewshot_prompt = fewshot_prompt_fn(example_selector=ex_selector)\n else:\n raise ValueError(f'Unknown selector_type: {SP.selector_type}')\n return fewshot_prompt\n\nif __name__ == '__main__':\n main()","repo_name":"Shivanshu-Gupta/icl-coverage","sub_path":"src/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"42245081320","text":"import random\nimport numpy as np\nimport time\n\nimport pandas as pd\n\nfrom sklearn.model_selection import cross_val_score\n\nfrom sklearn.feature_selection import RFECV\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom boruta import BorutaPy\n\nfrom ga.GeneticSelector import GeneticSelector\n\nSEED = 2018\nrandom.seed(SEED)\nnp.random.seed(SEED)\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef timeit(method):\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n print('%r %2.2f m' % (method.__name__, int(te - ts) / 60))\n return result\n\n return timed\n\n\ndef get_cv_score(est, X_features, y, mode):\n if mode == 'regression':\n return -1.0 * cross_val_score(est, X_features, y, cv=5, scoring=\"neg_mean_squared_error\")\n elif mode == 'classification':\n return cross_val_score(est, X_features, y, cv=5, scoring=\"f1_macro\")\n\n\ndef get_RFE_features(est, X, y, mode, cv=5):\n # recursive feature elimination with cross-validation\n if mode == 'regression':\n rfe = RFECV(est, cv=cv, scoring=\"neg_mean_squared_error\")\n elif mode == 'classification':\n rfe = RFECV(est, cv=cv, scoring=\"f1_macro\")\n else:\n raise Exception('invalid mode ' + mode)\n rfe.fit(X, y)\n X_features = X[:, rfe.support_]\n return X_features\n\n\ndef get_RF_feature_importance(est, X, y, mode):\n if mode == 'regression':\n rf = RandomForestRegressor(n_estimators=500, random_state=SEED)\n elif mode == 'classification':\n rf = RandomForestClassifier(n_estimators=500, random_state=SEED)\n rf.fit(X, y)\n support = rf.feature_importances_ > 0.01\n X_features = X[:, support]\n return X_features\n\n\ndef get_boruta_features(est, X, y, mode):\n if mode == 'regression':\n rf = RandomForestRegressor(n_estimators=500, random_state=SEED)\n elif mode == 'classification':\n rf = RandomForestClassifier(n_estimators=500, random_state=SEED)\n boruta = BorutaPy(rf, n_estimators='auto')\n boruta.fit(X, y)\n X_features = X[:, boruta.support_]\n return X_features\n\n\ndef get_genetic_features(est, X, y, mode, need_plot_scores=False):\n sel = 
GeneticSelector(estimator=est,\n n_gen=7, size=200, n_best=40, n_rand=40,\n n_children=5, mutation_rate=0.05, mode=mode, cv=5)\n sel.fit(X, y)\n if need_plot_scores:\n sel.plot_scores()\n X_features = X[:, sel.support_]\n return X_features\n\n\ndef get_selector_score(cv_scores, mode, est_name, feat_selector, feat_count, elapsed_time):\n if mode == 'regression':\n mse = round(np.mean(cv_scores), 2)\n rmse = round(mse ** 0.5, 2)\n selector_score = {'est_name': est_name,\n 'name': feat_selector,\n 'mse': mse,\n 'rmse': rmse,\n 'feat_count': feat_count,\n 'time': elapsed_time}\n elif mode == 'classification':\n f1_macro = round(np.mean(cv_scores), 2)\n selector_score = {'est_name': est_name,\n 'name': feat_selector,\n 'f1_macro': f1_macro,\n 'feat_count': feat_count,\n 'time': elapsed_time}\n else:\n raise Exception('invalid mode ' + mode)\n\n return selector_score\n\n\ndef get_scores_df(est_dict, X, y, mode, features_selectors=('init', 'RFE', 'RF', 'boruta', 'gen')):\n features_selectors_dict = {\n 'init': lambda est, X, y, mode: X,\n 'RFE': get_RFE_features,\n 'RF': get_RF_feature_importance,\n 'boruta': get_boruta_features,\n 'gen': get_genetic_features\n }\n selector_scores = []\n\n for est_name, est in est_dict.items():\n for feat_selector in features_selectors:\n selector_start = pd.Timestamp.now()\n X_features = features_selectors_dict[feat_selector](est, X, y, mode)\n elapsed_time = pd.Timestamp.now() - selector_start\n feat_count = X_features.shape[1]\n cv_scores = get_cv_score(est, X_features, y, mode)\n\n selector_score = get_selector_score(cv_scores, mode, est_name, feat_selector, feat_count, elapsed_time)\n selector_scores.append(selector_score)\n print(feat_selector, selector_score['mse' if mode == 'regression' else 'f1_macro'], feat_count, elapsed_time)\n\n return pd.DataFrame(selector_scores)\n","repo_name":"qifanyyy/JupyterNotebook","sub_path":"new_algs/Sequence+algorithms/Selection+algorithm/features_estimators.py","file_name":"features_estimators.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"27"} +{"seq_id":"36043730178","text":"import os\nimport re\n\nimport requests.exceptions as req_exc\n\nfrom libs import jenkinslib\n\nfrom .BasePlugin import BasePlugin\n\n\nclass DumpCreds(BasePlugin):\n \"\"\"Class for managing DumpCreds SubCommand\"\"\"\n\n def __init__(self, args):\n super().__init__(args)\n\n with open(os.path.join(\"data\", \"groovy\", \"dump_creds.groovy\")) as f:\n dumpcreds = f.read()\n\n try:\n cred = self.args.credentials[0]\n server = self._get_jenkins_server(cred)\n\n if not server.can_access_script_console():\n self.logging.fatal(\n \"%s: Is not a valid Jenkins Admin or unable to access Jenkins server.\",\n self._get_username(cred),\n )\n\n result = server.execute_script(dumpcreds, node=self.args.node)\n\n result = re.sub(\n r\"---------------------------------------------------[\\r\\n][\\r\\n]{2,}\",\n \"\\n\\n\",\n result,\n ).strip()\n\n print(result)\n except jenkinslib.JenkinsException as ex:\n if \"[403]\" in str(ex).split(\"\\n\")[0]:\n self.logging.fatal(\n \"%s authentication failed or not an admin with script privileges\",\n self._get_username(cred),\n )\n else:\n self.logging.fatal(\n \"Unable to access Jenkins at: %s With User: %s For Reason:\\n\\t%s\"\n % (\n (\n self.server_url.netloc\n if len(self.server_url.netloc) > 0\n else self.args.server\n ),\n self._get_username(cred),\n str(ex).split(\"\\n\")[0],\n )\n )\n\n except (req_exc.SSLError, 
req_exc.ConnectionError):\n self.logging.fatal(\n \"Unable to connect to: \"\n + (self.server_url.netloc if len(self.server_url.netloc) > 0 else self.args.server)\n )\n\n except Exception:\n self.logging.exception(\"\")\n exit(1)\n\n\nclass DumpCredsParser:\n def cmd_DumpCreds(self):\n \"\"\"Handles parsing of DumpCreds Subcommand arguments\"\"\"\n\n self._create_contextual_parser(\"DumpCreds\", \"Dump all Stored Credentials on Jenkins\")\n self._add_common_arg_parsers()\n\n self.parser.add_argument(\n \"-N\",\n \"--node\",\n metavar=\"\",\n help='Node (Slave) to execute against. Executes against \"master\" if not specified.',\n action=\"store\",\n dest=\"node\",\n required=False,\n )\n\n args = self.parser.parse_args()\n\n self._validate_server_url(args)\n self._validate_timeout_number(args)\n self._validate_output_file(args)\n\n return self._handle_authentication(args)\n","repo_name":"Accenture/jenkins-attack-framework","sub_path":"libs/JAF/plugin_DumpCreds.py","file_name":"plugin_DumpCreds.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":535,"dataset":"github-code","pt":"27"} +{"seq_id":"70547193991","text":"import math\n\n### NOTE: this is NOT an actual database class.\n### It handles string representation only.\n\nclass TestDatabase(object):\n \"\"\"Handles TA1 test database representation.\n\n Attributes:\n short_database_names: a dictionary mapping (num_records, record_size)\n to a database nickname\n db_num_records: the number of records in the test database\n db_record_size: the average record size in the test database\n \"\"\"\n def __init__(self, short_database_names, db_num_records, db_record_size):\n \"\"\"Initializes the TestDatabase with a dict of short database names,\n a number of records and a record size.\"\"\"\n self.db_num_records = db_num_records\n self.db_record_size = db_record_size\n self._short_database_names = short_database_names\n self._database_name_template = \"Database with %s Rows, Each of Size %s\"\n\n def get_db_num_records_str(self):\n \"\"\"Returns the string used to represent the number of records in the\n database\"\"\"\n log_db_num_records = math.log10(self.db_num_records)\n if int(log_db_num_records) == log_db_num_records:\n return \"$10^{%s}$\" % str(int(log_db_num_records))\n return str(self.db_num_records)\n\n def get_db_record_size_str(self):\n \"\"\"Returns the string used to represent the database record size\"\"\"\n log_db_record_size = math.log10(self.db_record_size)\n if int(log_db_record_size) == log_db_record_size:\n return \"$10^{%s}$B\" % str(int(log_db_record_size))\n return str(self.db_record_size) + \"B\"\n\n def _get_database_name_from_template(self, template):\n \"\"\"Returns the string used to refer to the database in question given\n a name tempalte.\n Args:\n template: a string with '%s' where the number of records and the\n record size should be.\n \"\"\"\n return template % (\n self.get_db_num_records_str(),\n self.get_db_record_size_str())\n\n def get_database_name(self, lower=False):\n \"\"\"Returns the string used to refer to the database in question.\n Args:\n lower: a boolean indicating whether lowercase is desired.\n \"\"\"\n if not lower:\n return self._get_database_name_from_template(\n self._database_name_template)\n else:\n return self._get_database_name_from_template(\n self._database_name_template.lower())\n \n def get_short_database_name(self, lower=False):\n \"\"\"Returns the short string used to refer to the database in question.\n Args:\n lower: a 
boolean indicating whether lowercase is desired.\n \"\"\"\n if (self.db_num_records,\n self.db_record_size) not in self._short_database_names:\n return self.get_database_name(lower)\n else:\n nickname = self._short_database_names[\n (self.db_num_records, self.db_record_size)]\n if lower: return nickname.lower()\n else: return nickname\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return ((self.db_num_records == other.db_num_records)\n and (self.db_record_size == other.db_record_size)\n and (self.get_short_database_name()\n == other.get_short_database_name()))\n else:\n return False\n","repo_name":"mit-ll/SPARTA","sub_path":"spar_python/report_generation/ta1/ta1_test_database.py","file_name":"ta1_test_database.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"27"} +{"seq_id":"3240380096","text":"import sys\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nimport plotly\nimport polars as pl\nimport numpy as np\n\n\ndf = pl.read_csv(sys.argv[1], sep='\\t')\n\nqual_bin = \"20-39\"\nqual_bin = \"60+\"\n\nr1 = df.filter((pl.col(\"read12\") == \"r1\") & (pl.col(\"FR\") == \"f\") & (\n pl.col('bq_bin') == qual_bin) & (pl.col('total_count') > 0))\nr2 = df.filter((pl.col(\"read12\") == \"r2\") & (pl.col(\"FR\") == \"r\") & (\n pl.col('bq_bin') == qual_bin) & (pl.col('total_count') > 0))\n\nprint(r1.shape, r2.shape)\n\ncontexts = list(sorted(r1['context'].unique(), reverse=False))\nprint(contexts)\n\nr1 = r1.with_columns([\n (((pl.col('error_count') + 0) / (1 + pl.col('total_count')))).alias('rate')])\nr2 = r2.with_columns([\n (((pl.col('error_count') + 0) / (1 + pl.col('total_count')))).alias('rate')])\n\nr1_rate = r1['error_count'].sum() / (r1['total_count'].sum() / 3) * 1_000_000\nr2_rate = r2['error_count'].sum() / (r2['total_count'].sum() / 3) * 1_000_000\n\ncols = plotly.colors.DEFAULT_PLOTLY_COLORS\n\n# Create figure with secondary y-axis\nfig = make_subplots(\n rows=2, subplot_titles=[f\"read1(F) errors per million read-bases: {r1_rate:.3f}\", f\"read2(R) errors per million read-bases: {r2_rate: 3f}\"],\n vertical_spacing=0.1,\n)\n\nfor i, ctx in enumerate(contexts):\n sub1 = r1.filter(pl.col('context') == ctx)\n sub2 = r2.filter(pl.col('context') == ctx)\n\n rate1 = sub1['error_count'].sum() / sub1['total_count'].sum() * 1_000_000\n rate2 = sub2['error_count'].sum() / sub2['total_count'].sum() * 1_000_000\n\n t1 = go.Scatter(name=f'{ctx}', x=np.array(sub1[\"read_pos\"]), y=(1_000_000 * np.array(\n sub1['rate'])),\n hovertemplate=\"rate/Mb: %{y:.2g} errors:%{text}\",\n text=[f'{c} of {n:,}' for c,\n n in zip(sub1[\"error_count\"], sub1['total_count'])],\n line=dict(color=cols[i]))\n t2 = go.Scatter(name=f'{ctx}', x=np.array(sub2[\"read_pos\"]), y=(1_000_000 * np.array(\n sub2['rate'])),\n hovertemplate=\"rate/Mb: %{y:.2g} errors:%{text}\",\n text=[f'{c} of {n:,}' for c,\n n in zip(sub2[\"error_count\"], sub2['total_count'])],\n line=dict(color=cols[i]), showlegend=False)\n fig.add_trace(t1, row=1, col=1)\n fig.add_trace(t2, row=2, col=1)\n\n# fig.update_layout(barmode='stack')\nfig.update_layout(hovermode='x unified')\nfig.update_xaxes(title_text=\"relative read position\")\n# fig.update_layout(legend_traceorder=\"reversed\")\n\n# fig.update_layout(title_text=\"error-rate along a read\")\n\nfig.update_yaxes(title_text=\"errors per million read bases\")\n# fig.update_layout(yaxis_tickformat = 
'%g')\n\n\nfig.write_html(\"read.html\")\n","repo_name":"brentp/fraguracy","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"27"} +{"seq_id":"18802460318","text":"\n \nimport sqlite3 # Imports sqlite3 for additional functionality \n\nconnect = sqlite3.connect('test2.db')\n\nwith connect:\n A = connect.cursor() # creates new DB with 2 fields 1 integer 1 string\n## print(A)\n## print(type(A))\n A.execute(\"CREATE TABLE IF NOT EXISTS tbl_persons( \\\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\\\n File_list TEXT)\") #Creates tbl_persons with 2 columns 1 ID 1 Text \n connect.commit()\nconnect.close() #Disconnect from test2.db \n\nconnect = sqlite3.connect('test2.db') # Connects to test2.db\n\nwith connect:\n A = connect.cursor() #Connecting with DB and adding strings\n\nfileList = ('information.docx', 'Hello.txt', 'myImage.png', \\\n 'myMovie.mpg', 'World.txt', 'data.pdf','myPhoto.jpg') #Tuple of values\n\nprint(fileList)\nprint(type(fileList))\nfor x in fileList:\n## print(x)\n## print(type(x))\n if x.endswith('.txt'): #Condition for file filtering\n with connect: # insert each matching filename\n A = connect.cursor()\n A.execute(\"INSERT INTO tbl_persons (File_list) VALUES (?)\", (x,))\n print(x) # Print results \n\n\nconnect.close() # Disconnect from test2.db\n\n\n\n\n\n","repo_name":"ReeceHarris/Python_Projects_1","sub_path":"Python_SQL_DB1.py","file_name":"Python_SQL_DB1.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16389269385","text":"import prompt\n\n\ndef run_engine(TASK, get_q_and_a):\n print(\"Welcome to the Brain Games!\")\n name = prompt.string('May I have your name? ')\n print(f'Hello, {name}!')\n\n print(TASK)\n\n game_rounds_count = 3\n\n for round_number in range(0, game_rounds_count):\n\n q_and_a = get_q_and_a()\n question = str(q_and_a[0])\n answer = str(q_and_a[1])\n\n print('Question: ' + question)\n player_answer = prompt.string('Your answer: ')\n\n if answer == player_answer:\n print('Correct!')\n else:\n print(\"'{}' is wrong answer ;(. Correct answer was '{}'.\\n\"\n \"Let's try again, {}!\"\n .format(player_answer, answer, str(name)))\n break\n else:\n print(f'Congratulations, {name}!')\n","repo_name":"Unshock/python-project-lvl1","sub_path":"brain_games/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"21426845922","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# bind the port\ns.bind(('127.0.0.1', 5555))\n\n# no need to call listen() here; receive data directly from any client\nprint('Bind UDP on 5555...')\nwhile True:\n data, addr = s.recvfrom(1024) # recvfrom returns the data plus the client's address and port\n print('Received from %s:%s' % addr) # addr is a tuple of (IP, port)\n s.sendto(b'Hello, %s!' % data, addr) # call sendto directly to send the reply back to the client over UDP\n\n# note that this example omits multithreading","repo_name":"Veraph/Python-Note","sub_path":"internet/udp_server.py","file_name":"udp_server.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"30750939984","text":"import heapq\nfrom typing import List\n\n\nclass Solution:\n def minCostConnectPoints(self, points: List[List[int]]) -> int:\n n = len(points)\n adjList = {i:[] for i in range(n)}\n for i in range(n):\n x1 = points[i][0]; y1 = points[i][1]\n for j in range(n):\n x2 = points[j][0]; y2 = points[j][1]\n dist = abs(x2 - x1) + abs(y2 - y1)\n adjList[i].append((dist, j))\n adjList[j].append((dist, i))\n \n res = 0\n visited = set()\n minHeap = [(0, 0)]\n while len(visited) < n:\n dist, point = heapq.heappop(minHeap)\n if point in visited: continue\n visited.add(point)\n res += dist\n for d, p in adjList[point]:\n if p not in visited:\n heapq.heappush(minHeap, (d, p))\n \n return res","repo_name":"SamirPaulb/DSAlgo","sub_path":"01_Problem-Solving-LeetCode/1584-min-cost-to-connect-all-points/1584-min-cost-to-connect-all-points.py","file_name":"1584-min-cost-to-connect-all-points.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":1690,"dataset":"github-code","pt":"28"} +{"seq_id":"32797874130","text":"from django.shortcuts import render\nfrom shop.forms import *\nfrom shop.models import *\n\n\ndef index(request):\n seo_title = 'Магазин профессиональной косметики'\n seo_description = 'Профессиональная косметик от известных мировых брендов'\n seo_keywords = 'шампуни, краски для волос, иснтрументы, макияж'\n sliders = Slider.objects.all()\n banners = Banner.objects.all()\n banner3s = Banner3.objects.all()\n bannerls = BannerL.objects.all()\n brands = Brand.objects.order_by('?')\n\n return render(request, 'index/index.html', {'title': seo_title, 'description': seo_description, 'keywords': seo_keywords,\n 'sliders': sliders, 'banners': banners, 'banner3s': banner3s,\n 'bannerls': bannerls, 'brands': brands})\n\n\ndef contact_us(request):\n seo_title = 'Магазин профессиональной косметики'\n seo_description = 'Профессиональная косметик от известных мировых брендов'\n seo_keywords = 'шампуни, краски для волос, иснтрументы, макияж'\n lable = 'Контакты'\n contacts = Contact.objects.all()\n\n if request.method == 'POST':\n form = ContactFormForm(request.POST)\n\n if form.is_valid():\n form.save()\n\n else:\n form = ContactFormForm()\n\n return render(request, 'contact_us.html', {'title': seo_title, 'description': seo_description, 'keywords': seo_keywords,\n 'lable': lable, 'contacts': contacts, 'form': form})","repo_name":"checkago/anri","sub_path":"django/anri/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"25588480452","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 24 13:07:58 2017\n\n@author: triley\n\"\"\"\n\n# usage: run pytest from top of repo with no parameters eg.\n# ~/src/Learn-Practice$ pytest\n\n\nimport re\n# import os\nimport subprocess\nimport pytest\n# import sys\n# from pprint import pprint\nfrom time import sleep\nfrom typing import Dict\nfrom collections import OrderedDict\n\nfrom partitions import (\n DiskPart,\n PartitionTable)\n\n\n@pytest.fixture(scope='module')\ndef os_partition_table():\n return PartitionTable()\n\n\ndef os_one_liner(cmd):\n \"\"\" one cli line that you already 
know is right \"\"\"\n result = subprocess.run(\n cmd,\n stdout=subprocess.PIPE,\n shell=True)\n result.check_returncode()\n return result.stdout.decode(\"utf-8\").split('\\n')\n\n\ndef test_mounts(os_partition_table):\n mounts = os_partition_table.mounts\n # some sanity checks we can get independently from get_blkids\n assert '/' in mounts.keys()\n assert '/boot/efi' in mounts.keys()\n # tests that need apriori knowledge (not generally useful but fine for me)\n assert mounts['/boot/efi'].dev == '/dev/sda1'\n assert mounts['/'].dev == '/dev/sda2'\n # TODO\n # Check for stale mounts: You can actually get partitions that no longer\n # exist showing up in /proc/mounts after a USB move or coming out of sleep.\n\n\ndef test_partitions(os_partition_table):\n \"\"\" check that partion_table returns same info as /proc/partitions \"\"\"\n\n # # independent tests\n # tests that work as non-root user\n #\n # get independent list of partitions from kernel\n\n re_part = re.compile(' (sd[a-z][1-9])$')\n lines_out = os_one_liner('cat /proc/partitions')\n proc_parts = [] # partitions from /proc/partitions\n for line in lines_out:\n if re_part.search(line):\n proc_parts += [re_part.search(line).group(1)]\n\n # Are partitions from proc_parts in partition_table\n for d_part in proc_parts:\n test = f'/dev/{d_part}'\n # some partitions from /proc/partitions are not block devices\n # assert test in [v.dev for i, v in os_partition_table.partitions.items()]\n for key, value in os_partition_table.partitions.items():\n assert key == value.dev\n assert value.disk in key\n assert value.part_num in key\n # more tests\n\n\ndef mock_pt(descriptions: str) -> PartitionTable:\n \"\"\" make a mock partition table from a a string describing\n partitions and their state.\n \"\"\"\n code_to_ptype = {'v': 'vfat', 'r': 'ext4'}\n code_to_mount_point = {'v': '/boot/efi', 'r': '/'}\n\n pt = PartitionTable() # the real one\n pt.mounts: OrderedDict[str, DiskPart] = {} # Overwrite\n pt.partitions: Dict[str, DiskPart] = {}\n for i, pt_description in enumerate(descriptions.split()):\n p = DiskPart(dev=f'/dev/{pt_description[:4]}')\n code = pt_description[-1]\n p.ptype = code_to_ptype[code]\n if i < 2: # then we magically mount it as the running partitions\n p.mount_point = code_to_mount_point[code]\n pt.mounts[p.mount_point] = p\n # else assume it's unmounted leave as None\n pt.partitions[p.dev] = p\n\n # print('\\nin mock_pt returning:', pt)\n return pt\n\n\n@pytest.fixture(\n # scope='test',\n params=[\n # (dest '/', available disk partitions)\n # first two are source, rest are potential dests\n ('sdb2', 'sda1_v sda2_r sdb1_v sdb2_r'),\n ('sdb1', 'sda1_v sda2_r sdb1_r sdb2_v'),\n ('sdb2', 'sda1_r sda2_v sdb1_v sdb2_r'),\n # from usb back to HDD\n ('sda2', 'sdb1_v sdb2_r sda1_v sda2_r'),\n # # force not found\n # (None, 'sda1_v_b sda2_e_r'),\n # # and more later\n ])\ndef case(request):\n return request.param\n\n\ndef test_find_dest_disk(case):\n (right_answer, descriptions) = case\n mk_partition_table = mock_pt(descriptions)\n print('\\nBeforeFind',)\n print(mk_partition_table)\n mk_partition_table.find_dest_disk()\n # print('\\nAfterFind',)\n # print(mk_partition_table)\n\n root_part = mk_partition_table.dests[f'/mnt/{right_answer}']\n # print('root_part =', root_part)\n assert root_part.dev == f'/dev/{right_answer}'\n\n boot_part = mk_partition_table.dests[f'/mnt/{right_answer}/boot/efi']\n assert boot_part.ptype == 'vfat'\n assert boot_part.disk in root_part.dev # Same disk as root\n\n\ndef 
test_check_mount_point(os_partition_table):\n for m, p in os_partition_table.dests.items():\n os_partition_table.check_mount_point(m)\n # print('cmp: m =', m)\n assert os_partition_table.mounts[m] == p.mount_point\n assert os_partition_table.mounts[m] is p\n\n\ndef test_mount_dest_disk(os_partition_table):\n # they might be mounted or might be unmounted\n os_partition_table.find_dest_disk()\n os_partition_table.mount_dest_disk()\n\n # force it unmounted\n for m in reversed(os_partition_table.dests):\n os_one_liner(f'umount {m}')\n # keep os_partition_table in the know even though we bypassed it\n os_partition_table.update_blkids()\n os_partition_table.update_mounts()\n for m in os_partition_table.dests:\n assert m not in os_partition_table.mounts.keys()\n\n # now they are known to be unmounted\n os_partition_table.mount_dest_disk()\n for m in os_partition_table.dests:\n assert m in os_partition_table.mounts.keys()\n\n # check again when mounted\n os_partition_table.mount_dest_disk()\n for m in os_partition_table.dests:\n assert m in os_partition_table.mounts.keys()\n\n\ndef test_find_source_disk(os_partition_table):\n os_partition_table.find_source_disk()\n assert os_partition_table.sources['/'].dev == '/dev/sda2'\n assert os_partition_table.sources['/boot/efi'].dev == '/dev/sda1'\n\n\nif __name__ == '__main__':\n # test_partitions(PartitionTable())\n # test_find_dest_disk(PartitionTable())\n test_mount_dest_disk(PartitionTable())\n","repo_name":"taducbinh123/HSE","sub_path":"test_partitions.py","file_name":"test_partitions.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"24564720249","text":"import sys\r\n\r\ndef cutting(x, y, middle):\r\n global blue, white\r\n color = square[x][y] # 분리된 사각형의 가장 왼쪽 위에 있는것을 기본 색으로 설정\r\n\r\n for i in range(x, x + middle): # 행\r\n for j in range(y, y + middle): # 열\r\n if color!= square[i][j]: # 해당하는 행과 열이 모두 기본색이랑 같지 않을 경\r\n cutting(x, y, middle // 2)\r\n cutting(x + middle // 2, y, middle // 2)\r\n cutting(x, y + middle // 2, middle // 2)\r\n cutting(x + middle // 2, y + middle // 2, middle // 2)\r\n return\r\n \r\n if color == 1:\r\n blue += 1\r\n else:\r\n white += 1\r\n\r\nif __name__ == \"__main__\":\r\n size = int(input())\r\n square = [list(map(int, sys.stdin.readline().split())) for _ in range(size)] # 사용자가 입력하는 사각형 색\r\n blue, white = 0, 0 # 1이면 blue, 0이면 white\r\n\r\n cutting(0, 0, size)\r\n print(white)\r\n print(blue)","repo_name":"ledu1017/baekjoon","sub_path":"분할 정복/2630.py","file_name":"2630.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"26919476306","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 29 16:27:08 2016.\n\n@author: miles\n\"\"\"\nimport importlib\nimport copy\nimport numpy as np\nimport sklearn.svm\nimport sklearn.tree\nimport sklearn.ensemble\nimport sklearn.naive_bayes\nimport sklearn.pipeline\nimport sklearn.impute\n\n\nclass DataSet():\n \"\"\"\n Object for keeping track of data with labels, information about its\n observations (obs), and information about its features (feats).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # initialize the empty dataSet\n self.data = np.zeros((0, 0))\n self.obs_info = []\n self.feat_info = []\n self.targets = []\n\n if len(args) > 0:\n self.data = args[0]\n if len(args) > 1:\n self.targets = args[1]\n\n for key in kwargs:\n setattr(self, key, kwargs[key])\n\n 
@property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, value):\n assert isinstance(value, np.ndarray), 'Data must be numpy array.'\n assert value.ndim == 2, 'Data must be 2d array.'\n self._data = value\n\n @property\n def targets(self):\n return self._targets\n\n @targets.setter\n def targets(self, value):\n assert isinstance(value, list), 'targets must be a list.'\n assert len(value) == self.n_obs, 'targets must be length nObservations.'\n self._targets = value\n\n @property\n def n_feats(self):\n return self._data.shape[1]\n\n @property\n def n_obs(self):\n return self._data.shape[0]\n\n @property\n def class_names(self):\n c = list(set(self.targets))\n c.sort()\n return c\n\n @property\n def n_classes(self):\n return len(set(self.targets))\n\n @property\n def y(self):\n cn = self.class_names\n return [cn.index(j) for i, j in enumerate(self.targets)]\n\n def isempty(self):\n return self.n_feats == 0 and self.n_obs == 0\n\n def cat_obs(self, ds):\n if self.isempty():\n self = ds\n return self\n if ds.isempty():\n return self\n\n assert isinstance(ds, DataSet), 'Concatenation is only allowed with other dataSets.'\n assert self.n_feats == ds.n_feats, 'Number of features must match.'\n self.data = np.concatenate((self.data, ds.data), 0)\n self.targets += ds.targets\n self.obs_info += ds.obs_info\n return self\n\n def cat_feats(self, ds):\n if self.isempty():\n self = ds\n return self\n if ds.isempty():\n return self\n\n assert isinstance(ds, DataSet), 'Concatenation is only allowed with other dataSets.'\n assert self.n_obs == ds.n_obs, 'Number of observations must match.'\n self.data = np.concatenate((self.data, ds.data), 1)\n self.feat_info += ds.feat_info\n return self\n\n def get_obs(self, keep):\n assert isinstance(keep, list), 'expecting input of type list.'\n if len(self.targets) == 0:\n self.targets = [[]] * self.n_obs\n\n if len(self.obs_info) == 0:\n self.obs_info = [{}] * self.n_obs\n\n kept = [(x, y, z) for x, y, z in\n zip(keep, self.targets, self.obs_info) if x]\n\n if len(kept) == 0:\n ds = DataSet()\n return ds\n\n k, tgs, obs = zip(*kept)\n\n ds = DataSet(self.data[np.array(keep), :].copy(),\n list(tgs).copy(), obs_info=list(obs).copy(),\n feat_info=copy.deepcopy(self.feat_info))\n return ds\n\n\nclass Classifier():\n def __init__(self):\n self.trained = False\n\n def train(self, data_train):\n self._train(data_train)\n self.class_names = data_train.class_names\n self.trained = True\n\n def test(self, data_test):\n assert self.trained, 'Classifier must be trained!'\n return self._test(data_test)\n\n def reset(self):\n self._reset\n self.trained = False\n\n def _train(self, data_train):\n return\n\n def _test(self, data_test):\n return\n\n def _reset(self):\n return\n\n def cross_validate(self, ds, keys):\n uKeys = list(set(keys))\n\n confs = np.zeros((ds.n_obs, ds.n_classes))\n\n for key in uKeys:\n self.reset()\n\n\n test = [k == key for k in keys]\n ds_test = ds.get_obs(test)\n\n train = [not b for b in test]\n ds_train = ds.get_obs(train)\n\n self.train(ds_train)\n\n tested = self.test(ds_test)\n confs[np.array(test), :] = tested\n\n return confs\n\n\nclass DecisionTree(Classifier):\n def __init__(self):\n super().__init__()\n self.parameters = {}\n self.classifier = []\n\n def _train(self, data_train):\n clf = sklearn.pipeline.Pipeline([\n ('imputation', sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='mean')),\n ('decision_tree', sklearn.tree.DecisionTreeClassifier(**self.parameters)),\n ])\n clf.fit(data_train.data, 
data_train.y)\n self.classifier = clf\n return\n\n def _test(self, data_test):\n confs = self.classifier.predict_proba(data_test.data)\n return confs\n\n def _reset(self):\n self.classifier = []\n\n\nclass RandomForest(Classifier):\n def __init__(self):\n super().__init__()\n self.parameters = {}\n self.classifier = []\n\n def _train(self, data_train):\n clf = sklearn.pipeline.Pipeline([\n ('imputation', sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='mean')),\n ('random_forest', sklearn.ensemble.RandomForestClassifier(**self.parameters)),\n ])\n clf.fit(data_train.data, data_train.y)\n self.classifier = clf\n return\n\n def _test(self, data_test):\n confs = self.classifier.predict_proba(data_test.data)\n return confs\n\n def _reset(self):\n self.classifier = []\n\n\nclass GaussianNB(Classifier):\n def __init__(self):\n super().__init__()\n self.parameters = {}\n self.classifier = []\n\n def _train(self, data_train):\n clf = sklearn.pipeline.Pipeline([\n ('imputation', sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='mean')),\n ('naive_bayes', sklearn.naive_bayes.GaussianNB(**self.parameters)),\n ])\n clf.fit(data_train.data, data_train.y)\n self.classifier = clf\n return\n\n def _test(self, data_test):\n confs = self.classifier.predict_proba(data_test.data)\n return confs\n\n def _reset(self):\n self.classifier = []\n\n\nclass LinearSVM(Classifier):\n def __init__(self):\n super().__init__()\n self.parameters = {'probability':True, 'kernel':'linear'}\n self.classifier = []\n\n def _train(self, data_train):\n clf = sklearn.pipeline.Pipeline([\n ('imputation', sklearn.impute.SimpleImputer(missing_values=np.nan, strategy='mean')),\n ('svm', sklearn.svm.SVC(**self.parameters)),\n ])\n clf.fit(data_train.data, data_train.y)\n self.classifier = clf\n return\n\n def _test(self, data_test):\n confs = self.classifier.predict_proba(data_test.data)\n return confs\n\n def _reset(self):\n self.classifier = []\n\n\ndef get_classifiers():\n \"\"\"Convert the name from the front end to what the backend needs.\"\"\"\n classifiers = [{\"name\": \"Linear SVM\", \"id\": \"LinearSVM\"},\n {\"name\": \"Gaussian Naive Bayes\", \"id\": \"GaussianNB\"},\n {\"name\": \"Decision Tree\", \"id\": \"DecisionTree\"},\n {\"name\": \"Random Forest\", \"id\": \"RandomForest\"}]\n return classifiers\n\n\nclassifier_map = {\n \"Linear SVM\": \"LinearSVM\",\n \"Gaussian Naive Bayes\": \"GaussianNB\",\n \"Decision Tree\": \"DecisionTree\",\n \"Random Forest\": \"RandomForest\"\n}\n\n\ndef build_classifier(name):\n module = importlib.import_module(\"clarkproc.engine.classification\")\n clazz = getattr(module, classifier_map[name])\n instance = clazz()\n return instance\n","repo_name":"NCTraCSIDSci/clark","sub_path":"clarkproc/clarkproc/engine/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":8030,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"28"} +{"seq_id":"3747068492","text":"from argparse import ArgumentParser\nimport shelve\nfrom Common.networking import Computer\nfrom UptimeManager.models import WOLCommand\n\n__author__ = 'konsti'\n\ncomputers = dict()\n\n\ndef main():\n parser = ArgumentParser(description='Simple WOL Tool')\n parser.add_argument('command', choices=['wake', 'create', 'list', 'status'])\n parser.add_argument('-n', '--name', help='The Hostname of the machine', nargs='+')\n\n args = parser.parse_args()\n\n load()\n\n if args.command == 'create':\n for name in args.name:\n computers[name] = 
Computer(name)\n elif args.command == 'wake':\n cmd = WOLCommand('wol')\n for name in args.name:\n print(cmd(computers[name], sync=True))\n\n elif args.command == 'list':\n for name in computers:\n print(name)\n\n elif args.command == 'status':\n for name in computers:\n print(name, computers[name].status, sep=': ')\n\n save()\n\n\ndef load():\n cache = shelve.open('cache')\n for key in cache:\n computers[key] = cache[key]\n cache.close()\n\n\ndef save():\n cache = shelve.open('cache')\n for computer in computers.values():\n cache[computer.host] = computer\n cache.close()\n\n\nif __name__ == '__main__':\n main()","repo_name":"konairius/UranosServermanager","sub_path":"woltool.py","file_name":"woltool.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36789777636","text":"# -*- encoding=utf8 -*-\nfrom Base.BaseTestCase import BaseTestCase\nfrom airtest.core.api import *\nfrom Base.PublicFunction import *\nfrom Base.BaseElement import *\n\n\nclass TestWebsite(BaseTestCase):\n\n def setUp(self):\n if self.poco(element('WelcomeActivity', 'Get_Start')).exists(): # Get Start button\n self.poco(element('WelcomeActivity', 'Get_Start')).click()\n self.poco(element('MainActivity', 'Connect_Button')).wait(timeout=120)\n sleep()\n\n @frame(\"Website\", \"Website.html\", \"Test that the official website opens correctly\")\n def test_Website(self):\n # determine the mode, then enter the task\n self.poco(element('MainActivity', 'Menu')).click()\n sleep()\n self.poco(element('MainActivity', 'Help')).click()\n sleep(2)\n self.poco(element('HelpActivity', 'Website')).click()\n sleep(6)\n assert_exists(Template(PictureDir + \"skyvpn_android_web_website.png\"), \"navigation succeeded\")\n","repo_name":"mcdull0221/airtest_uitest","sub_path":"TestCase/skyvpn/android/test_website.py","file_name":"test_website.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"3874894513","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom pkg_resources import parse_version\nfrom warnings import warn\nfrom copy import deepcopy\n\nimport networkx as nx\nfrom networkx.readwrite import json_graph\n\nfrom catpy.applications.base import CatmaidClientApplication\n\n\nNX_VERSION_INFO = parse_version(nx.__version__)._key[1]\n\n\nerr_msg = (\n \"Tried to treat the edge's source/target fields as indices into the list of nodes, but failed. \"\n \"See issue #26 [1]. \"\n \"Has CATMAID upgraded to networkx 2.x? 
[2]\\n\\n\"\n \"[1]: https://github.com/catmaid/catpy/issues/26\\n\"\n \"[2]: https://github.com/catmaid/CATMAID/blob/master/django/requirements.txt\"\n)\n\n\ndef convert_nodelink_data(jso):\n \"\"\"NetworkX serialises graphs differently in v1.x and v2.x.\n\n This converts v1-style data (as emitted by CATMAID) to v2-style data.\n\n See issue #26 https://github.com/catmaid/catpy/issues/26\n\n Parameters\n ----------\n jso : dict\n\n Returns\n -------\n dict\n \"\"\"\n if NX_VERSION_INFO < (2, 0):\n warn(\n \"You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON,\"\n \" but you are using networkx v1\"\n )\n\n out = deepcopy(jso)\n for edge in out[\"links\"]:\n for label in [\"source\", \"target\"]:\n try:\n edge[label] = out[\"nodes\"][edge[label]][\"id\"]\n except (KeyError, IndexError):\n raise RuntimeError(err_msg)\n return out\n\n\nclass ExportWidget(CatmaidClientApplication):\n def get_swc(self, skeleton_id, linearize_ids=False):\n \"\"\"\n Get a single skeleton in SWC format.\n\n Parameters\n ----------\n skeleton_id : int or str\n linearize_ids : bool\n\n Returns\n -------\n str\n \"\"\"\n return self.get(\n (self.project_id, \"skeleton\", skeleton_id, \"swc\"),\n {\"linearize_ids\": \"true\" if linearize_ids else \"false\"},\n )\n\n def get_connector_archive(self, *args, **kwargs):\n \"\"\"Not implemented: requires an async job\"\"\"\n raise NotImplementedError(\"Requires an async job\")\n\n def get_treenode_archive(self, *args, **kwargs):\n \"\"\"Not implemented: requires an async job\"\"\"\n raise NotImplementedError(\"Requires an async job\")\n\n def get_networkx_dict(self, *skeleton_ids):\n \"\"\"\n Get the data for a networkx graph of the given skeletons in node-link format.\n\n In networkx 1.x, as used by CATMAID and therefore returned by this method,\n \"source\" and \"target\" in the dicts in \"links\" refer to nodes by their indices in the \"nodes\" array.\n\n See ``convert_nodelink_data`` function to convert into networkx 2.x-compatible format.\n\n https://networkx.readthedocs.io/en/networkx-1.11/reference/generated/networkx.readwrite.json_graph.node_link_data.html\n\n Parameters\n ----------\n skeleton_ids : array-like of (int or str)\n\n Returns\n -------\n dict\n \"\"\"\n return self.post(\n (self.project_id, \"graphexport\", \"json\"),\n data={\"skeleton_list\": list(skeleton_ids)},\n )\n\n def get_networkx(self, *skeleton_ids):\n \"\"\"\n Get a networkx MultiDiGraph of the given skeletons.\n\n Parameters\n ----------\n skeleton_ids : array-like of (int or str)\n\n Returns\n -------\n networkx.MultiDiGraph\n \"\"\"\n data = self.get_networkx_dict(*skeleton_ids)\n if NX_VERSION_INFO >= (2, 0):\n data = convert_nodelink_data(data)\n return json_graph.node_link_graph(data, directed=True)\n\n def get_neuroml(self, skeleton_ids, skeleton_inputs=tuple()):\n \"\"\"\n Get NeuroML v1.8.1 (level 3, NetworkML) for the given skeletons, possibly with their input synapses\n constrained to another set of skeletons.\n\n N.B. 
If len(skeleton_ids) > 1, skeleton_inputs will be ignored and only synapses within the first skeleton\n set will be used in the model.\n\n Parameters\n ----------\n skeleton_ids : array-like\n Skeletons whose NeuroML to return\n skeleton_inputs : array-like, optional\n If specified, only input synapses from these skeletons will be added to the NeuroML\n\n Returns\n -------\n str\n NeuroML output string\n \"\"\"\n\n data = {\"skids\": list(skeleton_ids)}\n\n if skeleton_inputs:\n if len(skeleton_ids) > 1:\n warn(\n \"More than one skeleton ID was selected: ignoring skeleton input constraints\"\n )\n else:\n data[\"inputs\"] = list(skeleton_inputs)\n\n return self.post((self.project_id, \"neuroml\", \"neuroml_level3_v181\"), data=data)\n\n def get_treenode_and_connector_geometry(self, *skeleton_ids):\n \"\"\"\n Get the treenode and connector information for the given skeletons. The returned dictionary will be of the form\n\n {\n \"skeletons\": {\n skeleton_id1: {\n \"treenodes\": {\n treenode_id1: {\n \"location\": [x, y, z],\n \"parent_id\": id_of_parent_treenode\n },\n treenode_id2: ...\n },\n \"connectors\": {\n connector_id1: {\n \"location\": [x, y, z],\n \"presynaptic_to\": [list, of, treenode, ids],\n \"postsynaptic_to\": [list, of, treenode, ids]\n },\n connector_id2: ...\n }\n },\n skeleton_id2: ...\n }\n }\n\n Parameters\n ----------\n skeleton_ids : array-like of (int or str)\n\n Returns\n -------\n dict\n \"\"\"\n # todo: factor API call into MorphologyFetcher\n skeletons = dict()\n warnings = set()\n\n relation_names = {0: \"presynaptic_to\", 1: \"postsynaptic_to\"}\n\n for skeleton_id in skeleton_ids:\n\n data = self.get(\n \"{}/{}/1/0/compact-skeleton\".format(self.project_id, skeleton_id)\n )\n\n skeleton = {\"treenodes\": dict(), \"connectors\": dict()}\n\n for treenode in data[0]:\n skeleton[\"treenodes\"][int(treenode[0])] = {\n \"location\": treenode[3:6],\n \"parent_id\": None if treenode[1] is None else int(treenode[1]),\n }\n\n for connector in data[1]:\n # NOT the database relation ID\n # {pre: 0, post: 1, gj: 2}\n relation_number = connector[2]\n\n if relation_number not in relation_names:\n warnings.add(str(relation_number))\n continue\n\n conn_id = int(connector[1])\n if conn_id not in skeleton[\"connectors\"]:\n skeleton[\"connectors\"][conn_id] = {\n rn: [] for rn in relation_names.values()\n }\n\n skeleton[\"connectors\"][conn_id][\"location\"] = connector[3:6]\n skeleton[\"connectors\"][conn_id][relation_names[relation_number]].append(\n connector[0]\n )\n\n skeletons[int(skeleton_id)] = skeleton\n\n if warnings:\n warn(\n \"Skeleton representations contained some unknown treenode->connector relation IDs:\\n\\t\"\n + \"\\n\\t\".join(sorted(warnings))\n )\n\n return {\"skeletons\": skeletons}\n","repo_name":"catmaid/catpy","sub_path":"catpy/applications/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":7576,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"70836922955","text":"from mddns.server import runserver\nimport argparse\nfrom configparser import SafeConfigParser\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('config', type=argparse.FileType())\n return parser\n\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n config = SafeConfigParser()\n config.readfp(args.config)\n 
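# start the DNS server loop with the parsed configuration\n 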
runserver(config)\n","repo_name":"GaretJax/mddns","sub_path":"mddns/scripts/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"fi","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"13007896516","text":"import frappe\n\nfrom frappe.contacts.doctype.contact.contact import get_default_contact,get_contact_details\n\n\n@frappe.whitelist()\ndef sales_invoice_payload(customer):\n\n\tdefault_contact = get_default_contact(\"Customer\", customer)\n\n\temail_accounts = [x.get(\"email_id\") for x in frappe.get_all(\"Email Account\",filters=dict(enable_outgoing=1),order_by='default_outgoing DESC', fields=['email_id'])]\n\t\n\treturn dict(email_accounts=email_accounts,contact_details=get_contact_details(default_contact) or {}, print_formats=[x.get(\"name\") for x in frappe.get_all(\"Print Format\", filters=dict(doc_type=[\"IN\",[\"Sales Invoice\",\"POS Invoice\"]],standard='Yes'))] or [])\n@frappe.whitelist()\ndef send_invoice_alert(**args):\n\t# frappe.msgprint(f\"{args}\")\n\t# {'recipients': 'dsmwaura@gmail.com', 'subject': 'Sales Invoice for your Purchase', \n\t# 'doctype': 'Sales Invoice', 'name': 'ACC-SINV-2023-00086', 'send_email': '1', \n\t# 'print_format': 'GST POS Invoice', 'sender': 'replies@example.com', '_lang': 'en-US', \n\t# }\n\trecipient_emails = list(map(lambda x: x.strip(),args.get(\"recipients\").split(\",\")))\n\tfrappe.enqueue(\n\t\tmethod=frappe.sendmail,\n\t\tnow=True,\n\t\t# sender=args.get(\"sender\"),\n\t\treference_doctype=args.get(\"doctype\"),\n\t\treference_name=args.get(\"name\"),\n\t\trecipients=recipient_emails,\n\t\tbcc=\"dsmwaura@gmail.com\",\n\t\tsubject=args.get(\"subject\"),\n\t\tmessage = args.get(\"content\"),\n\t\tattachments = [frappe.attach_print(args.get(\"doctype\"), args.get(\"name\"), print_format=args.get(\"print_format\"))]\n\t)\n","repo_name":"Lonius-Limited/posawesome","sub_path":"posawesome/posawesome/api/contacts.py","file_name":"contacts.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"22791133311","text":"'''1) Analysis\n Write a function that computes the value of H from n, where\n H = 1 + 1/2 + 1/3 + ... + 1/n.\n\n 2) Data type definition\n The function's input is an integer n. The output\n is a real value, called H. The function also uses an integer variable\n for the denominator of the fraction, which is incremented up to the value of n.'''\n\n# 3) Specification\ndef somatorioFracao(n: int) -> float:\n '''Computes the value of H from n, where\n H = 1 + 1/2 + 1/3 + ... 
+ 1/n.\n\n Examples:\n >>> round(somatorioFracao(3), 4)\n 1.8333\n >>> somatorioFracao(0)\n 0.0\n >>> round(somatorioFracao(4), 4)\n 2.0833\n >>> somatorioFracao(-2)\n 0.0\n '''\n denominador: int = 1\n somatorio: float = 0.0\n\n if n > 0:\n while denominador <= n:\n somatorio = somatorio + 1/denominador\n denominador = denominador + 1\n return somatorio\n else:\n return 0.0\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()","repo_name":"Bruno-Ozen/Exercicios-3","sub_path":"Fundamentos_ALG/Exercicios/exRepeticao3/ex004.py","file_name":"ex004.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"3888912106","text":"from numpy import*\nfrom numpy.linalg import*\n\nmatriz=array(eval(input(\"matrix: \")))\nnum=shape(matriz)[0]\nzeros=zeros((num**2-num), dtype=float)\nk=0\nfor i in range(num):\n\tfor j in range(num):\n\t\tif((i+j)!=(num-1)):\n\t\t\tzeros[k]=matriz[i,j]\n\t\t\tk=k+1\nprint(round(min(zeros), 2))","repo_name":"JosephLevinthal/Research-projects","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4060/codes/1861_2181.py","file_name":"1861_2181.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"36016316559","text":"def gcd_classic(a: int, b: int) -> int:\r\n if b > a:\r\n a, b = b, a\r\n while b:\r\n a, b = b, a % b\r\n return a\r\n\r\n\r\ndef bin_gcd_recursion(a: int, b: int) -> int:\r\n if a == b:\r\n return a\r\n if b > a:\r\n a, b = b, a\r\n # since a >= b, just return a, no matter a ?= 0\r\n if b == 0:\r\n return a\r\n if not (a & 1):\r\n if not (b & 1):\r\n return 2 * bin_gcd_recursion(a >> 1, b >> 1) # a even, b even\r\n else:\r\n return bin_gcd_recursion(a >> 1, b) # a even, b odd\r\n else:\r\n if not (b & 1):\r\n return bin_gcd_recursion(a, b >> 1) # a odd, b even\r\n else:\r\n return bin_gcd_recursion(b, (a - b) >> 1) # a odd, b odd\r\n\r\n\r\ndef bin_gcd_loop(a: int, b: int) -> int:\r\n if b > a:\r\n a, b = b, a\r\n if b == 0:\r\n return a\r\n adic = 0\r\n while not ((a | b) & 1):\r\n adic += 1\r\n a >>= 1\r\n b >>= 1\r\n while b:\r\n while not (a & 1): # a even\r\n a >>= 1\r\n while not (b & 1): # b even\r\n b >>= 1\r\n if b > a:\r\n a, b = b, a\r\n a, b = b, (a - b) >> 1\r\n return a * 2 ** adic\r\n\r\n\r\ndef gcd(a: int, b: int) -> int:\r\n return bin_gcd_loop(a, b)\r\n\r\n\r\ndef ext_gcd(a: int, b: int) -> tuple:\r\n if b == 0:\r\n return a, 1, 0\r\n else:\r\n gcd_t, x, y = ext_gcd(b, a % b) # recurse until the remainder equals 0 (one extra level of recursion is needed to detect it)\r\n x, y = y, (x - (a // b) * y) # back-substitute through the Euclidean steps to derive each level's coefficients for a and b so that gcd(a,b) = a*x + b*y holds\r\n return gcd_t, x, y\r\n\r\n\r\ndef gcd_reverse(r, x, y, alpha, beta):\r\n if x & 1 == 0 and y & 1 == 0:\r\n x, y = x >> 1, y >> 1\r\n else:\r\n x, y = (x + beta) >> 1, (y - alpha) >> 1\r\n return r >> 1, x, y\r\n\r\n\r\ndef bin_ext_gcd(a: int, b: int) -> tuple:\r\n ap, bp = a, b\r\n d = 1\r\n x1, y1, x2, y2 = 1, 0, 0, 1\r\n while ap & 1 == 0 and bp & 1 == 0:\r\n ap = ap >> 1\r\n bp = bp >> 1\r\n d = d << 1\r\n alpha = ap\r\n beta = bp\r\n\r\n while not (ap & 1):\r\n ap, x1, y1 = gcd_reverse(ap, x1, y1, alpha, beta)\r\n while not (bp & 1):\r\n bp, x2, y2 = gcd_reverse(bp, x2, y2, alpha, beta)\r\n if ap < bp:\r\n ap, x1, y1, bp, x2, y2 = bp, x2, y2, ap, x1, y1\r\n while bp:\r\n ap = ap - bp\r\n x1, y1 = x1 - x2, y1 - y2\r\n while ap and (not (ap & 
1)):\r\n ap, x1, y1 = gcd_reverse(ap, x1, y1, alpha, beta)\r\n if ap < bp:\r\n ap, x1, y1, bp, x2, y2 = bp, x2, y2, ap, x1, y1\r\n if a < 0:\r\n x1, x2 = -x1, -x2\r\n if b < 0:\r\n y1, y2 = -y1, -y2\r\n return d * ap, x1, y1\r\n\r\n\r\ndef invert(a: int, modulo: int) -> int:\r\n return (ext_gcd(a, modulo)[1]) % modulo\r\n\r\n\r\nif __name__ == '__main__':\r\n from random import randint\r\n from time import time\r\n\r\n r = []\r\n for i in range(200001):\r\n r.append(randint(2 ** 100, 2 ** 101 - 1))\r\n t1 = time()\r\n for i in range(200000):\r\n bin_gcd_loop(r[i], r[i + 1])\r\n print(time() - t1)\r\n t1 = time()\r\n for i in range(200000):\r\n gcd(r[i], r[i + 1])\r\n print(time() - t1)\r\n","repo_name":"feammer/CryptographyFundamentals","sub_path":"RSA_algorithm_implementation_and_security_analysis/custom_gcd.py","file_name":"custom_gcd.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"23964723656","text":"from lxml import etree\ndoc = etree.parse('provinciasypoblaciones.xml')\nraiz=doc.getroot()\n\nlistaProvincias=['02', '04', '07']\nprovincias=doc.findall('provincias')\nfor provincia in provincias:\n if provincia.attrib['id'] in listaProvincias:\n \tid = provincia.find('id')\n \tprint(provincia.text)","repo_name":"joeldm7/LM1718","sub_path":"XML/ejercicio6provincias.py","file_name":"ejercicio6provincias.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"20964265540","text":"def ijones(height, width, plate):\n def _extend_occurrences(extended_dict, extended_letter, new_occurrences):\n if extended_dict.get(extended_letter):\n extended_dict[extended_letter].extend(new_occurrences)\n else:\n extended_dict[extended_letter] = new_occurrences\n\n def _append_occurrences(appended_dict, appended_letter, new_occurrence):\n if appended_dict.get(appended_letter):\n appended_dict[appended_letter].append(new_occurrence)\n else:\n appended_dict[appended_letter] = [new_occurrence]\n\n path_to = [[0 for i in range(width)] for j in range(height)]\n letter_occurrences = {}\n\n for row in range(height):\n current_letter = plate[row][0]\n path_to[row][0] = 1\n _append_occurrences(letter_occurrences, current_letter, (row, 0))\n\n for column in range(1, width):\n new_letter_occurrences = {}\n for row in range(height):\n current_letter = plate[row][column]\n for occurrence in letter_occurrences.get(current_letter, []):\n path_to[row][column] += path_to[occurrence[0]][occurrence[1]]\n if plate[row][column-1] != current_letter:\n path_to[row][column] += path_to[row][column-1]\n _append_occurrences(new_letter_occurrences, current_letter, (row, column))\n\n for letter in new_letter_occurrences:\n _extend_occurrences(letter_occurrences, letter, new_letter_occurrences[letter])\n\n if height == 1:\n return path_to[0][width-1]\n\n return path_to[0][width-1] + path_to[height-1][width-1]\n","repo_name":"MykhailoPolnyi/IJonesLab6","sub_path":"ijones/ijones.py","file_name":"ijones.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"21550905953","text":"from gym import spaces\nimport torch as th\nimport torch.distributions as tdist\nimport numpy as np\n\nfrom .basic_controller import BasicMAC\nfrom utils.rl_utils import project_l1_ball, project_l2_ball\n\n\n# This multi-agent controller shares parameters between 
agents\nclass CQMixAdvNoiseMAC(BasicMAC):\n def __init__(self, scheme, groups, args):\n super(CQMixAdvNoiseMAC, self).__init__(scheme, groups, args)\n\n if hasattr(args,'attack_agent'):\n if args.attack_agent is not None:\n if type(args.attack_agent) != list:\n self.attack_agent = [args.attack_agent]\n else:\n self.attack_agent = args.attack_agent\n else:\n raise ValueError('Need to specify attack_agent')\n else:\n raise ValueError('Need to specify attack_agent')\n\n def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False, epsilon=0.):\n # if self.args.agent in [\"naf\", \"mlp\"]:\n chosen_actions, info, final_noise = self.forward(ep_batch[bs], t_ep, test_mode=test_mode, select_actions=True, epsilon=epsilon)\n chosen_actions = chosen_actions.view(ep_batch[bs].batch_size, self.n_agents, self.args.n_actions).detach()\n # elif self.args.agent in [\"cem\"]:\n # chosen_actions = self.cem_sampling(ep_batch, t_ep, bs)\n # else:\n # raise Exception(\"No known agent type selected! ({})\".format(self.args.agent))\n\n \n # now clamp actions to permissible action range (necessary after exploration)\n if all([isinstance(act_space, spaces.Box) for act_space in self.args.action_spaces]):\n for _aid in range(self.n_agents):\n for _actid in range(self.args.action_spaces[_aid].shape[0]):\n chosen_actions[:, _aid, _actid].clamp_(np.asscalar(self.args.action_spaces[_aid].low[_actid]),\n np.asscalar(self.args.action_spaces[_aid].high[_actid]))\n elif all([isinstance(act_space, spaces.Tuple) for act_space in self.args.action_spaces]):\n for _aid in range(self.n_agents):\n for _actid in range(self.args.action_spaces[_aid].spaces[0].shape[0]):\n chosen_actions[:, _aid, _actid].clamp_(self.args.action_spaces[_aid].spaces[0].low[_actid],\n self.args.action_spaces[_aid].spaces[0].high[_actid])\n for _actid in range(self.args.action_spaces[_aid].spaces[1].shape[0]):\n tmp_idx = _actid + self.args.action_spaces[_aid].spaces[0].shape[0]\n chosen_actions[:, _aid, tmp_idx].clamp_(self.args.action_spaces[_aid].spaces[1].low[_actid],\n self.args.action_spaces[_aid].spaces[1].high[_actid])\n return chosen_actions, info, final_noise\n\n def get_weight_decay_weights(self):\n return self.agent.get_weight_decay_weights()\n\n def forward(self, ep_batch, t, actions=None, select_actions=False, test_mode=False, epsilon=0.):\n agent_inputs = self._build_inputs(ep_batch, t)\n\n final_noise = th.zeros_like(agent_inputs).to(agent_inputs.device)\n\n # print(final_noise.shape)\n\n if epsilon > 0:\n # select attack agent\n if type(self.attack_agent) == list:\n if len(self.attack_agent) <= self.args.num_atk_agent:\n agents = self.attack_agent\n else:\n # random from a set\n agents = np.random.choice(self.attack_agent, self.args.num_atk_agent, replace=False)\n else:\n raise ValueError('Unsupported type of attack_agent',type(self.attack_agent))\n\n # generate noise\n if self.args.adv_noise_type=='uniform':\n noise = epsilon*(2*th.rand(agent_inputs.shape, device=agent_inputs.device)-1)\n else: \n noise = epsilon*th.randn(agent_inputs.shape, device=agent_inputs.device)\n \n if self.args.noise_constraint_type == 'l1_norm':\n for agent in agents:\n noise[agent] = project_l1_ball(noise[agent], rad=epsilon)\n elif self.args.noise_constraint_type == 'l2_norm':\n for agent in agents:\n noise[agent] = project_l2_ball(noise[agent], rad=epsilon)\n else:\n noise = noise.clamp(-epsilon,epsilon)\n\n agent_inputs[agents,:] += noise[agents,:]\n\n final_noise[agents] = noise[agents].detach()\n\n # noise_info = {\n # \"l1_norm\": 
th.linalg.norm(noise[agents].view(self.args.num_atk_agent,-1), ord=1).item(),\n # \"l2_norm\": th.linalg.norm(noise[agents].view(self.args.num_atk_agent,-1), ord=2).item(),\n # \"linf_norm\": th.linalg.norm(noise[agents].view(self.args.num_atk_agent,-1), ord=float('inf')).item()\n # }\n noise_info = {\n \"l1_norm\": th.max(th.sum(th.abs(final_noise.view(self.n_agents,-1)), 1)).item(),\n \"l2_norm\": th.max(th.sqrt(th.sum(th.square(final_noise.view(self.n_agents,-1)), 1))).item(),\n \"linf_norm\": th.max(th.abs(final_noise.view(self.n_agents,-1))).item()\n }\n else:\n noise_info = {\n \"l1_norm\": 0.,\n \"l2_norm\": 0.,\n \"linf_norm\": 0.,\n }\n\n ret = self.agent(agent_inputs, actions=actions)\n\n return ret[\"actions\"], noise_info, final_noise.to('cpu').clone().detach().numpy().tolist()\n \n\n def _build_inputs(self, batch, t):\n # Assumes homogenous agents with flat observations.\n # Other MACs might want to e.g. delegate building inputs to each agent\n bs = batch.batch_size\n inputs = []\n inputs.append(batch[\"obs\"][:, t])\n\n if self.args.obs_last_action:\n if t == 0:\n inputs.append(th.zeros_like(batch[\"actions\"][:, t]))\n else:\n inputs.append(batch[\"actions\"][:, t - 1])\n if self.args.obs_agent_id:\n inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))\n\n inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)\n\n return inputs\n\n def _get_input_shape(self, scheme):\n input_shape = scheme[\"obs\"][\"vshape\"]\n if self.args.obs_last_action:\n input_shape += scheme[\"actions\"][\"vshape\"][0]\n if self.args.obs_agent_id:\n input_shape += self.n_agents\n\n return input_shape\n\n def cem_sampling(self, ep_batch, t, bs):\n # Number of samples from the param distribution\n N = 64\n # Number of best samples we will consider\n Ne = 6\n\n ftype = th.FloatTensor if not next(self.agent.parameters()).is_cuda else th.cuda.FloatTensor\n mu = ftype(ep_batch[bs].batch_size, self.n_agents, self.args.n_actions).zero_()\n std = ftype(ep_batch[bs].batch_size, self.n_agents, self.args.n_actions).zero_() + 1.0\n its = 0\n maxits = 2\n agent_inputs = self._build_inputs(ep_batch[bs], t)\n\n while its < maxits:\n dist = tdist.Normal(mu.view(-1, self.args.n_actions), std.view(-1, self.args.n_actions))\n actions = dist.sample((N,)).detach()\n actions_prime = th.tanh(actions)\n ret = self.agent(agent_inputs.unsqueeze(0).expand(N, *agent_inputs.shape).contiguous().view(-1, agent_inputs.shape[-1]),\n actions=actions_prime.view(-1, actions_prime.shape[-1]))\n out = ret[\"Q\"].view(N, -1, 1)\n topk, topk_idxs = th.topk(out, Ne, dim=0)\n mu = th.mean(actions.gather(0, topk_idxs.repeat(1, 1, self.args.n_actions).long()), dim=0)\n std = th.std(actions.gather(0, topk_idxs.repeat(1, 1, self.args.n_actions).long()), dim=0)\n its += 1\n topk, topk_idxs = th.topk(out, 1, dim=0)\n action_prime = th.mean(actions_prime.gather(0, topk_idxs.repeat(1, 1, self.args.n_actions).long()), dim=0)\n chosen_actions = action_prime.clone().view(ep_batch[bs].batch_size, self.n_agents, self.args.n_actions).detach()\n return chosen_actions","repo_name":"nhanph/c-MBA","sub_path":"src/controllers/cqmix_adv_noise_controller.py","file_name":"cqmix_adv_noise_controller.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"8902652468","text":"import numpy as np\nimport gensim\nimport gensim.corpora as corpora\n\nfrom utils import preprocess_text\nfrom 
text_transformers.base_text_transformer import BaseTextTransformer\n\n\nclass LDA(BaseTextTransformer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n # fake fit to be consistent with count and tfidf vectorizers usage\n def fit_transform(self, texts):\n clean_texts = [preprocess_text(t) for t in texts]\n id2word = corpora.Dictionary(clean_texts)\n id2word.filter_extremes(no_below=3, no_above=0.7)\n corpus = [id2word.doc2bow(text) for text in clean_texts]\n\n lda_model = gensim.models.LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=20,\n alpha=0.1,\n eta=0.1,\n random_state=100,\n chunksize=100,\n passes=10,\n per_word_topics=True,\n minimum_probability=0.0)\n\n sentence_embeddings = lda_model[corpus]\n\n sentence_embeddings = np.array([[t[1] for t in r[0]] for r in sentence_embeddings])\n\n return sentence_embeddings\n","repo_name":"mikhmakarov/graph_text","sub_path":"text_transformers/lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"28677934689","text":"from __future__ import print_function\nfrom setuptools import setup\nfrom distutils.core import Extension\nfrom distutils.command.build_ext import build_ext\nimport sys\nimport os\n\nnxprdlib_include_path = '/usr/local/src/nxprdlib'\nnxprdlib_link_path = '/usr/local/lib/nxprdlib'\n\nnxppy = Extension('nxppy',\n define_macros=[('LINUX', None),\n ('NATIVE_C_CODE', None),\n ('NXPBUILD_CUSTOMER_HEADER_INCLUDED', None),\n ('NXPBUILD__PHHAL_HW_RC523', None)],\n extra_compile_args=['-O0',\n '-std=gnu99',\n '-isystem{}/nxprdlib/NxpRdLib/intfs'.format(nxprdlib_include_path),\n '-isystem{}/nxprdlib/NxpRdLib/types'.format(nxprdlib_include_path),\n '-isystem{}/nxprdlib/NxpRdLib/comps/phbalReg/src/LinuxUserSpi'.format(nxprdlib_include_path),\n '-isystem{}/linux/shared'.format(nxprdlib_include_path),\n '-isystem{}/examples/NfcrdlibEx4_MIFAREClassic/intfs'.format(nxprdlib_include_path),\n '-isystem{}/nxprdlib/NxpRdLib/comps/phbalReg/src/Stub'.format(nxprdlib_include_path),\n '-isystem{}/linux/comps/phPlatform/src/Posix'.format(nxprdlib_include_path),\n '-isystem{}/linux/comps/phOsal/src/Posix'.format(nxprdlib_include_path),\n '-isystem{}/linux/intfs'.format(nxprdlib_include_path)\n ],\n extra_link_args=['{}/libNxpRdLibLinuxPN512.a'.format(nxprdlib_link_path), '-lpthread', '-lrt'],\n sources=['Mifare.c', 'nxppy.c']\n\n )\n\n\n# noinspection PyShadowingBuiltins\nclass BuildNxppy(build_ext):\n def run(self):\n if not os.path.isdir(nxprdlib_include_path) or not os.path.isdir(nxprdlib_link_path):\n print('\\nNXP Reader library not found. Install it using the DEB package from http://bit.ly/nxpreader, and the instructions included in the README for this repo.\\n')\n sys.exit(1)\n\n # Run the rest of the build\n build_ext.run(self)\n\n\nshort_description = 'A python extension for interfacing with the NXP PN512 NFC Reader. 
Targeted specifically for ' \\\n 'Raspberry Pi and the EXPLORE-NFC module'\n\n# noinspection PyBroadException\ntry:\n # noinspection PyPackageRequirements\n import pypandoc\n\n long_description = pypandoc.convert('README.md', 'rst')\nexcept:\n long_description = short_description\n\nsetup(name='nxppy',\n version='1.6.0',\n description=short_description,\n long_description=long_description,\n author='Scott Vitale',\n author_email='svvitale@gmail.com',\n url='http://github.com/svvitale/nxppy',\n ext_modules=[nxppy],\n cmdclass={'build_ext': BuildNxppy})\n","repo_name":"svvitale/nxppy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"28"} +{"seq_id":"24363432766","text":"\"\"\"\nTest file for accounts route\n\"\"\"\n\n# Pytest imports\nimport pytest\n\n# FastAPI imports\nfrom fastapi import FastAPI, Response\nfrom fastapi.testclient import TestClient\n\n# Main app import\nfrom ..main import app\n\n# Model imports\nfrom ..accounts.models import AccountCreate\n\n# Create new client\nclient: TestClient = TestClient(app)\n\n### GLOBAL HELPER FUNCTIONS ###\n\ndef create_account(account: AccountCreate, client_instance: TestClient) -> dict:\n \"\"\"\n Create an Account object on API, validate, \n then return the JSON model of it\n \"\"\"\n\n # Get account as dictionary\n account_dict: dict = account.dict()\n\n # Send request to create account\n response: Response = client_instance.post(\"/api/accounts/\", json=account_dict)\n\n # Test and return\n assert response.status_code == 200\n return response.json()\n\n\nclass TestAccounts:\n\n ### SETUP FUNCTIONS ###\n\n @pytest.fixture(autouse=True)\n def setup_and_teardown(self):\n\n # Delete everything in database\n response: Response = client.delete(\"/api/\")\n assert response.status_code == 200\n assert response.json() == {\"ok\": True}\n\n # Create a new account and save it in the class\n self.account = create_account(\n AccountCreate(\n fname=\"Maheer\", \n lname=\"Aeron\", \n email=\"maa368@cornell.edu\"\n ),\n client_instance=client\n )\n \n # Transfer control to a test\n yield\n\n # Clear everything in database\n response: Response = client.delete(\"/api/\")\n assert response.status_code == 200\n assert response.json() == {\"ok\": True}\n\n\n ### TEST HTTP GET FUNCTIONS ###\n\n def test_get_all_accounts(self):\n\n # Make second account\n account2: dict = create_account(\n AccountCreate(\n fname=\"Mayank\", \n lname=\"Rao\", \n email=\"ms3293@cornell.edu\"\n ),\n client_instance=client\n )\n\n # Make third account\n account3: dict = create_account(\n AccountCreate(\n fname=\"Brett\", \n lname=\"Schelsinger\", \n email=\"bgs59@cornell.edu\"\n ),\n client_instance=client\n )\n\n # Now call get on all accounts\n response = client.get(\"/api/accounts/\")\n\n # Check length of accounts\n accounts: list = response.json()\n assert len(accounts) == 3\n\n def test_get_account(self):\n\n # Call get on the stored account in the class\n response = client.get(f\"/api/accounts/{self.account['id']}\")\n fetched_account: dict = response.json()\n\n # Check account properties itself\n assert fetched_account[\"id\"] == self.account[\"id\"]\n assert fetched_account[\"fname\"] == self.account[\"fname\"]\n assert fetched_account[\"lname\"] == self.account[\"lname\"]\n assert fetched_account[\"email\"] == self.account[\"email\"]\n assert fetched_account[\"created\"] == self.account[\"created\"]\n\n ### TEST HTTP POST FUNCTIONS ###\n\n def 
test_create_account(self):\n\n # Setup already creates an account. Just check it\n assert self.account[\"fname\"] == \"Maheer\"\n assert self.account[\"lname\"] == \"Aeron\"\n assert self.account[\"email\"] == \"maa368@cornell.edu\"\n\n ### TEST HTTP PATCH FUNCTIONS ###\n\n def test_update_account(self):\n\n # Try updating account\n response = client.patch(\n f\"/api/accounts/{self.account['id']}\",\n json={\n \"fname\": \"Mahee\",\n \"lname\": \"Aero\",\n \"email\": \"maa368@cornell.ed\"\n }\n )\n assert response.status_code == 200\n\n # Now issue a get and test the fields\n response = client.get(f\"/api/accounts/{self.account['id']}\")\n fetched_account: dict = response.json()\n\n assert fetched_account[\"fname\"] == \"Mahee\"\n assert fetched_account[\"lname\"] == \"Aero\"\n assert fetched_account[\"email\"] == \"maa368@cornell.ed\"\n\n ### TEST HTTP DELETE FUNCTIONS ###\n\n def test_delete_account(self):\n\n # Delete account\n response = client.delete(f\"/api/accounts/{self.account['id']}\")\n assert response.status_code == 200\n assert response.json() == {\"ok\": True}\n\n # Try get on all accounts and ensure it really deleted\n response = client.get(\"/api/accounts/\")\n assert response.status_code == 200\n assert len(response.json()) == 0","repo_name":"MayoSR/cornell-student-housing-backend","sub_path":"app/accounts/test_acccounts.py","file_name":"test_acccounts.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"12807768588","text":"import numpy as np\nfrom .base import AbstractModel\n\nfrom ml.functions.distance import EuclideanDistance\nfrom ml.algorithms.normalization import IdentityScaler\n\nclass KNN(AbstractModel):\n def __init__(self, num_clusters, distance=EuclideanDistance, data_scaler=IdentityScaler):\n super(KNN, self).__init__(None)\n\n self.num_clusters = num_clusters\n self.distance = distance()\n self.inputs_scaler = data_scaler()\n\n def fit(self, inputs, outputs):\n self.inputs = self.inputs_scaler.fit(inputs).transform(inputs)\n self.outputs = outputs\n\n def predict(self, x):\n predictions = []\n for xi in x:\n distances = self.distance.measure(self.inputs_scaler.transform(xi), self.inputs)\n\n indexes_of_k_nearest = distances.argsort()[:self.num_clusters]\n classes_of_k_nearest = self.outputs[indexes_of_k_nearest]\n predictions.append(np.bincount(classes_of_k_nearest).argmax())\n\n return np.array(predictions)\n\nclass KMeans(AbstractModel):\n def __init__(self, k, max_iter, distance=EuclideanDistance):\n super(KMeans, self).__init__(None)\n\n self.max_iter = max_iter\n self.num_clusters = k\n self.distance = distance()\n\n # finds good-enough centroids which better clusters inputs\n def fit(self, inputs):\n num_samples, num_features = inputs.shape\n \n self.errors = []\n self.output_centroids = []\n\n # choose 'num_clusters' elements from the inputs matrix,\n # then build a 3D matrix with cluster index as the first dimension\n initial_centroids = inputs[np.random.choice(num_samples, self.num_clusters, replace=False)]\n self.centroids = initial_centroids.reshape(self.num_clusters, 1, num_features)\n\n for i in range(self.max_iter):\n self.clusters = [[] for _ in range(self.num_clusters)]\n\n # assign samples to closest centroid\n distances = self.distance.measure(inputs, self.centroids, axis=2)\n closest_centroid_idxs = np.argmin(distances, axis=0, keepdims=True)\n\n # fill clusters with sample indexes\n for sample_idx, closest_centroid in 
enumerate(closest_centroid_idxs.T):\n self.clusters[closest_centroid[0]].append(sample_idx)\n\n # update centroids\n old_centroids = self.centroids.copy()\n for cluster_idx in range(self.num_clusters):\n self.centroids[cluster_idx] = np.mean(inputs[self.clusters[cluster_idx]], axis=0)\n\n # record error and centroids in current iteration (copy, since centroids are mutated in place)\n current_err = self.quantization_error(inputs)\n self.errors.append(current_err)\n self.output_centroids.append(self.centroids.copy())\n\n if self.is_converged(old_centroids, self.centroids):\n break\n\n self.trained = True\n return self.errors, self.output_centroids\n\n def is_converged(self, old_centroids, curr_centroids):\n distances = [self.distance.measure(old_centroids[i, 0], curr_centroids[i, 0], axis=0) for i in range(self.num_clusters)]\n return (np.sum(distances) == 0)\n\n def quantization_error(self, x):\n norm = np.linalg.norm(x - self.centroids, axis=1)\n return np.sum(norm**2)\n\n def db_index(self, inputs):\n mini_delta = np.zeros(self.num_clusters)\n big_delta = np.zeros((self.num_clusters, self.num_clusters))\n\n for ki in range(self.num_clusters):\n dif = inputs[self.clusters[ki]] - self.centroids[ki][0]\n norm = np.linalg.norm(dif, axis=1, keepdims=True)\n mini_delta[ki] = np.mean(norm)\n\n for kj in range(self.num_clusters):\n if ki != kj:\n big_delta[ki, kj] = EuclideanDistance().measure(self.centroids[ki][0], self.centroids[kj][0], axis=0)\n\n db = 0\n for ki in range(self.num_clusters):\n max = 0\n\n for kj in range(self.num_clusters):\n if ki != kj:\n current = (mini_delta[ki] + mini_delta[kj])/big_delta[ki, kj]\n if current > max:\n max = current\n db += max\n return db/self.num_clusters\n","repo_name":"jeffcav/ml-from-scratch","sub_path":"ml/models/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"26127776943","text":"import io\nimport logging\nfrom pathlib import Path\nfrom typing import Any\n\nfrom pydantic.color import ColorTuple\nfrom pypdf import PdfReader, PdfWriter\nfrom pypdf.generic import IndirectObject, Fit, PAGE_FIT\nfrom pypdf.types import PagemodeType\n\nfrom app import utils\nfrom app.core.config import settings\nfrom app.schemas import PdfBookmark, FitSchema\nfrom .__get_bookmarks import get_bookmarks\n\n\ndef _print_bookmarks(bookmark: PdfBookmark, *, num_space: int = 1) -> None:\n logging.info(' ' * num_space + f'{bookmark.title}, page_num={bookmark.page_num}')\n for sub_bookmark in bookmark.children:\n _print_bookmarks(sub_bookmark, num_space=num_space * 2)\n\n\ndef print_bookmarks(bookmarks: list[PdfBookmark]) -> None:\n for bookmark in bookmarks:\n _print_bookmarks(bookmark)\n\n\ndef reader2writer(reader: PdfReader) -> PdfWriter:\n writer = PdfWriter()\n for page in reader.pages:\n writer.add_page(page)\n return writer\n\n\ndef reader2writer_with_copy_bookmarks(reader: PdfReader) -> PdfWriter:\n writer = PdfWriter()\n writer.clone_reader_document_root(reader)\n return writer\n\n\ndef set_show_bookmarks_panel(writer: PdfWriter) -> None:\n page_mode: PagemodeType = '/UseOutlines'\n writer.page_mode = page_mode\n\n\ndef upload_pdf(\n writer: PdfWriter,\n *,\n path: Path,\n object_storage: utils.ObjectStorage\n) -> None:\n path: Path = Path(settings.DATA_DIR) / path\n with path.open(mode='wb') as file_:\n writer.write(file_)\n\n # __temp_file_path = Path('some_pdf.pdf')\n # with __temp_file_path.open(mode='wb') as file_:\n # writer.write(file_)\n # 
object_storage.upload(\n # file_path=__temp_file_path,\n # object_path=path,\n # object_storage_class=enums.ObjectStorageClass.STANDARD_IA\n # )\n # __temp_file_path.unlink()\n\n\ndef get_fit(fit_schema: FitSchema | None = None) -> Fit:\n fit = Fit.xyz(fit_schema.left, top=fit_schema.top, zoom=fit_schema.zoom) if fit_schema else PAGE_FIT\n return fit\n\n\ndef add_outline_item(\n writer: PdfWriter,\n *,\n bookmark: PdfBookmark,\n parent: IndirectObject | None = None\n) -> IndirectObject:\n fit: Fit = get_fit(bookmark.fit)\n color: ColorTuple | None = tuple((i / 255. for i in bookmark.color.as_rgb_tuple())) if bookmark.color else None\n indirect: IndirectObject = writer.add_outline_item(\n title=bookmark.title,\n page_number=bookmark.page_num - 1,\n parent=parent,\n color=color,\n fit=fit\n )\n return indirect\n\n\ndef _add_bookmarks(\n writer: PdfWriter,\n *,\n bookmark: PdfBookmark,\n parent_indirect: IndirectObject | None = None\n) -> None:\n indirect: IndirectObject = add_outline_item(writer, bookmark=bookmark, parent=parent_indirect)\n for sub_bookmark in bookmark.children:\n _add_bookmarks(\n writer,\n bookmark=sub_bookmark,\n parent_indirect=indirect\n )\n\n\ndef add_bookmarks(writer: PdfWriter, *, bookmarks: list[PdfBookmark]) -> None:\n for bookmark in bookmarks:\n _add_bookmarks(writer, bookmark=bookmark)\n\n\ndef _offset_pages_bookmarks(bookmark: PdfBookmark, *, num_offset_pages: int) -> None:\n bookmark.page_num = bookmark.page_num + num_offset_pages\n for sub_bookmark in bookmark.children:\n _offset_pages_bookmarks(sub_bookmark, num_offset_pages=num_offset_pages)\n\n\ndef offset_pages_bookmarks(bookmarks: list[PdfBookmark], *, num_offset_pages: int) -> None:\n for bookmark in bookmarks:\n _offset_pages_bookmarks(bookmark, num_offset_pages=num_offset_pages)\n\n\ndef get_pdf_bookmarks(\n path: Path,\n *,\n object_storage: utils.ObjectStorage\n) -> list[PdfBookmark]:\n reader: PdfReader = __get_pdf_reader(path, object_storage=object_storage)\n bookmarks: list[PdfBookmark] = get_bookmarks(reader)\n return bookmarks\n\n\ndef get_pdf_writer(\n path: Path,\n *,\n object_storage: utils.ObjectStorage\n) -> PdfWriter:\n reader: PdfReader = __get_pdf_reader(path, object_storage=object_storage)\n writer: PdfWriter = reader2writer(reader)\n return writer\n\n\ndef __get_pdf_reader(\n path: Path,\n *,\n object_storage: utils.ObjectStorage\n) -> PdfReader:\n reader = PdfReader(Path(settings.DATA_DIR) / path)\n return reader\n\n object_: dict[str, Any] = object_storage.get(path)\n with io.BytesIO(object_['Body'].read()) as open_pdf_file:\n reader = PdfReader(open_pdf_file)\n return reader\n","repo_name":"maxim1770/app","sub_path":"backend/app/app/create/prepare/manuscript/bookmark/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"44088570612","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\n \"name\": \"WallBuilder\",\n \"description\": \"Utilities to support wall and room building.\",\n \"author\": \"JackTheFoxOtter\",\n \"version\": (1, 2),\n \"blender\": (2, 90, 1),\n \"location\": \"View3D > Object > WallBuilder\",\n \"warning\": \"\",\n \"wiki_url\": \"https://github.com/JackTheFoxOtter/blender-wallbuilder\",\n \"tracker_url\": \"https://github.com/JackTheFoxOtter/blender-wallbuilder/issues\",\n \"support\": \"COMMUNITY\",\n \"category\": \"Object\",\n}\n\n\"\"\"\nUtilities to support wall and room building.\n\"\"\"\n\nfrom bpy.utils import register_class, unregister_class\nfrom bpy.props import FloatProperty, BoolProperty\nfrom functools import cmp_to_key\nfrom mathutils import Vector\nimport colorsys\nimport bmesh\nimport math\nimport bpy\n\ndef vec_to_str(vec):\n \"\"\"\n Returns a formatted string for the given vector.\n \"\"\"\n return f\"<{' '.join([str(x) for x in vec])}>\"\n\n\ndef get_signed_angle(v, vBase=Vector([0.0, 1.0])):\n \"\"\"\n Returns angle of 2D-Vector v relative to 2D-Vector vBase.\n Defaults to base X=0.0, Y=1.0 if vBase isn't specified.\n Raises an exception if the angle couldn't be determined.\n \"\"\"\n v = v.to_2d()\n vBase = vBase.to_2d()\n \n angle = vBase.angle_signed(v, None)\n \n if angle is None:\n raise Exception(f\"Couldn't determine angle for vector {vec_to_str(v)} and base {vec_to_str(vBase)}!\")\n \n return angle\n\n\ndef get_adjacent_edge(origin_edge, origin_vert, inverted=False):\n \"\"\"\n Returns the edge connected to origin_vert whose angle is closest to origin_edge. (Except origin_edge)\n Returns None if no further edges are connected to origin_vert.\n The parameter inverted specifies the direction of origin_edge, and therefore the role of origin_vert:\n False - origin_vert is the start vertex of origin_edge\n True - origin_vert is the end vertex of origin_edge\n The direction of the returned adjacent edge follows the direction of origin_edge.\n \"\"\"\n adjacent_edges = [edge for edge in origin_vert.link_edges if edge != origin_edge]\n if len(adjacent_edges) == 0: return None # No adjacent edges\n if len(adjacent_edges) == 1: return adjacent_edges[0] # Only one edge, return that\n\n if inverted:\n origin_edge_direction = (origin_vert.co - origin_edge.other_vert(origin_vert).co).to_2d().normalized()\n else:\n origin_edge_direction = (origin_edge.other_vert(origin_vert).co - origin_vert.co).to_2d().normalized()\n\n # Determine directions of all connected edges\n edge_directions = []\n for edge in adjacent_edges:\n if inverted:\n direction = (edge.other_vert(origin_vert).co - origin_vert.co).to_2d().normalized()\n else:\n direction = (origin_vert.co - edge.other_vert(origin_vert).co).to_2d().normalized()\n \n edge_directions.append((edge, direction))\n\n # Sort connected edges by angle\n edge_directions.sort(key=lambda x: get_signed_angle(x[1], origin_edge_direction), reverse=True)\n \n return edge_directions[0][0] if inverted else edge_directions[len(edge_directions)-1][0]\n\n\ndef get_corner_position(origin, edge1, edge2, wall_thickness):\n \"\"\"\n Returns the position of the corner of edge1 and edge2, connected by origin.\n Corner is offset of each edge as defined by wall_thickness.\n P = O ± v; v = ||a||*l - ||b||*l; l = 1/sin(α)\n \"\"\"\n # Determine direction vectors of edges\n dir1 = (origin.co - edge1.other_vert(origin).co).normalized()\n dir2 = (edge2.other_vert(origin).co - origin.co).normalized()\n \n # Determine angle in between edges\n angle = get_signed_angle(dir1, dir2)\n if abs(angle) < 0.0001:\n # Edges are parallel\n orthogonal = Vector([dir1.y, -dir1.x, 0.0])\n return origin.co - orthogonal * wall_thickness\n \n # Calculate length of edge vector\n length = 1 / math.sin(angle)\n offset = (dir2 - dir1) * length * wall_thickness\n return origin.co + offset\n\n\n
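# Walks every blueprint edge in both directions and emits the offset corner vertices; dangling ends get straight end caps.\n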
def generate_wall_mesh_data(reference_obj, mesh, wall_thickness=0.125, fill_rims=True):\n \"\"\"\n Generates a wall geometry based on the reference object and applies it to the specified target mesh.\n The reference object should contain a mesh with a 2D wireframe representation of the wall layout, without faces.\n \"\"\"\n data = reference_obj.data\n bm = bmesh.new()\n bm.from_mesh(data)\n\n # Dynamic variables\n new_verts = []\n new_edges = []\n new_faces = []\n corner_vertices = {}\n\n # Generate mesh data for walls\n for edge in bm.edges:\n # Iterate through each edge in the mesh\n end_vertex_indices = []\n \n for start, end in [(edge.verts[0], edge.verts[1]), (edge.verts[1], edge.verts[0])]:\n # Runs twice per edge, for both possible directions\n start_edge = get_adjacent_edge(edge, start, False)\n end_edge = get_adjacent_edge(edge, end, True)\n \n if start_edge:\n # Edge exists\n corner_position = get_corner_position(start, edge, start_edge, wall_thickness).to_3d()\n corner_position.z = start.co.z\n new_verts.append(corner_position.to_tuple())\n # Add corner to corner_vertices list to create corner caps later\n corner_vertices.setdefault(start, []).append(corner_position)\n else:\n # Edge doesn't exist -> end segment\n direction = (edge.other_vert(start).co - start.co).normalized()\n orthogonal = Vector([direction.y, -direction.x, 0.0])\n corner_position = (start.co + orthogonal * wall_thickness).to_3d()\n corner_position.z = start.co.z\n new_verts.append(corner_position.to_tuple())\n # Store index of added end vertex so we can close it later\n end_vertex_indices.append(len(new_verts)-1)\n \n if end_edge:\n # Edge exists\n corner_position = get_corner_position(end, end_edge, edge, wall_thickness).to_3d()\n corner_position.z = end.co.z\n new_verts.append(corner_position.to_tuple())\n else:\n # Edge doesn't exist -> end segment\n direction = (end.co - edge.other_vert(end).co).normalized()\n orthogonal = Vector([direction.y, -direction.x, 0.0])\n corner_position = (end.co + orthogonal * wall_thickness).to_3d()\n corner_position.z = end.co.z\n new_verts.append(corner_position.to_tuple())\n # Store index of added end vertex so we can close it later\n end_vertex_indices.append(len(new_verts)-1)\n \n if fill_rims:\n # Fill the face between the created vertices\n new_faces.append(list(range(len(new_verts)-4, len(new_verts))))\n else:\n # Connect the created vertices through edges\n new_edges.append([len(new_verts)-4, len(new_verts)-3])\n new_edges.append([len(new_verts)-2, len(new_verts)-1])\n # Connect vertices of end segments\n if len(end_vertex_indices) == 2:\n # One end segment\n new_edges.append([end_vertex_indices[0], end_vertex_indices[1]])\n elif len(end_vertex_indices) == 4:\n # Two end segments\n new_edges.append([end_vertex_indices[0], end_vertex_indices[3]])\n new_edges.append([end_vertex_indices[1], end_vertex_indices[2]])\n\n if fill_rims:\n # Fill corner caps\n for corner, verts in corner_vertices.items():\n if len(verts) > 2:\n # Corner needs cap (face between vertices)\n # Sort vertices around 
corner_position\n # We do this to ensure the faces aren't messed up later\n vert_directions = []\n for vert in verts:\n direction = (corner.co - vert).to_2d().normalized()\n vert_directions.append((vert, direction))\n vert_directions.sort(key=lambda x: get_signed_angle(x[1]), reverse=True)\n \n # Append sorted vertices to new_verts for new_mesh\n for vert, direction in vert_directions:\n new_verts.append(vert.to_3d().to_tuple())\n \n # Create corner cap faces\n new_faces.append(list(range(len(new_verts)-len(verts), len(new_verts))))\n \n # Fill new mesh with generated mesh data\n mesh.from_pydata(new_verts, new_edges, new_faces)\n # Free BMesh of reference object\n bm.free()\n\n\ndef select_loop_faces(loop):\n \"\"\"\n Select all faces on the specified loop.\n Returns a list of the selected faces in order.\n Only works for quads!\n \"\"\"\n faces = []\n \n while not loop.face.select:\n # If radial loop links back here, we're boundary, thus done\n if loop.link_loop_radial_next == loop:\n break\n \n # Remember and mark current face\n loop.face.select = True\n faces.append(loop.face)\n \n # Jump to adjacent face and walk two edges forward\n loop = loop.link_loop_radial_next.link_loop_next.link_loop_next\n\n return faces\n\n\ndef get_loop_direction(loop):\n \"\"\"\n Returns the direction of a given edge loop relative to the face it's connected to.\n The direction is defined through the 2D angle from v1 to v2, where v1 is the direction from the\n loop's current edge to the opposite one on the same face, and v2 is the normal direction of the loop's face.\n Positive Angle -> 1.0, Negative Angle -> -1.0.\n Only works for quads!\n \"\"\"\n face = loop.face\n edge1 = loop.edge\n edge2 = loop.link_loop_next.link_loop_next.edge\n \n direction = edge2.verts[0].co - edge1.verts[0].co\n angle = get_signed_angle(direction.to_2d(), face.normal.to_2d())\n \n return 1.0 if angle > 0 else -1.0\n\n\ndef get_horizontal_face_rings(bm):\n \"\"\"\n Returns all face rings along walls (horizontal faces) for a given mesh object.\n (Requires edit mode, probably)\n \"\"\"\n face_rings = []\n \n for edge in [e for e in bm.edges if abs((e.verts[0].co - e.verts[1].co).z) > 0.0001]:\n # Loop through all vertical edges\n if not edge.select:\n edge.select = True\n # Append all faces of edge's forward loop to list. 
Already processed edges / faces are selected.\n forward_loop = edge.link_loops[0] if get_loop_direction(edge.link_loops[0]) > 0 else edge.link_loops[1]\n loop_faces = select_loop_faces(forward_loop)\n face_rings.append(loop_faces)\n \n return face_rings\n\n\ndef uv_unwrap_walls(mesh_object, material_per_face_ring=False):\n \"\"\"\n Adds UV information to all walls (horizontal faces) of the specified mesh object.\n Will group UVs together by continuous face loops (inner \"rooms\" or outer perimeter in case of walls).\n This way, the amount of UV seams is minimized (one per continuous edge loop).\n 1.0 in UV space is mapped to 1.0 in 3D-Space.\n \"\"\"\n me = mesh_object.data\n bm = bmesh.from_edit_mesh(me)\n \n uv_layer = bm.loops.layers.uv.active\n if not uv_layer: uv_layer = bm.loops.layers.uv.new()\n \n face_rings = get_horizontal_face_rings(bm)\n for i in range(len(face_rings)):\n face_ring = face_rings[i]\n \n # Determine which material to use for the faces of this face ring\n material_index = 0\n if material_per_face_ring:\n material_index = add_wall_material(i+1, me)\n \n width_offset = 0.0\n for face in face_ring:\n # Set material of face\n face.material_index = material_index\n # Determine length and height of face\n face_width = face.edges[0].calc_length()\n face_height = face.edges[1].calc_length()\n for j in range(0, len(face.loops)):\n # Calculate UV position for loop of each vertex on face\n if j == 0:\n # Loop for bottom-left vertex\n uv = (width_offset, 0)\n elif j == 1:\n # Loop for bottom-right vertex\n uv = (width_offset+face_width, 0)\n elif j == 2:\n # Loop for top-right vertex\n uv = (width_offset+face_width, face_height)\n elif j == 3:\n # Loop for top-left vertex\n uv = (width_offset, face_height)\n \n # Assign UV position to UV-map\n face.loops[j][uv_layer].uv = uv\n width_offset += face_width\n\n\ndef create_default_material(material_name=\"Default Material\", hue=0.0, saturation=0.0):\n \"\"\"\n Returns the default checkerboard material for walls.\n Hue and Saturation parameters can be used to specify a color tint.\n \"\"\"\n # Create the new material\n material = bpy.data.materials.new(name=material_name)\n material.use_nodes = True\n \n # Create nodes\n nodes = material.node_tree.nodes\n node_texture_coordinates = nodes.new(\"ShaderNodeTexCoord\")\n node_texture_coordinates.location = (-540, 0)\n node_checker_texture_1 = nodes.new(\"ShaderNodeTexChecker\")\n node_checker_texture_1.location = (-360, 100)\n node_checker_texture_1.inputs[1].default_value = colorsys.hsv_to_rgb(hue, saturation, 0.75) + (1.0,)\n node_checker_texture_1.inputs[2].default_value = colorsys.hsv_to_rgb(hue, saturation, 0.5) + (1.0,)\n node_checker_texture_1.inputs[3].default_value = 6.0\n node_checker_texture_2 = nodes.new(\"ShaderNodeTexChecker\")\n node_checker_texture_2.location = (-360, -100)\n node_checker_texture_2.inputs[1].default_value = (1, 1, 1, 1)\n node_checker_texture_2.inputs[2].default_value = (0, 0, 0, 1)\n node_checker_texture_2.inputs[3].default_value = 6.0\n node_bump = nodes.new(\"ShaderNodeBump\")\n node_bump.location = (-180, -100)\n node_bump.inputs[0].default_value = 0.5\n node_principled = nodes.get(\"Principled BSDF\")\n \n # Link nodes\n links = material.node_tree.links\n links.new(node_texture_coordinates.outputs[2], node_checker_texture_1.inputs[0])\n links.new(node_texture_coordinates.outputs[2], node_checker_texture_2.inputs[0])\n links.new(node_checker_texture_2.outputs[1], node_bump.inputs[2])\n 
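# the tinted checker drives Base Color; the black-and-white checker feeds the shader's Normal input via the bump node\n 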
links.new(node_checker_texture_1.outputs[0], node_principled.inputs[0])\n links.new(node_bump.outputs[0], node_principled.inputs[19])\n \n return material\n\n\ndef add_wall_material(index, mesh):\n \"\"\"\n Adds the wall material with the specified index to the mesh and returns its index.\n 0 adds the base material with no tint, >= 1 adds indexed materials with hue tint.\n \"\"\"\n if index == 0:\n material_name = \"Wall Material\"\n material_hue = 0.0\n material_saturation = 0.0\n else:\n material_name = f\"Wall Material {index}\"\n material_hue = index * (0.07) % 1\n material_saturation = 0.5\n \n wall_material = bpy.data.materials.get(material_name)\n if wall_material is None:\n wall_material = create_default_material(material_name, material_hue, material_saturation)\n \n # Add material to mesh if not already existing\n if (not mesh.materials) or (not material_name in mesh.materials):\n mesh.materials.append(wall_material)\n \n return mesh.materials.find(material_name)\n\n\ndef main(wall_thickness=0.125, wall_height=2.5, fill_rims=True, material_per_face_ring=False, context=bpy.context):\n \"\"\"\n Creates a new object containing a mesh with wall geometry based on the currently selected object.\n The reference object should contain a mesh with a 2D wireframe representation of the wall layout, without faces.\n Adds the new wall object to the same collections the reference object is linked to.\n Hides the reference object after execution and selects the newly created wall object.\n \"\"\"\n scene = context.scene\n reference_object = context.active_object\n \n # Create new mesh linked to new object in scene\n new_mesh = bpy.data.meshes.new(reference_object.name + \" Walls\") # add the new mesh\n generate_wall_mesh_data(reference_object, new_mesh, wall_thickness, fill_rims) # add the mesh data\n new_obj = bpy.data.objects.new(new_mesh.name, new_mesh) # add a new object containing the mesh\n\n # Link new object to all collections the old object is a part of\n for collection in reference_object.users_collection:\n collection.objects.link(new_obj)\n\n # Hide and de-select reference object\n reference_object.select_set(False)\n reference_object.hide_set(True)\n\n # Select new object\n context.view_layer.objects.active = new_obj\n new_obj.select_set(True)\n \n # Add default material to new object's mesh\n add_wall_material(0, new_obj.data)\n\n # Edit new object\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n # Remove doubles\n bpy.ops.mesh.remove_doubles(threshold=0.0001)\n # Extrude upwards\n bpy.ops.mesh.extrude_region_move(TRANSFORM_OT_translate={\"value\":(0, 0, wall_height)})\n # Unwrap UV-Information for horizontal face loops (walls)\n bpy.ops.mesh.select_all(action='DESELECT')\n uv_unwrap_walls(bpy.context.edit_object, material_per_face_ring)\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.object.mode_set(mode='OBJECT')\n\n\n
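# Operator wrapping main(); the wall_thickness property is halved on execution because get_corner_position offsets each side of the wall from the blueprint centerline.\n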
poll(cls, context):\n return (context.mode == 'OBJECT' and context.active_object is not None)\n \n def execute(self, context):\n # Execute main function\n main(\n self.wall_thickness/2, \n self.wall_height, \n self.fill_rims, \n self.material_per_face_ring, \n context=context\n )\n return {'FINISHED'}\n \n def invoke(self, context, event):\n try:\n # Execute main function\n main(\n self.wall_thickness/2, \n self.wall_height, \n self.fill_rims, \n self.material_per_face_ring, \n context=context\n )\n return {'FINISHED'}\n except Exception:\n self.report({'WARNING'}, \"Failed to construct walls for reference object. Make sure the reference object contains a mesh with a 2D wireframe outline of the wall layout.\")\n return {'CANCELLED'}\n\n\nclass OBJECT_MT_wallbuilder(bpy.types.Menu):\n \"\"\"\n WallBuilder Menu\n \"\"\"\n bl_idname = \"OBJECT_MT_wallbuilder_menu\"\n bl_label = \"WallBuilder\"\n \n def draw(self, context):\n layout = self.layout\n layout.operator(OBJECT_OT_create_walls.bl_idname)\n\n\ndef menu_draw(self, context):\n self.layout.operator_context = 'INVOKE_REGION_WIN'\n self.layout.menu(OBJECT_MT_wallbuilder.bl_idname)\n\n\ndef register():\n register_class(OBJECT_MT_wallbuilder)\n register_class(OBJECT_OT_create_walls)\n bpy.types.VIEW3D_MT_object.append(menu_draw)\n \n\ndef unregister():\n bpy.types.VIEW3D_MT_object.remove(menu_draw)\n unregister_class(OBJECT_MT_wallbuilder)\n unregister_class(OBJECT_OT_create_walls)\n\n\nif __name__ == \"__main__\":\n # Unregister the operator class if it is already registered\n try:\n unregister()\n except RuntimeError:\n pass\n \n # (Re-)register the operator class\n register()\n","repo_name":"JackTheFoxOtter/blender-wallbuilder","sub_path":"wallbuilder.py","file_name":"wallbuilder.py","file_ext":"py","file_size_in_byte":20715,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"28"} +{"seq_id":"26209184415","text":"# simple method of printing the square root or with the help of generators also we can do\r\ndef TopTen():\r\n n = 1\r\n while n <= 100:\r\n sq = n * n\r\n yield sq # Yield expressions and statements are only\r\n # used when defining a generator function, and are only used in the body of the generator function.\r\n # Using yield in a function definition is sufficient to cause that definition to create a generator function instead of a normal function.\r\n n += 1\r\n\r\n\r\nvalues = TopTen()\r\nfor i in values:\r\n print(i)\r\n\r\n\r\n# simple methodf to print th esquare root of ten numbers in python using function\r\n\r\ndef square():\r\n n = 1\r\n\r\n while n <= 10:\r\n sqrt = n * n\r\n n += 1\r\n print(sqrt)\r\n\r\n\r\nsquare()\r\n\r\n# square of ten numbers using for loop\r\ndef aa():\r\n for i in range(1, 11):\r\n sqrts = i * i\r\n i += 1\r\n print(sqrts)\r\n\r\n\r\naa()\r\n","repo_name":"suyash444/Python","sub_path":"Python_Codes/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"17430458273","text":"from flask_restful import Resource\nfrom flask import jsonify, request\nimport logging as logger\nimport logging\nfrom bson import json_util, ObjectId\nimport json\nfrom apiclient.discovery import build\n\n\nclass CommentsController(Resource):\n\n def post(self):\n logger.info(\"Starting Scrape Comments....\")\n\n requestData = request.get_json()\n stop_words = {'good video', 'good description', 'nice video', 'thank you for this video'}\n results 
= []\n\n        def extract_youtube():\n            api_key = 'AIzaSyB4klg5itSHIb8Q7JL3CXGJ665b-aTh1o4'\n            youtube = build('youtube', 'v3', developerKey=api_key)\n            q = requestData[\"name\"]\n            q_split = q.split()\n            res = True\n            rec = 0\n            perc_ = 0\n            try:\n                req = youtube.search().list(q=q_split[0], part='snippet', type='video', maxResults=10)\n                response = req.execute()\n\n                for item in response['items']:\n                    for c in q_split:\n                        if c in item['snippet']['title']:\n                            perc_ = perc_ + 1\n\n                    if (perc_ / len(q_split)) >= 0.5:\n                        req1 = youtube.commentThreads().list(part='snippet', videoId=item['id']['videoId'],\n                                                             textFormat=\"plainText\").execute()\n                        for a in req1['items']:\n                            c = {'comment_name': a[\"snippet\"][\"topLevelComment\"][\"snippet\"][\"authorDisplayName\"],\n                                 'comment': a[\"snippet\"][\"topLevelComment\"][\"snippet\"][\"textDisplay\"],\n                                 'pro_pic': a[\"snippet\"][\"topLevelComment\"][\"snippet\"][\"authorProfileImageUrl\"]}\n                            sentence = a[\"snippet\"][\"topLevelComment\"][\"snippet\"][\"textDisplay\"]\n                            for w in stop_words:\n                                if w in sentence:\n                                    res = False\n                                    break\n                            if res:\n                                if rec < 10:\n                                    print(sentence)\n                                    results.append({\"comment_name\": a[\"snippet\"][\"topLevelComment\"][\"snippet\"][\"authorDisplayName\"], \"comment\": sentence})\n                                    rec = rec + 1\n                                else:\n                                    return\n                            else:\n                                res = True\n            except Exception:\n                print(\"error in getting comments from youtube\")\n\n        extract_youtube()\n\n        # results = [{\"comment_name\": \"Jason McMillan\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"Alberto Sanchez\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"Alba Bibin\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"Alex Cary\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"Gabriel Lasso\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"Juan Mackenzie\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"Rest Camilo\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"Happy Man\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"King Slayer\", \"comment\": \"I have used this man. Itz incrediblly fast. You should try this\"},\n        #            {\"comment_name\": \"James Henderson\", \"comment\": \"I have used this man. Itz incrediblly fast. 
You should try this\"}]\n        my_json = json.loads(json_util.dumps({'res': results}))\n\n        return my_json\n","repo_name":"sachithkk/pyknow-expert-system-api","sub_path":"api/CommentsController.py","file_name":"CommentsController.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"34538702084","text":"import numpy as np\n\nfrom extra_trees.ensemble.forest import ExtraTreesClassifier\nfrom extra_trees.ensemble.forest import ExtraTreesRegressor\n\n\ndef test_extra_trees_regressor(circles):\n    X, y = circles\n    indices = np.random.permutation(len(X.data))\n    X_train = X[indices[:-10]]\n    y_train = y[indices[:-10]]\n    X_test = X[indices[-10:]]\n    y_test = y[indices[-10:]]\n\n    regressor = ExtraTreesRegressor()\n    regressor.fit(X_train, y_train)\n    predictions = regressor.predict(X_test)\n\n    assert len(predictions) == len(y_test)\n\n\ndef test_extra_trees_classifier(circles):\n    X, y = circles\n    indices = np.random.permutation(len(X.data))\n    X_train = X[indices[:-10]]\n    y_train = y[indices[:-10]]\n    X_test = X[indices[-10:]]\n    y_test = y[indices[-10:]]\n\n    classifier = ExtraTreesClassifier()\n    classifier.fit(X_train, y_train)\n    predictions = classifier.predict(X_test)\n\n    assert len(predictions) == len(y_test)\n\n    all_classes = np.unique(y)\n    predicted_classes = np.unique(predictions)\n    assert all(value in all_classes for value in predicted_classes)\n","repo_name":"allrod5/extra-trees","sub_path":"tests/ensemble/test_forest.py","file_name":"test_forest.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"28"} +{"seq_id":"24211808225","text":"from project_2.logistic_reg import *\nfrom collections import Counter\n\nlg = Logistic(learning_rate=0.1, num_iter=100)\nnum_of_bags = 100\n\n\n# Logistic regression on sampled data; predict the result using the bagging method.\ndef bagging_classify():\n    print(\"Start %d times bagging: \" % num_of_bags)\n    bagging_result = []\n    for i in range(num_of_bags):\n        print(\"------------------------------------\")\n        print(\"%d -th iteration: \" % (i + 1))\n        X, y = lg.sampled_data(\"train.txt\")  # get sampled data\n        X, y = transfer(X, y)  # transfer the format wanted\n        theta = lg.gradient(X, y)\n        gender_list = lg.predict(test_X, test_y, theta, True)\n        bagging_result.append(gender_list)  # save all the prediction results into a list\n\n    bagging_result = np.array(bagging_result).T  # make a transfer to use Counter\n    final_result = []\n    for x in bagging_result:\n        # Using Counter to return the most common result of bagging\n        final_result.append(Counter(x).most_common(1)[0][0])\n\n    count = 0\n    # count the number of wrong prediction\n    for i in range(len(test_y)):\n        if int(final_result[i]) != test_y[i]:\n            count += 1\n    print(\"****************************************\")\n    print(\"Final prediction error number: %d\" % count)\n    print(\"Final error rate: %f\" % (1.0 * count / len(test_y)))\n\n\n# Single logistic classifier and its prediction result.\ndef single_classify():\n    single_X, single_y = lg.load_data(\"train.txt\")\n    single_X, single_y = transfer(single_X, single_y)\n    theta = lg.gradient(single_X, single_y)\n    gender_list = lg.predict(test_X, test_y, theta)\n\n    count = 0\n    for i in range(len(test_y)):\n        if int(gender_list[i]) != test_y[i]:\n            count += 1\n\n    print(\"Single logistic error number: %d\" % count)\n    print(\"Single logistic error rate: %f\" % (1.0 * count / len(test_y)))\n\n\nif __name__ == \"__main__\":\n    
test_X, test_y = lg.load_data(\"test.txt\")\n    test_X, test_y = transfer(test_X, test_y)\n\n    print(\"------------------------------------\")\n    print(\"Start single logistic classification:\")\n    single_classify()\n    bagging_classify()\n\n","repo_name":"Krysta1/machine_learning","sub_path":"project_2/bagging.py","file_name":"bagging.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19465526574","text":"##########################################################################################################\n###                                              CNN.py                                                ###\n##########################################################################################################\n\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Embedding, Dropout, Dense\nfrom keras.layers import concatenate, Conv1D, GlobalMaxPooling1D\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras import backend as K\nfrom utility import hLabelEncoder\n\n\n###########################  function  #########################\ndef weighted_crossentropy(y_pred, y_true, e):\n    '''\n    Add hierarchy label loss to prediction loss.\n    '''\n    label_loss = K.sparse_categorical_crossentropy(target=y_true, output=y_pred)\n    h_pred = get_hierarchy(y_pred)\n    h_true = get_hierarchy(y_true)\n    hierarchy_loss = K.sparse_categorical_crossentropy(target=h_true, output=h_pred)\n    return e * hierarchy_loss + (1-e) * label_loss\n\n\ndef get_hierarchy(y):\n    '''\n    Get the hierarchy of the predicted label\n    '''\n    label_encoder = hLabelEncoder()\n    return label_encoder.class2hierarchy(y)\n\n#############################  model  ###############################\nclass cnn(object):\n    def __init__(self, model, embeddings, n_label, sent_length, \n                 indist_dim, intype_dim, outdist_dim=32, outtype_dim=16,\n                 n_hierarchy=None, loss_weights=None, n_filter=128, win_size=3, dropout=0.25, learning_rate=1e-4):\n        self.embeddings = embeddings\n        self.n_label = n_label\n        self.sent_length = sent_length\n        self.indist_dim = indist_dim\n        self.intype_dim = intype_dim\n        self.outdist_dim = outdist_dim\n        self.outtype_dim = outtype_dim\n        self.n_hierarchy = n_hierarchy\n        self.loss_weights = loss_weights\n        self.n_filter = n_filter\n        self.win_size = win_size\n        self.dropout = dropout\n        self.learning_rate = learning_rate\n        self.modelname = model\n        if model == 'base':\n            self.model = self._model()\n        elif model == 'weighted_loss' and self.n_hierarchy is not None and loss_weights is not None:\n            self.model = self._model_weighted_loss()\n        else:\n            raise ValueError('Unrecognized model.')\n    \n    def _model(self):\n        print('Initializing CNN model ...', end='', flush=True)\n        embeddings = self.embeddings\n        sent_length = self.sent_length\n\n        ### embedding layers\n        # word embedding\n        words_input = Input(shape=(sent_length,), dtype='int32', name='words_input')\n        words = Embedding(input_dim=embeddings.shape[0], output_dim=embeddings.shape[1], weights=[embeddings], trainable=False) (words_input)\n\n        # distance embedding\n        dist1_input = Input(shape=(sent_length,), dtype='int32', name='dist1_input')\n        dist1 = Embedding(input_dim=self.indist_dim, output_dim=self.outdist_dim, trainable=True) (dist1_input)\n\n        dist2_input = Input(shape=(sent_length,), dtype='int32', name='dist2_input')\n        dist2 = Embedding(input_dim=self.indist_dim, output_dim=self.outdist_dim, trainable=True) (dist2_input)\n\n        # type embedding\n        type1_input = Input(shape=(sent_length,), dtype='int32', name='type1_input')\n        type1 
= Embedding(input_dim=self.intype_dim, output_dim=self.outtype_dim, trainable=True) (type1_input)\n\n        type2_input = Input(shape=(sent_length,), dtype='int32', name='type2_input')\n        type2 = Embedding(input_dim=self.intype_dim, output_dim=self.outtype_dim, trainable=True) (type2_input)\n\n        ### convolution layer\n        conv = concatenate([words, dist1, dist2, type1, type2])\n        conv = Conv1D(filters=self.n_filter, kernel_size=self.win_size, padding='same', activation='tanh', strides=1) (conv)\n\n        ### max pool and softmax\n        output = GlobalMaxPooling1D() (conv)\n        output = Dropout(self.dropout) (output)\n        output = Dense(self.n_label, activation='softmax') (output)\n\n        # model\n        model = Model(inputs=[words_input, dist1_input, dist2_input, type1_input, type2_input], outputs=[output])\n        model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=self.learning_rate), metrics=['accuracy'])\n        print('Done\\nModel structure summary:', flush=True)\n        print(model.summary())\n\n        return model\n    \n    def _model_weighted_loss(self):\n        print('Initializing CNN model ...', end='', flush=True)\n        embeddings = self.embeddings\n        sent_length = self.sent_length\n\n        ### embedding layers\n        # word embedding\n        words_input = Input(shape=(sent_length,), dtype='int32', name='words_input')\n        words = Embedding(input_dim=embeddings.shape[0], output_dim=embeddings.shape[1], weights=[embeddings], trainable=False) (words_input)\n\n        # distance embedding\n        dist1_input = Input(shape=(sent_length,), dtype='int32', name='dist1_input')\n        dist1 = Embedding(input_dim=self.indist_dim, output_dim=self.outdist_dim, trainable=True) (dist1_input)\n\n        dist2_input = Input(shape=(sent_length,), dtype='int32', name='dist2_input')\n        dist2 = Embedding(input_dim=self.indist_dim, output_dim=self.outdist_dim, trainable=True) (dist2_input)\n\n        # type embedding\n        type1_input = Input(shape=(sent_length,), dtype='int32', name='type1_input')\n        type1 = Embedding(input_dim=self.intype_dim, output_dim=self.outtype_dim, trainable=True) (type1_input)\n\n        type2_input = Input(shape=(sent_length,), dtype='int32', name='type2_input')\n        type2 = Embedding(input_dim=self.intype_dim, output_dim=self.outtype_dim, trainable=True) (type2_input)\n\n        ### convolution layer\n        conv = concatenate([words, dist1, dist2, type1, type2])\n        conv = Conv1D(filters=self.n_filter, kernel_size=self.win_size, padding='same', activation='tanh', strides=1) (conv)\n\n        ### max pool and softmax\n        pool = GlobalMaxPooling1D() (conv)\n\n        ### output layer\n        # output label\n        output1 = Dropout(self.dropout) (pool)\n        output1 = Dense(self.n_label, activation='softmax') (output1)\n        # output hierarchy\n        output2 = Dropout(self.dropout) (pool)\n        output2 = Dense(self.n_hierarchy, activation='softmax') (output2)\n\n        # model\n        model = Model(inputs=[words_input, dist1_input, dist2_input, type1_input, type2_input], outputs=[output1, output2])\n        model.compile(loss='sparse_categorical_crossentropy', loss_weights=self.loss_weights, optimizer=Adam(lr=self.learning_rate), metrics=['accuracy'])\n        print('Done\\nModel structure summary:', flush=True)\n        print(model.summary())\n\n        return model\n    \n    def train(self, X_train, y_train, save_path, validation_split, batch_size, epochs, verbose=2):\n        # print('Start training CNN models ... 
', end='', flush=True)\n        early_stopper = EarlyStopping(patience=10, verbose=1)\n        check_pointer = ModelCheckpoint(save_path, verbose=1, save_best_only=True)\n        self.model.fit(X_train, y_train, \n                       validation_split=validation_split, \n                       batch_size=batch_size, \n                       epochs=epochs, \n                       verbose=verbose, \n                       shuffle=True,\n                       callbacks=[early_stopper, check_pointer])\n        print('Done')\n\n    def loadModel(self, path):\n        print('Loading trained CNN model ... ', end='', flush=True)\n        self.model = load_model(path)\n        print('Done')\n    \n    def predict(self, X):\n        print('Predicting with CNN ... ', flush=True)\n        y = self.model.predict(X, verbose=1)\n        return y\n    \n    def predict_class(self, X):\n        y = self.predict(X)\n        if self.modelname == 'base':\n            return y.argmax(axis=-1)\n        elif self.modelname == 'weighted_loss':\n            return y[0].argmax(axis=-1)\n\n\n# if __name__ == '__main__':\n#     y_pred = K.constant([0,0,2,2,4])\n#     y_true = K.constant([0,1,2,3,4])\n#     print(K.eval(y_pred), K.eval(y_true))\n#     print(K.eval(weighted_crossentropy(y_pred=y_pred, y_true=y_true, e=0)))\n","repo_name":"Minzhe/BioNLP","sub_path":"BioNLP-ST-2016_SeeDev/code/CNN/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"39343243996","text":"import pickle\r\nimport sys\r\nimport pprint\r\n\r\n\r\nwith open(sys.argv[1], 'rb') as handle:\r\n\tprint('%s loaded!'%sys.argv[1])\r\n\tdataOne = pickle.load(handle)\r\n\r\nwith open(sys.argv[2], 'rb') as handle:\r\n\tprint('%s loaded!'%sys.argv[2])\r\n\tdataTwo = pickle.load(handle)\r\n\r\nr = 0\r\nfor muts in dataTwo:\r\n\tif r < 5:\r\n\t\tr += 1\r\n\t\tprint (dataOne[muts]['count'])\r\n\t\tprint (dataTwo[muts]['count'])\r\n\t\tif muts in dataOne:\r\n\t\t\t\r\n\t\t\tnewCount = dataOne[muts]['count'] + dataTwo[muts]['count']\r\n\t\t\tdataOne[muts] = {**dataOne[muts],**dataTwo[muts]}\r\n\t\t\tdataOne[muts]['count'] = newCount\r\n\r\n\r\n\t\telse:\r\n\t\t\tdataOne[muts] = dataTwo[muts]\r\n\t\tprint (dataOne[muts]['count'])\r\n\r\n\r\nwith open(sys.argv[3], 'wb') as handle:\r\n\t\tpickle.dump(dataOne, handle)\r\n","repo_name":"ziweizhong/LiuLab","sub_path":"scripts/combPickle.py","file_name":"combPickle.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6722450586","text":"# Find the element of the array A[1..N] that is closest in value to a given number X. \n# In the first line the user enters a natural number N – the number of elements in the array. The following lines contain N integers Ai. 
The last line contains the number X\n\nimport random\nn = int(input('Enter the number of elements in the array N: '))\nx = int(input('Enter the number X to which the closest element will be searched: '))\nnums = []\n\nfor _ in range(n):\n    nums.append(random.randint(0, 20))\nprint(nums)\nprint(x)\n\nmin_diff = abs(x - nums[0])\nindex = 0\nfor i in range(1, n):\n    count = abs(x - nums[i])\n    if count < min_diff:\n        min_diff = count\n        index = i\nprint(f'The closest value to {x} in the list is {nums[index]}, and their difference is {abs(x - nums[index])}')","repo_name":"amigo154/PythonHomework3","sub_path":"Task18.py","file_name":"Task18.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"30941305907","text":"import re\nimport logging\nfrom datahandling.ShoppingTrip import ShoppingTrip, Purchase\nfrom algorithm.TextReader import TextReader, ContourFinder\nimport time\nimport datetime\n\n\ndef only_digits(s):\n    t = ''\n    no_dots = True\n    for c in s:\n        if c.isdigit():\n            t += c\n        elif c == '.' and no_dots:\n            t += c\n            no_dots = False\n    return float(t)\n\n\nclass DefaultReceiptReader:\n    @classmethod\n    def convert_to_float(cls, raw_price):\n        return 0\n\n    @classmethod\n    def extract_info(cls, receipt, reader):\n        \"\"\"\n        Reads a special type of receipt.\n\n        :param img\n            PIL gray image\n\n        :returns\n            compressed image\n        \"\"\"\n        extracted_data = ShoppingTrip()\n        return extracted_data\n\n\nclass SosediReceiptReader(DefaultReceiptReader):\n    TOO_LARGE_LINE = 1.5\n    CENTER_RATE_THRESHOLD = 0.4\n\n    @classmethod\n    def convert_to_float(cls, raw_price):\n        # template looks like: =0.59*1.000$0.59\n        if '$' not in raw_price:\n            pos_star = raw_price.find('*')\n            if pos_star != -1:\n                return only_digits(raw_price[0:pos_star])\n            return 0\n        l, r = raw_price.rsplit('$', 1)\n        return only_digits(r)\n\n    @classmethod\n    def extract_info(cls, receipt, recognizer):\n        from algorithm.ReceiptReader import ReceiptReader\n\n        extracted_data = ShoppingTrip()\n\n        extracted_data.name_of_shop = 'Соседи'\n        extracted_data.address = recognizer.recognize(receipt.img_lines[2])\n        extracted_data.trip_date = datetime.datetime.now()\n\n        list_of_purchases = []\n\n        avg_height = receipt.average_height\n\n        in_shoplist = False\n\n        class EnumNeeds:\n            NAME = 0\n            PRICE = 1\n\n        what_we_need = EnumNeeds.NAME\n        last_purchase = []\n\n        for img_line, i in zip(receipt.img_lines, range(len(receipt.img_lines))):\n            center_rate = ReceiptReader.rate_center_area(img_line)\n            if center_rate > cls.CENTER_RATE_THRESHOLD:\n                if in_shoplist:\n                    logging.info('finished shopping')\n                    break\n                in_shoplist = True\n                logging.info('starting shopping')\n            elif in_shoplist:\n                if img_line.size[1] > cls.TOO_LARGE_LINE * avg_height:\n                    number_of_lines = img_line.size[1] / avg_height\n                    number_of_lines = int(number_of_lines + 0.5)\n\n                    logging.info('line {} is too large. 
it consists of {} lines'.format(i, number_of_lines))\n\n                    what_we_need += number_of_lines\n                    what_we_need %= 2\n\n                    last_purchase.clear()\n                else:\n                    last_purchase.append(img_line)\n                    if what_we_need == EnumNeeds.PRICE:\n                        if len(last_purchase) == 2:\n                            raw_name, raw_price = last_purchase\n\n                            raw_name.save('result_lines/purchase_{}_name.png'.format(i))\n                            raw_price.save('result_lines/purchase_{}_price.png'.format(i))\n\n                            logging.info('created new purchase number {}'.format(i))\n\n                            list_of_purchases += [(raw_name, raw_price)]\n                        last_purchase.clear()\n                    what_we_need = (what_we_need + 1) % 2\n        logging.info('Total num of purchases is {}'.format(len(list_of_purchases)))\n        extracted_data.list_of_purchases = TextReader.purchases_to_text(list_of_purchases, recognizer, cls)\n        extracted_data.receipt_amount = sum(\n            [\n                purchase.price\n                for purchase in extracted_data.list_of_purchases\n            ]\n        )\n        return extracted_data\n\n\n# todo: rewrite completely\nclass KoronaReceiptReader(DefaultReceiptReader):\n    @classmethod\n    def convert_to_float(cls, raw_price):\n        return 0\n\n    @classmethod\n    def extract_info(cls, receipt, reader):\n        extracted_data = ShoppingTrip()\n\n        return extracted_data\n\n    @classmethod\n    def find_data(cls, text):\n        for line in text:\n            data = re.search(r'[0-9]{0,2}[-:.][0-9]{0,2}[-:.][0-9]{0,4}', line)\n            if data and '2019' in data.group():\n                return data.group()\n        return None\n\n","repo_name":"gwyrwch/yourbudget","sub_path":"yourbudget/algorithm/readers.py","file_name":"readers.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"41986951619","text":"import os\nfrom fastapi import HTTPException, Security\nfrom fastapi.security.api_key import APIKeyHeader\nfrom starlette.status import HTTP_403_FORBIDDEN\n\n\nAPI_KEY_NAME = \"X-API-KEY\"\napi_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)\n\n\nasync def get_api_key(api_key_header: str = Security(api_key_header)) -> str:\n    expected_api_key = os.getenv(\"API_KEY\", \"default_api_key_if_not_set\")\n    if api_key_header == expected_api_key:\n        return api_key_header\n    else:\n        raise HTTPException(\n            status_code=HTTP_403_FORBIDDEN, detail=\"Could not validate credentials\"\n        )\n","repo_name":"vkopitsa/gpts-chat","sub_path":"app/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"70888124234","text":"def add(num1, num2):\n    \"\"\"Adds two numbers together (num1 and num2)\"\"\"\n    return num1 + num2\n\n\ndef subtract(num1, num2):\n    \"\"\"Subtracts num2 from num1\"\"\"\n    return num1 - num2\n\n\ndef multiply(num1, num2):\n    \"\"\"Multiplies num1 by num2\"\"\"\n    return num1 * num2\n\n\ndef divide(num1, num2):\n    \"\"\"Divides num1 by num2\"\"\"\n    return num1 / num2\n\n\ncalculator_operations = {\n    \"+\": add,\n    \"-\": subtract,\n    \"*\": multiply,\n    \"/\": divide\n}\n\n\ndef calculator():\n    first_number = float(input(\"What is your first number?\\n\"))\n\n    print(\"List of operations you can perform:\\n\")\n    for operation in calculator_operations:\n        print(operation)\n\n    operation = input(\"What operation would you like to perform?\\n\")\n\n    second_number = float(input(\"What is your second number?\\n\"))\n\n    result = calculator_operations[operation](first_number, second_number)\n\n    print(f\"{first_number} {operation} {second_number} = {result}\")\n\n    continue_calculator = input(\n        f\"Continue using calculator with current result 
({result})? Type 'y' to continue, 'n' \"\n        f\"to start a new calculator, or 'exit' to quit the program: \\n\")\n\n    while continue_calculator == 'y':\n        new_first_number = result\n        for operation in calculator_operations:\n            print(operation)\n        operation = input(\"What operation would you like to perform?\\n\")\n        new_second_number = float(input(\"What is your second number?\\n\"))\n        result = calculator_operations[operation](new_first_number, new_second_number)\n        print(f\"{new_first_number} {operation} {new_second_number} = {result}\")\n        continue_calculator = input(\n            f\"Continue using calculator with current result ({result})? Type 'y' to continue, \"\n            f\"'n' to start a new calculator, or 'exit' to quit the program: \\n\")\n\n    if continue_calculator == 'n':\n        calculator()\n\n    if continue_calculator == 'exit':\n        print(\"Exiting program. See you!\")\n\n","repo_name":"beboptank/python-calculator","sub_path":"calc_functions.py","file_name":"calc_functions.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"28768027999","text":"##### Library and Package Requirements, and Testing of them\ntry:\n    import sys\n    import os\n    from termcolor import colored\n    import warnings\n    from sklearn.cluster import MiniBatchKMeans\n    from sklearn.exceptions import DataConversionWarning\n    from sklearn.metrics import confusion_matrix\n    from sklearn.metrics import classification_report, accuracy_score\n    import rasterio as rio\n    import numpy as np\n    import pandas as pd\n    from glob import glob\n    import seaborn as sns\n    import matplotlib as mpl\n    import matplotlib.pyplot as plt\n    from string import ascii_lowercase as asci\nexcept ModuleNotFoundError:\n    print(colored('\\n\\nModule import error\\n', 'red'))\n    sys.exit()\nelse:\n    print(colored(\n        '\\n\\nBingo!!! All libraries properly loaded. 
Ready to start!!!', 'green'), '\n')\n\n##### Disable all warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n\n##### Warning used to notify implicit data conversions happening in the code.\nwarnings.filterwarnings(action='ignore', category=DataConversionWarning)\nwarnings.filterwarnings(action=\"ignore\", category=UserWarning)\nnp.seterr(divide='ignore', invalid='ignore')\n\n\n############### Data Access ###############\n\n##### Raster file loader\ndef load_raster(input_file: str):\n    \"\"\"\n    Returns a raster array which consists of its bands and\n    transformation matrix parameters\n    ----------\n    input_file: str\n        path directory to the raster file\n    \"\"\"\n    with rio.open(input_file) as src:\n        band = src.read()\n        transform = src.transform\n        crs = src.crs\n        shape = src.shape\n        profile = src.profile\n        raster_img = np.rollaxis(band, 0, 1)\n\n    output = {\"band\": band,\n              \"raster_img\": raster_img,\n              \"transform\": transform,\n              \"crs\": crs,\n              \"shape\": shape,\n              \"profile\": profile}\n\n    return output\n\n##### Raster file writer\ndef write_raster(raster, crs, transform, output_file):\n    \"\"\"\n    Writes a raster array which consists of one band to the disc.\n    ----------\n    raster:\n        raster array\n    transform: \n        transformation matrix parameters\n    output_file: str\n        path directory to write the raster file\n    \"\"\"\n    profile = {\"driver\": \"GTiff\",\n               \"compress\": \"lzw\",\n               \"width\": raster.shape[0],\n               \"height\": raster.shape[1],\n               \"crs\": crs,\n               \"transform\": transform,\n               \"dtype\": raster.dtype,\n               \"count\": 1,\n               \"tiled\": False,\n               \"interleave\": 'band',\n               \"nodata\": 0}\n\n    profile.update(dtype=raster.dtype,\n                   height=raster.shape[0],\n                   width=raster.shape[1],\n                   nodata=0,\n                   compress=\"lzw\")\n\n    with rio.open(output_file, \"w\", **profile) as out:\n        out.write_band(1, raster)\n\n\n\n############### Spectral Indices (SIs) ###############\n\n##### Normalized Difference Vegetation Index (NDVI)\ndef NDVI(nir, red):\n    \"\"\"\n    Calculates NDVI\n    \n    parameters\n    ----------\n    nir: NIR band as input\n    red: RED band as input\n    \"\"\"\n    NDVI = (nir.astype(\"float\") - red.astype(\"float\")) / \\\n        (nir.astype(\"float\") + red.astype(\"float\"))\n\n    return NDVI\n\n##### Dry Bareness Index (DBI)\ndef DBI(green, swinr1, ndvi):\n    \"\"\"\n    Calculate DBI\n    \n    parameters\n    ----------\n    swinr1: SWIR1 band as input\n    green: GREEN band as input\n    ndvi: NDVI values as input\n    \"\"\"\n    DBI = ((swinr1.astype(\"float\") - green.astype(\"float\")) /\n           (swinr1.astype(\"float\") + green.astype(\"float\"))) - ndvi\n\n    return DBI\n\n##### Normalized Difference Water Index (NDWI)\ndef NDWI(green, swinr1):\n    \"\"\"\n    Calculate NDWI\n    \n    parameters\n    ----------\n    swinr1: SWIR1 band as input\n    green: GREEN band as input\n    \"\"\"\n    NDWI = (green.astype(\"float\") - swinr1.astype(\"float\")) / \\\n        (green.astype(\"float\") + swinr1.astype(\"float\"))\n\n    return NDWI\n\n##### Normalized Difference Built-up Index (NDBI)\ndef NDBI(swinr1, nir):\n    \"\"\"\n    Calculate NDBI\n    \n    parameters\n    ----------\n    swinr1: SWIR1 band as input\n    nir: NIR band as input\n    \"\"\"\n    NDBI = (swinr1.astype(\"float\") - nir.astype(\"float\")) / \\\n        (swinr1.astype(\"float\") + nir.astype(\"float\"))\n\n    return NDBI\n\n#####\ndef spectral_indix(input_file: str, sp_index: str=\"NDVI\", verbose: bool=True):\n    \"\"\"\n    Calculate the specified Spectral Index. 
\n    \n    parameters\n    ----------\n    input_file: str\n        path directory to the raster file\n    sp_index: spectral index of interest: NDVI, DBI, NDWI, NDBI\n    \"\"\"\n    if verbose:\n        print(f\"\\nThe spectral index: {sp_index}, is being calculated ...\",)\n\n    ### Get the image id from the image_path\n    img_id = input_file.split(\"/\")[-1].split(\".\")[0]\n    print(f\"{' '*2} Raster image ID: {img_id}\")\n\n    ### Load the raster image\n    if not input_file.endswith(\".tif\"):\n        return \"\\nSorry! The file entered is not for a raster image.\"\n    else:\n        raster = load_raster(input_file)\n\n    ### Slice the bands: our data has only 11 bands instead of 13\n    # Bands = ('B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B8A', 'B9', 'B11', 'B12')\n    blue = raster[\"band\"][0, :, :]  # represented by B2\n    green = raster[\"band\"][1, :, :]  # represented by B3\n    red = raster[\"band\"][2, :, :]  # represented by B4\n    nir = raster[\"band\"][6, :, :]  # represented by B8\n    swinr1 = raster[\"band\"][9, :, :]  # represented by B11\n    swinr2 = raster[\"band\"][10, :, :]  # represented by B12\n\n    if sp_index == \"NDVI\":\n        ### Calculate NDVI\n        return NDVI(nir, red)\n\n    elif sp_index == \"DBI\":\n        ### Calculate DBI\n        ndvi = NDVI(nir, red)\n        return DBI(green, swinr1, ndvi)\n\n    elif sp_index == \"NDWI\":\n        ### Calculate NDWI\n        return NDWI(green, swinr1)\n\n    elif sp_index == \"NDBI\":\n        ### Calculate NDBI\n        return NDBI(swinr1, nir)\n\n    else:\n        alert = \"\\nSorry! The spectral index must be one of: NDVI, DBI, NDWI, NDBI!\\n\"\n        return alert\n\n#####\ndef spectral_indices(input_file: str, verbose: bool=False) -> tuple:\n    \"\"\"\n    Calculate the Spectral Indices: NDVI, DBI, NDWI, NDBI. \n    \n    parameters\n    ----------\n    input_file: str\n        path directory to the raster file\n    \"\"\"\n    ### Get the image id from the image_path\n    if verbose:\n        img_id = input_file.split(\"/\")[-1].split(\".\")[0]\n        print(f\"{' '*2} Spectral Indices from raster image ID: {img_id}\")\n\n    ### Load the raster image\n    if not input_file.endswith(\".tif\"):\n        return \"\\nSorry! 
The file entered is not for a raster image.\"\n    else:\n        raster = load_raster(input_file)\n\n    ### Slice the bands: note our data has only 11 bands instead of 13\n    # Bands = ('B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B8', 'B8A', 'B9', 'B11', 'B12')\n    blue = raster[\"band\"][0, :, :]  # represented by B2\n    green = raster[\"band\"][1, :, :]  # represented by B3\n    red = raster[\"band\"][2, :, :]  # represented by B4\n    nir = raster[\"band\"][6, :, :]  # represented by B8\n    swinr = raster[\"band\"][9, :, :]  # represented by B11\n    swinr1 = raster[\"band\"][10, :, :]  # represented by B12\n\n    ### Calculate NDWI, NDVI, DBI, NDBI\n    ndvi = NDVI(nir, red)\n    spindices = {\"NDWI\": NDWI(green, swinr1),\n                 \"NDVI\": ndvi,\n                 \"DBI\": DBI(green, swinr1, ndvi),\n                 \"NDBI\": NDBI(swinr, nir)}\n\n    RGB = (red, green, blue)\n\n    return spindices, RGB\n\n\n\n######### Data Processor for Land Cover Clustering (LCC) ############\n\n#####\ndef lclu_data_processor(DATA_DIR: str,\n                        IN_DIR: str = \"train\",\n                        verbose: bool = True) -> tuple:\n    \"\"\"\n    Preprocesses Sentinel-2 imagery data to extract features from the\n    raster bands, and the calculated spectral indices for Land Cover\n    segmentation and classification.\n    \n    parameters\n    ----------\n    DATA_DIR: main directory of the data\n    IN_DIR: folder name of inputs\n    \n    Returns:\n        A tuple(DataFrame, list_of_bands)\n    \"\"\"\n    print(f\"\\n\\nPreprocessing of {IN_DIR}ing data:\\n\")\n    ### Set up the paths\n    input_raster = os.path.join(DATA_DIR, IN_DIR, \"*.tif\")\n    data_paths = sorted(glob(input_raster))\n\n    num_bands = 11\n    IMG_arr_list = []\n    spindex_arr_dict = {\"NDWI\": [], \"NDVI\": [], \"DBI\": [], \"NDBI\": []}\n    for image_path in data_paths:\n        ### Extract and process the feature data from the bands\n        if verbose:\n            img_id = image_path.split(\"/\")[-1].split(\".\")[0]\n            print(f\"{' '*2} Raster image ID: {img_id}\")\n        raster = rio.open(image_path)\n        # Reshape as (num_samples, num_bands)\n        img_arr = np.moveaxis(raster.read(), 0, -1).reshape(-1, num_bands)\n        IMG_arr_list.append(img_arr)\n\n        ### Extract and process the target data from the spectral index\n        for spindex in spindex_arr_dict.keys():\n            arr_spindex = spectral_indix(image_path, spindex, verbose=False)\n            # Reshape as (num_samples, 1)\n            spindex_arr = arr_spindex.reshape(-1, 1)\n            spindex_arr_dict[spindex].append(\n                spindex_arr)  # Update its list values\n\n    ### Data\n    bands = list(raster.descriptions)\n    df_bands = pd.DataFrame(data=np.vstack(\n        IMG_arr_list), columns=bands)\n    spindex_arr_dict = {key: np.vstack(\n        spindex_arr_dict[key]).ravel() for key in spindex_arr_dict.keys()}\n    spindex_df = pd.DataFrame(data=spindex_arr_dict)\n\n    return df_bands.join(spindex_df), bands\n\n\n\n\n\n\n\n######### Additional Performance Metric for classification\ndef jaccard_score(y_true, y_pred) -> float:\n    \"\"\"\n    Compute Jaccard similarity score to evaluate the accuracy of a classification\n    \n    parameters:\n    y_true: array-like of shape (n_samples,)\n        Ground truth (correct) target values.\n    y_pred: array-like of shape (n_samples,)\n        Estimated targets as returned by a classifier\n    \n    Returns:\n        The calculated metric (float)\n    \"\"\"\n    ### y_true, y_pred must be a flatten vector\n    try:\n        y_pred = y_pred.flatten()\n        y_true = y_true.flatten()\n    except Exception:\n        pass\n    ### Get the confusion matrix\n    cm = confusion_matrix(y_true, y_pred)\n\n    ### Compute mean IoU\n    intersection = np.diag(cm)\n    ground_truth_set = cm.sum(axis=1)\n    predicted_set = cm.sum(axis=0)\n    union = ground_truth_set + predicted_set - intersection\n    IoU = intersection / 
union.astype(float)\n    mean_IoU = np.nanmean(IoU).item()\n\n    return mean_IoU\n\n\n\n\n\n\n\n\n\n######### Plotting Tools\n\n##### To annotate plots\ndef number_figures(axes, pos=None, labels=asci, braces=True, **text_kwargs):\n\n    def depth(L): return isinstance(L, list) and max(map(depth, L)) + 1\n\n    if pos is None:\n        pos = [[0.99, 0.93]] * len(axes)\n    elif (depth(pos) == 1) and (len(pos) == 2):\n        pos = [list(pos)] * len(axes)\n    elif (depth(pos) != 2) and (len(pos) != len(axes)):\n        raise Exception('check the position is the right format')\n\n    for c, ax in enumerate(axes):\n        x0, x1 = ax.get_xlim()\n        y0, y1 = ax.get_ylim()\n\n        w = x1 - x0\n        h = y1 - y0\n        x = x0 + w * pos[c][0]\n        y = y0 + h * pos[c][1]\n\n        t = '%s' % labels[c]\n        if braces:\n            t = '(%s)' % (t)\n        ax.text(x, y, t, **text_kwargs)\n\n##### Plot the confusion matrix and feature importance\n\n\ndef plot_cfmatrix_fimp_lgbm(lgbm_clf, test_y, \n                            pred_labels, labels, bands):\n    ##### Get the confusion matrix\n    cm = confusion_matrix(test_y, pred_labels)\n    df_cm = pd.DataFrame(cm, labels, labels)\n\n    ##### Feature importance and Heat Map\n    fimp = pd.DataFrame(sorted(zip(lgbm_clf.feature_importance(), bands)),\n                        columns=[\"Value\", \"Feature\"])\n    fimp = fimp.set_index(\"Feature\")/lgbm_clf.feature_importance().sum()\n    fimp.reset_index(inplace=True)\n\n    fig = plt.figure(figsize=[24, 8])\n    grid = mpl.gridspec.GridSpec(1, 2, wspace=0.15, width_ratios=[0.55, 0.45])\n    ax = [fig.add_subplot(grid[0, 0]),\n          fig.add_subplot(grid[0, 1])]\n    ###\n    #sns.set(font_scale=1.4)  # for label size\n    sns.heatmap(df_cm, annot=True, ax=ax[0], cbar=False)\n    ax[0].set_xlabel(\"\\nTarget class\")\n    ax[0].set_ylabel(\"Estimated class\")\n    ax[0].set_title(\"Confusion Matrix\")\n    ###\n    sns.barplot(x=\"Value\", y=\"Feature\", data=fimp, ax=ax[1], palette=\"dark\")\n    ax[1].set_title(\"Light GBM Features' Importance\")\n    ax[1].set_xlabel(\n        \"Importances\\n(Usefulness fraction of the feature in the model)\")\n    ax[1].set_ylabel(\"Features\")\n\n    txt_kwargs = {\"fontsize\": 20, \"c\": \"k\"}\n    number_figures(ax, pos=[0.005, 1.015], braces=True, **txt_kwargs)\n    plt.show()\n    return fig\n\n\n\n\n\n","repo_name":"Djeutsch/Earth-System-Satellite-Observations","sub_path":"scripts/src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":13145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"20111535457","text":"def fibonacci_non_recursive(n):\r\n    fib_list = [0, 1]\r\n    for i in range(2, n + 1):\r\n        fib_list.append(fib_list[i - 1] + fib_list[i - 2])\r\n    return fib_list[n]\r\n\r\ndef fibonacci_recursive(n):\r\n    if n <= 1:\r\n        return n\r\n    else:\r\n        return fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2)\r\n\r\ndef main():\r\n    while True:\r\n        print(\"\\n1. Non-recursive Fibonacci\")\r\n        print(\"2. Recursive Fibonacci\")\r\n        print(\"3. Exit\")\r\n        choice = int(input(\"Enter your choice: \"))\r\n\r\n        if choice == 1:\r\n            n = int(input(\"Enter the value of n: \"))\r\n            print(\"Fibonacci:\", fibonacci_non_recursive(n))\r\n        elif choice == 2:\r\n            n = int(input(\"Enter the value of n: \"))\r\n            print(\"Fibonacci:\", fibonacci_recursive(n))\r\n        elif choice == 3:\r\n            break\r\n        else:\r\n            print(\"Invalid choice. 
Please try again.\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"shyamr54/daalab","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"9852241300","text":"import collections\nimport time\n\nfrom snmpsim_control_plane import error\nfrom snmpsim_control_plane import log\nfrom snmpsim_control_plane.supervisor import lifecycle\nfrom snmpsim_control_plane.supervisor.reporting import collector\nfrom snmpsim_control_plane.supervisor.reporting.formats import jsondoc\nfrom snmpsim_control_plane.supervisor.reporting.formats import null\n\n\nclass ReportingManager(object):\n \"\"\"Gather and dump activity metrics.\n\n For each of given managed process instances, collect OS-level\n metrics.\n\n Then write them down as a JSON file indexed by time. Consumers\n are expected to process each of these files and are free to remove\n them.\n \"\"\"\n\n REPORTING_PERIOD = 15\n\n REPORTERS = {\n 'null': null.NullReporter,\n 'jsondoc': jsondoc.JsonDocReporter,\n }\n\n STARTED = int(time.time())\n\n _last_reportings = collections.defaultdict(dict)\n\n _reporter = null.NullReporter()\n\n _next_dump = time.time() + REPORTING_PERIOD\n\n @classmethod\n def configure(cls, fmt, *args):\n try:\n reporter = cls.REPORTERS[fmt]\n\n except KeyError:\n raise error.ControlPlaneError(\n 'Unsupported reporting format: %s' % fmt)\n\n cls._reporter = reporter(*args)\n\n log.info('Using \"%s\" activity reporting method with '\n 'params %s' % (cls._reporter, ', '.join(args)))\n\n @classmethod\n def process_metrics(cls, watch_dir, *instances):\n now = int(time.time())\n\n if cls._next_dump > now:\n return\n\n last_dump = cls._next_dump - cls.REPORTING_PERIOD\n cls._next_dump = now + cls.REPORTING_PERIOD\n\n all_metrics = collector.collect_metrics(*instances)\n\n for metrics in all_metrics:\n executable = metrics['executable']\n\n last_reportings = cls._last_reportings[executable]\n\n for metric, value in metrics.items():\n if not isinstance(value, lifecycle.AbstractGrowingValue):\n continue\n\n # compute the difference in value growth to report\n\n previous_value = last_reportings.get(metric)\n current_value = metrics.get(metric)\n\n last_reportings[metric] = current_value.latest\n\n metrics[metric] = current_value.added_content(previous_value)\n\n cls._reporter.dump_metrics(\n all_metrics, watch_dir=watch_dir, started=cls.STARTED,\n begin=int(last_dump), end=int(now))\n","repo_name":"etingof/snmpsim-control-plane","sub_path":"snmpsim_control_plane/supervisor/reporting/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"28"} +{"seq_id":"20600534324","text":"import re\nimport base64\nfrom six.moves import urllib_error\nfrom resources.lib import utils\nfrom resources.lib.adultsite import AdultSite\nfrom resources.lib.decrypters.kvsplayer import kvs_decode\n\nprogress = utils.progress\n\nsite = AdultSite('netflixporno', '[COLOR hotpink]NetflixPorno[/COLOR]', 'https://netflixporno.net/', 'https://netflixporno.net/scenes/wp-content/uploads/2021/04/netflixporno-1.png', 'netflixporno')\n\n\n@site.register(default_mode=True)\ndef Main():\n site.add_dir('[COLOR hotpink]XXX Scenes[/COLOR]', site.url + 'scenes/', 'List', site.img_cat)\n site.add_dir('[COLOR hotpink]Parody Movies[/COLOR]', site.url + 'adult/genre/parodies/', 'List', site.img_cat)\n site.add_dir('[COLOR 
hotpink]Studios[/COLOR]', site.url + 'adult/genre/parodies/', 'Studios', site.img_cat)\n site.add_dir('[COLOR hotpink]Categories[/COLOR]', site.url + 'adult/genre/parodies/', 'Categories', site.img_cat)\n site.add_dir('[COLOR hotpink]Search[/COLOR]', site.url + 'search/', 'Search', site.img_search)\n List(site.url + 'adult')\n utils.eod()\n\n\n@site.register()\ndef List(url):\n try:\n listhtml = utils.getHtml(url, site.url)\n except urllib_error.URLError as e:\n utils.notify(e)\n return\n if len(listhtml) == 0:\n listhtml = 'Empty'\n\n match = re.compile(r'
([^\"]+)([^<]+)', re.DOTALL | re.IGNORECASE).findall(listhtml)[0]\n lastpg = re.compile(r'>([^<]+)\\s*([^<]+)<', re.DOTALL | re.IGNORECASE).findall(cathtml)\n for catpage, name in match:\n site.add_dir(name, catpage, 'List', site.img_cat)\n utils.eod()\n\n\n@site.register()\ndef Studios(url):\n try:\n studhtml = utils.getHtml(url, site.url)\n except urllib_error.URLError as e:\n utils.notify(e)\n return\n match = re.compile(r'director\\s*menu-item.+?href=\"([^\"]+)\">([^<]+)', re.DOTALL | re.IGNORECASE).findall(studhtml)\n for studpage, name in match:\n site.add_dir(name, studpage, 'List', site.img_cat)\n utils.eod()\n\n\ndef url_decode(str):\n if '/goto/' not in str:\n result = str\n else:\n try:\n result = url_decode(base64.b64decode(re.search('/goto/(.+)', str).group(1)))\n except:\n result = str\n return result\n\n\n@site.register()\ndef Playvid(url, name, download=None):\n links = {}\n videourl = None\n vp = utils.VideoPlayer(name, download)\n vp.progress.update(25, \"[CR]Loading video page[CR]\")\n try:\n html = utils.getHtml(url, site.url)\n except urllib_error.URLError as e:\n utils.notify(e)\n return\n srcs = re.compile(r' None:\n \"\"\"It uses Wikipedia version, which is passed in `language` argument.\"\"\"\n wikipedia.random_page(language=\"de\")\n args, _ = mock_requests_get.call_args\n assert \"de.wikipedia.org\" in args[0]\n\n\ndef test_random_page_returns_page(mock_requests_get: Mock) -> None:\n \"\"\"It returns Page object instance.\"\"\"\n page = wikipedia.random_page()\n assert isinstance(page, wikipedia.Page)\n\n\ndef test_random_page_handles_validation_errors(mock_requests_get: Mock) -> None:\n \"\"\"It raises ClickException if request returns empty JSON object (None).\"\"\"\n mock_requests_get.return_value.__enter__.return_value.json.return_value = None\n with pytest.raises(click.ClickException):\n wikipedia.random_page()\n","repo_name":"Winand/tutorial-hypermodern-python-winand","sub_path":"tests/test_wikipedia.py","file_name":"test_wikipedia.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"10716393126","text":"from ..models import Doctors\r\nimport json\r\nfrom django.http import HttpResponse\r\nclass Doctor:\r\n def __init__(self,therapistName,belongName,count,belongHospitalName,specList,profileImageURL):\r\n self.therapistName = therapistName\r\n self.belongName = belongName\r\n self.count = int(count)\r\n self.belongHospitalName= belongHospitalName\r\n self.specList = specList\r\n self.profileImageURL = profileImageURL\r\n pass\r\n def getName(self):\r\n return self.therapistName,self.belongName,self.count,self.belongHospitalName,self.specList,self.profileImageURL\r\n def save(self):\r\n Doctors.objects.create(\r\n therapistName = self.therapistName,\r\n belongName = self.belongName,\r\n count = self.count,\r\n belongHospitalName = self.belongHospitalName,\r\n specList = self.specList,\r\n profileImageURL = self.profileImageURL\r\n ).save()\r\n print(\"SAVE SUCCESSED\")\r\ndef setDoctor(request):\r\n if request.method == \"POST\":\r\n postdata = request.POST.dict()\r\n therapistName = postdata['therapistName']\r\n belongName = postdata['belongName']\r\n count = postdata['count']\r\n belongHospitalName = postdata['belongHospitalName']\r\n specList = postdata['specList']\r\n profileImageURL = postdata['profileImageURL']\r\n print(therapistName)\r\n doc = Doctor(therapistName,belongName,count,belongHospitalName,specList,profileImageURL)\r\n doc.save()\r\n result 
= {\r\n            'result':{\r\n                'message' : \"succeeded\"\r\n            }\r\n        }\r\n    else:\r\n        result = {\r\n            'result':{\r\n                'message' : \"failed\"\r\n            }\r\n        }\r\n    result = json.dumps(result)\r\n    return HttpResponse(result)\r\ndef getDoctor(request):\r\n    DoctorsROW = Doctors.objects.all()\r\n    DoctorsArr = []\r\n    doctors = []\r\n    for i in DoctorsROW:\r\n        doc = Doctor(i.therapistName,i.belongName,i.count,i.belongHospitalName,i.specList,i.profileImageURL)\r\n        DoctorsArr.append(doc.getName())\r\n        doctors.append({\r\n            'therapistName': i.therapistName,\r\n            'belongName': i.belongName,\r\n            'count': i.count,\r\n            'belongHospitalName': i.belongHospitalName,\r\n            'specList': i.specList,\r\n            'profileImageURL': i.profileImageURL\r\n        })\r\n    result = {\r\n        'result': doctors\r\n    }\r\n    result = json.dumps(result)\r\n    print(result)\r\n\r\n    return HttpResponse(result)\r\n\r\n\r\n\r\n","repo_name":"DROPKICKforLife/KickServer","sub_path":"recv/apis/getdoctor.py","file_name":"getdoctor.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17541603517","text":"# External dependencies\nfrom __future__ import division, absolute_import, print_function\nimport unittest\nimport numpy as np\n\n# Internal dependencies\nfrom svgpathtools import rational_limit\n\n\nclass Test_polytools(unittest.TestCase):\n    # def test_poly_roots(self):\n    #     self.fail()\n\n    def test_rational_limit(self):\n\n        # (3x^3 + x)/(4x^2 - 2x) -> -1/2 as x->0\n        f = np.poly1d([3, 0, 1, 0])\n        g = np.poly1d([4, -2, 0])\n        lim = rational_limit(f, g, 0)\n        self.assertAlmostEqual(lim, -0.5)\n\n        # (3x^2)/(4x^2 - 2x) -> 0 as x->0\n        f = np.poly1d([3, 0, 0])\n        g = np.poly1d([4, -2, 0])\n        lim = rational_limit(f, g, 0)\n        self.assertAlmostEqual(lim, 0)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"mathandy/svgpathtools","sub_path":"test/test_polytools.py","file_name":"test_polytools.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":484,"dataset":"github-code","pt":"27"} +{"seq_id":"5025781331","text":"# PYTHON IMPLEMENTATION\nimport errors\nimport vm\nwith open(\"main.ahh\", \"r\") as source:\n    charStream = source.read()\ndef lexer(charstream):\n    toktype = {\n        \"a\":\"ADD\",\n        \"h\":\"SUBTRACT\",\n        \"!\":\"PRINT\",\n        \"\\n\":\"MOVE FORWARD\",\n        \"?\": \"ASCII\",\n        \"#\":\"MOVE BACK\",\n        \"~\":\"NEWLINE\",\n        \"d\":\"DEBUG\"\n        \n    }\n    toks = []\n    lineNo = 0\n    for char in charstream: \n        if char in toktype.keys():\n            toks.append([toktype[char], lineNo])\n            if char == \"\\n\":\n                lineNo += 1\n        elif char in \"\\t \":\n            pass\n        else:\n            errors.DoesNotExist(char, lineNo)\n    return toks\ndef interpreter(tokens):\n    VM = vm.VirtualMachine()\n    pre = \"\"\n    for tok in tokens:\n        if tok[0] == \"ADD\":\n            VM.addToSpot(tok, 1)\n        elif tok[0] == \"SUBTRACT\":\n            VM.addToSpot(tok, -1)\n        elif tok[0] == \"PRINT\":\n            if pre != \"\": print(pre, end=\"\"); pre = \"\"\n            else: print(VM.getSpot(tok), end=\"\")\n        elif tok[0] == \"MOVE FORWARD\":\n            VM.movePointer(tok, 1)\n        elif tok[0] == \"ASCII\":\n            pre = chr(VM.getSpot(tok))\n        elif tok[0] == \"MOVE BACK\":\n            VM.movePointer(tok, -1)\n        elif tok[0] == \"NEWLINE\":\n            print(\"\\n\", end=\"\") \n            \ndef debug(orig, tok):\n    print(\"AHH! Interpreter (v. 
1.0)\") \n    print(\"-------------------------\")\n    print(\"By CompilingCoder\")\n    print(\"-------------------------\")\n    print(\"main.ahh:\")\n    print(orig)\n    print(\"Tokens:\")\n    print(tok)\n    print(\"Result:\")\ntokens = lexer(charStream)\ndebug(charStream,tokens)\ninterpreter(tokens)\n\n    ","repo_name":"Sarang0218/aaah-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"20810642236","text":"#-*-coding:utf-8-*-\n\"\"\"\n\n@author: hjx\n\nUser-based collaborative filtering algorithm UserCF\n\n\"\"\"\n\nfrom collections import defaultdict\nimport math \nimport pickle\nimport operator\nimport os\n\n\nclass 
UserCF(object):\n\t\n\tdef __init__(self, ori_train):\n\t\tself.trainset = dict()\n\t\tfor user, item in ori_train:\n\t\t\tself.trainset.setdefault(user, set())\n\t\t\tself.trainset[user].add(item)\n\n\t\t\t\n\tdef user_similarity(self):\n\t\t\"\"\"Compute the user interest similarity matrix\"\"\"\n\t\tprint(\"UserCF: \")\n\t\t# Build the item-to-user inverted table\n\t\titem_users = dict()\n\t\tfor user, items in self.trainset.items():\n\t\t\tfor item in items:\n\t\t\t\titem_users.setdefault(item, set())\n\t\t\t\titem_users[item].add(user)\n\n\t\t# Count matrix of items co-liked by each pair of users (the numerator)\n\t\tC = dict()\n\t\tN = defaultdict(int)  # number of items visited by each user\n\t\tfor i,users in item_users.items():\n\t\t\tfor u in users:\n\t\t\t\tC.setdefault(u, dict())\n\t\t\t\tN[u] += 1\n\t\t\t\tfor v in users:\n\t\t\t\t\tif u == v:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tC[u].setdefault(v, 0)\n\t\t\t\t\tC[u][v] += 1\n\t\t\n\t\t# Compute the cosine similarity matrix of user interests (divide by the denominator)\n\t\tfor u, related_users in C.items():\n\t\t\tfor v, cuv in related_users.items():\n\t\t\t\tC[u][v] = cuv / math.sqrt(N[u] * N[v])\n\t\t\n\t\treturn C\n\t\t\n\tdef train(self, user_matrix_path=\"data/user_matrix.pkl\"):\n\t\t\"\"\"Train the model\"\"\"\n\t\tprint(\"start training\")\n\t\ttry:\n\t\t\tprint(\"loading...\")\n\t\t\tfr = open(user_matrix_path, 'rb')\n\t\t\tself.user_matrix = pickle.load(fr)\n\t\t\tfr.close()\n\t\t\tprint(\"Successfully loaded\")\n\t\texcept BaseException:\n\t\t\tprint(\"Failed to load\")\n\t\t\tprint(\"Recalculating user_matrix...\")\n\t\t\tself.user_matrix = self.user_similarity()\n\t\t\tprint(\"Successfully calculated\")\n\t\t\t\"\"\"\n\t\t\tFor testing: different matrices are computed across experiments, so saving is skipped for now\n\t\t\tparent_path = user_matrix_path[: user_matrix_path.rfind(\"/\")]\n\t\t\tif not os.path.exists(parent_path):\n\t\t\t\tos.mkdir(parent_path)\n\t\t\tprint(\"Start saving...\")\n\t\t\twith open(user_matrix_path, \"wb\") as f:\n\t\t\t\tpickle.dump(self.user_matrix, f, 0)\n\t\t\tprint(\"Successfully saved\")\n\t\t\t\"\"\"\n\n\tdef recommend(self, user, K, L):\n\t\t\"\"\"Recommend: estimate the user's interest in items\n\n\t\t:param user: the user to recommend for\n\t\t:param K: number of similar users\n\t\t:param L: number of recommendations\n\t\t:return: {recommended book: interest score}\n\t\t\"\"\"\n\t\t\n\t\trank = dict()\n\t\tinteracted_items = self.trainset[user]\n\t\tfor v, cuv in sorted(self.user_matrix[user].items(), \n\t\t\tkey = operator.itemgetter(1), reverse = True)[:K]:\n\t\t\t\tfor i in self.trainset[v]:\n\t\t\t\t\tif i in interacted_items:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\trank.setdefault(i, 0.)\n\t\t\t\t\trank[i] += cuv\n\n\t\treturn dict(sorted(rank.items(), key = operator.itemgetter(1), \n\t\t\treverse = True)[:L])\n\t\n\tdef recommends(self, users, K, L):\n\t\t\"\"\"Batch recommendation, used for testing\n\n\t\t:param users: list of users to recommend for\n\t\t:param K: number of similar users\n\t\t:param L: number of recommendations\n\t\t:return: {user: that user's recommendation list}\n\t\t\"\"\"\n\n\t\trecommends = dict()\n\t\tfor user in users:\n\t\t\t\n\t\t\tif user not in self.user_matrix:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\trecommends[user] = list(self.recommend(user, K, L).keys())\n\t\t\t#print(recommends[user])\n\t\treturn recommends\n","repo_name":"Ponyooo/book-recommender","sub_path":"algorithms_and_evaluation/algorithms/usercf.py","file_name":"usercf.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74450917192","text":"# ============================================================================\n# Adapted from the original authors:\n# Kenny Young (kjyoung@ualberta.ca)\n# Tian Tian (ttian@ualberta.ca)\n#\n# Anthony G. 
Chen\n# ============================================================================\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as f\n\nfrom algos.ac_lambda import ACLambda\n\n\nclass LSF_ACLambda(ACLambda):\n \"\"\"\n Incremental AC lambda agent\n \"\"\"\n\n def __init__(self, ModelCls, model_kwargs,\n discount_gamma=0.99,\n lr_alpha=0.00048828125,\n sf_lr=0.00048828125,\n trace_lambda=0.0,\n entropy_beta=0.01,\n grad_rms_gamma=0.999,\n grad_rms_eps=0.0001,\n min_denom=0.0001,\n sf_lambda=0.0,\n seed=None, # dummy\n ):\n # TODO: put more things as arguments\n super().__init__(\n ModelCls, model_kwargs, discount_gamma=discount_gamma,\n lr_alpha=lr_alpha, trace_lambda=trace_lambda,\n entropy_beta=entropy_beta, grad_rms_gamma=grad_rms_gamma,\n grad_rms_eps=grad_rms_eps, min_denom=min_denom,\n )\n self.sf_lambda = sf_lambda\n\n # NOTE: somewhat hacky for now\n self.indiv_str_lr_dict = {\n 'sf_fn': sf_lr,\n }\n\n def optimize_agent(self, sample, time_step):\n\n # ==\n # Unpack sample\n state = sample.state # (batch_n=1, channel, height, width)\n next_state = sample.next_state # (batch_n, c, h w)\n action = sample.action # (batch_n=1, 1)\n reward = sample.reward # (batch_n=1, 1)\n is_terminal = sample.is_terminal # (batch_n=1, 1)\n\n model_out_tup = self.model.compute_pi_v_sf_r_lsfv(state,\n self.sf_lambda)\n pi, V_curr, sf_curr, r_curr, lsf_V_curr, phi_curr = model_out_tup\n\n # ==\n # Compute eligibility trace\n trace_potential = V_curr + 0.5 * torch.log(pi[0, action] + self.min_denom)\n # TODO: add sum for batch dim?\n\n # Gradients to be combined with elig traces\n self.model.zero_grad()\n trace_potential.backward(retain_graph=True)\n self.store_current_trace_grads()\n\n # Accumulating trace with stored gradients\n self.accumulate_eligibility_traces()\n\n # ==\n # Compute additional losses\n # TODO for all below, sum across batch dimension?\n entropy = -torch.sum(torch.log(pi + self.min_denom) * pi)\n\n # SF loss\n with torch.no_grad():\n phi_next = self.model.compute_phi(next_state)\n sf_next = self.model.compute_sf_from_phi(phi_next) # (1, d)\n sf_target = phi_curr.detach().clone() + (\n (self.sf_lambda * self.discount_gamma) * sf_next.detach()\n ) # TODO NOTE do I need to clone after detach?\n sf_loss = torch.mean(0.5 * (sf_target - sf_curr) ** 2)\n\n # Reward loss\n # rew_last = self.model.reward_layer(phi_last)[0] # TODO delete\n rew_loss = 0.5 * (reward - r_curr) ** 2\n\n # Combine non-trace losses to optimize\n non_trace_loss = (\n (0.5 * sf_loss) + (0.5 * rew_loss)\n - (self.entropy_beta * entropy)\n ) # TODO add customizable coefficients\n self.model.zero_grad()\n non_trace_loss.backward()\n\n with torch.no_grad():\n # TD error\n _, _, _, _, lsf_V_next, _ = self.model.compute_pi_v_sf_r_lsfv(\n next_state, self.sf_lambda\n )\n\n lsf_V_next = self.model(next_state)[1]\n delta = (self.discount_gamma *\n (0 if is_terminal else lsf_V_next)\n + reward - V_curr)\n\n # Update\n self.parameter_step(delta, time_step)\n\n # For logging: compute difference in estimate\n lsf_v_theta_v_diff = torch.norm(\n (lsf_V_curr - V_curr)\n )\n\n # ==\n # Construct dict\n # TODO log the average trace magnitude?\n out_dict = {\n 'value_loss': delta.item() ** 2,\n 'sf_loss': sf_loss.item(),\n 'reward_loss': rew_loss.item(),\n 'lsf_v_v_diff': lsf_v_theta_v_diff.item(),\n }\n\n return out_dict\n\n\nif __name__ == '__main__':\n 
pass\n","repo_name":"im-ant/sr-return","sub_path":"nonlinear/algos/lsf_ac_lambda.py","file_name":"lsf_ac_lambda.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13253240568","text":"# -*- coding: utf-8 -*-\n\nfrom djangosige.apps.compras.models import ItensCompra, Pagamento\nfrom djangosige.apps.vendas.views.report_vendas import VendaReport, REPORT_FONT_BOLD, REPORT_FONT, DadosProdutos, DadosPagamento\n\nfrom geraldo import ReportBand\nfrom geraldo.widgets import ObjectValue\nfrom reportlab.lib.units import cm\n\n\nclass CompraReport(VendaReport):\n\n def __init__(self, *args, **kargs):\n super(CompraReport, self).__init__(*args, **kargs)\n self.title = 'Relatorio de compra'\n\n self.dados_fornecedor = DadosFornecedor()\n self.dados_produtos = DadosProdutosCompra()\n self.dados_pagamento = DadosPagamentoCompra()\n\n\nclass DadosFornecedor(ReportBand):\n\n def __init__(self):\n super(DadosFornecedor, self).__init__()\n self.ender_info = False\n self.elements = []\n txt = ObjectValue(attribute_name='fornecedor.nome_razao_social',\n top=0.3 * cm, left=0.3 * cm, width=8 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT_BOLD,\n 'fontSize': 12, 'leading': 12}\n self.elements.append(txt)\n\n self.height = 2.7 * cm\n\n def inserir_informacoes_pj(self):\n txt = ObjectValue(attribute_name='fornecedor.pessoa_jur_info.format_cnpj',\n top=0.3 * cm, left=8.1 * cm, width=4 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT_BOLD,\n 'fontSize': 10, 'leading': 10}\n self.elements.append(txt)\n\n txt = ObjectValue(attribute_name='fornecedor.pessoa_jur_info.format_ie',\n top=0.3 * cm, left=13 * cm, width=6.4 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT_BOLD,\n 'fontSize': 10, 'leading': 10}\n self.elements.append(txt)\n\n def inserir_informacoes_pf(self):\n txt = ObjectValue(attribute_name='fornecedor.pessoa_fis_info.format_cpf',\n top=0.3 * cm, left=8.1 * cm, width=4 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT_BOLD,\n 'fontSize': 10, 'leading': 10}\n self.elements.append(txt)\n\n txt = ObjectValue(attribute_name='fornecedor.pessoa_fis_info.format_rg',\n top=0.3 * cm, left=13 * cm, width=6.4 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT_BOLD,\n 'fontSize': 10, 'leading': 10}\n self.elements.append(txt)\n\n def inserir_informacoes_endereco(self):\n self.ender_info = True\n txt = ObjectValue(attribute_name='fornecedor.endereco_padrao.format_endereco',\n display_format='Endereço: %s', top=1.1 * cm, left=0.3 * cm, width=19.4 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT, 'fontSize': 10, 'leading': 10}\n self.elements.append(txt)\n\n txt = ObjectValue(attribute_name='fornecedor.endereco_padrao.municipio',\n display_format='Cidade: %s', top=1.6 * cm, left=0.3 * cm, width=8 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT, 'fontSize': 10, 'leading': 10}\n self.elements.append(txt)\n\n txt = ObjectValue(attribute_name='fornecedor.endereco_padrao.uf',\n display_format='UF: %s', top=1.6 * cm, left=8.1 * cm, width=4 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT, 'fontSize': 10, 'leading': 10}\n self.elements.append(txt)\n\n txt = ObjectValue(attribute_name='fornecedor.endereco_padrao.cep', display_format='CEP: %s',\n top=1.6 * cm, left=13 * cm, width=19.4 * cm, height=0.5 * cm)\n txt.style = {'fontName': REPORT_FONT, 'fontSize': 10, 'leading': 10}\n self.elements.append(txt)\n\n def 
inserir_informacoes_telefone(self):\n        if not self.ender_info:\n            top = 1.1 * cm\n        else:\n            top = 2.1 * cm\n\n        txt = ObjectValue(attribute_name='fornecedor.telefone_padrao.telefone',\n                          display_format='Tel: %s', top=top, left=0.3 * cm, width=8 * cm, height=0.5 * cm)\n        txt.style = {'fontName': REPORT_FONT, 'fontSize': 10, 'leading': 10}\n        self.elements.append(txt)\n\n    def inserir_informacoes_email(self):\n        if not self.ender_info:\n            top = 1.1 * cm\n        else:\n            top = 2.1 * cm\n\n        txt = ObjectValue(attribute_name='fornecedor.email_padrao.email',\n                          display_format='Email: %s', top=top, left=8.1 * cm, width=11.3 * cm, height=0.5 * cm)\n        txt.style = {'fontName': REPORT_FONT, 'fontSize': 10, 'leading': 10}\n        self.elements.append(txt)\n\n\nclass DadosProdutosCompra(DadosProdutos):\n\n    def __init__(self):\n        super(DadosProdutosCompra, self).__init__()\n        self.get_queryset = lambda self, parent_object: ItensCompra.objects.filter(\n            compra_id=parent_object) or []\n\n\nclass DadosPagamentoCompra(DadosPagamento):\n\n    def __init__(self):\n        super(DadosPagamentoCompra, self).__init__()\n        self.get_queryset = lambda self, parent_object: Pagamento.objects.filter(\n            compra_id=parent_object) or []\n","repo_name":"thiagopena/djangoSIGE","sub_path":"djangosige/apps/compras/views/report_compras.py","file_name":"report_compras.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":379,"dataset":"github-code","pt":"27"} +{"seq_id":"20289799157","text":"'''\r\nCopyright 1390(2012(AD)) Vahid Kharazi \r\nLicensed for distribution under the GPL version 3\r\nThese are two functions for processing POSTed data.\r\n'''\r\n\r\n\r\ndef datatolist(string):\r\n    str = string[1:-1]\r\n\r\n    def fun1(str):\r\n        result = []\r\n        for i in range(len(str)):\r\n            if str[i] == '[':\r\n                t = str.find(']', i)\r\n                result.append(str[i:t + 1])\r\n        return result\r\n\r\n    def fun2(l):\r\n        result = []\r\n        for i in l:\r\n            for j in range(len(i)):\r\n                if i[j] == '[':\r\n                    t = i.find(',', j)\r\n                    result.append(int(i[j + 1:t]))\r\n                if i[j] == ',':\r\n                    t = i.find(']', j)\r\n                    result.append(int(i[j + 1:t]))\r\n\r\n        return result\r\n\r\n    def fun3(l):\r\n        result = []\r\n        for i in range(len(l)):\r\n            if i % 2 == 0:\r\n                result.append([l[i]])\r\n\r\n        c = -1\r\n        for i in range(1, len(l), 2):\r\n            c += 1\r\n            result[c].append(l[i])\r\n        return result\r\n    return fun3(fun2(fun1(str)))\r\n\r\n\r\ndef xoyyab(string):\r\n    result = []\r\n    r = string.split('&')\r\n    for i in r:\r\n        if i != '':\r\n            result.append(int(i))\r\n    return result\r\n","repo_name":"kharazi/angry-snake","sub_path":"Snake_lib/strtolist.py","file_name":"strtolist.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"1684185411","text":"from django.shortcuts import render\nimport os, json\n\nfrom mainapp.models import Product, ProductCategory\n\n\n\n\nMODULE_DIR = os.path.dirname(__file__)\n# Create your views here.\n\ndef index(request):\n    context = {\n        'title': 'geekshop',\n    }\n    return render(request, 'mainapp/index.html', context)\n\n\ndef products(request):\n    context = {\n        'title': 'geekshop',\n        'products': Product.objects.all(),\n        'categories': ProductCategory.objects.all(),\n    }\n    return render(request, 'mainapp/products.html', context)\n\n","repo_name":"Xemur0/geekshop","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
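# A minimal usage sketch for the two POST-data helpers in strtolist.py above,
# with hypothetical inputs inferred from the parsing code (they are not taken
# from the original repository):
#
#   datatolist("[[1,2][3,4][5,6]]")   # -> [[1, 2], [3, 4], [5, 6]]
#   xoyyab("10&20&30&")               # -> [10, 20, 30]
#
# datatolist strips the outer brackets, collects each "[x,y]" chunk, pulls the
# two integers out of every chunk, and regroups them into coordinate pairs;
# xoyyab splits on '&' and keeps the non-empty pieces as integers.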
+{"seq_id":"15775968395","text":"\"\"\"\nProblem description:\nAfter robbing a street and a circle of houses last time, the thief has found a new area to burgle. This area has only one entrance, which we call the \"root\". Apart from the root, every house has one and only one \"parent\" house connected to it. After some scouting, the clever thief realized that \"all houses in this area are arranged like a binary tree\". If two directly-linked houses are broken into on the same night, the houses automatically alert the police.\nCompute the maximum amount of money the thief can steal in one night without triggering the alarm.\n\nExample:\nInput: [3,2,3,null,3,null,1]\n\n     3\n    / \\\n   2   3\n    \\   \\ \n     3   1\n\nOutput: 7 \nExplanation: the maximum amount the thief can steal in one night = 3 + 3 + 1 = 7.\n\nApproach:\nDynamic programming combined with recursion. For each node, the maximum profit is max(rob this node + the profit of both children when neither is robbed, skip this node + the best of each child whether robbed or not).\n\"\"\"\nclass Solution:\n    def rob(self, root: TreeNode) -> int:\n        result = self.helper(root)\n        return max(result)\n    \n    def helper(self, root):\n        if not root:\n            return [0, 0]\n        left = self.helper(root.left)\n        right = self.helper(root.right)\n        res0 = max(left) + max(right)\n        res1 = root.val + left[0] + right[0]\n        return [res0, res1]\n","repo_name":"yechens/XiaoZhao-ChongChongChong","sub_path":"coding/337_打家劫舍III.py","file_name":"337_打家劫舍III.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":194,"dataset":"github-code","pt":"27"} +{"seq_id":"16187828733","text":"from torch.nn import (Module, Conv2d, Linear, MaxPool2d, ReLU, BatchNorm2d, Dropout2d, Dropout, Flatten)\n\nfrom torch import flatten\n\n\n\nclass CNNBasic(Module):\n    def __init__(self, numChannels=1, classes=1):\n        # call the parent constructor\n        super(CNNBasic, self).__init__()\n\n        self.conv1 = ConvBlock(in_channels=numChannels, out_channels=12)\n        self.conv2 = ConvBlock(in_channels=12, out_channels=24)\n        self.conv3 = ConvBlock(in_channels=24, out_channels=48, kernel_size=(5, 5))\n        self.conv4 = ConvBlock(in_channels=48, out_channels=96, kernel_size=(5, 5))\n        self.conv5 = ConvBlock(in_channels=96, out_channels=192, kernel_size=(5, 5))\n        self.conv6 = ConvBlock(in_channels=192, out_channels=384, kernel_size=(5, 5))\n\n\n        self.fc1 = Linear(in_features=6144, out_features=500)\n        self.relu5 = ReLU()\n\n        self.fc2 = Linear(in_features=500, out_features=classes)\n    \n    def forward(self, x):\n        x = self.conv1(x)\n\n        x = self.conv2(x)\n\n        x = self.conv3(x)\n\n        x = self.conv4(x)\n\n        x = self.conv5(x)\n\n        x = self.conv6(x)\n\n        x = flatten(x, 1)\n        x = self.fc1(x)\n        x = self.relu5(x)\n        output = self.fc2(x)\n        # return the output predictions\n        return output\n    \n\nclass ConvBlock(Module):\n    def __init__(self, in_channels, out_channels, kernel_size=(3, 3)):\n        # call the parent constructor\n        super(ConvBlock, self).__init__()\n        # initialize the convolutional layer\n        self.conv = Conv2d(in_channels=in_channels, out_channels=out_channels,\n                           kernel_size=kernel_size)\n        # initialize the batch normalization layer\n        self.bn = BatchNorm2d(num_features=out_channels)\n        # initialize the ReLU layer\n        self.relu = ReLU()\n        self.maxpool = MaxPool2d(kernel_size=(2, 2), stride=(2, 2))\n\n    def forward(self, x):\n        # apply the convolutional layer, followed by batch normalization\n        # and relu activation\n        x = self.conv(x)\n        x = self.bn(x)\n        x = self.relu(x)\n        x = self.maxpool(x)\n        # return the block\n        return x","repo_name":"GrowlingM1ke/Pneumonia_Classification","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18084754860","text":"import numpy as np\nfrom basic_lsh import BasicLSH\nimport heapq\n\n\nclass MultiProbeLSH(BasicLSH):\n    def __init__(self, dim, l, m, w, seed=1):\n        super(MultiProbeLSH, self).__init__(dim, l, m, w, seed)\n\n    def is_valid(self, perturb_set):\n        for perturb in perturb_set:\n            if (2 * self.m + 1 - perturb) in perturb_set:\n                return 
False\n if perturb > 2 * self.m:\n return False\n return True\n\n def has_max(self, perturb_set):\n if perturb_set[-1] == self.m * 2 - 1:\n return True\n return False\n\n def shift(self, perturb_set):\n next = perturb_set.copy()\n next[-1] = perturb_set[-1] + 1\n return next\n\n def expand(self, perturb_set):\n next = perturb_set.copy()\n next.append(perturb_set[-1] + 1)\n return next\n\n def score(self, query, i, j, perturb):\n if perturb == -1:\n f = np.dot(self.a[j][i], query) + self.b[j][i]\n h = self.hash(query)[j][i]\n return f - h * self.w\n if perturb == 1:\n return self.w - self.score(query, i, j, -1)\n\n class PiPair:\n def __init__(self, i, delta):\n self.i = i\n self.delta = delta\n\n def pi_list(self, query, j):\n pi_list = []\n for i in range(self.m):\n pi_list.append((self.PiPair(i+1, 1), self.score(query, i, j, 1)))\n pi_list.append((self.PiPair(i+1, -1), self.score(query, i, j, -1)))\n self.quick_sort(pi_list, 0, len(pi_list) - 1)\n return pi_list\n\n def quick_sort(self, alist, start, end):\n if start >= end:\n return\n mid = alist[start]\n left = start\n right = end\n while left < right:\n while left < right and alist[right][1] >= mid[1]:\n right -= 1\n while left < right and alist[left][1] <= mid[1]:\n left += 1\n if left < right:\n tmp = alist[right]\n alist[right] = alist[left]\n alist[left] = tmp\n alist[start] = alist[left]\n alist[left] = mid\n self.quick_sort(alist, start, left - 1)\n self.quick_sort(alist, left + 1, end)\n\n def _class_perturb_set(self):\n outer = self\n\n class PerturbSet:\n def __init__(self, perturb_set, query, m, j):\n self.perturb_set = perturb_set\n pi_list = outer.pi_list(query, j)\n score = 0\n for perturb in perturb_set:\n score += pi_list[perturb - 1][1]\n self.score = score\n\n def __lt__(self, other):\n return self.score < other.score\n\n return PerturbSet\n\n def probe_sequence(self, query, j):\n result = []\n perturb_set_begin = self._class_perturb_set()([1], query, self.m, j)\n heap = []\n heapq.heappush(heap, perturb_set_begin)\n while True:\n perturb_set = heapq.heappop(heap)\n if self.is_valid(perturb_set.perturb_set):\n result.append(perturb_set.perturb_set)\n else:\n break\n\n if not self.has_max(perturb_set.perturb_set):\n shift = self._class_perturb_set()(self.shift(perturb_set.perturb_set), query, self.m, j)\n expand = self._class_perturb_set()(self.expand(perturb_set.perturb_set), query, self.m, j)\n heapq.heappush(heap, shift)\n heapq.heappush(heap, expand)\n return result\n\n def query(self, point):\n results = set()\n hash_values = self.hash(point)\n for j in range(self.l):\n pi_list = self.pi_list(point, j)\n\n key = hash_values[j].copy()\n target = self.hash_tables[j].get(tuple(key))\n if target:\n results.add(target)\n\n probe_sequence = self.probe_sequence(point, j)\n for perturb_set in probe_sequence:\n tmp = key.copy()\n for perturb in perturb_set:\n perturb_index, perturb_value = pi_list[perturb - 1][0].i, pi_list[perturb - 1][0].delta\n tmp[perturb_index - 1] += perturb_value\n perturb_target = self.hash_tables[j].get(tuple(tmp))\n if perturb_target:\n results.add(perturb_target)\n\n return results\n","repo_name":"Cher-er/Multi-Probe_LSH","sub_path":"multi_probe_lsh.py","file_name":"multi_probe_lsh.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"32166081722","text":"import random\nimport inspect # getargspec for converting function signatures\nimport string # For random name maker\nimport traceback # For 
magic variable maker\n\nimport numpy as np\nimport scipy.optimize\n\nimport math\n\n# NOLIMIT = 1e10\nNOLIMIT = float('inf')\n\n\ndef randomname(length):\n    letters = string.ascii_lowercase\n    return ''.join(random.choice(letters) for i in range(length))\n\n\ndef packDict(keyOrder, dictToPack, defaultval):\n    ''' Order values from dictionary into a list '''\n    if dictToPack is None:\n        dictToPack = {}\n\n    out = []\n    for v in keyOrder:\n        if v in dictToPack:\n            out.append(dictToPack[v])\n        else:\n            # print(\"Using default: {}={}\".format(v,defaultval))\n            out.append(defaultval)\n    return out\n\ndef packDictStrict(keyOrder, dictToPack):\n    return [dictToPack[v] for v in keyOrder]\n\ndef unpackToDict(varOrder, packedList):\n    ''' Pack an ordered list into a dictionary '''\n    return {var:val for var,val in zip(varOrder, packedList)}\n\ndef getAssignedName(depth=2):\n    # Should be called by an __init__ block of a class.\n    # Returns the local name assigned to the instance in the code calling\n    # the instantiator!\n    stack = traceback.extract_stack()\n    filename, lineno, function_name, code = stack[-depth-1]\n    name = code.split('=')[0].strip()\n    return name\n\nclass Variable:\n    # This is mostly a way to help map variable names to function\n    # argument names.\n\n    def __init__(self, name=\"\", unitlabel=\"\",minVal=-NOLIMIT, maxVal=NOLIMIT, cyclicBounds=None):\n\n        if not name:\n            try:\n                name = getAssignedName()\n            except:\n                name = randomname(length=3)\n                print(\"Failed to make good name -- giving random name: \"+name)\n\n        # print(\"Made variable\",name)\n\n        self.name = name\n        self.cyclicBounds = cyclicBounds\n        self.minVal = minVal\n        self.maxVal = maxVal\n        self.unitlabel = unitlabel\n\n        if self.cyclicBounds:\n            self.minVal = max(self.minVal, self.cyclicBounds[0])\n            self.maxVal = min(self.maxVal, self.cyclicBounds[1])\n\n\n    def print(self, value):\n        fmt = \"{:10s} : {:8.4g} {}\"\n        if self.cyclicBounds:\n\n            if not self.cyclicBounds[0] <= value <= self.cyclicBounds[1]:\n                width = self.cyclicBounds[1] - self.cyclicBounds[0]\n                realvalue = ((value-self.cyclicBounds[0]) % width)+self.cyclicBounds[0]\n                # parentheses are required: .format binds tighter than +\n                print((fmt+\" [{}]\").format(self.name,realvalue,self.unitlabel,value))\n                return\n\n        print(fmt.format(self.name,value,self.unitlabel))\n\n\n\nclass Equation:\n    # An equation is a function with a mapping of variables to parameters.\n\n    # Example: take a function defined as:\n    # def L(rho, V, CL, Sref):\n    #     return 0.5*rho*V*V*CL*Sref\n\n    # we could use this to compute lift relations at stall.\n    # (1) Create 5 variables (could be constants):\n    # rho1000 = Variable()\n    # Vstall = Variable()\n    # CLmax = Variable()\n    # Sref = Variable()\n    # Lmax = Variable() # Note we need to create the output as a var too!\n\n    # (2) Map these to the lift variables when instantiating Equation():\n    # e = Equation(L, rho=rho1000, V=Vstall, CL=CLmax, Sref=Sref, L=Lmax)\n\n\n\n    # Some instance variables:\n    # allParamNames: All kwargs in func + output param name\n    # allVarNames: Variable names that have been mapped to the params\n    # resFunc\n    # param2var, var2param [maps variables to/from equation parameters]\n\n    def __init__(self, func, funcIsRes=False, **kwargs):\n        '''\n        funcIsRes: function is already in residual form [returns 0 when satisfied]\n        '''\n        try:\n            func.__name__\n        except:\n            raise Exception(\"func passed in is not a function\")\n\n        argSpec = inspect.getargspec(func)\n        inputParamNames = argSpec.args\n        self.name = \"eqn_\"+func.__name__\n        self.allParamNames = inputParamNames[:] # copy!\n\n        if not funcIsRes:\n            outputParamName = func.__name__\n            # 
self.allParamNames += [outputParamName]\n            self.allParamNames.append(outputParamName)\n\n\n        if set(kwargs.keys()) != set(self.allParamNames):\n            raise Exception(\"kwargs = \"+str(kwargs.keys())+ \" but should be \"+str(self.allParamNames))\n\n        if any(type(_) is not Variable for _ in kwargs.values()):\n            raise Exception(\"Input types must all be var\")\n\n        # Store the map of variables to function inputs\n        self.var2param = {}\n        self.param2var = {}\n        self.allVarNames = []\n\n        minValidValues = []\n        maxValidValues = []\n        for k,v in kwargs.items():\n            self.param2var[k] = v.name\n            self.var2param[v.name] = k\n            self.allVarNames.append(v.name)\n\n            minValidValues.append(-10 if v.minVal == -NOLIMIT else v.minVal)\n            maxValidValues.append( 10 if v.maxVal == NOLIMIT else v.maxVal)\n\n        checkValidFunction(func)\n\n        if funcIsRes:\n            # Function is already in residual form\n            self.resFunc = func\n        else:\n            # Create a new function in residual form\n            self.resFunc = makeResidualFunc(func)\n\n        # Test that everything works properly\n\n        # Make randomly generated inputs and try calling function.\n        # If variable validity bounds are set, respect those to avoid\n        # predictable function failures\n        testKwargs = {}\n        for k in inputParamNames:\n            pname = self.param2var[k]\n            i = self.allVarNames.index(pname)\n            lo = minValidValues[i]\n            hi = maxValidValues[i]\n\n            testKwargs[k] = lo + random.random()*(hi-lo)\n\n        try:\n            out = func(**testKwargs)\n        except:\n            print(\"Warning: Residual function failed with random inputs\"+str(testKwargs))\n            return\n            # raise Exception(\"Residual function failed with random inputs\"+str(testKwargs))\n\n        testKwargs[outputParamName] = out\n        try:\n            res = self.resFunc(**testKwargs)\n        except:\n            # NB: res is unbound if resFunc raised, so report the inputs instead\n            raise Exception(\"Residual function failed with random inputs: \"+str(testKwargs))\n        else:\n            if res != 0:\n                raise Exception(\"Weird failure. Function failed to be converted to residual form\")\n            # else:\n            #     print(\"Residual is happy with random inputs!\")\n            #\n\n\n\n    def makeCompressedResFunc(self, globalSetVarNameList):\n        # Tell the equation which variables will be set, and get\n        # back a function with the correctly reduced signature\n\n        # globalSetVarNameList contains ALL set variable names, even those\n        # that don't apply to this equation. 
This function processes\n # those that it understands.\n\n # Closure using var-to-param mapping\n # Returns a new residual function with constant values baked in\n\n # (1) First, extract only the variables we recognize\n setVarNames = []\n setParamNames = []\n for name in globalSetVarNameList:\n if name in self.allVarNames:\n setVarNames.append(name)\n setParamNames.append(self.var2param[name])\n\n self.setParamValues = {} # Will be populated later\n\n # (2) Determine what parameters will have to be passed in every call,\n # thus determining signature of new compressed residual func.\n unsetParams = list(set(self.allParamNames) - set(setParamNames))\n unsetVarNames = [self.param2var[p] for p in unsetParams]\n\n self.setVarNames = setVarNames\n\n firstCall = True\n def compressedResFunc(**inputs):\n # Inputs are the unset variable values as kwargs\n # References: unsetVarNames to error check,\n # setParamValues dict to close on specified values.\n # Invokes the original residual function,\n # which invokes the original function\n\n\n # Error-checking only on first time through\n nonlocal firstCall # Python3-only syntax!\n if firstCall:\n if len(self.setParamValues) != len(self.setVarNames):\n raise Exception(self.name+\" resFunc called before constant values were populated\")\n\n if set(inputs.keys()) != set(unsetVarNames):\n raise Exception(self.name+\" resFunc called incorrectly: got \"+str(sorted(inputs.keys()))+\" expected \"+str(sorted(unsetVarNames)))\n firstCall = False\n\n fullResFuncDict = self.setParamValues.copy()\n for var,val in inputs.items():\n # print(var,\":\",val)\n p = self.var2param[var]\n fullResFuncDict[p] = val\n\n return self.resFunc(**fullResFuncDict)\n\n compressedResFunc.__name__ = self.resFunc.__name__+\"_compressed\"\n\n return compressedResFunc, unsetVarNames\n\n\n def getSolverReadyFunc(self, setVarNames):\n\n # (1) Reduce from the full residual function to one with the\n # specified variables \"baked in\".\n # varList is the list of variables that must be set.\n # [The values themselves are set later. This just generates\n # a function with the right signature, and with access to\n # a dictionary that will eventually hold the constant values]\n compressedResFunc, varList = self.makeCompressedResFunc(setVarNames)\n\n # (2) Convert from kwarg-only function to arg-only function,\n # so that scipy solvers work. This then requires input to\n # be passed in in a consistent order, so we return that too,\n # so that the outer solver can reorder things properly.\n solverReadyFunc, funcVarOrder = mkKwarglessResFunc(compressedResFunc, varList)\n\n return solverReadyFunc, funcVarOrder\n\n def setSpecifiedValues(self, setVarValuesDict):\n # Call right before solving to set the values of the variables\n # that are being specified.\n\n # Convert {var:value} into {param:value}\n self.setParamValues = {}\n for varName, value in setVarValuesDict.items():\n # Get the equation parameter name that this variable has been\n # mapped to\n eqnParamName = self.var2param[varName]\n self.setParamValues[eqnParamName] = value\n\n\n # Four levels of indirection from original function\n # L = f(...)\n # resL = f(** L,...) 
--> makes into residual form\n # compressedRes = f(**unknowns) --> remaps *AND* simplifies passing 1 unknown\n # scipyRes = f(*unknowns) [in order] --> allows scipy to call\n\ndef mkKwarglessResFunc(kwargFunc, expectedArguments):\n # Fourth level of indirection from original function\n #\n order = expectedArguments[:] # Could reorder if we wanted, but no reason\n def kwargless(inputs):\n kwargs = {o:i for o,i in zip(order, inputs)}\n return kwargFunc(**kwargs)\n kwargless.__name__ = kwargFunc.__name__ + \"_kwargless\"\n\n return kwargless, order\n\nclass System:\n # A system of equations, with some variables set and others to be\n # solved for. This class manages the \"closure\" of the equations, given\n # the prescribed variables, and creates a more compact system of\n # residual functions only in terms of the unknown variables.\n #\n # Meanwhile, it does basic checks for solvability of the system:\n # [e.g. are there the right number of variables and equations, and is\n # the dependency matrix invertible (full rank).]\n #\n # Finally, it invokes a solver -- various ones from scipy can be used.\n\n\n def __init__(self, equations, constVariables):\n '''\n constVariables can be either the Variables themselves or\n their names\n '''\n if not all(isinstance(eq, Equation) for eq in equations):\n raise Exception(\"Pass in equations\")\n\n if all(isinstance(v, Variable) for v in constVariables):\n specifiedVarNames = [v.name for v in constVariables]\n elif all(type(v) == str for v in constVariables):\n specifiedVarNames = constVariables[:]\n else:\n raise Exception(\"Must pass in Variables or variable string names. Got: \"+str(constVariables))\n\n # Given the specified values (constVals), create new solver-ready\n # functions with those values baked in.\n # The functions have to be called with parameters in the correct\n # order, so the equation maker returns the order in which the functions\n # will expect the parameters to be passed in.\n resFuncs = []\n funcVarOrders = [] # order functions expect vars to be passed in\n for eq in equations:\n resFunc, var_order = eq.getSolverReadyFunc(specifiedVarNames)\n resFuncs.append(resFunc)\n funcVarOrders.append(var_order)\n\n\n # Determine the total set of variables to be solved for.\n allVars = set()\n for theseVars in funcVarOrders:\n allVars.update(theseVars)\n\n nVar = len(allVars)\n nEq = len(equations)\n\n # Check that all specified values were recognized by at least one eqn.\n # and determine the total number of \"participating\" variables,\n # including those set by the user.\n nParams = nVar # initialize, increment later\n\n constValReferenced = {var:False for var in specifiedVarNames}\n for eq in equations:\n processedconsts = eq.setVarNames\n for var in processedconsts:\n constValReferenced[var] = True\n\n for constVar, refd in constValReferenced.items():\n if refd:\n nParams += 1\n else:\n print(\"Warning: Ignoring set variable '{}', which is not in any equation\".format(constVar))\n\n # And an arbitrary order for the specified variables\n setVarOrder = sorted(specifiedVarNames)\n\n self.nParams = nParams\n self.equations = equations\n self.setVarOrder = setVarOrder\n\n # Set an arbitrary order of variables for the solver.\n\n\n self.resFuncs = resFuncs\n self.funcVarOrders = funcVarOrders\n\n unknownVarsOrder = sorted(list(allVars))\n self.analyze(unknownVarsOrder)\n\n self.setUnknownVarOrder(unknownVarsOrder)\n\n self.minBoundDict = {}\n self.maxBoundDict = {}\n\n self.verbose = False\n\n # Disable solver until the constant values have 
been set!\n        # (If no variables are being fixed, there is nothing to wait for.)\n        self.constValuesSet = len(setVarOrder) == 0\n\n\n    def setUnknownVarOrder(self, unknownVarsOrder):\n        self.unknownVarsOrder = unknownVarsOrder\n\n        def combinedResFunc(x):\n            '''\n            The residual function for the multi-equation problem,\n            returning one residual per equation\n            '''\n            valDict = unpackToDict(unknownVarsOrder, x)\n            # valDict = {var:val for var,val in zip(unknownVarsOrder,x)}\n\n            all_residuals = []\n            for spResFunc, varOrder in zip(self.resFuncs, self.funcVarOrders):\n                try:\n                    xEq = packDictStrict(varOrder, valDict)\n                    # xEq = [valDict[v] for v in varOrder]\n                except KeyError:\n                    print(\"Var in x:\",x)\n                    print(\"doesn't belong in valDict:\", valDict.keys())\n                    raise\n                res = spResFunc(xEq)\n                all_residuals.append(res)\n\n            return all_residuals\n\n        # Save some stuff\n        self.resFunc = combinedResFunc\n\n\n    def getAdjacencyMatrix(self, unknownVarsOrder):\n        nParams = self.nParams\n        equations = self.equations\n        setVarOrder = self.setVarOrder\n        nEq = len(equations)\n        nVar = len(unknownVarsOrder)\n\n\n        M = np.zeros((nParams,nParams)) # (eq, variable)\n        for i,eq in enumerate(equations):\n            eqVars = list(eq.var2param.keys()) # variables in this equation\n            for j,v in enumerate(unknownVarsOrder): # unknown?\n                if v in eqVars:\n                    M[i,j] = 1\n            for j,v in enumerate(setVarOrder): # or fixed?\n                if v in eqVars:\n                    M[i,j+nVar] = 1\n        for i,varName in enumerate(setVarOrder):\n            j = nVar + i\n            M[i+nEq,j] = 1\n        return M\n\n    def getAdjacencyMatrixSmall(self,unknownVarsOrder):\n\n        # N = np.zeros((nVar,nVar)) # (eq, variable)\n        # for i,eq in enumerate(equations):\n        #     eqVars = list(eq.var2param.keys())\n        #     for j,v in enumerate(unknownVarsOrder):\n        #         if v in eqVars:\n        #             N[i,j] = 1\n\n        # if not np.array_equal(M[:nVar,:nVar], N):\n        #     raise Exception(\"Huh?\")\n        nVar = len(unknownVarsOrder)\n        M = self.getAdjacencyMatrix(unknownVarsOrder)\n        return M[:nVar,:nVar]\n\n    def analyze(self, unknownVarsOrder):\n\n        nParams = self.nParams\n        equations = self.equations\n        setVarOrder = self.setVarOrder\n\n        nVar = len(unknownVarsOrder)\n\n        nEq = len(equations)\n\n        print(\"System has {} equations involving {} variables\".format(nEq,nVar))\n        print(\"Equation names: \")\n        for i,eq in enumerate(equations,start=1):\n            print(\"{:2d}) {}\".format(i,eq.name.replace(\"eqn_\",\"\")))\n        print(\"Variable names: \")\n        for i,v in enumerate(unknownVarsOrder,start=1):\n            print(\"{:2d}) {}\".format(i,v.lstrip('v')))\n\n        # Check for obvious unsolvability\n        if nVar > nEq:\n            raise Exception(\"Underspecified: Must set {} more variable(s) (or provide {} more equation(s))\".format(nVar-nEq,nVar-nEq))\n        elif nVar < nEq:\n            raise Exception(\"Overspecified: Must set {} fewer variable(s) (or remove {} equation(s))\".format(nEq-nVar,nEq-nVar))\n\n        M = self.getAdjacencyMatrix(unknownVarsOrder)\n\n        N = self.getAdjacencyMatrixSmall(unknownVarsOrder)\n\n        # Try replacing 1's with random numbers, because sometimes a large set\n        # of valid equations gives a non-invertible matrix when it's filled\n        # with ones. 
Not sure why yet.\n Mfloat = np.zeros((nParams,nParams)) # (eq, variable)\n for i in range(nParams):\n for j in range(nParams):\n if M[i,j] != 0:\n Mfloat[i,j] = random.random()\n\n Nfloat = np.zeros((nVar,nVar)) # (eq, variable)\n for i in range(nVar):\n for j in range(nVar):\n if N[i,j] != 0:\n Nfloat[i,j] = random.random()\n\n def printN(N):\n print(\"--> \",unknownVarsOrder)\n print(N)\n\n def printM(M):\n print(\"Top-> \",unknownVarsOrder, setVarOrder)\n print(M[:nEq,:])\n print(\"Side: equations\")\n\n # print(\"DET: Mfloat \",np.linalg.det(Mfloat))\n # print(\"DET: Nfloat \",np.linalg.det(Nfloat))\n\n valid = False\n def passedDeterminant(matrix):\n return abs(np.linalg.det(matrix)) > 1e-15\n\n mpassed = passedDeterminant(Mfloat)\n npassed = passedDeterminant(Nfloat)\n if (mpassed ^ npassed):\n print(\"Conflicting N and M results:\")\n print(\"DET: Mfloat \",np.linalg.det(Mfloat))\n print(\"DET: Nfloat \",np.linalg.det(Nfloat))\n valid = True\n elif not mpassed:\n valid = False\n else:\n valid = True\n\n\n printN(N)\n if not valid:\n # printM(M)\n printN(N)\n\n diagnosed = False\n # Try to diagnose:\n for i in range(nEq):\n for j in range(i+1,nEq):\n if np.array_equal(N[i], N[j]):\n print(\"Eqn {} is the same as eqn {}\".format(i+1,j+1))\n diagnosed = True\n if not diagnosed:\n r = np.linalg.matrix_rank(Nfloat)\n print(\"RANK Nfloat = {}/{}\".format(r,len(Nfloat)))\n\n raise Exception(\"This is an invalid set of variables to specify. Equations are not solvable\")\n\n\n def setConstVals(self, constVals):\n\n if all(isinstance(v, Variable) for v in constVals.keys()):\n constVals = {k.name:v for k,v in constVals.items()}\n elif all(type(v) is str for v in constVals.keys()):\n pass\n else:\n raise Exception(\"Must pass in Variables or variable string names. Got: \"+str(constVals))\n\n # Finally, register the specific constant values\n # Later this will be moved farther out when I decompose this\n # function into many calls with different inputs!\n for eq in self.equations:\n # And tell this equation what the constants are\n theseConstValues = {}\n for varName in eq.setVarNames:\n if varName not in constVals:\n raise Exception(\"Equation '{}' expected constant variable '{}' to be set. 
Got variables: {}\".format(eq.name,varName,list(constVals.keys())))\n                theseConstValues[varName] = constVals[varName]\n\n            eq.setSpecifiedValues(theseConstValues)\n\n        self.constValuesSet = True\n\n    def randomGuess(self, initGuess, lbs, ubs):\n        guess = []\n        for xInit, lb, ub in zip(initGuess,lbs,ubs):\n\n            if ub < 1e8 and lb > -1e8:\n                pass\n            elif lb > -1e8:\n                ub = xInit + (xInit-lb)*2\n            elif ub < 1e8:\n                lb = xInit - (ub-xInit)*2\n            else:\n                ub = xInit*2\n                lb = xInit*0.5\n            if ub < lb:\n                lb, ub = ub, lb\n            guess.append(random.random()*(ub-lb)+lb)\n\n\n        return guess\n\n    def solve(self, guessDict={}, minBoundDict={}, maxBoundDict={}, verbose=False):\n        if not self.constValuesSet:\n            assert len(self.setVarOrder) > 0\n            raise Exception(\"Must set the constant values: \"+str(self.setVarOrder))\n\n        self.verbose = verbose\n\n        # If guessDict was passed in as {Variable: guess}, which is more\n        # convenient, convert it to {string: guess}\n        newGuessDict = {}\n        for k,v in guessDict.items():\n            if isinstance(k, Variable):\n                newGuessDict[k.name] = v\n            elif type(k) is str:\n                newGuessDict[k] = v\n            else:\n                raise Exception(\"Invalid guessDict format: type=\"+str(type(k)))\n        guessDict = newGuessDict\n\n        # Defaults of 1 seem safer sometimes, but it's really sketchy\n        guess = packDict(self.unknownVarsOrder, guessDict, defaultval=1)\n        if len(guess) != len(self.unknownVarsOrder):\n            raise Exception(\"Bad guess formatting!\")\n\n        lb = packDict(self.unknownVarsOrder, minBoundDict, defaultval=float(\"-inf\"))\n        ub = packDict(self.unknownVarsOrder, maxBoundDict, defaultval=float(\"inf\"))\n\n        bounds = (lb, ub)\n\n        def solver(x0, bnds=None):\n            ''' pick your poison '''\n            # Only least_squares supports bounds naturally, but you have\n            # to be careful that it actually solved the problem!\n            xSoln, failed, msg = solve_scipy_lstsq(self.resFunc, x0, bounds=bnds, maxIter=2000)\n            # xSoln, failed, msg = solve_scipy_root(self.resFunc, x0)\n            # xSoln, failed, msg = solve_scipy_fsolve(self.resFunc, x0)\n            # xSoln, failed, msg = solve_broyden(self.resFunc, x0)\n\n            return xSoln, failed, msg\n\n        # Optional, retry with randomized guesses\n        maxTries = 1\n        x0 = guess\n        for i in range(maxTries):\n            xSoln, failed, msg = solver(x0, bnds=bounds)\n            if not failed:\n                if i > 0:\n                    print(\"Succeeded in {} tries\".format(i+1))\n                break\n            if i < maxTries - 1:\n                x0 = self.randomGuess(guess, lb, ub)\n            else:\n                raise Exception(\"Failed {} times: msg={}\".format(maxTries,msg))\n\n\n        # Remap to a dict for easy parsing\n        xSolnDict = unpackToDict(self.unknownVarsOrder, xSoln)\n\n        return xSolnDict\n\n\ndef solve(equations, guessDict={}, constVals={}):\n\n    sys = System(equations, list(constVals.keys()))\n    sys.setConstVals(constVals)\n    x = sys.solve(guessDict)\n    return x\n\n\ndef solve_scipy_lstsq(resFunc, initGuess, bounds=None, maxIter=1000, quiet=False):\n    # This is the gold standard root solver in scipy.\n    # It allows bounds, unlike other solvers, and has excellent\n    # automatic rescaling of the system to improve convergence.\n\n    if bounds:\n        if len(bounds) != 2:\n            raise Exception(\"Bad bounds spec\")\n        N = len(initGuess)\n        if len(bounds[0]) != N:\n            raise Exception(\"Bad LB bounds spec\")\n        if len(bounds[1]) != N:\n            raise Exception(\"Bad UB bounds spec\")\n        # Autofix guess, if bounds violated\n        for i in range(N):\n            if not (bounds[0][i] <= initGuess[i] <= bounds[1][i]):\n                if bounds[0][i] == -NOLIMIT:\n                    initGuess[i] = bounds[1][i] - 1\n                elif bounds[1][i] == NOLIMIT:\n                    initGuess[i] = bounds[0][i] + 1\n                else:\n                    initGuess[i] = 0.5*(bounds[0][i]+bounds[1][i])\n                    print(\"Warning: 
initial guess for var index {} outside bounds, resetting to {}\".format(i,initGuess[i]))\n\n    vLevel = 1 # 0/1/2\n\n    # 'jac' does automatic variable scaling. Seems to really help\n    # convergence for even these moderately badly scaled aero problems\n    # optRes = scipy.optimize.least_squares(resFunc, initGuess, bounds=bounds, max_nfev=maxIter,verbose=vLevel,ftol=1e-8,xtol=1e-8,x_scale='jac')\n    optRes = scipy.optimize.least_squares(resFunc, initGuess, bounds=bounds, max_nfev=maxIter,verbose=vLevel,ftol=1e-10,gtol=1e-10,xtol=1e-10,x_scale='jac')\n    message = str(optRes.status)+\" \"+str(optRes.message)\n    failed = False\n    if not optRes.success:\n        failed = True\n\n    COST_TOL_WARN = 1e-7\n    COST_TOL_ERR = 1e-4\n    if optRes.status > 0 and optRes.cost > COST_TOL_ERR:\n        failed = True\n        message += \" Converged, but no solution found. Cost = {}\".format(optRes.cost)\n    elif optRes.status > 0 and optRes.cost > COST_TOL_WARN:\n        print(\" Converged, but to poor solution. Cost = {}\".format(optRes.cost))\n    elif optRes.status == 0: # maxfev reached\n        failed = True\n\n    # print(optRes.status,\"COST:\",optRes.cost)\n    # if (optRes.cost > 1e-6):\n    #     raise Exception(\"Failed to converge\")\n    return optRes.x, failed, message\n\n    # fun, x0, jac='2-point', bounds=(-inf, inf), method='trf', ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={})\n\ndef solve_scipy_fsolve(resFunc, initGuess, maxIter=1000, quiet=False):\n\n    xopt, infodict, ierr, msg = scipy.optimize.fsolve(resFunc, initGuess, full_output=True, maxfev=maxIter)\n    # print(infodict[\"fvec\"])\n    print(\"RES\",math.sqrt(sum(_*_ for _ in infodict[\"fvec\"])))\n\n    failed = False\n    message = str(ierr)+\" \"+msg\n    if ierr != 1:\n        failed = True\n\n    return xopt, failed, message\n\ndef solve_scipy_root(resFunc, initGuess, maxIter=1000, quiet=False):\n\n    options = {'maxfev':maxIter}\n    # scipy.optimize.show_options('root', method='hybr',disp=True)\n    # import sys\n    # sys.exit()\n    optRes = scipy.optimize.root(resFunc, initGuess, options=options)\n\n    # (fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, options=None)\n    failed = False\n    message = str(optRes.status)+\" \"+str(optRes.message)\n    if not optRes.success:\n        failed = True\n\n    if not failed:\n        # optRes.fun holds the final residual vector\n        res = math.sqrt(sum(_*_ for _ in optRes.fun))\n        if res > 1e-6:\n            message += \" converged, but not to solution: res = \"+str(res)\n            failed = True\n\n    # if optRes.status > 0 and optRes.cost > 1e-6:\n    #     raise Exception(\"Converged, but no solution found: \"+optRes.message)\n    # elif optRes.status == 0:\n    #     raise Exception(\"hit max nfev \"+optRes.message)\n\n    # print(optRes.status,\"COST:\",optRes.cost)\n\n    return optRes.x, failed, message\n\n\n\ndef solve_broyden(resFunc, initGuess, maxIter=1000, quiet=False):\n    # broyden2 returns the solution array directly and raises NoConvergence on\n    # failure -- it does not return an OptimizeResult like the solvers above.\n    try:\n        xSoln = scipy.optimize.broyden2(resFunc, initGuess, verbose=True, maxiter=maxIter)\n    except scipy.optimize.nonlin.NoConvergence as exc:\n        xSoln = exc.args[0] if exc.args else initGuess\n        return xSoln, True, \"broyden2 failed to converge\"\n\n    return xSoln, False, \"converged\"\n\n\ndef checkValidFunction(func):\n    '''\n    Currently the input function must have a fixed number of arguments,\n    and all must 
be required. (No *args, **kwargs, or default values)\n    '''\n    argSpec = inspect.getargspec(func)\n\n    if argSpec.varargs or argSpec.keywords:\n        raise Exception(\"Can't make a residual function from a function accepting variable argument lists\")\n    if argSpec.defaults:\n        # This seems to work, but it's worrisome, so I will raise an\n        # exception until I'm sure...\n        raise Exception(\"Can't make a residual function from a function with argument defaults yet\")\n\n\ndef makeResidualFunc(func):\n    '''\n    Convert a function z = f(x,y) into res = z - f(x,y)\n\n    Currently the input function must have a fixed number of arguments,\n    and all must be required. (No *args, **kwargs, or default values)\n    '''\n    argSpec = inspect.getargspec(func)\n\n    inputParamNames = argSpec.args\n    N = len(inputParamNames)\n    outputParamName = func.__name__\n    resFuncName = \"res_\"+func.__name__\n\n    errorCheck = True\n    verbose = False\n    # Notes:\n    # 1) Want to forbid args, and only allow kwargs\n    #    Could remove *args parameter to enforce this, but for now,\n    #    I want to explicitly catch and warn about this.\n    # 2) Can eventually disable error checking at every call for speed\n    #    -- Or could disable after the first valid call.\n    def resFunc(*args, **kwargs):\n        if errorCheck:\n            if args:\n                raise Exception(\"Can't call residual function \"+resFuncName+\" without kwargs\")\n            if outputParamName not in kwargs:\n                raise Exception(\"Residual function \"+resFuncName+\" missing parameter \"+outputParamName)\n            if len(kwargs) != N + 1:\n                raise Exception(\"Got wrong number of arguments\")\n\n        outputValue = kwargs[outputParamName]\n        del kwargs[outputParamName]\n\n        # Invoke the original function with the original argument signature\n        # and subtract from the predicted output to get the residual\n        res = outputValue - func(**kwargs)\n\n        if verbose:\n            print(\"{:.2e}\".format(res),resFunc.__name__,kwargs,outputValue)\n        return res\n\n    resFunc.__name__ = resFuncName\n\n    return resFunc\n\n","repo_name":"supergra/octopus","sub_path":"octopus.py","file_name":"octopus.py","file_ext":"py","file_size_in_byte":31212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5401975324","text":"# -----------------------------------------------------------\n# ELGamal Class that Performs the Generation, Encryption and Decryption of a string\n#\n# (C) 2020 Musa Joshua Gideon-Bashir, Abuja, Nigeria\n# Released under MIT Public License\n# email gidijosh@gmail.com\n# -----------------------------------------------------------\n\nimport random\nfrom time import time\nfrom .encoding import encode, decode\n\n\nclass ELGamal:\n    \"\"\"\n    ELGamal Class that Performs the Generation, Encryption and Decryption of a string\n    \"\"\"\n    @classmethod\n    def if_prime(cls, n):\n        \"\"\"Checks if a number is a prime or not\n\n        Args:\n            cls (class attribute): Access a class attribute through keyword cls\n\n        Returns:\n            boolean: true or false\n        \"\"\"\n\n        if (n <= 1):\n            return False\n        if (n <= 3):\n            return True\n\n        if (n % 2 == 0 or n % 3 == 0):\n            return False\n\n        i = 5\n        while(i * i <= n):\n            if (n % i == 0 or n % (i + 2) == 0):\n                return False\n            i = i + 6\n\n        return True\n\n    @classmethod\n    def gcd(cls, a, b):\n        \"\"\"finds the gcd of two numbers a and b\n\n        Args:\n            cls (class attribute): Access a class attribute through keyword cls\n            a: integer a \n            b: integer b \n\n        Returns:\n            int: gcd of the two numbers\n        \"\"\"\n\n        if a < b:\n            return cls.gcd(b, a)\n        elif a % b == 0:\n            return b\n        else:\n            return cls.gcd(b, a % b)\n\n    @classmethod\n    def 
modInverse(cls, a, m):\n        \"\"\"finds the modular inverse of a modulo m\n\n        Args:\n            cls (class attribute): Access a class attribute through keyword cls\n            a: integer a \n            m: integer m \n\n        Returns:\n            int: the modular inverse of the two numbers\n        \"\"\"\n        a = a % m\n        for x in range(1, m):\n            if ((a * x) % m == 1):\n                return x\n        return 1\n\n    @classmethod\n    def gen_a(cls, p):\n        \"\"\"finds a, a number between 1 and p-2 that is coprime with p\n\n        Args:\n            cls (class attribute): Access a class attribute through keyword cls\n            p: integer p \n\n        Returns:\n            int: a\n        \"\"\"\n        a = random.randint(1, p-2)\n        while cls.gcd(a, p) != 1:\n            a = random.randint(1, p-2)\n        return a\n\n    @classmethod\n    def generate_keys(cls):\n        \"\"\"generates the keys\n\n        Args:\n            cls (class attribute): Access a class attribute through keyword cls\n\n        Returns:\n            int: p\n            int: g\n            int: x\n            int: y\n        \"\"\"\n        p = random.randint(50, 300)\n        while cls.if_prime(p) != True:\n            p = random.randint(50, 300)\n        g = random.randint(50, p - 1)\n        x = cls.gen_a(p)\n        y = pow(g, x, p)\n\n        return((p, g, y), (p, x))\n\n    @classmethod\n    def encrypt(cls, pub_key, plainString):\n        \"\"\"Encrypts a string\n\n        Args:\n            cls (class attribute): Access a class attribute through keyword cls\n            pub_key: Public Key of Elgamal\n            plainString: string to be encrypted\n\n        Returns:\n            str: a string of the encrypted cipher string 1\n            str: a string of the encrypted cipher string 2\n        \"\"\"\n        p, g, y = pub_key\n\n        c1 = []\n        c2 = []\n        # Start Timer\n        start_time = time()\n        k = cls.gen_a(p)\n        for i, v in enumerate(plainString):\n            c1.append(pow(g, k, p))\n            val = (pow(y, k, p) * ord(v)) % p\n            c2.append(val)\n        # End Timer\n        end_time = time()\n        cipherString1 = \"-\".join((map(str, c1)))\n        cipherString2 = \"-\".join((map(str, c2)))\n        cipher_string1_base64_string = encode(cipherString1)\n        cipher_string2_base64_string = encode(cipherString2)\n        return ((cipher_string1_base64_string, cipher_string2_base64_string), round((end_time - start_time) * 1000, 4))\n\n    @classmethod\n    def decrypt(cls, pri_key, cipherFileString1, cipherFileString2):\n        \"\"\"Decrypts a string\n\n        Args:\n            cls (class attribute): Access a class attribute through keyword cls\n            pri_key: Private Key of Elgamal\n            cipherFileString1: a string of the cipher string 1\n            cipherFileString2: a string of the cipher string 2\n\n        Returns:\n            str: a string of the decrypted string\n        \"\"\"\n        cipher_string1_base64_string = decode(cipherFileString1)\n        cipher_string2_base64_string = decode(cipherFileString2)\n        cipherFileArray1 = list(\n            map(int, cipher_string1_base64_string.split(\"-\")))\n        cipherFileArray2 = list(\n            map(int, cipher_string2_base64_string.split(\"-\")))\n        p, x = pri_key\n        plain_hex_arr = []\n        # Start Timer\n        start_time = time()\n        for i, v in enumerate(cipherFileArray2):\n\n            k = pow(pow(cipherFileArray1[i], x, p), p-2, p)\n\n            # k = pow(cipherFileArray1[i], (p - 1 - x))\n\n            # k = pow(pow(c1, x), (p - 2) * v) % p\n            # k = cls.modInverse(pow(c1, x), p)\n            val = (k * v) % p\n            # val = pow(pow(c1, x), (p - 2) * v, p)\n            plain_hex_arr.append(val)\n        end_time = time()\n        # End Timer\n        plain_hex_string = cls.unifyString(plain_hex_arr)\n        return (plain_hex_string, round((end_time - start_time) * 1000, 4))\n\n    @classmethod\n    def unifyString(cls, fileArray):\n        \"\"\"Converts an array of numbers to a string of their respective ASCII characters\n\n        Args:\n            cls (class attribute): Access a class attribute through keyword cls\n            fileArray: array of integers\n\n        Returns:\n            str: a string consisting of the ASCII character of each number in the array\n        \"\"\"\n        string = 
\"\"\n for i, v in enumerate(fileArray):\n string = string + chr(v)\n return string\n","repo_name":"musajoshua/rsa_elgamal_gui","sub_path":"mod/modules/ELGamal.py","file_name":"ELGamal.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"29702170843","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport re, ast\n\n# get version from __version__ variable in selco/__init__.py\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('selco/__init__.py', 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\ndef parse_requirements(path, get_dependency_links = False):\n\twith open(path) as f:\n\t\tdeps = f.read().strip().split('\\n')\n\t\tif not get_dependency_links: return deps\n\t\tlink_pattern = re.compile(r\"(git)?\\+?(git|https?):\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\")\n\t\treturn [re.search(link_pattern, dep).group() for dep in deps if re.search(link_pattern, dep)]\n\nsetup(\n\tname='selco',\n\tversion=version,\n\tdescription='Selco Customizations',\n\tauthor='SELCO',\n\tauthor_email='basawaraj@selco-india.com',\n\tpackages=find_packages(),\n\tzip_safe=False,\n\tinclude_package_data=True,\n\tinstall_requires=parse_requirements('requirements.txt')\n)\n","repo_name":"hidayatmanusiya/selco_v2","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"27"} +{"seq_id":"40838227461","text":"import csv\nimport numpy as np\nimport pandas as pd\nimport sys\nimport os\n\nDIR = os.path.dirname(__file__)\n\ndef get_top_k_queries_all_users(complete_utility_matrix, N_USERS = 2500, top_k=10):\n\n # users are rows, queries are columns\n # list of sets of query, the list has N_USERS elements, a set has top_k elements max\n top_k_queries = [set() for _ in range(N_USERS)]\n\n for user in range(N_USERS):\n for query in range(complete_utility_matrix.shape[1]):\n top_k_queries[user].add((query, complete_utility_matrix.iloc[user, query]))\n top_k_queries[user] = sorted(top_k_queries[user], key=lambda x: x[1], reverse=True)[:top_k]\n\n return top_k_queries\n\n\ndef save_results(TOP_K = 10, N_USERS = 2500):\n\n hybrid_path = os.path.join(DIR, '../../data/hybrid/complete_utility_matrix.csv')\n hybrid_utility_matrix_complete = pd.read_csv(hybrid_path, index_col=0)\n\n compact_path = os.path.join(DIR, '../../data/compact_item_item_cf/complete_utility_matrix.csv')\n compact_utility_matrix_complete = pd.read_csv(compact_path, index_col=0)\n\n real_complete_path = os.path.join(DIR, '../../data/_utility_matrix_complete.csv')\n real_utility_matrix_complete = pd.read_csv(real_complete_path, index_col=0)\n\n top_k_queries_tuples_hybrid = get_top_k_queries_all_users(hybrid_utility_matrix_complete, N_USERS=N_USERS, top_k=TOP_K)\n top_k_queries_tuples_compact = get_top_k_queries_all_users(compact_utility_matrix_complete, N_USERS=N_USERS, top_k=TOP_K)\n top_k_queries_tuples_real = get_top_k_queries_all_users(real_utility_matrix_complete, N_USERS=N_USERS, top_k=TOP_K)\n\n # keep only the queries\n top_k_queries_hybrid = [set([x[0] for x in top_k_queries_tuples_hybrid[i]]) for i in range(N_USERS)]\n top_k_queries_compact = [set([x[0] for x in top_k_queries_tuples_compact[i]]) for i in range(N_USERS)]\n top_k_queries_real = [set([x[0] for x in 
top_k_queries_tuples_real[i]]) for i in range(N_USERS)]\n\n # save the top k queries for each user\n with open(os.path.join(DIR, '../data/hybrid/top_k_queries/top_' + str(TOP_K) + '_queries.csv'), 'w') as f:\n writer = csv.writer(f)\n writer.writerows(top_k_queries_hybrid)\n\n with open(os.path.join(DIR, '../data/compact_item_item_cf/top_k_queries/top_' + str(TOP_K) + '_queries.csv'), 'w') as f:\n writer = csv.writer(f)\n writer.writerows(top_k_queries_compact)\n\n with open(os.path.join(DIR, '../data/top_k_queries/real_top_' + str(TOP_K) + '_queries.csv'), 'w') as f:\n writer = csv.writer(f)\n writer.writerows(top_k_queries_real)\n\n\ndef compute_jaccard_similarity(set1, set2):\n #print(set1.intersection(set2))\n #print(set1.union(set2))\n return len(set1.intersection(set2)) / len(set1.union(set2))\n #return len(set1 & set2) / len(set1 | set2)\n\ndef log_to_txt(path, text):\n with open(path, 'a') as f:\n f.write(text)\n\n\nif __name__ == \"__main__\":\n\n\n N_USERS = 2500\n TOP_K_VALUES = [1, 2, 3, 4, 5, 10, 15, 20, 30]\n LOG_PATH = os.path.join(DIR, '../../data/PART_A/jaccard_top_k.txt')\n\n #save_results(TOP_K, N_USERS)\n df = pd.DataFrame(columns=['top_k', 'algorithm_type', 'jaccard_similarity_value'])\n for TOP_K in TOP_K_VALUES:\n\n save_results(TOP_K, N_USERS)\n\n # read the top k queries for each user for each csv file and store them lists of sets, each row is a set of queries\n hybrid_path = os.path.join(DIR, '../../data/hybrid/top_k_queries/top_' + str(TOP_K) + '_queries.csv')\n hybrid_top_k_queries = pd.read_csv(hybrid_path, header=None).values.tolist()\n hybrid_top_k_queries = [set(x) for x in hybrid_top_k_queries]\n\n compact_path = os.path.join(DIR, '../../data/compact_item_item_cf/top_k_queries/top_' + str(TOP_K) + '_queries.csv')\n compact_top_k_queries = pd.read_csv(compact_path, header=None).values.tolist()\n compact_top_k_queries = [set(x) for x in compact_top_k_queries]\n\n real_path = os.path.join(DIR, '../../data/top_k_queries/real_top_' + str(TOP_K) + '_queries.csv')\n real_top_k_queries = pd.read_csv(real_path, header=None).values.tolist()\n real_top_k_queries = [set(x) for x in real_top_k_queries]\n\n # compute the jaccard similarity between the top k queries for each user\n jaccard_similarity_hybrid = [compute_jaccard_similarity(hybrid_top_k_queries[i], real_top_k_queries[i]) for i in range(N_USERS)]\n jaccard_similarity_compact = [compute_jaccard_similarity(compact_top_k_queries[i], real_top_k_queries[i]) for i in range(N_USERS)]\n\n for i in range(N_USERS):\n df = df.append({'user': i, 'top_k': TOP_K, 'algorithm_type': 'compact', 'jaccard_similarity_value': jaccard_similarity_compact[i]}, ignore_index=True)\n df = df.append({'user': i, 'top_k': TOP_K, 'algorithm_type': 'hybrid', 'jaccard_similarity_value': jaccard_similarity_hybrid[i]},ignore_index=True)\n\n print('TOP_K: ', TOP_K)\n log_to_txt(LOG_PATH, 'TOP_K: ' + str(TOP_K) + '\\n')\n\n print('Jaccard similarity hybrid: ', np.mean(jaccard_similarity_hybrid))\n print('Jaccard similarity compact: ', np.mean(jaccard_similarity_compact))\n log_to_txt(LOG_PATH, 'Jaccard similarity hybrid: ' + str(np.mean(jaccard_similarity_hybrid)) + '\\n')\n log_to_txt(LOG_PATH, 'Jaccard similarity compact: ' + str(np.mean(jaccard_similarity_compact)) + '\\n')\n\n print('Lowest jaccard similarity hybrid: ', np.min(jaccard_similarity_hybrid))\n print('Lowest jaccard similarity compact: ', np.min(jaccard_similarity_compact))\n log_to_txt(LOG_PATH, 'Lowest jaccard similarity hybrid: ' + str(np.min(jaccard_similarity_hybrid)) + 
'\\n')\n        log_to_txt(LOG_PATH, 'Lowest jaccard similarity compact: ' + str(np.min(jaccard_similarity_compact)) + '\\n')\n\n        print('Highest jaccard similarity hybrid: ', np.max(jaccard_similarity_hybrid))\n        print('Highest jaccard similarity compact: ', np.max(jaccard_similarity_compact))\n        log_to_txt(LOG_PATH, 'Highest jaccard similarity hybrid: ' + str(np.max(jaccard_similarity_hybrid)) + '\\n')\n        log_to_txt(LOG_PATH, 'Highest jaccard similarity compact: ' + str(np.max(jaccard_similarity_compact)) + '\\n')\n\n        log_to_txt(LOG_PATH, '--------------------------------------------------------\\n')\n\n\n    print(df.head())\n    # save the dataframe to a csv file\n    df.to_csv(os.path.join(DIR, '../../data/PART_A/df_jaccard_similarity.csv'), index=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"vicentinileonardo/query-recommendation-system","sub_path":"src/utility/test_top_k.py","file_name":"test_top_k.py","file_ext":"py","file_size_in_byte":6456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22867219867","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\nimport random\nimport math\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\nclass OuterProductMean(nn.Module):\n    \"\"\"\n    Implements Algorithm 10.\n    \"\"\"\n\n    def __init__(self, c_m, c_z, c_hidden, eps=1e-3):\n        \"\"\"\n        Args:\n            c_m:\n                Residue level embedding channel dimension\n            c_z:\n                Pair embedding channel dimension\n            c_hidden:\n                Hidden channel dimension\n        \"\"\"\n        super(OuterProductMean, self).__init__()\n\n        self.c_m = c_m\n        self.c_z = c_z\n        self.c_hidden = c_hidden\n        self.eps = eps\n\n        self.layer_norm = nn.LayerNorm(c_m)\n        self.linear_1 = nn.Linear(c_m, c_hidden)\n        self.linear_2 = nn.Linear(c_m, c_hidden)\n        self.linear_out = nn.Linear(c_hidden ** 2, c_z)\n\n    def _opm(self, a, b):\n        outer = torch.einsum(\"...bac,...dae->...bdce\", a, b)\n        outer = outer.reshape(outer.shape[:-2] + (-1,))\n        outer = self.linear_out(outer)\n        return outer\n\n\n    def forward(self,\n                m: torch.Tensor,\n                mask = None,\n                chunk_size = None,\n                ) -> torch.Tensor:\n        \"\"\"\n        Args:\n            m:\n                [*, N_seq, N_res, C_m] MSA embedding\n            mask:\n                [*, N_seq, N_res] MSA mask\n        Returns:\n            [*, N_res, N_res, C_z] pair embedding update\n        \"\"\"\n        m = m.unsqueeze(1)\n        if mask is None:\n            mask = m.new_ones(m.shape[:-1])\n\n        ln = self.layer_norm(m)\n        mask = mask.unsqueeze(-1)\n        a = self.linear_1(ln)\n        a = a * mask\n\n        b = self.linear_2(ln)\n        b = b * mask\n\n        del ln\n        a = a.transpose(-2, -3)\n        b = b.transpose(-2, -3)\n        outer = self._opm(a, b)\n\n        # [*, N_res, N_res, 1]\n        norm = torch.einsum(\"...abc,...adc->...bdc\", mask, mask)\n        norm = norm + self.eps\n\n        # [*, N_res, N_res, C_z]\n\n        outer = outer / norm\n\n        return outer\n\nclass dot_attention(nn.Module):\n    def __init__(self, attention_dropout=0.0):\n        super(dot_attention, self).__init__()\n        self.dropout = nn.Dropout(attention_dropout)\n        self.softmax = nn.Softmax(dim=2) # for rv\n\n    def forward(self, gate,q, k, v,pair, scale=None, attn_mask=None):\n\n        #q : [*, src, head, h_dim]\n        q = q.permute(0,2,1,3) #[*, head,src,h_dim ]\n        k = k.permute(0,2,3,1) #[*, head,h_dim, src]\n        attention = torch.matmul(q,k).permute(0,2,3,1) #[*, src,src, head]\n        if scale:\n            attention = attention * scale # scaled or not\n        if attn_mask is not None:\n            attention = attention.masked_fill(attn_mask, -np.inf) # mask\n        # add pair bias to attention\n        attention = attention + pair\n        # softmax\n        attention = self.softmax(attention)\n        # dropout\n        attention = 
self.dropout(attention)\n        # multiply with V\n        context = torch.matmul(attention.permute(0,3,1,2), v.permute(0,2,1,3)).permute(0,2,1,3)\n        # elementwise product with the gate\n        context = context * gate\n        return context, attention\n\n\nclass RowAttentionWithPairBias(nn.Module):\n    def __init__(self, pair_dim=39, model_dim=34,num_heads=2, dropout=0.0):\n        super(RowAttentionWithPairBias, self).__init__()\n        self.dim_per_head = model_dim//num_heads # dimension of each head\n        self.num_heads = num_heads\n        self.linear = nn.Linear(model_dim, model_dim)\n        self.dot_product_attention = dot_attention(dropout)\n        self.linearNoBias = nn.Linear(model_dim, model_dim,bias=False)\n        self.linear_q = nn.Linear(model_dim, model_dim,bias=False)\n        self.linear_k = nn.Linear(model_dim, model_dim,bias=False)\n        self.linear_v = nn.Linear(model_dim, model_dim,bias=False)\n        self.linearPair = nn.Linear(pair_dim,num_heads,bias = False)\n        self.linear_final = nn.Linear(model_dim, model_dim)\n        self.dropout = nn.Dropout(dropout)\n        self.layer_norm = nn.LayerNorm(model_dim) # LayerNorm\n        self.layer_norm_pair = nn.LayerNorm(pair_dim)\n        self.sigmoid = nn.Sigmoid()\n        self.transition_layer = nn.Sequential(\n            nn.LayerNorm(model_dim),\n            nn.Linear(model_dim, model_dim),\n            nn.ReLU(),\n            nn.Linear(model_dim, model_dim)\n        )\n    def forward(self, value, pair, attn_mask=None):\n        # residual\n        value = self.layer_norm(value)\n        residual = value\n        dim_per_head = self.dim_per_head\n        num_heads = self.num_heads\n        batch_size = value.size(0)\n        src = value.size(1)\n        # linear\n        gate = self.sigmoid(self.linear(value))\n        key = self.linear_k(value)\n        value = self.linear_v(value)\n        query = self.linear_q(value)\n        pair_bias = self.linearPair(self.layer_norm_pair(pair)) # src, src, Nhead\n        # divide by heads\n        gate = gate.view(batch_size, src, num_heads, dim_per_head)\n        key = key.view(batch_size, src,num_heads, dim_per_head) # batch_size * Nhead, src,head_dim\n        value = value.view(batch_size, src, num_heads, dim_per_head)\n        query = query.view(batch_size, src,num_heads, dim_per_head)\n\n        if attn_mask is not None:\n            attn_mask = attn_mask.repeat(1, 1, num_heads).reshape(batch_size,src,src,num_heads) # batch_size * Nhead, src, 1\n\n        # scaled multi-heads\n        scale = (self.dim_per_head) ** -0.5\n        context, attention = self.dot_product_attention(gate, query, key, value, pair_bias,scale, attn_mask)\n\n        # concat heads\n        context = context.reshape(batch_size, src, dim_per_head * num_heads)\n        # linear\n        output = self.linear_final(context)\n        # dropout\n        output = self.dropout(output)\n\n\n        output = self.layer_norm(residual + output)\n        output = self.transition_layer(output)\n        return output, attention\n\n\n","repo_name":"chdcg/H3-OPT","sub_path":"code/model/AF2_models.py","file_name":"AF2_models.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"671572671","text":"#!/usr/bin/python3\n\"\"\"\nThis module defines the console\n\"\"\"\nimport cmd\nfrom models import storage\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.place import Place\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass HBNBCommand(cmd.Cmd):\n    \"\"\"\n    Represents the HBNB command interpreter for Airbnb\n    \"\"\"\n    prompt = \"(hbnb) \"\n    __all_classes = [\"BaseModel\",\n                     \"User\",\n                     \"State\",\n                     \"City\",\n                     \"Place\",\n                     \"Amenity\",\n                     \"Review\"]\n\n    def do_quit(self, args):\n        \"\"\"Quit command to exit the program\"\"\"\n        return True\n\n    def do_EOF(self, 
args):\n        \"\"\"EOF signal to exit the program\"\"\"\n        return True\n\n    def emptyline(self):\n        \"\"\"\n        Do nothing when the user presses Enter on an empty line\n        \"\"\"\n        pass\n\n    def do_create(self, args):\n        \"\"\"\n        Creates a new instance of Model\n        Usage: create <class name>\n        \"\"\"\n        if args == \"\":\n            print(\"** class name missing **\")\n            return\n        elif args not in self.__all_classes:\n            print(\"** class doesn't exist **\")\n            return\n        obj = eval(args)()\n        obj.save()\n        print(obj.id)\n\n    def do_show(self, args):\n        \"\"\"\n        Prints the string representation\n        of an instance\n        Usage: show <class name> <id>\n        \"\"\"\n        saved_model = storage.all()\n        if args == \"\":\n            print(\"** class name missing **\")\n            return\n        params = args.split()\n        if params[0] not in self.__all_classes:\n            print(\"** class doesn't exist **\")\n            return\n        elif len(params) == 1:\n            print(\"** instance id missing **\")\n            return\n        elif \"{}.{}\".format(params[0], params[1]) not in saved_model.keys():\n            print(\"** no instance found **\")\n        else:\n            print(saved_model[\"{}.{}\".format(params[0], params[1])])\n\n    def do_all(self, args):\n        \"\"\"\n        Prints all string representation\n        of all instances or the specified one only\n        Usage: all <class name> | all\n        \"\"\"\n        all_instances = storage.all().values()\n        if args == \"\":\n            print([str(obj) for obj in all_instances])\n            return\n        params = args.split()\n        class_name = params[0]\n        if class_name not in self.__all_classes:\n            print(\"** class doesn't exist **\")\n        else:\n            print([str(obj) for obj in all_instances\n                   if class_name == obj.__class__.__name__])\n\n    def do_destroy(self, args):\n        \"\"\"\n        Deletes an instance based on the class name and id\n        Usage: destroy BaseModel 1234-1234-1234\n        \"\"\"\n        saved_model = storage.all()\n        if args == \"\":\n            print(\"** class name missing **\")\n            return\n        params = args.split()\n        if params[0] not in self.__all_classes:\n            print(\"** class doesn't exist **\")\n            return\n        elif len(params) == 1:\n            print(\"** instance id missing **\")\n            return\n        elif \"{}.{}\".format(params[0], params[1]) not in saved_model.keys():\n            print(\"** no instance found **\")\n        else:\n            del saved_model[\"{}.{}\".format(params[0], params[1])]\n            storage.save()\n\n    def do_update(self, args):\n        \"\"\"\n        Updates an instance based on the class name and id\n        Usage: update <class name> <id> <attribute name> \"<attribute value>\"\n        \"\"\"\n        saved_model = storage.all()\n        if args == \"\":\n            print(\"** class name missing **\")\n            return\n        params = args.split()\n        params_len = len(params)\n        if params[0] not in self.__all_classes:\n            print(\"** class doesn't exist **\")\n            return\n        if params_len == 1:\n            print(\"** instance id missing **\")\n            return\n        if \"{}.{}\".format(params[0], params[1]) not in saved_model.keys():\n            print(\"** no instance found **\")\n            return\n        if params_len == 2:\n            print(\"** attribute name missing **\")\n            return\n        if params_len == 3:\n            print(\"** value missing **\")\n            return\n        if params_len == 4:\n            obj = saved_model[\"{}.{}\".format(params[0], params[1])]\n            if params[2] in obj.__class__.__dict__.keys():\n                attr_type = type(obj.__class__.__dict__[params[2]])\n                obj.__dict__[params[2]] = attr_type(params[3])\n            else:\n                obj.__dict__[params[2]] = params[3]\n        elif type(eval(params[2])) == dict:\n            obj = saved_model[\"{}.{}\".format(params[0], params[1])]\n            for k, v in eval(params[2]).items():\n                if (k in obj.__class__.__dict__.keys() and\n                        type(obj.__class__.__dict__[k]) in {str, int, float}):\n                    attr_type = type(obj.__class__.__dict__[k])\n                    obj.__dict__[k] = attr_type(v)\n                else:\n                    obj.__dict__[k] = v\n        storage.save()\n\n\nif __name__ == \"__main__\":\n    
HBNBCommand().cmdloop()\n","repo_name":"wilfredcloud/AirBnB_clone","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42629280653","text":"import numpy as np\nfrom matplotlib.patches import PathPatch\nfrom matplotlib.patches import Polygon as PolygonPatch\nfrom matplotlib.path import Path\nfrom shapely.geometry import Polygon\n\nfrom brooks.util.geometry_ops import get_center_line_from_rectangle\n\n\nclass WindowCenterLinePatch(PathPatch):\n def __init__(self, rectangle: Polygon, line_distance=0.035, **kwargs):\n centerline = get_center_line_from_rectangle(\n polygon=rectangle, only_longest=True\n )[0]\n p1, p2 = [np.array(c) for c in centerline.coords]\n\n vec = p2 - p1\n normal = np.array((-vec[1], vec[0]))\n normal /= np.linalg.norm(normal, ord=2)\n\n path = (\n (p1 + line_distance / 2 * normal, Path.MOVETO),\n (p2 + line_distance / 2 * normal, Path.LINETO),\n (p1 - line_distance / 2 * normal, Path.MOVETO),\n (p2 - line_distance / 2 * normal, Path.LINETO),\n )\n\n vertices, codes = zip(*path)\n super().__init__(Path(vertices, codes, closed=False), **kwargs)\n\n\nclass WindowPatch(PolygonPatch):\n pass\n","repo_name":"Archilyse/Archilyse","sub_path":"brooks/brooks/visualization/floorplans/patches/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"27"} +{"seq_id":"34661621706","text":"#!/usr/bin/python3\n\"\"\"\nThis module creates a rectangle class\nwhich inherits from a class `Base`\n\"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" This is a rectangle class\"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\"this is the class constructor\"\"\"\n super().__init__(id)\n if not isinstance(width, int):\n raise TypeError(\"width must be an integer\")\n if not isinstance(height, int):\n raise TypeError(\"height must be an integer\")\n if not isinstance(x, int):\n raise TypeError(\"x must be an integer\")\n if not isinstance(y, int):\n raise TypeError(\"y must be an integer\")\n if width <= 0:\n raise ValueError(\"width must be > 0\")\n if height <= 0:\n raise ValueError(\"height must be > 0\")\n if x < 0:\n raise ValueError(\"x must be >= 0\")\n if y < 0:\n raise ValueError(\"y must be >= 0\")\n\n self.__width = width\n self.__height = height\n self.__x = x\n self.__y = y\n\n @property\n def width(self):\n \"\"\"This the the width getter\"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n if not isinstance(value, int):\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value\n\n @property\n def height(self):\n \"\"\" This is the height getter\"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n if not isinstance(value, int):\n raise TypeError(\"height must be an integer\")\n if value <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = value\n\n @property\n def x(self):\n \"\"\" This is the `x` getter\"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n if not isinstance(value, int):\n raise TypeError(\"x must be an integer\")\n if value < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = value\n\n @property\n def y(self):\n \"\"\"This is the `y` getter\"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n if not isinstance(value, 
int):\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value\n\n def area(self):\n \"\"\" returns the area\"\"\"\n return self.__height * self.__width\n\n def display(self):\n \"\"\" displays the rectangle in `#`\"\"\"\n print(\"\\n\" * self.__y, end=\"\")\n for i in range(self.__height):\n print(\" \" * self.__x, end=\"\")\n print(\"#\" * self.__width)\n\n def __str__(self):\n \"\"\" print the instance\"\"\"\n print_out = \"[Rectangle] ({}) {}/{} - {}/{}\"\n x = self.__x\n return print_out.format(self.id, x, self.y, self.width, self.height)\n\n def update(self, *args, **kwargs):\n \"\"\"\n updates the argument\n for the *args\n 1st argument should be the id attribute\n 2nd argument should be the width attribute\n 3rd argument should be the height attribute\n 4th argument should be the x attribute\n 5th argument should be the y attribute\n \"\"\"\n\n if len(args) != 0:\n if len(args) >= 1:\n self.id = args[0]\n if len(args) >= 2:\n self.__width = args[1]\n if len(args) >= 3:\n self.__height = args[2]\n if len(args) >= 4:\n self.__x = args[3]\n if len(args) >= 5:\n self.__y = args[4]\n else:\n for key, value in kwargs.items():\n if key == \"id\":\n self.id = value\n if key == \"width\":\n self.__width = value\n if key == \"height\":\n self.__height = value\n if key == \"x\":\n self.__x = value\n if key == \"y\":\n self.__y = value\n\n def to_dictionary(self):\n \"\"\" returns a dictionary representation or Rectangle\"\"\"\n\n h = self.__height\n w = self.__width\n idw = self.id\n dic = {'x': self.x, 'y': self.y, 'id': idw, 'height': h, 'width': w}\n return dic\n","repo_name":"Phics2022/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23090121439","text":"load(\"@bazel_skylib//rules:write_file.bzl\", \"write_file\")\n\ndef nogo_config(name, out, analyzers, override = {}, default = {\n \"exclude_files\": {\n # Don't run linters on external dependencies\n \"external/\": \"third_party\",\n },\n}):\n \"\"\"\n nogo_config is a handy function that creates the nogo config json file programmatically from starlark\n\n This meant to provide a sane default config file.\n By feeding this a list of analyzers in used, all the external dependencies will be excluded from running nogo.\n\n For more advance usages, you should consider creating similar helper macro for your own repository.\n For more information, check rules_go's nogo documentation.\n\n Example usage:\n\n nogo_config(\n name = \"nogo_config\",\n out = \"nogo_config.json\",\n analyzers = [\"ABC1001\", \"ABC1002\"],\n override = {\n \"ABC1002\": {\n \"exclude_files\": {\n \"external/\": \"third_party\",\n \"proto/\": \"generated protobuf\",\n },\n },\n },\n )\n\n The json would be generated from this would look like this:\n\n {\n \"ABC1001\": {\n \"exclude_files\": {\n \"external/\": \"third_party\"\n }\n },\n \"ABC1002\": {\n \"exclude_files\": {\n \"external/\": \"third_party\",\n \"proto/\": \"generated protobuf\"\n }\n }\n }\n\n And you can use said configuration like this:\n\n nogo(\n name = \"nogo\",\n config = \":nogo_config.json\",\n visibility = [\"//visibility:public\"],\n deps = nogo_vet_deps(),\n )\n \"\"\"\n write_file(\n name = name,\n out = out,\n content = [\n json.encode_indent({\n analyzer: override.get(analyzer, default)\n for analyzer in 
analyzers\n }),\n ],\n )\n","repo_name":"sluongng/nogo-analyzer","sub_path":"def.bzl","file_name":"def.bzl","file_ext":"bzl","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"27"} +{"seq_id":"23407433741","text":"from __future__ import annotations\n\nimport datetime\nimport logging\nimport sys\nfrom typing import Any, Dict\n\nimport pyspark.sql.functions as F\nfrom pyspark.sql import Column\nfrom pyspark.sql import DataFrame\nfrom pyspark.sql.types import AtomicType, LongType, StringType, BooleanType, DoubleType, FloatType, IntegerType, \\\n ShortType, BinaryType, DateType, TimestampType, DecimalType, ByteType\n\nfrom nestedfunctions.processors.terminal_operation_processor import TerminalOperationProcessor\nfrom nestedfunctions.spark_schema.utility import SparkSchemaUtility\n\n\ndef redact(df: DataFrame, field: str) -> DataFrame:\n return RedactProcessor(field).process(df)\n\n\nlog = logging.getLogger(__name__)\n\nMIN_SHORT_VALUE = -32768\nMIN_INTEGER_VALUE = -2147483648\nMIN_BYTE_VALUE = -128\nDEFAULT_DECIMAL_TYPE = 0\n\nSPARK_TYPE_TO_REDACT_VALUE: Dict[AtomicType, Any] = {\n StringType(): \"_REDACTED_VALUE\",\n LongType(): -sys.maxsize,\n BooleanType(): False,\n DoubleType(): float(-sys.maxsize),\n FloatType(): float(-sys.maxsize),\n IntegerType(): MIN_INTEGER_VALUE,\n ShortType(): MIN_SHORT_VALUE,\n BinaryType(): bytearray(),\n DateType(): datetime.date(year=1970, month=1, day=1),\n TimestampType(): datetime.datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0, microsecond=0),\n ByteType(): MIN_BYTE_VALUE,\n DecimalType(): 0,\n}\n\n\ndef column_name_with_dedicated_field_type(fieldType: AtomicType) -> Column:\n \"\"\"\n default values are set according to https://spark.apache.org/docs/latest/sql-ref-datatypes.html\n \"\"\"\n value_to_set = SPARK_TYPE_TO_REDACT_VALUE[fieldType]\n return F.lit(value_to_set).cast(fieldType)\n\n\nclass RedactProcessor(TerminalOperationProcessor):\n def __init__(self, column_to_process: str):\n super().__init__(column_to_process)\n\n def process(self, df: DataFrame) -> DataFrame:\n utility = SparkSchemaUtility()\n if not utility.is_column_exist(df.schema, self.column_to_process):\n log.warning(f\"Column {self.column_to_process} does not exist. Ignoring redacting process\")\n return df\n field_type = utility.schema_for_field(df.schema, self.column_to_process)\n if not issubclass(type(field_type), AtomicType):\n raise Exception(\n f\"Only primitive types could be redacted. Column ${self.column_to_process} has ${field_type} type. \"\n f\"Expected primitive type\")\n return super().process(df)\n\n def transform_primitive(self, primitive_value: Column, fieldType: AtomicType) -> Column:\n try:\n return F.when(F.isnull(primitive_value), primitive_value) \\\n .otherwise(column_name_with_dedicated_field_type(fieldType))\n except KeyError:\n raise Exception(\n f'Unknown type {fieldType.simpleString()} for field {self.column_to_process}. 
'\n f'Known types: {SPARK_TYPE_TO_REDACT_VALUE.keys()}')\n","repo_name":"golosegor/pyspark-nested-fields-functions","sub_path":"nestedfunctions/functions/redact.py","file_name":"redact.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"8084730971","text":"import multiprocessing\nimport concurrent.futures\n\nfrom numba import njit, prange\nimport numpy as np\n\n\ndef quick_cast(x, y): \n num_threads = multiprocessing.cpu_count()\n with concurrent.futures.ThreadPoolExecutor(num_threads) as executor:\n futures = {}\n limits = np.linspace(0, x.shape[0], num_threads+1).round().astype(int)\n def _cast(k0,k1):\n y[k0:k1,...] = x[k0:k1,...] \n for k in range(len(limits)-1):\n args = (_cast, limits[k], limits[k+1])\n futures[executor.submit(*args)] = k\n concurrent.futures.wait(futures)\n\n\ndef cast(dtype=np.float16):\n xc = None\n def transform(raw):\n nonlocal xc\n if (xc is None) or (xc.shape != raw.shape):\n xc = np.empty_like(raw, dtype=dtype)\n quick_cast(raw, xc)\n return xc\n return transform\n\n\n@njit(parallel=True)\ndef scale_array(in_arr, out_arr, scale):\n in_arr = in_arr.ravel()\n out_arr = out_arr.ravel()\n for i in prange(in_arr.shape[0]):\n out_arr[i] = scale[in_arr[i]]\n\n# NumPy version\n#def scale_array(in_arr, out_arr, scale):\n# out_arr[:] = scale[in_arr]\n\ndef normalize(mean=0.0, std=1.0, dtype=np.float32):\n scaled = scaled_dt = None\n\n def transform(raw):\n nonlocal scaled, scaled_dt\n if (scaled is None) or (scaled.shape != raw.shape):\n scaled = np.empty_like(raw, dtype=np.float32)\n scaled_dt = np.empty_like(raw, dtype=dtype)\n normalize_array(raw, scaled, mean, std)\n\n if dtype == np.float32:\n return scaled\n else:\n quick_cast(scaled, scaled_dt)\n return scaled_dt\n\n return transform\n\n\ndef normalize_threshold(mean=0.0, std=1.0, threshold=0.0, fill_value=0.0):\n scaled = None\n\n def transform(raw):\n nonlocal scaled\n if (scaled is None) or (scaled.shape != raw.shape):\n scaled = np.empty_like(raw, dtype=np.float32) \n normalize_threshold_array(raw, scaled, mean, std, threshold, fill_value)\n\n return scaled\n\n return transform\n\n\ndef scale_log_norm(scale, threshold=None, missing_value=None,\n fill_value=0, mean=0.0, std=1.0, dtype=np.float32):\n\n log_scale = np.log10(scale).astype(np.float32)\n if threshold is not None:\n log_scale[log_scale < np.log10(threshold)] = np.log10(fill_value)\n if missing_value is not None:\n log_scale[missing_value] = np.log10(fill_value)\n log_scale[~np.isfinite(log_scale)] = np.log10(fill_value)\n log_scale -= mean\n log_scale /= std\n scaled = scaled_dt = None\n\n def transform(raw):\n nonlocal scaled, scaled_dt\n if (scaled is None) or (scaled.shape != raw.shape):\n scaled = np.empty_like(raw, dtype=np.float32)\n scaled_dt = np.empty_like(raw, dtype=dtype)\n scale_array(raw, scaled, log_scale)\n\n if dtype == np.float32:\n return scaled\n else:\n quick_cast(scaled, scaled_dt)\n return scaled_dt\n\n return transform\n\n\ndef scale_norm(scale, threshold=None, missing_value=None,\n fill_value=0, mean=0.0, std=1.0, dtype=np.float32):\n\n scale = scale.astype(np.float32).copy()\n scale[np.isnan(scale)] = fill_value\n if threshold is not None:\n scale[scale < threshold] = fill_value\n if missing_value is not None:\n missing_value = np.atleast_1d(missing_value)\n for m in missing_value:\n scale[m] = fill_value\n scale -= mean\n scale /= std\n scaled = scaled_dt = None \n\n def transform(raw):\n nonlocal scaled, scaled_dt\n if 
(scaled is None) or (scaled.shape != raw.shape):\n            scaled = np.empty_like(raw, dtype=np.float32)\n            scaled_dt = np.empty_like(raw, dtype=dtype)\n        scale_array(raw, scaled, scale)\n\n        if dtype == np.float32:\n            return scaled\n        else:\n            quick_cast(scaled, scaled_dt)\n            return scaled_dt\n\n    return transform\n\n\n@njit(parallel=True)\ndef threshold_array(in_arr, out_arr, threshold):\n    in_arr = in_arr.ravel()\n    out_arr = out_arr.ravel()\n    for i in prange(in_arr.shape[0]):\n        out_arr[i] = np.float32(in_arr[i] >= threshold)\n\n\ndef one_hot(values): \n    translation = np.zeros(max(values)+1, dtype=int)\n    num_categories = len(values)\n    for (i,v) in enumerate(values):\n        translation[v] = i\n    onehot = onehot_dt = None\n\n    def transform(raw):\n        nonlocal onehot, onehot_dt\n        if (onehot is None) or (onehot.shape[:-1] != raw.shape):\n            onehot = np.empty(raw.shape+(num_categories,),\n                              dtype=np.float32)\n            onehot_dt = np.empty(raw.shape+(num_categories,),\n                              dtype=np.uint8)\n        onehot_transform(raw, onehot, translation)\n        quick_cast(onehot, onehot_dt)\n\n        return onehot_dt\n\n    return transform\n    \n    \n@njit(parallel=True)\ndef onehot_transform(in_arr, out_arr, translation):\n    for k in prange(in_arr.shape[0]):\n        out_arr[k,...] = 0.0\n        for t in range(in_arr.shape[1]):\n            for i in range(in_arr.shape[2]):\n                for j in range(in_arr.shape[3]):\n                    ind = np.uint64(in_arr[k,t,i,j])\n                    c = translation[ind]\n                    out_arr[k,t,i,j,c] = 1.0\n\n\n@njit(parallel=True)\ndef normalize_array(in_arr, out_arr, mean, std):\n    mean = np.float32(mean)\n    inv_std = np.float32(1.0/std)\n    in_arr = in_arr.ravel()\n    out_arr = out_arr.ravel()\n    for i in prange(in_arr.shape[0]):\n        out_arr[i] = (in_arr[i]-mean)*inv_std\n\n\n@njit(parallel=True)\ndef normalize_threshold_array(in_arr, out_arr, mean, std, threshold, fill_value):\n    mean = np.float32(mean)\n    inv_std = np.float32(1.0/std)\n    threshold = np.float32(threshold)\n    fill_value = np.float32(fill_value)\n    in_arr = in_arr.ravel()\n    out_arr = out_arr.ravel()\n    for i in prange(in_arr.shape[0]):\n        x = in_arr[i]\n        if x < threshold:\n            x = fill_value\n        out_arr[i] = (x-mean)*inv_std\n\n\n# NumPy version\n#def threshold_array(in_arr, out_arr, threshold):\n#    out_arr[:] = (in_arr >= threshold).astype(np.float32)\n\n\ndef R_threshold(scale, threshold): \n    thresholded = None\n    scale_threshold = np.nanargmax(scale > threshold)\n\n    def transform(rzc_raw):\n        nonlocal thresholded\n        if (thresholded is None) or (thresholded.shape != rzc_raw.shape):\n            thresholded = np.empty_like(rzc_raw, dtype=np.float32)\n        threshold_array(rzc_raw, thresholded, scale_threshold)\n\n        return thresholded\n\n    return transform\n","repo_name":"MeteoSwiss/c4dl-multi","sub_path":"c4dlmulti/features/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"27"} +{"seq_id":"39721040959","text":"'''Exercise 8: Print list in reverse order using a loop\r\nGiven:\r\n\r\nlist1 = [10, 20, 30, 40, 50]\r\nExpected output:\r\n\r\n50\r\n40\r\n30\r\n20\r\n10'''\r\n\r\nlist1=[10,20,30,40,50] \r\nnew_list=reversed(list1)\r\n\r\nfor item in new_list: \r\n    print(item)","repo_name":"gaastha/my-python-learning","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"11133214008","text":"#!/usr/bin/env python3\n\nimport argparse\nimport logging\nfrom pathlib import Path\n\nfrom Bio import SeqIO\n\n\ndef split_fasta(fasta_file, 
output_path, split_count):\n logging.info(\"Reading FASTA records from %s ...\", fasta_file)\n seqs = dict()\n seqcount = 0\n with fasta_file.open(\"r\") as fasta:\n for record in SeqIO.parse(fasta, \"fasta\"):\n seqcount += 1\n fasta_bin = seqcount % split_count + 1\n seqs.setdefault(fasta_bin, list()).append(record)\n\n for fasta_bin, fasta_records in seqs.items():\n new_fasta = output_path.joinpath(f\"{fasta_bin}_of_{split_count}_{fasta_file.name}\")\n logging.info(\"Writing bin %d to %s [%d records] ...\", fasta_bin, new_fasta, len(fasta_records))\n with new_fasta.open(\"w\") as fasta_out:\n SeqIO.write(fasta_records, fasta_out, \"fasta\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO, format=\"[%(asctime)s] %(message)s\")\n # get command-line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_fasta\", help=\"FASTA file to split.\", metavar=\"FASTA\")\n parser.add_argument(\"-n\", \"--splits\", help=\"Number of files to spread the FASTA records over.\", metavar=\"N\",\n type=int, default=2)\n parser.add_argument(\"-o\", \"--output\", help=\"Output directory of the split FASTA files.\", metavar=\"DIR\",\n default=\"./\")\n args = parser.parse_args()\n\n fasta_path = Path(args.input_fasta).expanduser().resolve().absolute()\n output_path = Path(args.output).expanduser().resolve().absolute()\n split_count = args.splits\n\n split_fasta(fasta_path, output_path, split_count)\n","repo_name":"ebete/metagenome-qtl","sub_path":"split_fasta.py","file_name":"split_fasta.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"39395491780","text":"def diff(first, second):\n second = set(second)\n return [item for item in first if item not in second]\n\n\na = [['a','b','c'], ['1','2','3'], ['a','b','z']]\nb = [[i[0],i[1]] for i in a]\nc = [i[2] for i in a]\nprint('a:', a)\nprint('b:', b)\nnew_list = []\nfor i in b:\n for j in a:\n if i[0] == j[0]:\n temp = []\n if i[0] not in [x[0] for x in new_list]:\n temp.append(i + diff(j,i))\n else:\n temp.append(diff(j,i))\n\n\n\n\n\n\n","repo_name":"comigo-github/data-collection-scripts","sub_path":"data_collection_scripts/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"16435833467","text":"my_input = input(\"Puzzle input: \")\n#my_input = \"abc\"\nextra = True\n\nimport hashlib, re\n\ndef get_hash(a):\n return hashlib.md5(a.encode()).hexdigest()\n\ndef get_hash_extra(a):\n for i in range(2017):\n a = get_hash(a)\n return a\n\nn = 0\nhexdb = []\nkeys = []\np3 = r'([0-9a-f])\\1{2,}'\nwhile len(keys) < 64:\n if len(hexdb) < n + 1:\n if extra: h = get_hash_extra(my_input + str(n))\n else: h = get_hash(my_input + str(n))\n hexdb.append(h)\n m = re.findall(p3, hexdb[n])\n if m:\n p5 = r\"(\" + m[0] + r\")\\1{4,}\"\n #print(n, p5)\n for k in range(n + 1, n + 1001):\n if len(hexdb) < k + 1:\n if extra: h = get_hash_extra(my_input + str(k))\n else: h = get_hash(my_input + str(k))\n hexdb.append(h)\n \n if re.findall(p5, hexdb[k]):\n keys.append(n)\n print(\"Getting keys... 
{:2}/64\".format(len(keys)))\n break\n n += 1\n \nprint(\"Part {} answer:\".format(2 if extra else 1), keys[63])\n","repo_name":"daExile/advent-of-code","sub_path":"2016/python/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"20477657893","text":"from dotenv import dotenv_values\n\nfrom app.mailer import Mailer\n\n\nconfig = dotenv_values(\"../../.env\") # config = {\"USER\": \"foo\", \"EMAIL\": \"foo@example.org\"}\n\ndef run_mailer():\n mailer = Mailer(config=config)\n mailer.start_server()\n\nrun_mailer()","repo_name":"pspelman/cert_completion_mailer","sub_path":"app/test/mailer_qa.py","file_name":"mailer_qa.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26232646502","text":"\n\ndef IsWindows():\n\tfrom sys import platform\n\treturn platform == \"win32\"\n\ndef IsLinux():\n\tfrom sys import platform\n\treturn platform == \"linux\" or platform == \"linux2\"\n\n\nimport multiprocessing, os, platform, subprocess, sys\n\nif IsWindows():\n\timport vswhere\n\nENGINE_PATH = os.getcwd()\n\nTHIRD_PARTY_PATH = ENGINE_PATH + \"/third_party/\"\nINSTALL_DIR = ENGINE_PATH + \"/temp/third_party/\"\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\ndef LogError(message):\n\tprint(bcolors.FAIL + \"[E] %s\" % message + bcolors.ENDC)\n\tsys.stdout.flush()\n\t\ndef PauseAssert():\n\tif 0 == sys.platform.find(\"win\"):\n\t\tpauseCmd = \"pause\"\n\telse:\n\t\tpauseCmd = \"read\"\n\tsubprocess.call(pauseCmd, shell = True)\n\tsys.exit(1)\n\ndef LogInfo(message):\n\tprint(bcolors.OKCYAN + \"[I] %s\" % message + bcolors.ENDC)\n\tsys.stdout.flush()\n\t\ndef LogSuccess(message):\n\tprint(bcolors.OKGREEN + \"[I] %s\" % message + bcolors.ENDC)\n\tsys.stdout.flush()\n\t\ndef LogWarning(message):\n\tprint(bcolors.WARNING + \"[W] %s\" % message + bcolors.ENDC)\n\tsys.stdout.flush()\n\t\ndef CheckError(result):\n\tif result.returncode:\n\t\tif IsWindows():\n\t\t\tif result.stdout:\n\t\t\t\tLogWarning(str(result.stdout).replace(\"\\\\r\\\\n\", \"\\n\"))\n\t\t\tif result.stderr:\n\t\t\t\tLogError(str(result.stderr).replace(\"\\\\r\\\\n\", \"\\n\"))\n\t\telse:\n\t\t\tif result.stdout:\n\t\t\t\tLogWarning(str(result.stdout).replace(\"\\\\n\", \"\\n\"))\n\t\t\tif result.stderr:\n\t\t\t\tLogError(str(result.stderr).replace(\"\\\\n\", \"\\n\"))\n\t\tLogError(\"failed with exit code \" + str(result.returncode))\n\t\tPauseAssert()\n\ndef RunSubProcess(Command):\n if IsWindows():\n CheckError(subprocess.run(Command, capture_output=True))\n else:\n CheckError(subprocess.run(Command.split(), capture_output=True))\n\n\ndef BuildModule(ModuleName, BuildProj = \"ALL_BUILD.vcxproj\", CMakeOptions = \"\", NinjaLibPath = \"Null\"):\t\n\tif IsWindows():\n\t\tLibPath = THIRD_PARTY_PATH + ModuleName\n\t\tBuildPath = LibPath + \"/Build\"\n\t\tVcxProjPath = BuildPath + \"/\" + BuildProj\n\t\t# Search for MSBuild path\n\t\tVsPath = vswhere.find(\"MSBuild\\**\\Bin\\MSBuild.exe\")\n\t\t\n\t\t# Build module\n\t\tLogInfo(\"building module : \" + ModuleName)\n\t\tRunSubProcess(\"cmake -S \" + LibPath + \" -B \" + BuildPath + \" \" + CMakeOptions)\n\t\t\n\t\t# Compile module\n\t\tLogInfo(\"compiling [debug] ... 
\")\n\t\tRunSubProcess(VsPath[0] + \" \" + VcxProjPath + \" /t:build /p:Configuration=\\\"Debug\\\" /p:Platform=\\\"x64\\\" /p:BuildInParallel=true /p:OutDir=\" + INSTALL_DIR + \"/debug/\")\n\n\t\tLogInfo(\"compiling [release] ... \")\n\t\tRunSubProcess(VsPath[0] + \" \" + VcxProjPath + \" /t:build /p:Configuration=\\\"Release\\\" /p:Platform=\\\"x64\\\" /p:BuildInParallel=true /p:OutDir=\" + INSTALL_DIR + \"/release/\")\n\t\tLogSuccess(\"Success !\")\n\tif IsLinux():\n\t\tLibPath = THIRD_PARTY_PATH + ModuleName\n\t\tBuildPath = LibPath + \"/Build\"\n\n\t\t# Build module\n\t\tLogInfo(\"building module : \" + ModuleName)\n\t\tRunSubProcess(\"cmake -S \" + LibPath + \" -G Ninja \" + \" -B \" + BuildPath + \" \" + CMakeOptions)\n\t\t\n\t\t# Compile module\n\t\tLogInfo(\"compiling ... \")\n\t\tRunSubProcess(\"ninja -C \" + BuildPath)\n\t\tLogSuccess(\"Success !\")\n\t\t\n\t\t# Move libraries\n\t\tLogInfo(\"Move libraries\")\n\t\tRunSubProcess(\"mv \" + BuildPath + \"/\" + NinjaLibPath + \" \" + INSTALL_DIR)\n\t\tLogSuccess(\"Success !\")\n\n\n\n\n#MAIN\n\nLogInfo(\"updating git submodules ...\")\nRunSubProcess(\"git submodule update --init --recursive\")\n\nRunSubProcess(\"mkdir -p \" + INSTALL_DIR)\n\n# CppUtils\nBuildModule(\n\t\"cpputils\",\n\t\"cpputils.vcxproj\",\n\t\"\",\n\t\"bin/Lib/cpputils.a\")\n\n\n\n\nos.chdir(ENGINE_PATH)\nLogSuccess(\"Install complete !\")\n","repo_name":"PierreEVEN/jamer","sub_path":"tools/Install.py","file_name":"Install.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40654126634","text":"from UserInputs import *\nimport time\nimport requests\nfrom datetime import datetime, timedelta, date\nfrom itertools import chain\nimport numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\nfrom pprint import pprint\n\n\ndef initialize(YearsBack, CloudOrSandbox, Ticker):\n # Print current date and time, and calculate now minus 5 years + 1 day\n print(datetime.now())\n DeltaDate = datetime.now().date() - timedelta(days=((365 * YearsBack) + 1))\n\n # Show all rows and columns for dataframe\n pd.set_option('display.max_columns', None)\n pd.set_option('display.max_rows', None)\n\n # Try statement to pull ticker list, tries for sec site but if it's down pulls locally\n try:\n TickerListDF = pd.read_csv('https://sec.gov/include/ticker.txt', sep='\\t', names=['Ticker', 'CIK'],\n index_col='Ticker')\n except:\n TickerListDF = pd.read_csv(r'C:/Users/modyv//Documents/GitHub/Stonk/ticker.txt', sep='\\t',\n names=['Ticker', 'CIK'], index_col='Ticker')\n\n # Selects the appropriate response based off IEX version requested\n if ('Cloud' in CloudOrSandbox) or ('cloud' in CloudOrSandbox):\n BaseUrlIEX = 'https://cloud.iexapis.com/stable/stock/'\n IEXToken = 'pk_acd6e54847cd428b8959702163eca5ba'\n IEXRate = 0.01\n print('IEX running in \\\"Cloud\\\" mode, data is accurate. WATCH CREDIT USAGE!')\n elif ('Sandbox' in CloudOrSandbox) or ('sandbox' in CloudOrSandbox):\n BaseUrlIEX = 'https://sandbox.iexapis.com/stable/stock/'\n IEXToken = 'Tpk_9f4a350423954be3b70ec31a1b20102d'\n IEXRate = 0.5\n print('IEX running in \\\"Sandbox\\\" mode, quote data is NOT accurate. 
FOR TESTING ONLY!')\n else:\n print('Input valid mode for IEX')\n exit()\n\n # Finds corresponding CIK number for the ticker\n TickerInfo = TickerListDF.loc[Ticker.lower()]\n CIK = str(TickerInfo['CIK'])\n CIKLeadingZeros = CIK.zfill(10)\n print('Ticker: ', Ticker.upper())\n print('CIK #: ', CIK)\n\n return DeltaDate, TickerListDF, BaseUrlIEX, IEXToken, IEXRate, CIKLeadingZeros\n\n\n# Function finds the valid filing taxonomy between US-GAAP and IFRS-Full then outputs the data in EDGAR_json\ndef sec_api_response(CIKLeadingZeros, requests_headers):\n # SEC filing api\n url = 'https://data.sec.gov/api/xbrl/companyfacts/CIK' + CIKLeadingZeros + '.json'\n response = requests.get(url, headers=requests_headers)\n\n # Try US-GAAP and IFRS-Full taxonomies to find out which one is valid\n USGAAPList = []\n IFRSFullList = []\n try:\n # Output EDGAR data\n EDGAR_json = response.json()\n EDGAR_DEI_json = EDGAR_json['facts']['dei']\n EDGAR_json = EDGAR_json['facts']['us-gaap']\n JSONKEYS = EDGAR_json.keys()\n List = list(JSONKEYS)\n USGAAPList += [List]\n except:\n # Output EDGAR data\n EDGAR_json = response.json()\n EDGAR_DEI_json = EDGAR_json['facts']['dei']\n EDGAR_json = EDGAR_json['facts']['ifrs-full']\n JSONKEYS = EDGAR_json.keys()\n List = list(JSONKEYS)\n IFRSFullList += [List]\n\n # Decides what the valid taxonomy is for the given ticker\n if USGAAPList:\n ValidTaxonomy = USGAAPList\n elif IFRSFullList:\n ValidTaxonomy = IFRSFullList\n else:\n print('No valid taxonomy')\n exit()\n return ValidTaxonomy, EDGAR_json, EDGAR_DEI_json, response, url\n\n\n# Create a list to see if our LookUpValue exists. Since there are 13k+ companies with files dating back to who knows\n# when it is best to pass multiple lookup values, however, if the lookup value doesn't exist then the code kicks back\n# an error. Creating a list called 'LookUpValueExists' allows us to screen the given value before searching\ndef lookup_value_exists(LookUpValue, ValidTaxonomy):\n LookUpValueExists = []\n for i in range(0, len(LookUpValue)):\n if LookUpValue[i] in ValidTaxonomy[0]:\n LookUpValueExists += [i]\n else:\n LookUpValueExists += [-1]\n return LookUpValueExists\n\n\n# Return the form, the end date, the value, and identify if it is a quarterly or annual result\ndef get_requested_data(LookUpValueExists, EDGAR_json, EDGAR_DEI_json, LookUpValue, DeltaDate, ISO8601):\n FormResults = []\n EndDateResults = []\n ValResults = []\n QorKResults = []\n ValLookedUpResults = []\n for i in range(0, len(LookUpValueExists)):\n if LookUpValueExists[i] >= 0:\n LookUp = EDGAR_json[LookUpValue[i]]['units']\n # Automatically find the next json key\n Key = list(LookUp.keys())[0]\n LookUp = LookUp[Key]\n # Returns how many data points we are pulling for the given LookUpValue\n LookUpCount = len(LookUp)\n LookUpFormList = []\n LookUpEndDateList = []\n LookUpValList = []\n QorKColumn = []\n ValLookedUpList = []\n # Using the for loop below to screen the 'frame' json key. This frame key is used to identify whether the data\n # returned is quarterly annual within an annual filings. 
We also populate our dataframe 'Quarterly or Annual'\n # column using the frame data\n if LookUpValue[0] == 'D & A':\n for j in range(0, LookUpCount):\n Frame = LookUp[j]['fp']\n Form = LookUp[j]['form']\n EndDate = LookUp[j]['end']\n Val = LookUp[j]['val']\n ValLookedUp = [LookUpValueExists[i]]\n if (('Q' in Frame) or ('q' in Frame)) and (\n datetime.strptime(EndDate, ISO8601).date() >= DeltaDate):\n QorKColumn += ['Q']\n LookUpFormList += [Form]\n LookUpEndDateList += [EndDate]\n LookUpValList += [Val]\n ValLookedUpList += [ValLookedUp]\n elif (('Q' not in Frame) or ('q' not in Frame)) and (\n datetime.strptime(EndDate, ISO8601).date() >= DeltaDate):\n QorKColumn += ['K']\n LookUpFormList += [Form]\n LookUpEndDateList += [EndDate]\n LookUpValList += [Val]\n ValLookedUpList += [ValLookedUp]\n\n else:\n for j in range(0, LookUpCount):\n if 'frame' in LookUp[j]:\n Frame = LookUp[j]['frame']\n Form = LookUp[j]['form']\n EndDate = LookUp[j]['end']\n Val = LookUp[j]['val']\n ValLookedUp = [LookUpValueExists[i]]\n if (('Q' in Frame) or ('q' in Frame)) and (datetime.strptime(EndDate, ISO8601).date() >= DeltaDate):\n QorKColumn += ['Q']\n LookUpFormList += [Form]\n LookUpEndDateList += [EndDate]\n LookUpValList += [Val]\n ValLookedUpList += [ValLookedUp]\n elif (('Q' not in Frame) or ('q' not in Frame)) and (datetime.strptime(EndDate, ISO8601).date() >= DeltaDate):\n QorKColumn += ['K']\n LookUpFormList += [Form]\n LookUpEndDateList += [EndDate]\n LookUpValList += [Val]\n ValLookedUpList += [ValLookedUp]\n FormResults += [LookUpFormList]\n EndDateResults += [LookUpEndDateList]\n ValResults += [LookUpValList]\n QorKResults += [QorKColumn]\n ValLookedUpResults += [ValLookedUpList]\n\n return FormResults, EndDateResults, ValResults, QorKResults, ValLookedUpResults\n\n\ndef data_to_dataframe(FormResults, EndDateResults, ValResults, QorKResults, ValLookedUpResults, LookUpValue):\n ColumnHeader = LookUpValue[0]\n # Compile output into numpy array prior to dataframe creation\n FormResults = list(chain.from_iterable(FormResults))\n EndDateResults = list(chain.from_iterable(EndDateResults))\n ValResults = list(chain.from_iterable(ValResults))\n QorKResults = list(chain.from_iterable(QorKResults))\n ValLookedUpResults = list(chain.from_iterable(ValLookedUpResults))\n\n TotalLookUpCount = len(ValResults)\n FormResultsArray = np.array(FormResults).reshape(TotalLookUpCount, 1)\n EndDateResultsArray = np.array(EndDateResults).reshape(TotalLookUpCount, 1)\n ValResultsArray = np.array(ValResults).reshape(TotalLookUpCount, 1)\n QorKResultsArray = np.array(QorKResults).reshape(TotalLookUpCount, 1)\n ValLookedUpArray = np.array(ValLookedUpResults).reshape(TotalLookUpCount, 1)\n\n FilingResultsArray = np.concatenate((FormResultsArray,\n EndDateResultsArray, ValResultsArray,\n QorKResultsArray,\n ValLookedUpArray), axis=1)\n FilingResultsDF = pd.DataFrame(FilingResultsArray, columns=['Form', 'End Date', ColumnHeader, 'Q or K', 'Lookup Val'])\n\n # Dataframe must be double sorted for next step to work properly\n FilingResultsDF.sort_values(by=['End Date', 'Q or K'], inplace=True)\n FilingResultsDF.reset_index(drop=True, inplace=True)\n return FilingResultsDF\n\n\ndef remove_inferior_rows(LookUpValue, FilingResultsDF):\n # Goes through FilingResultsDF and checks to see if multiple data points for the same date and the same 'Q or K' value.\n # First for loop makes sure j+1 value does not fall outside the dataframe\n # The inferior value (based off the user defined LookUpValue list order) is deleted\n 
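# Illustrative example (hypothetical values, not from the source data): if two\n    # rows share End Date 2020-12-31 and 'Q or K' == 'Q', the row whose\n    # 'Lookup Val' is larger (a lower-priority tag in LookUpValue) is queued\n    # for deletion here.\n    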
InferiorLookupValIndex = []\n for i in range(1, (len(LookUpValue) + 1)):\n for j in range(0, (len(FilingResultsDF) - i)):\n if ((FilingResultsDF.iloc[j + i, 4] > FilingResultsDF.iloc[j, 4]) and\n (FilingResultsDF.iloc[j + i, 1] == FilingResultsDF.iloc[j, 1]) and\n (FilingResultsDF.iloc[j + i, 3] == FilingResultsDF.iloc[j, 3])):\n InferiorLookupValIndex += [j + i]\n elif ((FilingResultsDF.iloc[j + i, 4] < FilingResultsDF.iloc[j, 4]) and\n (FilingResultsDF.iloc[j + i, 1] == FilingResultsDF.iloc[j, 1]) and\n (FilingResultsDF.iloc[j + i, 3] == FilingResultsDF.iloc[j, 3])):\n InferiorLookupValIndex += [j]\n\n FilingResultsDF.drop(InferiorLookupValIndex, inplace=True)\n FilingResultsDF.reset_index(drop=True, inplace=True)\n\n return FilingResultsDF\n\n\ndef select_quarterly_over_annual(FilingResultsDF):\n # Identify duplicate End Dates\n EndDateList = FilingResultsDF.loc[:, 'End Date']\n EndDateList = EndDateList.values\n # Returns the values of only the duplicates\n UniqueEndDates, EndDateCount = np.unique(EndDateList, return_counts=True)\n DuplicateEndDates = UniqueEndDates[EndDateCount > 1]\n\n # Identifies the duplicate end dates index values\n DuplicateDatesIndices = []\n for i in range(0, len(DuplicateEndDates)):\n Duplicates = FilingResultsDF[FilingResultsDF['End Date'] == DuplicateEndDates[i]].index.values\n Duplicates = Duplicates.tolist()\n DuplicateDatesIndices += Duplicates\n\n # Loops through dataframe to identify entries with identical dates with a Q and K val in the 'Q or K' column\n RowsToDelete = []\n for i in range(0, len(DuplicateDatesIndices)):\n RepetitiveDatesCheckVal1 = DuplicateDatesIndices[i]\n for j in range(0, len(DuplicateDatesIndices)):\n RepetitiveDatesCheckVal2 = DuplicateDatesIndices[j]\n if i != j:\n if (FilingResultsDF.iloc[RepetitiveDatesCheckVal1, 1] == FilingResultsDF.iloc[\n RepetitiveDatesCheckVal2, 1]) \\\n and ((FilingResultsDF.iloc[RepetitiveDatesCheckVal1, 3] == 'Q') and\n (FilingResultsDF.iloc[RepetitiveDatesCheckVal2, 3] == 'K')):\n RowsToDelete += [RepetitiveDatesCheckVal2]\n elif (FilingResultsDF.iloc[RepetitiveDatesCheckVal1, 1] == FilingResultsDF.iloc[\n RepetitiveDatesCheckVal2, 1]) \\\n and ((FilingResultsDF.iloc[RepetitiveDatesCheckVal1, 3] == 'K') and\n (FilingResultsDF.iloc[RepetitiveDatesCheckVal2, 3] == 'Q')):\n RowsToDelete += [RepetitiveDatesCheckVal1]\n elif (FilingResultsDF.iloc[RepetitiveDatesCheckVal1, 1] == FilingResultsDF.iloc[\n RepetitiveDatesCheckVal2, 1]) \\\n and (('Q' in FilingResultsDF.iloc[RepetitiveDatesCheckVal1, 0]) and\n ('K' in FilingResultsDF.iloc[RepetitiveDatesCheckVal2, 0])):\n RowsToDelete += [RepetitiveDatesCheckVal2]\n elif (FilingResultsDF.iloc[RepetitiveDatesCheckVal1, 1] == FilingResultsDF.iloc[\n RepetitiveDatesCheckVal2, 1]) \\\n and (('K' in FilingResultsDF.iloc[RepetitiveDatesCheckVal1, 0]) and\n ('Q' in FilingResultsDF.iloc[RepetitiveDatesCheckVal2, 0])):\n RowsToDelete += [RepetitiveDatesCheckVal1]\n\n # Deletes previously identified rows\n UniqueRowsToDelete = np.unique(RowsToDelete)\n UniqueRowsToDelete = UniqueRowsToDelete.tolist()\n FilingResultsDF.drop(RowsToDelete, inplace=True)\n\n # Remove duplicate rows if they've managed to pass through and reset the dataframe index\n FilingResultsDF.drop_duplicates(inplace=True)\n FilingResultsDF.reset_index(drop=True, inplace=True)\n\n return FilingResultsDF\n\n\ndef calculate_quarterly(k, FilingResultsDF, ISO8601, LookUpValueList):\n # Get dataframe index values of entries with annual result reported\n AnnualValueList = (FilingResultsDF[FilingResultsDF['Q or 
K'] == 'K']).index.tolist()\n ColumnHeader = LookUpValueList[k][0]\n if 'Shares Outstanding' not in ColumnHeader:\n # Find the index of the annual values and calculate a fourth quarter value only if there are 3 prior quarterly values\n CalculatedFourthQuarterVal = []\n ValidAnnualValList = []\n for i in range(0, len(AnnualValueList)):\n if AnnualValueList[i] > 2:\n AnnualIndexVal = AnnualValueList[i]\n AnnualVal = float(FilingResultsDF.iloc[AnnualIndexVal, 2])\n FirstThreeQuartersList = []\n for j in range(1, 4):\n if datetime.strptime(FilingResultsDF.iloc[AnnualIndexVal, 1], ISO8601) - datetime.strptime(\n FilingResultsDF.iloc[AnnualIndexVal - j, 1], ISO8601) < timedelta(days=365):\n FirstThreeQuartersList += [float(FilingResultsDF.iloc[AnnualIndexVal - j, 2])]\n if len(FirstThreeQuartersList) > 2:\n FourthQuarterVal = AnnualVal - sum(FirstThreeQuartersList)\n if FourthQuarterVal.is_integer():\n FourthQuarterVal = int(FourthQuarterVal)\n CalculatedFourthQuarterVal += [FourthQuarterVal]\n ValidAnnualValList += [AnnualIndexVal]\n\n # Replace the annual value with a calculated fourth quarter value\n if len(CalculatedFourthQuarterVal) == len(ValidAnnualValList):\n for i in range(0, len(ValidAnnualValList)):\n FilingResultsDF.at[ValidAnnualValList[i], ColumnHeader] = CalculatedFourthQuarterVal[i]\n FilingResultsDF.at[ValidAnnualValList[i], 'Q or K'] = 'Q - Calc'\n # If the dataframe is empty then exit\n if len(FilingResultsDF) == 0:\n print(ColumnHeader, 'Query returned no values, check SEC database to find additional json tags')\n exit()\n\n return FilingResultsDF\n\n\ndef historic_quote_date(FilingResultsDF, ISO8601):\n # Determines if filing end date falls on a weekend or holiday for use in finding an historic stock quote\n ValidEndDateList = []\n for i in range(0, len(FilingResultsDF)):\n EndDateForQuote = datetime.strptime(FilingResultsDF.iloc[i, 0], ISO8601).date()\n Today = date.today()\n if Today - EndDateForQuote < timedelta(days=((365 * 5) + 1)):\n DayValue = EndDateForQuote.weekday()\n MarketHolidaysURL = 'http://www.market-holidays.com/' + str(EndDateForQuote.year)\n MarketHolidayResponse = requests.get(MarketHolidaysURL)\n data = MarketHolidayResponse.text\n soup = bs(data, 'html.parser')\n # If end date falls on weekday, check if the day is a holiday. If it does not, put the end date in a list. 
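(Quotes exist only on trading days, hence the weekend and holiday screening.) 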
If it\n # does, keep subtracting one day until it is not on a holiday or weekend\n if DayValue < 5:\n HolidayList = []\n for td in soup.find_all('td'):\n Holiday = td.get_text()\n # This if statement pulls holiday dates\n if str(EndDateForQuote.year) in Holiday:\n Holiday = datetime.strptime(Holiday, '%B %d, %Y')\n Holiday = str(Holiday.date())\n HolidayList += [Holiday]\n if str(EndDateForQuote) not in HolidayList:\n ValidEndDateList += [str(EndDateForQuote)]\n else:\n ValidDate = 0\n DayDelta = 1\n while ValidDate == 0:\n EndDateForQuote = EndDateForQuote - timedelta(days=DayDelta)\n DayValue = EndDateForQuote.weekday()\n if DayValue < 5:\n HolidayList = []\n for td in soup.find_all('td'):\n Holiday = td.get_text()\n # This if statement pulls holiday dates\n if str(EndDateForQuote.year) in Holiday:\n Holiday = datetime.strptime(Holiday, '%B %d, %Y')\n Holiday = str(Holiday.date())\n HolidayList += [Holiday]\n if str(EndDateForQuote) not in HolidayList:\n ValidEndDateList += [str(EndDateForQuote)]\n ValidDate = 1\n DayDelta = DayDelta + 1\n # If end date falls on weekend, keep subtracting one day until it is not on a holiday or weekend\n else:\n ValidDate = 0\n DayDelta = DayValue - 4\n while ValidDate == 0:\n EndDateForQuoteCalc = EndDateForQuote - timedelta(days=DayDelta)\n HolidayList = []\n for td in soup.find_all('td'):\n Holiday = td.get_text()\n # This if statement pulls holiday dates\n if str(EndDateForQuoteCalc.year) in Holiday:\n Holiday = datetime.strptime(Holiday, '%B %d, %Y')\n Holiday = str(Holiday.date())\n HolidayList += [Holiday]\n if str(EndDateForQuoteCalc) not in HolidayList:\n ValidEndDateList += [str(EndDateForQuoteCalc)]\n ValidDate = 1\n DayDelta = DayDelta + 1\n return ValidEndDateList\n\n\ndef historic_quote(ValidEndDateList, ISO8601, requests_headers, BaseUrlIEX, Ticker, IEXToken, IEXRate):\n # Create a list of historic stock quotes as well as the valid quote date\n HistoricQuoteDateList = []\n HistoricQuoteList = []\n for i in range(0, len(ValidEndDateList)):\n ValidEndDate = datetime.strptime(ValidEndDateList[i], ISO8601)\n ValidEndDateStripped = str(ValidEndDateList[i]).replace('-', '')\n IEXUrl = BaseUrlIEX + Ticker + '/chart/date/' + ValidEndDateStripped + '?chartByDay=true&token=' + IEXToken\n # Pull IEX historic quote value, timer is to prevent rate limiting (1 request every 10 ms)\n IEXStartTimer = time.perf_counter()\n IEXResponse = requests.get(IEXUrl, headers=requests_headers)\n StatusCode = IEXResponse.status_code\n if StatusCode == 200:\n HistoricQuote = IEXResponse.json()[0]['close']\n HistoricQuoteDateList += [str(ValidEndDate.date())]\n HistoricQuoteList += [HistoricQuote]\n else:\n iex_http_response_codes(IEXResponse)\n exit()\n IEXStopTimer = time.perf_counter()\n ProcessTime = IEXStopTimer - IEXStartTimer\n # IEX allows 1 search every 10 ms in cloud mode\n if ProcessTime < IEXRate:\n time.sleep(IEXRate - ProcessTime)\n\n return HistoricQuoteDateList, HistoricQuoteList\n\n\ndef merge_dataframes(HistoricQuoteList, HistoricQuoteDateList, FilingResultsDF):\n # Merge dataframes\n if len(HistoricQuoteList) == len(HistoricQuoteDateList) == len(FilingResultsDF):\n HistoricQuoteDataframe = pd.DataFrame(HistoricQuoteList, HistoricQuoteDateList)\n HistoricQuoteDataframe.reset_index(level=0, inplace=True)\n HistoricQuoteDataframe.rename(columns={HistoricQuoteDataframe.columns[0]: 'Quote Date',\n HistoricQuoteDataframe.columns[1]: 'Historic Quote'}, inplace=True)\n ConcatDataFrame = pd.concat([FilingResultsDF, HistoricQuoteDataframe], axis=1)\n 
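# pd.concat with axis=1 pastes the quote columns beside the filing columns by\n        # integer position, so the three inputs must already be row-aligned\n        # (guarded by the len() equality check above).\n        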
#print(ConcatDataFrame)\n else:\n print(len(HistoricQuoteList), len(HistoricQuoteDateList), len(FilingResultsDF))\n print('Number of data points do not match')\n exit()\n\n return ConcatDataFrame\n\n\ndef pe_ratio(ConcatDataFrame):\n HistoricQuoteList = ConcatDataFrame['Historic Quote'].tolist()\n EPSList = ConcatDataFrame['EPS'].tolist()\n PERatioList = []\n if len(HistoricQuoteList) == len(EPSList):\n for i in range(0, len(HistoricQuoteList)):\n PERatioList += [float(HistoricQuoteList[i])/float(EPSList[i])]\n ConcatDataFrame['PE Ratio'] = PERatioList\n print(ConcatDataFrame)\n\n\ndef general_http_response_codes(response):\n response_status = str(response.status_code)\n if '200' in response_status:\n print('Website Response = 200: Connected successfully')\n elif '301' in response_status:\n print('Website Response = 301: Redirected to a different endpoint')\n elif '400' in response_status:\n print('Website Response = 400: Bad request, try again')\n elif '401' in response_status:\n print('Website Response = 401: Login required')\n elif '403' in response_status:\n print('Website Response = 403: Forbidden')\n elif '404' in response_status:\n print('Website Response = 404: Cannot access requested site')\n elif '429' in response_status:\n print('Website Response = 429: Too many requests (rate limited)')\n elif '503' in response_status:\n print('Website Response = 503: Server is not ready to handle request')\n else:\n print('N/A')\n\n\ndef iex_http_response_codes(IEXResponse):\n response_status = str(IEXResponse.status_code)\n if '200' in response_status:\n print('IEX Response =', response_status,\n 'Connected successfully')\n elif '400' in response_status:\n print('IEX Response =', response_status,\n 'Invalid values were supplied for the API request/'\n 'No symbol provided/'\n 'Batch request \\\"types\\\" parameter requires a valid value')\n elif '401' in response_status:\n print('IEX Response =', response_status,\n 'Hashed token authorization is restricted/'\n 'Hashed token authorization is required/'\n 'The requested data is marked restricted and the account does not have access/'\n 'An API key is required to access the requested endpoint/'\n 'The secret key is required to access to requested endpoint/'\n 'The referer in the request header is not allowed due to API token domain restrictions')\n elif '402' in response_status:\n print('IEX Response =', response_status,\n 'You have exceeded your allotted credit quota/'\n 'The requested endpoint is not available to free accounts/'\n 'The requested data is not available to your current tier')\n elif '403' in response_status:\n print('IEX Response =', response_status,\n 'Hashed token authorization is invalid/'\n 'The provided API token has been disabled/'\n 'The provided API token is not valid/'\n 'A test token was used for a production endpoint/'\n 'A production token was used for a sandbox endpoint/'\n 'Your pay-as-you-go circuit breaker has been engaged and further requests are not allowed/'\n 'Your account is currently inactive')\n elif '404' in response_status:\n print('IEX Response =', response_status,\n 'Unknown symbol provided/'\n 'Resource not found')\n elif '413' in response_status:\n print('IEX Response =', response_status,\n 'Maximum number of \\\"types\\\" values provided in a batch request')\n elif '429' in response_status:\n print('IEX Response =', response_status,\n 'Too many requests hit the API too quickly. 
An exponential backoff of your requests is recommended')\n elif '451' in response_status:\n print('IEX Response =', response_status,\n 'The requested data requires additional permission to access')\n elif '500' in response_status:\n print('IEX Response =', response_status,\n 'Something went wrong on an IEX Cloud server')\n else:\n print('IEX Response =', response_status,\n 'Unknown error')\n\n\n# The order of look up items matter, sorted from most to least important\nRevenueList = ['Revenue',\n 'Revenues',\n 'RevenuesNetOfInterestExpense',\n 'SalesRevenueNet',\n 'RevenueFromContractWithCustomerExcludingAssessedTax']\n\nNetIncomeList = ['Net Income',\n 'NetIncomeLoss',\n 'ProfitLoss']\n\nEPSList = ['EPS',\n 'EarningsPerShareBasic',\n 'EarningsPerShareDiluted']\n\nSharesOutstandingList = ['Shares Outstanding',\n 'WeightedAverageNumberOfSharesOutstandingBasic',\n 'CommonStockSharesOutstanding',\n 'PreferredStockValueOutstanding']\n\nEBITList = ['EBIT',\n 'OperatingIncomeLoss',\n 'CostsAndExpenses']\n\nDepreciationAndAmortization = ['D & A',\n 'DepreciationAndAmortization',\n 'DepreciationDepletionAndAmortization',\n 'Depreciation']\n","repo_name":"PopePiusXIII/Stonk","sub_path":"Edgar/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":26834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"31295196909","text":"#! /usr/bin/env python\nimport rospy\nimport os\nfrom ur_dashboard_msgs.srv import GetRobotMode\nfrom std_srvs.srv import Trigger\nfrom requests import post, get\n\nclass InitializerNH:\n def __init__(self):\n rospy.init_node('initializer', anonymous=True)\n\n self.status = \"OFF\"\n self.rate = rospy.Rate(3) # 3 hz\n\n self.robot_ip = \"http://192.168.171\"\n\n self.instantiate_the_robot()\n\n def check_ping(self):\n hostname = self.robot_ip\n ping_status = False\n while not ping_status:\n response = get(hostname)\n # and then check the response...\n print(response.status_code)\n if response.status_code == 200:\n ping_status = True\n else:\n ping_status = False\n\n break\n return ping_status\n\n def instantiate_the_robot(self):\n self.robot_power_status = self.power_on()\n\n if \"Powering on\" in self.robot_power_status.message:\n print(self.robot_power_status.message)\n self.status = \"Powering on\"\n\n while self.status == \"Powering on\":\n self.robot_power_status = self.wait_for_status()\n print(self.robot_power_status)\n if \"IDLE\" in self.robot_power_status.answer:\n print(self.robot_power_status.answer)\n self.status = \"IDLE\"\n\n rospy.sleep(1.0)\n self.robot_power_status = self.brake_release()\n if \"IDLE\" in self.robot_power_status.answer:\n print(self.robot_power_status.answer)\n self.status = \"IDLE\"\n message = \"Robot Initialized\"\n\n os.system(\"pkill rosmaster && pkill roscore\")\n\n return True, message\n\n\n def brake_release(self):\n rospy.wait_for_service('/ur_hardware_interface/dashboard/brake_release', timeout=10.0)\n\n try:\n service = rospy.ServiceProxy('/ur_hardware_interface/dashboard/brake_release', GetRobotMode)\n resp = service()\n return resp\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\" % e)\n\n def wait_for_status(self):\n rospy.wait_for_service('/ur_hardware_interface/dashboard/get_robot_mode', timeout=10.0)\n\n try:\n service = rospy.ServiceProxy('/ur_hardware_interface/dashboard/get_robot_mode', GetRobotMode)\n resp = service()\n return resp\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\" % e)\n\n def 
power_on(self):\n rospy.wait_for_service('/ur_hardware_interface/dashboard/power_on', timeout=10.0)\n\n try:\n service = rospy.ServiceProxy('/ur_hardware_interface/dashboard/power_on', Trigger)\n resp = service()\n return resp\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\" % e)\n\n\nif __name__ == \"__main__\":\n g = InitializerNH()\n","repo_name":"asabri97/catkin_wsBackup","sub_path":"src/example/src/initialize_robot.py","file_name":"initialize_robot.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"42745327174","text":"def validarnombre(nombre):\n while len(nombre)>12 or len(nombre)<6 or nombre.isalnum()==False:\n if len(nombre)>12 or len(nombre)<6:\n print(\"The username must have a minimum of 6 characters and a maximum of 12.\")\n if nombre.isalnum()==False:\n print(\"The username may only contain letters and numbers\")\n nombre = input(\"Enter a new username: \")\n print(\"Valid username\")\n return True\n \n","repo_name":"carlosmanri/Logic-Thinking","sub_path":"Carlos/Ejercicios-2/validarnombre.py","file_name":"validarnombre.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"40585814007","text":"import json\n\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render\nfrom training.db import *\n\nfrom .role import Role\n\n\nclass Userrole:\n\tdef __init__(self, info):\n\t\tself.info = info\n\n\tdef home(self):\n\t\treturn \"organizeuser/home.html\", {}\n\n\tdef list(self):\n\t\tcategory = \"usercode\"\n\t\tif \"page\" in self.info.keys():\n\t\t\tpage_num = self.info[\"page\"]\n\t\telse:\n\t\t\tpage_num = 1\n\t\tif \"q\" in self.info.keys() and self.info[\"q\"] != \"\":\n\t\t\tq = self.info[\"q\"]\n\t\t\tcommand = \"select * from ou_userrole where usercode REGEXP %s\"\n\t\t\tqueryset = fetch_all(command, (q))\n\t\telse:\n\t\t\tcommand = \"select * from ou_userrole order by usercode\"\n\t\t\tqueryset = fetch_all(command, (None))\n\n\t\tusers = []\n\t\tfor obj in queryset:\n\t\t\tusercode = obj[\"usercode\"]\n\t\t\texist_user = []\n\t\t\tfor item in users:\n\t\t\t\texist_user.append(item[\"usercode\"])\n\t\t\tif usercode not in exist_user:\n\t\t\t\tusers.append(obj)\n\t\t\n\t\tpaginator = Paginator(users, 10)\n\t\tpage_obj = paginator.page(page_num)\n\t\tcontext = {'page_obj': page_obj}\n\t\tif \"q\" in self.info.keys() and self.info[\"q\"] != \"\":\n\t\t\tcontext['q'] = q\n\t\treturn \"organizeuser/userrole/list.html\", context\n\n\tdef rolelist(self):\n\t\tquery = 'SELECT * FROM ou_role'\n\t\tqueryset = fetch_all(query,())\n\t\tpaginator = Paginator(queryset,10)\n\t\tpage_obj = paginator.get_page(1)\n\t\tHTML = \"organizeuser/userrole/rolelist.html\"\n\t\treturn HTML,{'page_obj':page_obj}\n\n\tdef rolelistuser(self):\n\t\trolecode = self.info[\"rolecode\"]\n\t\tquery = 'SELECT * FROM ou_userrole where rolecode=%s'\n\t\tqueryset = fetch_all(query,(rolecode))\n\t\tusers = []\n\t\tusernames = []\n\t\tfor each in queryset:\n\t\t\tusercode = each[\"usercode\"]\n\t\t\tprint(usercode)\n\t\t\tquery2 = 'SELECT * FROM ou_account where usercode=%s'\n\t\t\tqueryset2 = fetch_one(query2,(usercode))\n\t\t\tprint(queryset2)\n\t\t\tusers.append(queryset2)\n\t\t\tusernames.append(queryset2[\"username\"])\n\t\tHTML = \"organizeuser/userrole/rolelistuser.html\"\n\t\treturn 
HTML,{'users':users,\"rolecode\":rolecode,\"usernames\":usernames}\n\n\tdef updaterolelistuser(self):\n\t\trolecode = self.info[\"rolecode\"]\n\t\tusers = self.info[\"users\"]\n\t\tquery = 'Delete FROM ou_userrole where rolecode=%s'\n\t\tdelete(query,(rolecode))\n\t\tselectusers = users.split(\",\")\n\t\tfor each in selectusers:\n\t\t\tquery3 = 'SELECT * from ou_account where username = %s'\n\t\t\tqueryset3 = fetch_one(query3,(each))\n\t\t\tusercode = queryset3[\"usercode\"]\n\t\t\tquery = 'Delete FROM ou_userrole where usercode=%s'\n\t\t\tdelete(query,(usercode))\n\t\t\tquery2 = 'Insert into ou_userrole (id,usercode,rolecode) values (%s,%s,%s)'\n\t\t\tinsertid = findNextId('djangomysql.ou_userrole')\n\t\t\tinsert(query2,(insertid,usercode,rolecode))\t\n\t\tquery = 'SELECT * FROM ou_userrole where rolecode=%s'\n\t\tqueryset = fetch_all(query,(rolecode))\n\t\tusers = []\n\t\tfor each in queryset:\n\t\t\tusercode = each[\"usercode\"]\n\t\t\tquery2 = 'SELECT * FROM ou_account where usercode=%s'\n\t\t\tqueryset2 = fetch_one(query2,(usercode))\n\t\t\tusers.append(queryset2)\n\t\tHTML = \"organizeuser/userrole/rolelistuser.html\"\n\t\treturn HTML,{'users':users,\"rolecode\":rolecode}\n\n\n\n\tdef select(self):\n\t\tusercode = self.info[\"usercode\"]\n\t\tall_roles = fetch_all(\"select * from ou_userrole where usercode = %s\", (usercode))\n\t\tls = []\n\t\tcontext = {}\n\t\tfor obj in all_roles:\n\t\t\tls.append(obj[\"rolecode\"])\n\t\tcontext[\"selectedRole\"] = ls\n\n\t\ttarget = fetch_all(\"select * from ou_role\", (None))\n\n\t\tif \"page\" in self.info.keys():\n\t\t\tpage_num = self.info[\"page\"]\n\t\telse:\n\t\t\tpage_num = 1\n\t\tpaginator = Paginator(target, 10)\n\t\tpage_obj = paginator.page(page_num)\n\t\tcontext['page_obj'] = page_obj\n\t\tcontext['usercode'] = self.info[\"usercode\"]\n\t\treturn \"organizeuser/userrole/update.html\", context\n\n\tdef update(self):\n\t\tprint(\"1111\")\n\t\tif \"update\" not in self.info:\n\t\t\treturn self.select()\n\t\tusercode = self.info[\"usercode\"]\n\t\tprint(\"2222\")\n\t\tif \"selectedRole\" in self.info:\n\t\t\troles = self.info[\"selectedRole\"].split(',')\n\t\t\tprint(self.info[\"selectedRole\"])\n\n\t\t\t# delete all rows related to this user\n\t\t\tcommand = \"delete from ou_userrole where usercode = %s\"\n\t\t\tdelete(command, (usercode))\n\n\t\t\t# insert new selected roles for this user\n\t\t\tcommand = \"insert into ou_userrole (id, usercode, rolecode) values (%s, %s, %s)\"\n\t\t\tfor rolecode in roles:\n\t\t\t\tinsertid = findNextId('djangomysql.ou_userrole')\n\t\t\t\tinsert(command, (str(insertid),usercode, rolecode))\n\t\telse:\n\t\t\tcommand = \"delete from ou_userrole where usercode = %s\"\n\t\t\tdelete(command, (usercode))\n\t\treturn None, None\n\n\t# def delete(self):\n\t# \tcommand = \"delete from ou_userrole where usercode = %s\"\n\t# \tdelete(command, (self.info[\"usercode\"]))\n\t# \treturn None, None\n\n\ndef findNextId(table):\n\tsql = \"Select * from \" + table\n\tres = fetch_all(sql,())\n\tif len(res) == 0:\n\t\treturn 1\n\tids = []\n\tfor each in res:\n\t\tids.append(each[\"id\"])\n\treturn max(ids)+1\n","repo_name":"cp555/employee_management_application","sub_path":"src/organizeuser/modules/userrole.py","file_name":"userrole.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"14753636406","text":"from collections import *\ndef solution(gems):\n answer = [1,len(gems)]\n number = len(set(gems))\n\n start, end = 0, 
0\n counts = defaultdict(int)\n counts[gems[end]] = 1\n # slide a two-pointer window; counts tracks how many of each gem type the window holds\n while start < len(gems) and end < len(gems):\n if len(counts) == number:\n if answer[1]-answer[0] > end-start:\n answer[0] = start+1\n answer[1] = end+1\n\n if len(counts) < number:\n end += 1\n if end == len(gems):\n break\n counts[gems[end]] += 1\n else:\n counts[gems[start]] -= 1\n if counts[gems[start]] == 0:\n del counts[gems[start]]\n start += 1\n\n return answer\n","repo_name":"vpfl95/CodingTest","sub_path":"프로그래머스/lv3/67258. [카카오 인턴] 보석 쇼핑/[카카오 인턴] 보석 쇼핑.py","file_name":"[카카오 인턴] 보석 쇼핑.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"74702890310","text":"#!/usr/bin/env python\n__author__ = 'Sergei F. Kliver'\n\nimport os\nfrom RouToolPa.Tools.Abstract import Tool\n\n\nclass FaCut(Tool):\n def __init__(self, path=\"\", max_threads=1):\n Tool.__init__(self, \"facut\", path=path, max_threads=max_threads)\n\n def filter_by_mean_quality(self, quality_threshold, output_prefix, forward_reads, reverse_reads=None,\n quality_type=\"phred64\", stat_file=None, name_type=None):\n\n options = \" -q %s\" % quality_type\n options += \" -f %s\" % forward_reads\n options += \" -r %s\" % reverse_reads if reverse_reads else \"\"\n options += \" -u\" if not reverse_reads else \"\"\n options += \" -t %i\" % quality_threshold\n options += \" -p %s\" % output_prefix\n options += \" -n %s\" % name_type if name_type else \"\"\n options += \" > %s\" % stat_file if stat_file else \"\"\n\n self.execute(options, cmd=\"filter_by_mean_quality\")\n\n def split_fastq(self, input_file, output_prefix):\n\n options = \" -i %s\" % input_file\n options += \" -p %s\" % output_prefix\n\n self.cmd(options, cmd=\"split_fastq\")\n\nif __name__ == \"__main__\":\n pass\n\n\n","repo_name":"mahajrod/RouToolPa","sub_path":"RouToolPa/Tools/Filter/FaCut.py","file_name":"FaCut.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13682037325","text":"import os\nimport time\nimport argparse\n\n\ndef parse_argv():\n parser = argparse.ArgumentParser()\n timestamp = time.strftime(\"%Y-%m-%d_%H:%M:%S\", time.gmtime())\n\n parser.add_argument('--feature', help='cut texts after this number of words',\n type=int, metavar='max_features', dest='max_features', default=20000)\n parser.add_argument('--embd', help='embedding vector size & first LSTM hidden layer size',\n type=int, metavar='embd_size', dest='embd_size', default=128)\n parser.add_argument('--type', help='what kind of recurrent neurons to use',\n choices=['SimpleRNN', 'GRU', 'LSTM', 'CuDNNGRU', 'CuDNNLSTM'],\n metavar='neuron', default='LSTM')\n\n parser.add_argument('-l', '--log', help='save detailed training log (.csv file)',\n dest='log_path', metavar='path') # default: defined below\n parser.add_argument('-m', '--model', help='save trained model (.h5)',\n dest='model_path', metavar='path') # default: defined below\n parser.add_argument('-s', '--statistics', help='where to store statistics file (.csv)',\n dest='statistics_path', metavar='path', default='statistics.csv')\n\n parser.add_argument('--allow_growth', help='whether to set allow_growth for TensorFlow',\n action='store_true', default=False)\n parser.add_argument('--fp16', help='whether to use mixed_precision / mixed_float16 training',\n action='store_true', default=False)\n\n args = parser.parse_args()\n\n if args.log_path is 
None:\n args.log_path = '{type}_{max_features}_{embd_size}_{timestamp}.csv'.format(\n type=args.type, max_features=args.max_features, embd_size=args.embd_size,\n timestamp=timestamp\n )\n\n if args.model_path is None:\n args.model_path = '{type}_{max_features}_{embd_size}_{timestamp}.h5'.format(\n type=args.type, max_features=args.max_features, embd_size=args.embd_size,\n timestamp=timestamp\n )\n\n args.log_path = os.path.abspath(args.log_path)\n args.model_path = os.path.abspath(args.model_path)\n args.statistics_path = os.path.abspath(args.statistics_path)\n\n return args\n\n\ndef main():\n options = parse_argv()\n for key in options.__dict__:\n print('{}: {}'.format(key, options.__dict__[key]))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AlexLeoTW/PoUw_Test_Ground","sub_path":"performance analysis/Keras_RNN_IMDB/cmdargv.py","file_name":"cmdargv.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"34418539771","text":"from girder.models.model_base import AccessControlledModel\nfrom girder.utility.model_importer import ModelImporter\nfrom girder.utility import path as path_util\nfrom girder.constants import AccessType\nfrom ..constants import TransferStatus\nfrom bson import objectid\nimport datetime\n\n\nclass Transfer(AccessControlledModel):\n OLD_TRANSFER_LIMIT = datetime.timedelta(minutes=1)\n\n def initialize(self):\n self.name = 'transfer'\n self.exposeFields(level=AccessType.READ,\n fields={'_id', 'ownerId', 'sessionId', 'itemId', 'status', 'error',\n 'size', 'transferred', 'path', 'startTime', 'endTime'})\n self.itemModel = ModelImporter.model('item')\n\n def validate(self, transfer):\n return transfer\n\n def createTransfer(self, user, itemId, sessionId):\n existing = self.findOne(query={'itemId': itemId, 'ownerId': user['_id'],\n 'sessionId': sessionId})\n\n if existing is not None:\n transferId = existing['_id']\n else:\n transferId = objectid.ObjectId()\n\n try:\n pathFromRoot = self.getPathFromRoot(user, itemId)\n except KeyError as ex:\n # item does not exist any more, so delete existing transfer\n if existing is not None:\n self.remove(existing)\n raise ex\n transfer = {\n '_id': transferId,\n 'ownerId': user['_id'],\n 'sessionId': sessionId,\n 'itemId': itemId,\n 'status': TransferStatus.QUEUED,\n 'error': None,\n 'size': 0,\n 'transferred': 0,\n 'path': pathFromRoot\n }\n\n self.setUserAccess(transfer, user=user, level=AccessType.ADMIN)\n transfer = self.save(transfer)\n\n return transfer\n\n def getPathFromRoot(self, user, itemId):\n item = self.itemModel.load(itemId, user=user, level=AccessType.READ)\n return path_util.getResourcePath('item', item, user=user)\n\n def setStatus(self, transferId, status, error=None, size=0, transferred=0,\n setTransferStartTime=False, setTransferEndTime=False):\n\n update = {\n '$set': {\n 'status': status,\n 'error': error,\n 'size': size,\n 'transferred': transferred\n }\n }\n\n if setTransferStartTime or setTransferEndTime:\n update['$currentDate'] = {}\n\n if setTransferStartTime:\n update['$currentDate']['startTime'] = {'$type': 'timestamp'}\n\n if setTransferEndTime:\n update['$currentDate']['endTime'] = {'$type': 'timestamp'}\n\n self.update(\n query={'_id': transferId},\n update=update\n )\n\n def list(self, user=None, sessionId=None, discardOld=True):\n if sessionId is None:\n return self.listAllForUser(user, discardOld=discardOld)\n else:\n return self.listAllForSession(user, sessionId, discardOld=discardOld)\n\n 
def listAll(self, discardOld=True):\n query = self.getTimeConstraintQuery(discardOld)\n return self.find(query)\n\n def listAllForUser(self, user, discardOld=True):\n query = self.getTimeConstraintQuery(discardOld)\n query['ownerId'] = user['_id']\n return self.find(query)\n\n def listAllForSession(self, user, sessionId, discardOld=True):\n query = self.getTimeConstraintQuery(discardOld)\n query['ownerId'] = user['_id']\n query['sessionId'] = sessionId\n return self.find(query)\n\n def getTimeConstraintQuery(self, discardOld):\n if discardOld:\n return {\n '$or': [\n {\n 'endTime': {\n '$exists': False\n }\n },\n {\n 'endTime': {\n '$gte': datetime.datetime.utcnow() - Transfer.OLD_TRANSFER_LIMIT\n }\n }\n ]\n }\n else:\n return {}\n","repo_name":"whole-tale/girder_wt_data_manager","sub_path":"server/models/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"20255907745","text":"import copy\n\nclass Life(object):\n def __init__(self, num_of_row=10, num_of_column=10):\n self.grid = []\n self.next_grid = []\n self.num_live_neighbor = []\n self.next_num_live_neighbor = []\n for r in range(num_of_row):\n self.grid.append([0]*num_of_column)\n self.next_grid.append([0]*num_of_column)\n self.num_live_neighbor.append([0]*num_of_column)\n self.next_num_live_neighbor.append([0]*num_of_column)\n\n def set_next_live(self, row, col):\n grid = self.next_grid\n num_live_nb = self.next_num_live_neighbor\n grid[row][col] = 1\n for (i, j) in self.neighbors(row, col):\n num_live_nb[i][j] += 1\n\n def set_next_dead(self, row, col):\n grid = self.next_grid\n grid[row][col] = 0\n for (i, j) in self.neighbors(row, col):\n assert self.next_num_live_neighbor[i][j] > 0\n self.next_num_live_neighbor[i][j] -= 1\n\n def sync_next(self):\n self.grid = copy.deepcopy(self.next_grid)\n self.num_live_neighbor = copy.deepcopy(self.next_num_live_neighbor)\n\n def neighbors(self, row, col):\n res = []\n min_row = row-1 if row>0 else row\n max_row = row+1 if row<len(self.grid)-1 else row\n min_col = col-1 if col>0 else col\n max_col = col+1 if col<len(self.grid[0])-1 else col\n for i in range(min_row, max_row+1):\n for j in range(min_col, max_col+1):\n if (i, j) != (row, col):\n res.append((i, j))\n return res\n\n def evolve(self):\n # advance one generation using the standard Game of Life rules\n is_changed = False\n for r in range(len(self.grid)):\n for c in range(len(self.grid[0])):\n live = self.grid[r][c]\n num_live = self.num_live_neighbor[r][c]\n if not live and num_live == 3:\n live = True\n elif live and (num_live < 2 or num_live > 3):\n live = False\n if live != self.grid[r][c]:\n is_changed = True\n if live:\n self.set_next_live(r, c)\n else:\n self.set_next_dead(r, c)\n if is_changed:\n self.sync_next()\n\n def display(self):\n res = \" \"\n for c in range(len(self.grid[0])):\n res += \"%2d\" % c\n res += \"\\n \"\n for c in range(len(self.grid[0])):\n res += \"--\"\n for r in range(len(self.grid)):\n res += \"\\n%d |\" % (r)\n for c in range(len(self.grid[0])):\n res += \"%2d\" % self.grid[r][c]\n res += \"\\n\"\n return res\n","repo_name":"cocoon333/DSL","sub_path":"data_structure_learning/lifegame/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"36755816212","text":"#!/usr/bin/python3\n\n\"\"\"Optimize the geometry of molecules or fragments.\"\"\"\n\nimport argparse\n\nfrom ase import Atoms\nfrom ase.optimize import BFGS\nfrom ase.constraints import FixAtoms\nfrom xtb.ase.calculator import XTB\nfrom mendeleev import element\n\nfrom rmsd import read_xyz, write_xyz\nfrom swing import smd2gbsa\n\n\ndef minimize(\n atomno,\n coord,\n method=\"GFN2-xTB\",\n accuracy=1.0,\n electronic_temperature=300.0,\n max_iterations=250,\n solvent=\"water\",\n cache_api=True,\n constraints=None,\n):\n atoms = Atoms(numbers=atomno, positions=coord)\n calc = XTB(\n method=method,\n accuracy=accuracy,\n 
electronic_temperature=electronic_temperature,\n max_iterations=max_iterations,\n solvent=solvent,\n cache_api=cache_api,\n )\n atoms.set_calculator(calc)\n\n if constraints is not None:\n for c in constraints:\n atoms.set_constraint(c)\n\n opt = BFGS(atoms)\n opt.run(fmax=0.05)\n\n return atoms.numbers, atoms.get_positions()\n\n\ndef main():\n \"\"\"Run main procedure.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"xyz_files\", type=argparse.FileType(\"r\"), default=\"-\", nargs=\"+\"\n )\n # TODO(schneiderfelipe): set charge and multiplicity\n parser.add_argument(\n \"-a\",\n \"--acc\",\n help=\"accuracy for SCC calculation, lower is better\",\n type=float,\n default=1.0,\n )\n parser.add_argument(\n \"--iterations\",\n help=\"number of iterations in SCC\",\n type=int,\n default=250,\n )\n parser.add_argument(\n \"--gfn\",\n help=\"specify parametrisation of GFN-xTB\",\n type=int,\n default=2,\n )\n parser.add_argument(\n \"--etemp\", help=\"electronic temperature\", type=float, default=300.0\n )\n parser.add_argument(\n \"-s\",\n \"--solvent\",\n help=(\"solvent (SMD/GBSA implicit solvation models)\"),\n default=\"none\",\n )\n parser.add_argument(\n \"--do-not-cache-api\",\n dest=\"cache_api\",\n help=\"Do not reuse generate API objects (not recommended)\",\n action=\"store_false\",\n )\n\n parser.add_argument(\n \"--free-atoms\",\n help=(\n \"Only optimize the given atoms, as comma-separated one-based \"\n \"indices, ranges or atomic symbols\"\n ),\n )\n args = parser.parse_args()\n print(args)\n\n method = f\"GFN{args.gfn}-xTB\"\n solvent = smd2gbsa[args.solvent.lower()]\n\n if args.free_atoms:\n free_atom_indices = set()\n free_atom_nos = set()\n for i in args.free_atoms.split(\",\"):\n try:\n free_atom_indices.add(int(i) - 1)\n except ValueError:\n if \"-\" in i:\n start_plus_one, end = (int(j) for j in i.split(\"-\", 1))\n free_atom_indices.update(range(start_plus_one - 1, end))\n else:\n free_atom_nos.add(\n element(i).atomic_number\n ) # atomic numbers\n\n for xyz_file in args.xyz_files:\n atomno, comment, coord = read_xyz(xyz_file)\n atomno, comment, coord = atomno[-1], comment[-1], coord[-1]\n\n constraints = []\n if args.free_atoms:\n print(free_atom_indices)\n print(free_atom_nos)\n indices = list(\n set(range(len(atomno)))\n - free_atom_indices\n - set(i for i, no in enumerate(atomno) if no in free_atom_nos)\n )\n print(f\"Constraining atoms {indices}\")\n constraints.append(FixAtoms(indices=indices))\n\n atomno, coord = minimize(\n atomno,\n coord,\n method=method,\n accuracy=args.acc,\n electronic_temperature=args.etemp,\n max_iterations=args.iterations,\n solvent=solvent,\n cache_api=args.cache_api,\n constraints=constraints,\n )\n\n with open(xyz_file.name, \"w\") as stream:\n stream.write(write_xyz(atomno, coord, comment))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"schneiderfelipe/scripts","sub_path":"minimize.py","file_name":"minimize.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"40799297781","text":"from django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.db import models\n\n\nclass Mail(models.Model):\n target = models.ForeignKey(\n User,\n related_name='mails',\n )\n\n title = models.CharField(\n max_length=250,\n )\n\n text = models.TextField()\n\n sent = models.BooleanField(default=False)\n\n def save(self, *args, **kwargs):\n if self.target.email:\n send_mail(self.title, 
self.text,\n 'testingdjango111@gmail.com', [self.target.email])\n self.sent = True\n\n super(Mail, self).save(*args, **kwargs)","repo_name":"MichalGumkowski/django-tasker","sub_path":"core/models/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6740707524","text":"import random\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\n\n\ndef make_trade_data(num_data):\n # base plant price\n base_price = 10000\n\n # list to hold the trade records\n trade_data = []\n\n for i in range(num_data):\n plant_num = i + 1\n plant_status = random.randint(0, 4)\n\n if plant_status == 0:\n price = base_price * random.uniform(0.4, 0.6)\n status_str = \"very bad\"\n elif plant_status == 1:\n price = base_price * random.uniform(0.6, 0.8)\n status_str = \"bad\"\n elif plant_status == 2:\n price = base_price * random.uniform(0.8, 1.0)\n status_str = \"soso\"\n elif plant_status == 3:\n price = base_price * random.uniform(1.0, 1.1)\n status_str = \"good\"\n else:\n price = base_price * random.uniform(1.1, 1.25)\n status_str = \"very good\"\n\n # generate the trade timestamp\n trade_time = datetime.now() - timedelta(days=random.randint(0, 365))\n\n price = int(price)\n # append to the trade data list\n trade_data.append([plant_num, status_str, price, trade_time])\n\n return trade_data\n\n\ndef plot_trade_data(trade_data):\n # list of statuses\n status_list = [\"very bad\", \"bad\", \"soso\", \"good\", \"very good\"]\n\n # number of subplots\n num_subplots = len(status_list)\n\n # set the figure size\n fig, axs = plt.subplots(num_subplots, 1, figsize=(10, 5*num_subplots))\n\n # draw one subplot per status\n for i, status in enumerate(status_list):\n # extract the data for this status\n data = [d for d in trade_data if d[1] == status]\n\n # sort by date\n data = sorted(data, key=lambda x: x[3])\n\n # extract the x and y values\n x_values = [d[3] for d in data]\n y_values = [d[2] for d in data]\n\n # plot on the subplot\n axs[i].plot(x_values, y_values)\n axs[i].set_title(f\"{status.capitalize()} Plants\")\n axs[i].set_xlabel(\"Trade Date\")\n axs[i].set_ylabel(\"Price\")\n\n plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, hspace=0.5)\n plt.show()\n\n\n\n\n# generate trade data\ntrade_data = make_trade_data(100)\nprint(trade_data)\n# plot the graph\nplot_trade_data(trade_data)\n","repo_name":"crazy-oung/planttech","sub_path":"ai/pytorch/state/make_price_and_vis.py","file_name":"make_price_and_vis.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"2182846846","text":"import torch.nn as nn\r\nimport torch\r\nimport random\r\n# 32 -> 10\r\n'''\r\n(i+2p-k)/s+1 \r\ni = 32 s = 2 p = 0 k = ? -> o = 10? :k = 23\r\ni = 64 s = 2 p = 1 k = ? -> o = 32? :k = 2\r\ni = 128 s = 2 p = ? k->6 -> o = 64? :k = 2/\r\ni = 10 s=1 p=0 k= ? -> o = 1? 
: k = 10\r\n\r\ni = 32 \r\n'''\r\n'''\r\nconv1 = nn.Conv2d(1, 1, 14, 2, 0)\r\n\r\ninput = torch.randn(1,1,32,32)\r\n\r\noutput = conv1(input)\r\n\r\nprint(output.size())\r\n\r\nconv2 = nn.ConvTranspose2d(1,1,4,2,1)\r\nout = conv2(input)\r\nprint(out.size())\r\n\r\nconv3 = nn.ConvTranspose2d(1,1,2,2,0)\r\nout3 = conv3(out)\r\nprint(out3.size())\r\n\r\ntransconv1 = nn.ConvTranspose2d(1, 1, 23, 1, 0)\r\n\r\nout2 = transconv1(output)\r\n\r\nprint(out2.size())\r\n'''\r\n'''\r\nBCE_loss = nn.BCELoss()\r\n\r\ns = torch.Tensor([0.5,1])\r\nt = torch.Tensor([1,1])\r\n\r\nloss = BCE_loss(s,t)\r\nprint(loss)\r\n\r\nbbb = torch.randn((1,2))\r\nprint(bbb.size())\r\n'''\r\ndef Transconv_size_test(input_size,kernel,stride,padding):\r\n tensor = torch.randn(input_size)\r\n deconv = nn.ConvTranspose2d(1,1,kernel,stride,padding)\r\n tmp = deconv(tensor)\r\n print(tmp.size())\r\ndef Conv_size_test(input_size,kernel,stride,padding):\r\n tensor = torch.randn(input_size)\r\n deconv = nn.Conv2d(1,1,kernel,stride,padding)\r\n tmp = deconv(tensor)\r\n print(tmp.size())\r\n\r\nConv_size_test((1,1,32,32),16,2,1)\r\n\r\n\r\n\r\n'''\r\ntensor = torch.zeros(1,1,5,5)\r\nprint(tensor.size())\r\n\r\ntensor = tensor+random.randint(0,255)\r\nprint(tensor)\r\n\r\nclass PILencode(object):\r\n\tdef __call__(self,tensor):\r\n\t\ttmp = (tensor-127.5)/127.5\r\n\t\treturn tmp\r\n\t\t\r\nclass PILdecode(object):\r\n\tdef __call__(self,tensor):\r\n\t\ttmp = (tensor*127.5)+127.5\r\n\t\treturn tmp\r\n\r\ne = PILencode()\r\nd = PILdecode()\r\ntensor = e(tensor)\r\nprint(tensor)\r\n\r\ntensor = d(tensor)\r\nprint(tensor)\r\n'''","repo_name":"hui98/CV","sub_path":"UT_conv.py","file_name":"UT_conv.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12587936415","text":"\"\"\"more tag setting states\n\nRevision ID: f2525e71bdbb\nRevises: 120fa9a324bb\nCreate Date: 2022-01-22 16:19:29.481074\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f2525e71bdbb'\ndown_revision = '120fa9a324bb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('participants_tag_setting', sa.Column('not_include_for_statistics', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('participants_tag_setting', 'not_include_for_statistics')\n # ### end Alembic commands ###\n","repo_name":"Alexsogge/MasterProject","sub_path":"Webserver/migrations/versions/f2525e71bdbb_more_tag_setting_states.py","file_name":"f2525e71bdbb_more_tag_setting_states.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"27"} +{"seq_id":"5506371540","text":"#!/usr/bin/env python3\n################################################################################\n#\n# Filename: test_number_of_islands.py\n#\n# Author: Arnaud Ongenae\n#\n################################################################################\nfrom ..number_of_islands import Solution\nfrom .leetcode_test import LeetcodeTest\n\n\nclass _Mixin(object):\n\n def validate(self, grid, expected):\n solution = Solution().numIslands(grid)\n self.assertEqual(\n solution,\n expected,\n _Mixin._format_error(grid, solution, expected)\n )\n\n @staticmethod\n def _format_error(grid, solution, expected):\n return (\n 'wrong result for the following grid:\\n'\n '\\n'\n '{}\\n'\n '\\n'\n ' solution: {}\\n'\n ' expected: {}\\n'\n .format('\\n'.join(_Mixin._format_grid(grid)), solution, expected)\n )\n\n @staticmethod\n def _format_grid(grid):\n for rows in grid:\n yield ''.join(col for col in rows)\n\n\nclass TestLongestCommonPrefix(LeetcodeTest, _Mixin):\n\n def test_one_island_1(self):\n grid = [\n ['1', '1', '1', '1', '0'],\n ['1', '1', '0', '1', '0'],\n ['1', '1', '0', '0', '0'],\n ['0', '0', '0', '0', '0']\n ]\n self.validate(grid, expected=1)\n\n def test_three_islands_1(self):\n grid = [\n ['1', '1', '0', '0', '0'],\n ['1', '1', '0', '0', '0'],\n ['0', '0', '1', '0', '0'],\n ['0', '0', '0', '1', '1']\n ]\n self.validate(grid, expected=3)\n","repo_name":"aongenae/leetcode","sub_path":"src/test/test_number_of_islands.py","file_name":"test_number_of_islands.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"17114350995","text":"import urllib.request\nfrom flask import Flask, request, jsonify\nimport os\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\nfrom preprocessing import parse_annotation\nfrom utils import draw_boxes\nfrom frontend import YOLO\n\napp = Flask(__name__)\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nweights_path = \"trained_wts.h5\"\nimage_path = \"1.jpg\"\n\nyolo = YOLO(backend = \"Full Yolo\",\n input_size = 416, \n labels = [\"Potholes\"], \n max_box_per_image = 15,\n anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]\n )\nyolo.load_weights(weights_path)\n\n@app.route('/', methods=['GET', 'POST'])\ndef predict():\n import keras.backend.tensorflow_backend as tb\n tb._SYMBOLIC_SCOPE.value = True\n\n url = request.form.get('url')\n urllib.request.urlretrieve(url, '1.jpg')\n image = cv2.imread(\"1.jpg\")\n boxes = yolo.predict(image)\n image = draw_boxes(image, boxes, \"Pothole\")\n\n # print(len(boxes), 'boxes are found')\n\n cv2.imwrite(\"detected.jpg\", image)\n return jsonify(\n NUM_POTHOLES=len(boxes),\n coordinates=boxes\n )\n\nif __name__ == '__main__':\n 
app.run(host=\"0.0.0.0\",port=5000,threaded=False)","repo_name":"Amagnum/CARON","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"4866724506","text":"from typing import List\n\nimport requests\nfrom requests import RequestException\n\nfrom pokeapi_service.pokeapi_rest_service import PokeAPIRestService\nfrom pokemon import Pokemon\n\n\nclass PokeAPISecuentialRestService(PokeAPIRestService):\n\n def get_by_names(self, names: List[str]) -> List[Pokemon]:\n pokemons = []\n for name in names:\n try:\n response = requests.get(self.BASE_API_URL + name)\n response.raise_for_status()\n pokemon_json = response.json()\n except RequestException:\n continue\n\n pokemons.append(Pokemon(id=pokemon_json['id'], nombre=pokemon_json['name'], tipos=[tipo['type']['name'] for tipo in pokemon_json['types']]))\n\n return pokemons\n","repo_name":"RusselWolf23/Carreras_Python","sub_path":"Desarrollador y tester en Python/Concurrencia en Python/pokeapi_service/pokeapi_secuential_rest_service.py","file_name":"pokeapi_secuential_rest_service.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43172943207","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Post\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom .forms import EmailPostForm, CommentPostForm, SearchForm\nfrom django.core.mail import send_mail\nfrom taggit.models import Tag\nfrom django.core.paginator import Paginator, EmptyPage\nfrom django.db.models import Count\nfrom django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank\n\n\nclass PostListView(ListView):\n queryset = Post.published.all()\n paginate_by = 3 # 3 posts in each page\n template_name = 'blog/post/list.html'\n\n\ndef post_list(request, tag_slug=None):\n object_list = Post.published.all()\n tag = None\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n object_list = object_list.filter(tags__in=[tag, ])\n\n paginator = Paginator(object_list, 3)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(request, 'blog/post/list.html', {'page_obj': page_obj, 'tag': tag})\n\n\nclass PostDetailView(DetailView):\n template_name = 'blog/post/detail.html'\n\n def get_queryset(self):\n queryset = Post.published.filter(\n publish__year=self.kwargs['year'],\n publish__month=self.kwargs['month'],\n publish__day=self.kwargs['day'],\n slug=self.kwargs['slug']\n )\n return queryset\n\n\ndef post_detail(request, year, month, day, slug):\n post = get_object_or_404(\n Post, slug=slug,\n status='published',\n publish__year=year,\n publish__month=month,\n publish__day=day)\n\n # List of active comments for this post\n comments = post.comments.filter(active=True)\n\n new_comment = None # new_comment will be None because views always redirect\n if request.method == 'POST':\n # A comment was posted\n comment_form = CommentPostForm(data=request.POST)\n if comment_form.is_valid():\n # Create Comment object but don't save to database yet\n new_comment = comment_form.save(commit=False)\n # Assign the current post to the comment\n new_comment.post = post\n # Save the comment to the database\n new_comment.save()\n return redirect(post)\n else:\n comment_form = CommentPostForm()\n\n # list of similar 
posts\n\n # retrieve q.s. with list of tag ids from current post. flat = True - get single value for each id\n post_tags_ids = post.tags.values_list('id', flat=True)\n\n # retrieve all occurrence post (with post repeat) with tags_ids, excluding current post\n similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(pk=post.id)\n\n similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]\n return render(request, 'blog/post/detail.html', {'post': post,\n 'comments': comments,\n 'new_comment': new_comment,\n 'comment_form': comment_form,\n 'similar_posts': similar_posts,\n })\n\n\ndef post_share(request, post_id):\n # Retrieve post by id\n post = get_object_or_404(Post, pk=post_id, status='published')\n sent = False\n if request.method == 'POST':\n # form was submitted\n form = EmailPostForm(request.POST)\n if form.is_valid():\n # form fields passed validation\n cd = form.cleaned_data\n # send mail\n post_url = request.build_absolute_uri(post.get_absolute_url())\n subject = f\"{cd['name']} recommends you read {post.title}\"\n message = f\"Read {post.title} at {post_url}\\n\\n\\n{cd['name']}'s comments: {cd['comments']}\"\n from_email = cd['email']\n to = cd['to']\n send_mail(subject=subject, message=message, from_email=from_email, recipient_list=[to, ])\n sent = True\n else:\n form = EmailPostForm()\n return render(request, 'blog/post/share.html', {'form': form, 'post': post, 'sent': sent})\n\n\ndef post_search(request):\n form = SearchForm()\n query = None\n results = []\n\n if 'query' in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n query = form.cleaned_data['query']\n search_vector = SearchVector('title', weight='A') + SearchVector('body', weight='B')\n search_query = SearchQuery(query)\n results = Post.published.annotate(search=search_vector, rank=SearchRank(search_vector, search_query)\n ).filter(rank__gte=0.3).order_by('-rank')\n\n return render(request, 'blog/post/search.html', {'form': form, 'results': results, 'query': query})","repo_name":"WellingtonIdeao/django-by-example-blog","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33164991788","text":"import time\nfrom unittest import TestCase\nfrom scorecardlib.scoreboard import Scoreboard\n\n\"\"\"Sample table provided as an input and can be used for writing tests as well\"\"\"\n\n# a. Mexico 0 - Canada 5\n# b. Spain 10 - Brazil 2\n# c. Germany 2 - France 2\n# d. Uruguay 6 - Italy 6\n# e. 
Argentina 3 - Australia 1\n\n\nclass TestScoreBoard(TestCase):\n def test_init_scoreboard(self):\n scoreboard = Scoreboard()\n self.assertEqual(scoreboard.games, dict())\n\n def test_start_game(self):\n leaderboard = Scoreboard()\n leaderboard.start_game(\"Germany\", \"France\")\n self.assertEqual(leaderboard.games[\"Germany-France\"].home_team, \"Germany\")\n self.assertEqual(leaderboard.games[\"Germany-France\"].away_team, \"France\")\n\n def test_game_exists(self):\n leaderboard = Scoreboard()\n home_team = \"Germany\"\n away_team = \"France\"\n leaderboard.start_game(home_team, away_team)\n\n with self.assertRaises(Exception) as context:\n leaderboard.start_game(home_team, away_team)\n\n self.assertTrue(f\"There is an ongoing game between {home_team} and {away_team}\\n\" in str(context.exception))\n\n def test_update_score(self):\n leaderboard = Scoreboard()\n home_team = \"Germany\"\n away_team = \"France\"\n match_key = f\"{home_team}-{away_team}\"\n leaderboard.start_game(home_team, away_team)\n self.assertEqual(leaderboard.games[match_key].home_team, \"Germany\")\n self.assertEqual(leaderboard.games[match_key].away_team, \"France\")\n leaderboard.update_score(home_team, away_team, 10, 5)\n self.assertEqual(leaderboard.games[match_key].home_score, 10)\n self.assertEqual(leaderboard.games[match_key].away_score, 5)\n\n def test_check_if_match_ongoing(self):\n # Test updating a match that was never started\n leaderboard = Scoreboard()\n home_team = \"Britain\"\n away_team = \"Israel\"\n with self.assertRaises(Exception) as context:\n leaderboard.update_score(home_team, away_team, 10, 5)\n\n self.assertTrue(f\"There is no ongoing game between {home_team} and {away_team}\\n\" in str(context.exception))\n\n def test_finish_game(self):\n leaderboard = Scoreboard()\n home_team = \"Germany\"\n away_team = \"France\"\n match_key = f\"{home_team}-{away_team}\"\n leaderboard.start_game(home_team, away_team)\n leaderboard.update_score(home_team, away_team, 2, 2)\n home_team_2 = \"Spain\"\n away_team_2 = \"Brazil\"\n match_key_2 = f\"{home_team_2}-{away_team_2}\"\n leaderboard.start_game(home_team_2, away_team_2)\n leaderboard.update_score(home_team, away_team, 10, 2)\n self.assertEqual(leaderboard.games[match_key].home_team, \"Germany\")\n self.assertEqual(leaderboard.games[match_key_2].home_team, \"Spain\")\n leaderboard.finish_game(home_team, away_team)\n self.assertNotIn(match_key, leaderboard.games.keys())\n\n def test_get_games_summary(self):\n \"\"\"We will first insert the sample given games in the leaderboard and then update their scores\"\"\"\n # a. Mexico 0 - Canada 5\n # b. Spain 10 - Brazil 2\n # c. Germany 2 - France 2\n # d. Uruguay 6 - Italy 6\n # e. Argentina 3 - Australia 1\n leaderboard = Scoreboard()\n leaderboard.start_game(\"Mexico\", \"Canada\")\n leaderboard.start_game(\"Spain\", \"Brazil\")\n leaderboard.start_game(\"Germany\", \"France\")\n # adding some delay to guarantee a gap between the start times of two games\n # which have similar total scores\n time.sleep(5)\n leaderboard.start_game(\"Uruguay\", \"Italy\")\n leaderboard.start_game(\"Argentina\", \"Australia\")\n\n leaderboard.update_score(\"Mexico\", \"Canada\", 0, 5)\n leaderboard.update_score(\"Spain\", \"Brazil\", 10, 2)\n leaderboard.update_score(\"Germany\", \"France\", 2, 2)\n leaderboard.update_score(\"Uruguay\", \"Italy\", 6, 6)\n leaderboard.update_score(\"Argentina\", \"Australia\", 3, 1)\n\n # 1. Uruguay 6 - Italy 6\n # 2. Spain 10 - Brazil 2\n # 3. Mexico 0 - Canada 5\n # 4. 
Argentina 3 - Australia 1\n # 5. Germany 2 - France 2\n summary = leaderboard.get_games_summary(verbose=True)\n self.assertEqual(summary[0].home_team, \"Uruguay\")\n self.assertEqual(summary[0].away_team, \"Italy\")\n self.assertEqual(summary[0].home_score, 6)\n self.assertEqual(summary[0].away_score, 6)\n\n self.assertEqual(summary[1].home_team, \"Spain\")\n self.assertEqual(summary[1].away_team, \"Brazil\")\n self.assertEqual(summary[1].home_score, 10)\n self.assertEqual(summary[1].away_score, 2)\n\n self.assertEqual(summary[2].home_team, \"Mexico\")\n self.assertEqual(summary[2].away_team, \"Canada\")\n self.assertEqual(summary[2].home_score, 0)\n self.assertEqual(summary[2].away_score, 5)\n\n self.assertEqual(summary[3].home_team, \"Argentina\")\n self.assertEqual(summary[3].away_team, \"Australia\")\n self.assertEqual(summary[3].home_score, 3)\n self.assertEqual(summary[3].away_score, 1)\n\n self.assertEqual(summary[4].home_team, \"Germany\")\n self.assertEqual(summary[4].away_team, \"France\")\n self.assertEqual(summary[4].home_score, 2)\n self.assertEqual(summary[4].away_score, 2)\n\n def test_get_score_from_scoreboard(self):\n leaderboard = Scoreboard()\n leaderboard.start_game(\"Mexico\", \"Canada\")\n leaderboard.start_game(\"Spain\", \"Brazil\")\n leaderboard.start_game(\"Germany\", \"France\")\n leaderboard.start_game(\"Uruguay\", \"Italy\")\n leaderboard.start_game(\"Argentina\", \"Australia\")\n\n leaderboard.update_score(\"Mexico\", \"Canada\", 0, 5)\n leaderboard.update_score(\"Spain\", \"Brazil\", 10, 2)\n leaderboard.update_score(\"Germany\", \"France\", 2, 2)\n leaderboard.update_score(\"Uruguay\", \"Italy\", 6, 6)\n leaderboard.update_score(\"Argentina\", \"Australia\", 3, 1)\n response = f\"Scores for Argentina-Australia is 3-1\"\n self.assertEqual(leaderboard.get_game_score(\"Argentina\", \"Australia\"), response)\n\n","repo_name":"vivekfe/scorecardlib","sub_path":"tests/Test_scoreboard_class.py","file_name":"Test_scoreboard_class.py","file_ext":"py","file_size_in_byte":6132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"3749909036","text":"__author__ = 'Kal Ahmed'\n\nfrom quince.core.qsort import sort_all, sort_modified\n\n\ndef parser(subparsers):\n \"\"\"\n Adds a parser for the quince sort sub-command\n :param subparsers: The list of quince sub-command parsers to append the resulting parser to\n :return: None\n \"\"\"\n sort_parser = subparsers.add_parser(\n 'sort',\n help='Ensure that the quad files in the Quince repository properly sorted'\n )\n sort_parser.add_argument('--all', '-a',\n help='Check the sorting of all files even if they are not locally modified',\n action='store_true')\n sort_parser.add_argument('--since', '-s',\n help='Check the sorting of all files modified since the specified commit')\n sort_parser.set_defaults(func=main)\n\n\ndef main(args):\n if args.all:\n sort_all()\n else:\n sort_modified(args.since)\n return True","repo_name":"kal/quince","sub_path":"quince/cli/quince_sort.py","file_name":"quince_sort.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"41050363633","text":"import json\nimport tempfile\nimport unittest\n\nimport process\nfrom web.services.verb_definition import SenseData\nfrom web.services import directory_service\n\nclass process_tests(unittest.TestCase):\n\n def test_jsonEncodesVerbDefinition(self):\n test_list = set([1,2,3])\n definition = SenseData()\n 
definition.hypernyms = test_list\n\n result = process._encode_for_json(definition)\n\n self.assertIsInstance(result['hypernyms'], list)\n\n def test_integration(self):\n tempFileWrapepr = tempfile.NamedTemporaryFile(suffix='.json')\n\n test_list = set([1,2,3])\n definition = dict()\n definition['random_key'] = test_list\n\n new_dict = {'run':SenseData()}\n\n synsets_list = {\n 'blah.l.01':test_list\n }\n\n file_data = {\n 'directory': new_dict,\n 'synsets': synsets_list\n }\n\n with open(tempFileWrapepr.name, \"w\") as tempFile:\n json.dump(file_data, tempFile, default=process._encode_for_json)\n\n with open(tempFileWrapepr.name, \"r\") as tempFile:\n loaded = json.load(tempFile, object_hook=directory_service._decode_complex)\n self.assertEqual(len(loaded['synsets']['blah.l.01']), 3)","repo_name":"JessFairbairn/PhysNet","sub_path":"process_test.py","file_name":"process_test.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"73310587913","text":"import asyncio\nimport logging\n\nfrom .oyoyo.parse import parse_raw_irc_command\nfrom .protocol import ClientProtocol\n\nfrom aioyoyo.oyoyo.cmdhandler import IRCClientError\n\n\nclass IRCClient(object):\n def __init__(self, loop=None, address=None, port=None, protocol=ClientProtocol):\n \"\"\"\n A basic Async IRC client. Use coroutine IRCClient.connect to initiate\n the connection. Takes the event loop, a host (address, port) and if\n wanted an alternate protocol can be defined. By default will use the\n ClientProtocol class, which just uses the IRCClient's tracebacks and\n passes received data to the client.\n \"\"\"\n self.loop = loop if loop else asyncio.get_event_loop()\n self.host = (address, port)\n self.address = address\n self.port = port\n self.protocol = protocol(self)\n\n self.logger = logging.getLogger(\"aioyoyo\")\n self.logger.setLevel(logging.INFO)\n\n def __repr__(self):\n return \"{0}(address={1}, port={2}, protocol={3})\".format(self.__class__.__name__, self.host[0], self.host[1], self.protocol.__class__.__name__)\n\n async def connect(self):\n \"\"\"Initiate the connection, creates a connection using the defined\n protocol\"\"\"\n await self.loop.create_connection(lambda: self.protocol, self.address, self.port)\n\n async def connection_made(self):\n \"\"\"Called on a successful connection, by default forwarded by\n protocol.connection_made\"\"\"\n logging.info('connecting to %s:%s' % self.host)\n\n async def data_received(self, data):\n \"\"\"Called when data is received by the connection, by default\n forwarded by protocol.data_received, passes bytes not str\"\"\"\n logging.info('received: %s' % data.decode())\n\n async def connection_lost(self, exc):\n \"\"\"Called when the connection is dropped, by default prints\n the exception if there is one. Forwarded by protocol.connection_lost\"\"\"\n logging.info('connection lost: %s' % exc)\n\n async def send(self, *args):\n \"\"\"Send a message to the connected server. 
all arguments are joined\n with a space for convenience, for example the following are identical\n\n >>> cli.send(\"JOIN %s\" % some_room)\n >>> cli.send(\"JOIN\", some_room)\n\n In python 3, all args must be of type str or bytes, *BUT* if they are\n str they will be converted to bytes with the encoding specified by the\n 'encoding' keyword argument (default 'utf8').\n \"\"\"\n # Convert all args to bytes if not already\n bargs = []\n for arg in args:\n if isinstance(arg, str):\n bargs.append(arg.encode())\n elif isinstance(arg, bytes):\n bargs.append(arg)\n else:\n raise IRCClientError('Refusing to send one of the args from provided: %s'\n % repr([(type(arg), arg) for arg in args]))\n\n msg = b\" \".join(bargs)\n await self.protocol.send_raw(msg + b\"\\r\\n\")\n logging.info('---> send \"%s\"' % msg)\n\n async def send_msg(self, message):\n \"\"\"Send a str to the server completely raw, with none of the formatting\n from IRCClient.send\"\"\"\n await self.protocol.send(message)\n\n async def send_raw(self, data):\n \"\"\"Send raw bytes to the server, with none of the formatting from IRCClient.send\"\"\"\n await self.protocol.send_raw(data)\n\n async def close(self):\n \"\"\"Close the connection\"\"\"\n logging.info('close transport')\n self.protocol.transport.close()\n\n def run(self):\n \"\"\"Starts the client, blocking. For a non-blocking coroutine use client.connect()\"\"\"\n self.loop.run_until_complete(self.connect())\n self.loop.run_forever()\n\nclass CommandClient(IRCClient):\n \"\"\"IRCClient, using a command handler\"\"\"\n def __init__(self, cmd_handler, **kwargs):\n \"\"\"Takes a command handler (see oyoyo.cmdhandler.CommandHandler)\n whose attributes are the commands you want callable, for example\n with a privmsg cmdhandler.privmsg will be awaited with the\n appropriate *args, decorate methods with @protected to make it\n uncallable as a command\"\"\"\n super().__init__(**kwargs)\n self.command_handler = cmd_handler(self)\n\n async def data_received(self, data):\n \"\"\"On IRCClient.data_received parse for a command and pass to the\n command_handler to run()\"\"\"\n prefix, command, args = parse_raw_irc_command(data)\n await self.command_handler.run(command, prefix, *args)\n\n","repo_name":"henry232323/aioyoyo","sub_path":"aioyoyo/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"70297250951","text":"import numpy as np\nimport gzip\n\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\n\nfrom analysis.confustion_matrix import plot_confusion_matrix\nfrom analysis.one_hot_encoder import indices_to_one_hot\nfrom network.network import MultiLayerNetwork\nfrom network.preprocessor import Preprocessor\nfrom network.trainer import Trainer\n\n\nfilename = [\n [\"training_images\", \"train-images-idx3-ubyte.gz\"],\n [\"test_images\", \"t10k-images-idx3-ubyte.gz\"],\n [\"training_labels\", \"train-labels-idx1-ubyte.gz\"],\n [\"test_labels\", \"t10k-labels-idx1-ubyte.gz\"]\n]\n\nprefix_path = \"dataset/digits_mnist\"\n\nfile_path = \"{prefix}/{file}\"\n\n\ndef main():\n\n class_labels = [str(x) for x in range(10)]\n\n train_x, train_labels, test_x, test_labels = load_mnist()\n\n # Convert the label class into a one-hot representation\n train_y = indices_to_one_hot(train_labels, 10)\n test_y = indices_to_one_hot(test_labels, 10)\n\n # normalize the input data (since max value is at most 255)\n train_x = train_x / 255\n test_x = 
test_x / 255\n\n    input_dim = 784\n    neurons = [128, 64, 10]\n    activations = [\"relu\", \"relu\", \"identity\"]\n    net = MultiLayerNetwork(input_dim, neurons, activations)\n\n    trainer = Trainer(\n        network=net,\n        batch_size=512,\n        nb_epoch=256,\n        learning_rate=0.007,\n        loss_fun=\"cross_entropy\",\n        shuffle_flag=True,\n    )\n\n    trainer.train(train_x, train_y)\n    print(\"Train loss = \", trainer.eval_loss(train_x, train_y))\n    print(\"Validation loss = \", trainer.eval_loss(test_x, test_y))\n\n    preds = net(test_x).argmax(axis=1).squeeze()\n    targets = test_y.argmax(axis=1).squeeze()\n    accuracy = (preds == targets).mean()\n    print(\"Validation accuracy: {}\".format(accuracy))\n\n    # Confusion matrix\n\n    cm = confusion_matrix(targets, preds)\n    plot_confusion_matrix(cm, class_labels)\n\n\ndef load_mnist():\n    mnist = {}\n    for name in filename[:2]:\n        path = file_path.format(prefix=prefix_path, file=name[1])\n        with gzip.open(path, 'rb') as f:\n            mnist[name[0]] = np.frombuffer(\n                f.read(), np.uint8, offset=16).reshape(-1, 28*28)\n    for name in filename[-2:]:\n        path = file_path.format(prefix=prefix_path, file=name[1])\n        with gzip.open(path, 'rb') as f:\n            mnist[name[0]] = np.frombuffer(f.read(), np.uint8, offset=8)\n\n    return mnist[\"training_images\"], mnist[\"training_labels\"], mnist[\"test_images\"], mnist[\"test_labels\"]\n\n\ndef visualise_image(label, x_set, y_set):\n    img_idx = np.where(y_set == label)[0][0]\n    img = np.reshape(x_set[img_idx], (28, 28))\n    plt.figure()\n    plt.imshow(img)\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"jedrazb/python-neural-network","sub_path":"digits_mnist_demo.py","file_name":"digits_mnist_demo.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"27"} +{"seq_id":"21253663816","text":"from flask import request\n\n\"\"\" Function to check whether the required fields are present in the request \"\"\"\ndef verify_fields(fields, allow_null=False, check_carrinho=False):\n    json_data = request.get_json()\n    if not json_data:\n        return False\n\n    if check_carrinho:\n        carrinho = json_data.get('carrinho', [])\n        for item in carrinho:\n            for field in fields:\n                if field not in item:\n                    return False\n                if not allow_null and item.get(field) is None:\n                    return False\n    else:\n        for field in fields:\n            if field not in json_data:\n                return False\n            if not allow_null and json_data.get(field) is None:\n                return False\n\n    return True","repo_name":"thmartins27/techfood","sub_path":"src/verifyJson.py","file_name":"verifyJson.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"6376056510","text":"import torch\nimport torch.nn as nn\n\n\nclass AverageTo2d(nn.Module):\n    '''https://github.com/calico/basenji/blob/master/basenji/layers.py'''\n    def __init__(self, concat_d = False, n = None, mode = 'avg'):\n        \"\"\"\n        Inputs:\n            concat_d: True if positional encoding should be appended\n            n: spatial dimension\n            mode: 'average' for default mode, 'concat' to concat instead of average, 'outer' to use outer product\n        \"\"\"\n        super(AverageTo2d, self).__init__()\n        assert mode in self.mode_options, f'Invalid mode: {mode}'\n        self.concat_d = concat_d\n        self.mode = mode\n        if concat_d:\n            self.get_positional_encoding(n)\n\n    def get_positional_encoding(self, n):\n        assert n is not None\n        d = torch.zeros((n, n))\n        for i in range(1, n):\n            y = torch.diagonal(d, offset = i)\n            y[:] = torch.ones(n-i) * i\n        d = d + torch.transpose(d, 0, 1)\n        d = torch.unsqueeze(d, 0)\n        d = 
torch.unsqueeze(d, 0)\n self.d = d\n\n @property\n def mode_options(self):\n return {'avg', 'concat', 'outer', 'concat-outer', 'avg-outer', None}\n\n def forward(self, x):\n # assume x is of shape N x C x m\n # (N = batches, C = channels, m = nodes)\n # memory expensive\n assert len(x.shape) == 3, \"shape must be 3D\"\n N, C, m = x.shape\n\n out_list = []\n for mode in self.mode.split('-'):\n if mode is None:\n print(\"Warning: mode is None\")\n # code will probably break if you get here\n out = x\n elif mode == 'avg':\n x1 = torch.tile(x, (1, 1, m))\n x1 = torch.reshape(x1, (-1, C, m, m))\n x2 = torch.transpose(x1, 2, 3)\n x1 = torch.unsqueeze(x1, 0)\n x2 = torch.unsqueeze(x2, 0)\n out = torch.cat((x1, x2), dim = 0)\n out = torch.mean(out, dim = 0, keepdim = False)\n elif mode == 'concat':\n x1 = torch.tile(x, (1, 1, m))\n x1 = torch.reshape(x1, (-1, C, m, m))\n x2 = torch.transpose(x1, 2, 3)\n out = torch.cat((x1, x2), dim = 1)\n elif mode == 'outer':\n # see test_average_to_2d_outer for evidence that this works\n x1 = torch.tile(x, (1, C, m))\n x1 = torch.reshape(x1, (-1, C*C, m, m))\n x2 = torch.transpose(x1, 2, 3)\n\n # use indices to permute x2\n indices = []\n for i in range(C):\n indices.extend(range(i, i + C * (C-1) + 1, C))\n indices = torch.tensor(indices)\n if x2.is_cuda:\n indices = indices.to(x2.get_device())\n x2 = torch.index_select(x2, dim = 1, index = indices)\n\n out = torch.einsum('ijkl,ijkl->ijkl', x1, x2)\n\n del x1, x2\n out_list.append(out)\n\n if self.concat_d:\n # append abs(i - j)\n if out.is_cuda:\n self.d = self.d.to(out.get_device())\n out_list.append(torch.tile(self.d, (N, 1, 1, 1)))\n\n out = torch.cat(out_list, dim = 1)\n return out\n","repo_name":"ERSchultz/sequences_to_contact_maps","sub_path":"scripts/neural_nets/AverageTo2d.py","file_name":"AverageTo2d.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"38474193592","text":"\"\"\"\nDFS with cache... 
TLEs...\n\n def minPathSum(self, grid: List[List[int]]) -> int:\n queue, cache, res = [(0, 0, grid[0][0])], {}, inf\n M, N = len(grid), len(grid[0])\n while queue:\n i, j, total = queue.pop()\n if i == M - 1 and j == N - 1:\n res = min(res, total)\n else:\n if (i, j) in cache and cache[(i, j)] <= total:\n # not worth exploring...\n continue\n\n # add to cache\n cache[(i, j)] = total\n\n # explore down and right, if possible\n nxt_points = []\n if i != M - 1: nxt_points.append((i + 1, j))\n if j != N - 1: nxt_points.append((i, j + 1))\n for nxt_i, nxt_j in nxt_points:\n queue.append((nxt_i, nxt_j, total + grid[nxt_i][nxt_j]))\n return res\n\nprobably should use dijkstra\n(sum, i, j)\n\n\nimport heapq\n\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n queue, seen = [(grid[0][0], 0, 0)], set()\n M, N = len(grid), len(grid[0])\n while queue:\n total, i, j = heapq.heappop(queue)\n if i == M - 1 and j == N - 1:\n return total\n if (i, j) not in seen:\n seen.add((i, j))\n\n nxt_points = []\n if i != M - 1: nxt_points.append((i + 1, j))\n if j != N - 1: nxt_points.append((i, j + 1))\n for nxt_i, nxt_j in nxt_points:\n heapq.heappush(queue, (total + grid[nxt_i][nxt_j], nxt_i, nxt_j))\n\"\"\"\n\n\nclass Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n M, N = len(grid), len(grid[0])\n for i in range(M - 1, -1, -1):\n for j in range(N - 1, -1, -1):\n if i == M - 1 and j != N - 1:\n # last row and not right corner\n grid[i][j] += grid[i][j + 1]\n elif i != M - 1 and j == N - 1:\n # not last row but right column\n grid[i][j] += grid[i + 1][j]\n elif i != M - 1:\n grid[i][j] += min(grid[i + 1][j], grid[i][j + 1])\n return grid[0][0]\n\n\n\n\n\n","repo_name":"jsphweid/chops","sub_path":"lc/answers/minimum-path-sum/2022.01.01-12.42.31.py","file_name":"2022.01.01-12.42.31.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"11586376690","text":"class Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n \n dic = dict()\n \n for ind, val in enumerate(nums):\n if target - val in dic.keys():\n return [ind, dic[target - val]]\n dic[val] = ind\n return","repo_name":"aoquresh/LeetCode","sub_path":"Blind 75/Arrays/Easy/1. 
{"seq_id":"11586376690","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n\n        dic = dict()\n\n        for ind, val in enumerate(nums):\n            if target - val in dic:\n                return [ind, dic[target - val]]\n            dic[val] = ind\n        # no valid pair found\n        return []","repo_name":"aoquresh/LeetCode","sub_path":"Blind 75/Arrays/Easy/1. Two Sum/Dict_O(n).py","file_name":"Dict_O(n).py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"32746204431","text":"import datetime\nimport socket\n\n\n\n\nHOST = '10.94.15.69'  # Server IP address\nPORT = 9999           # Port the server listens on\n\nsensores = {}\ncontrole = ''\n\n\ntcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\norig = (HOST, PORT)\n\ntcp.bind(orig)\ntcp.listen(10)\n\nprint(\"Server ON\")\n\nwhile True:\n    con, cliente = tcp.accept()\n    msg = con.recv(1024)\n    m = str(msg, 'cp437').split()\n\n    while not (b'fim' in msg):\n        msg = con.recv(1024)\n        if not msg: break  # an empty recv means the client closed the connection\n        m = str(msg, 'cp437').split()\n\n    print(m)\n    con.close()\n\n\n\n","repo_name":"SamuelSSan28/Sockets-em-Python","sub_path":"server_2.py","file_name":"server_2.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
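A hypothetical client sketch for exercising the server above (not part of the repository; it assumes the server is reachable at the configured HOST/PORT and that b'fim' ends the per-connection read loop):

import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
    client.connect(('10.94.15.69', 9999))           # same HOST/PORT as the server
    client.sendall('sensor1 23.5'.encode('cp437'))  # the server decodes with cp437
    client.sendall(b'fim')                          # terminates the server's read loop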
{"seq_id":"71236452873","text":"from collections import defaultdict\nimport json\nimport random\n\nfrom oslo_log import log as logging\n\nfrom neutron_lbaas import agent_scheduler\nfrom neutron_lbaas.extensions import lbaas_agentschedulerv2\n\nLOG = logging.getLogger(__name__)\n\n\nclass TenantScheduler(agent_scheduler.ChanceScheduler):\n    \"\"\"Finds an available agent for the tenant/environment.\"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize with the ChanceScheduler base class.\"\"\"\n        super(TenantScheduler, self).__init__()\n\n    def get_lbaas_agent_hosting_loadbalancer(self, plugin, context,\n                                             loadbalancer_id, env=None):\n        \"\"\"Return the agent that is hosting the loadbalancer.\"\"\"\n        LOG.debug('Getting agent for loadbalancer %s with env %s' %\n                  (loadbalancer_id, env))\n\n        with context.session.begin(subtransactions=True):\n            # returns {'agent': agent_dict}\n            lbaas_agent = plugin.db.get_agent_hosting_loadbalancer(\n                context,\n                loadbalancer_id\n            )\n            # if the agent bound to this loadbalancer is alive, return it\n            if lbaas_agent is not None:\n                if (not lbaas_agent['agent']['alive'] or\n                        not lbaas_agent['agent']['admin_state_up']) and \\\n                        env is not None:\n                    # The agent bound to this loadbalancer is not live\n                    # or is not active. Find another agent in the same\n                    # environment and environment group if possible\n                    ac = self.deserialize_agent_configurations(\n                        lbaas_agent['agent']['configurations']\n                    )\n                    # get an environment group number for the bound agent\n                    if 'environment_group_number' in ac:\n                        gn = ac['environment_group_number']\n                    else:\n                        gn = 1\n\n                    reassigned_agent = self.rebind_loadbalancers(\n                        context, plugin, env, gn, lbaas_agent['agent'])\n                    if reassigned_agent:\n                        lbaas_agent = {'agent': reassigned_agent}\n\n        return lbaas_agent\n\n    def rebind_loadbalancers(\n            self, context, plugin, env, group, current_agent):\n        env_agents = self.get_agents_in_env(context, plugin, env,\n                                            group=group, active=True)\n        if env_agents:\n            reassigned_agent = env_agents[0]\n            bindings = \\\n                context.session.query(\n                    agent_scheduler.LoadbalancerAgentBinding).filter_by(\n                        agent_id=current_agent['id']).all()\n            for binding in bindings:\n                binding.agent_id = reassigned_agent['id']\n                context.session.add(binding)\n            LOG.debug(\"%s Loadbalancers bound to agent %s now bound to %s\" %\n                      (len(bindings),\n                       current_agent['id'],\n                       reassigned_agent['id']))\n            return reassigned_agent\n        else:\n            return None\n\n    def get_dead_agents_in_env(\n            self, context, plugin, env, group=None):\n        return_agents = []\n        all_agents = self.get_agents_in_env(context,\n                                            plugin,\n                                            env,\n                                            group,\n                                            active=None)\n\n        for agent in all_agents:\n\n            if not plugin.db.is_eligible_agent(active=True, agent=agent):\n                agent_dead = plugin.db.is_agent_down(\n                    agent['heartbeat_timestamp'])\n                if not agent['admin_state_up'] or agent_dead:\n                    return_agents.append(agent)\n        return return_agents\n\n    def scrub_dead_agents(self, context, plugin, env, group=None):\n        dead_agents = self.get_dead_agents_in_env(context, plugin, env, group)\n        for agent in dead_agents:\n            self.rebind_loadbalancers(context, plugin, env, group, agent)\n\n
    def get_agents_in_env(\n            self, context, plugin, env, group=None, active=None):\n        \"\"\"Get the active agents in the specified environment.\"\"\"\n        return_agents = []\n\n        with context.session.begin(subtransactions=True):\n            candidates = []\n            try:\n                candidates = plugin.db.get_lbaas_agents(context, active=active)\n            except Exception as ex:\n                LOG.error(\"Exception retrieving agent candidates for \"\n                          \"scheduling: {}\".format(ex))\n\n            for candidate in candidates:\n                ac = self.deserialize_agent_configurations(\n                    candidate['configurations'])\n                if 'environment_prefix' in ac:\n                    if ac['environment_prefix'] == env:\n                        if group:\n                            if ('environment_group_number' in ac and\n                                    ac['environment_group_number'] == group):\n                                return_agents.append(candidate)\n                        else:\n                            return_agents.append(candidate)\n\n        return return_agents\n\n    def get_agents_hosts_in_env(\n            self, context, plugin, env):\n        \"\"\"Get the hosts of the agents in the specified environment.\"\"\"\n        # TODO(xie)\n        # maybe replace: get_agent-id-hosting-the-lb\n        return_agents_hosts = []\n\n        with context.session.begin(subtransactions=True):\n            candidates = []\n            try:\n                candidates = plugin.db.get_lbaas_agents(context)\n            except Exception as ex:\n                LOG.error(\"Exception retrieving agent candidates for \"\n                          \"scheduling: {}\".format(ex))\n\n            for candidate in candidates:\n                ac = self.deserialize_agent_configurations(\n                    candidate['configurations'])\n                if 'environment_prefix' in ac:\n                    if ac['environment_prefix'] == env:\n                        return_agents_hosts.append(candidate['host'])\n        # TODO(xie) check from here later.\n        return return_agents_hosts\n\n    def get_capacity(self, configurations):\n        \"\"\"Get environment capacity.\"\"\"\n        if 'environment_capacity_score' in configurations:\n            return configurations['environment_capacity_score']\n        else:\n            return 0.0\n\n    def deserialize_agent_configurations(self, agent_conf):\n        \"\"\"Return a dictionary for the agent configuration.\"\"\"\n        if not isinstance(agent_conf, dict):\n            try:\n                agent_conf = json.loads(agent_conf)\n            except ValueError as ve:\n                LOG.error(\"Can't decode JSON %s : %s\"\n                          % (agent_conf, ve))\n                return {}\n        return agent_conf\n\n
    def schedule(self, plugin, context, loadbalancer_id, env=None):\n        \"\"\"Schedule the loadbalancer to an active loadbalancer agent\n\n        if there is no enabled agent hosting it already.\n        \"\"\"\n\n        with context.session.begin(subtransactions=True):\n            loadbalancer = plugin.db.get_loadbalancer(context, loadbalancer_id)\n            # If the loadbalancer is hosted on an active agent\n            # already, return that agent or one in its env\n            lbaas_agent = self.get_lbaas_agent_hosting_loadbalancer(\n                plugin,\n                context,\n                loadbalancer.id,\n                env\n            )\n\n            if lbaas_agent:\n                lbaas_agent = lbaas_agent['agent']\n                LOG.debug(' Assigning task to agent %s.'\n                          % (lbaas_agent['id']))\n                return lbaas_agent\n\n            # There is no existing loadbalancer agent binding.\n            # Find all active agent candidates in this env.\n            # We use environment_prefix to find F5® agents\n            # rather than map to the agent binary name.\n            candidates = self.get_agents_in_env(\n                context,\n                plugin,\n                env,\n                active=True\n            )\n\n            LOG.debug(\"candidate agents: %s\", candidates)\n            if len(candidates) == 0:\n                LOG.error('No f5 lbaas agents are active for env %s' % env)\n                raise lbaas_agentschedulerv2.NoActiveLbaasAgent(\n                    loadbalancer_id=loadbalancer.id)\n\n            # We have active candidates to choose from.\n            # Qualify them by tenant affinity and then capacity.\n            chosen_agent = None\n            agents_by_group = defaultdict(list)\n            capacity_by_group = {}\n\n            for candidate in candidates:\n                # Organize agents by their environment group\n                # and collect each group's max capacity.\n                ac = self.deserialize_agent_configurations(\n                    candidate['configurations']\n                )\n                gn = 1\n                if 'environment_group_number' in ac:\n                    gn = ac['environment_group_number']\n                agents_by_group[gn].append(candidate)\n\n                # populate each group's capacity\n                group_capacity = self.get_capacity(ac)\n                if gn not in capacity_by_group:\n                    capacity_by_group[gn] = group_capacity\n                else:\n                    if group_capacity > capacity_by_group[gn]:\n                        capacity_by_group[gn] = group_capacity\n\n                # Do we already have this tenant assigned to this\n                # agent candidate? If we do and it has capacity\n                # then assign this loadbalancer to this agent.\n                assigned_lbs = plugin.db.list_loadbalancers_on_lbaas_agent(\n                    context, candidate['id'])\n                for assigned_lb in assigned_lbs:\n                    if loadbalancer.tenant_id == assigned_lb.tenant_id:\n                        chosen_agent = candidate\n                        break\n\n                if chosen_agent:\n                    # Does the agent which had tenants assigned\n                    # to it still have capacity?\n                    if group_capacity >= 1.0:\n                        chosen_agent = None\n                    else:\n                        break\n\n            # If we don't have an agent with capacity associated\n            # with our tenant_id, let's pick an agent based on\n            # the group with the lowest capacity score.\n            if not chosen_agent:\n                # lets get an agent from the group with the\n                # lowest capacity score\n                lowest_utilization = 1.0\n                selected_group = 1\n                for group, capacity in capacity_by_group.items():\n                    if capacity < lowest_utilization:\n                        lowest_utilization = capacity\n                        selected_group = group\n\n                LOG.debug('%s group %s scheduled with capacity %s'\n                          % (env, selected_group, lowest_utilization))\n                if lowest_utilization < 1.0:\n                    # Choose an agent in the env group for this\n                    # tenant at random.\n                    chosen_agent = random.choice(\n                        agents_by_group[selected_group]\n                    )\n\n            # If there are no agents with available capacity, raise exception\n            if not chosen_agent:\n                LOG.warning('No capacity left on any agents in env: %s' % env)\n                LOG.warning('Group capacities in environment %s were %s.'\n                            % (env, capacity_by_group))\n                raise lbaas_agentschedulerv2.NoEligibleLbaasAgent(\n                    loadbalancer_id=loadbalancer.id)\n\n            binding = agent_scheduler.LoadbalancerAgentBinding()\n            binding.agent = chosen_agent\n            binding.loadbalancer_id = loadbalancer.id\n            context.session.add(binding)\n\n            LOG.debug(('Loadbalancer %(loadbalancer_id)s is scheduled to '\n                       'lbaas agent %(agent_id)s'),\n                      {'loadbalancer_id': loadbalancer.id,\n                       'agent_id': chosen_agent['id']})\n\n            return chosen_agent\n","repo_name":"F5Networks/f5-openstack-lbaasv2-driver","sub_path":"f5lbaasdriver/v2/bigip/agent_scheduler.py","file_name":"agent_scheduler.py","file_ext":"py","file_size_in_byte":12322,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"27"}
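When no tenant-affine agent has capacity, schedule() above falls back to "pick the environment group with the lowest capacity score, then a random agent from it". A standalone sketch of that selection rule on hypothetical agent data (names and numbers invented for illustration):

import random
from collections import defaultdict

agents = [
    {'id': 'a1', 'group': 1, 'capacity': 0.9},
    {'id': 'a2', 'group': 2, 'capacity': 0.4},
    {'id': 'a3', 'group': 2, 'capacity': 0.4},
]

agents_by_group = defaultdict(list)
capacity_by_group = {}
for agent in agents:
    agents_by_group[agent['group']].append(agent)
    # a group's score is the max capacity reported by its members
    prev = capacity_by_group.get(agent['group'], 0.0)
    capacity_by_group[agent['group']] = max(prev, agent['capacity'])

selected_group = min(capacity_by_group, key=capacity_by_group.get)
chosen = random.choice(agents_by_group[selected_group])
assert selected_group == 2 and chosen['id'] in ('a2', 'a3')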
{"seq_id":"39417676119","text":"'''\nMouse Touchscreen Program, Moorman Lab 2020\nCode written by Jason Biundo, version 1.1 10/2020\n\nThis program utilizes the pygame library to create an interactive program that mice can\ninteract with on a touchscreen that is run by a Raspberry pi. This program is designed to\neventually be generalizable to a variety of different tasks.\n'''\n\n# Import and initialize the pygame library\nimport pygame\nimport time\nimport logging\n\npygame.init()\npygame.display.set_caption('Mouse Touchscreen Program')\n\npi = False\n\n# Initialize logging\nlogging.basicConfig(filename='test.log', level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')\n# format='%(asctime)s:%(levelname)s:%(message)s'\n\n# Import pygame.locals for easier access to key coordinates\nfrom pygame.locals import (\n    K_UP, K_DOWN, K_LEFT, K_RIGHT, K_ESCAPE, KEYDOWN, QUIT,\n)\n\n# colors\nred = (255,0,0)\ngreen = (0,255,0)\nblue = (0,0,255)\nblack = (0,0,0)\nyellow = (255,255,0)\npurple = (255,0,255)\nwhite = (255,255,255)\ndeepskyblue = (0,191,255)\n\n# sound effects - uncomment if you have the sound file\n#effect = pygame.mixer.Sound('chime.wav')\n\n# Functions to add\n'''\n- Add shapes (one function for each shape most likely, e.g. circle, square, polygon)\n- Check collision\n- Update screen?\n- Play sound\n'''\n\n# Set up the display window. Touchscreen dimensions = 800x400\nscreen_width = 800\nscreen_height = 400\nscreen = pygame.display.set_mode([screen_width, screen_height])\nlogging.info('Program Started')\n# make fullscreen on touchscreen\n#screen = pygame.display.set_mode((800, 400), pygame.FULLSCREEN)\n\n# Function to check collision of mouse with shape\ndef check_collision(object, mouse_pos, left_click, color=(0,0,0)):\n    '''\n    Check if a mouse click collides with one of the objects.\n    Takes in object, mouse position (tuple of coordinates) and left_click\n    (boolean, True if the left mouse button was clicked).\n    Default argument for color is black, but can be changed.\n    '''\n    if object.collidepoint(mouse_pos) and left_click:\n        print('You pressed', object)\n        screen.fill(color)\n        #effect.play() # plays sound if uncommented\n        pygame.display.flip()\n        pygame.mouse.set_pos(0,0)\n        pygame.time.wait(1000) # pauses program for 1000ms for flash\n        logging.info('Shape: {}'.format(object))\n\n\nrunning = True\noff = False\n\n# Main loop, run until the user asks to quit\nwhile running:\n    # turn on/off visibility of mouse cursor (True=visible, False=hidden)\n    pygame.mouse.set_visible(running)\n\n    # draw test shapes\n    obj1 = pygame.draw.circle(screen, yellow, (75,250), 57)\n    obj2 = pygame.draw.circle(screen, purple, (260, 250), 57)\n    obj3 = pygame.draw.rect(screen, red, (400,200, 100,100))\n    obj4 = pygame.draw.rect(screen, blue, (600,200, 125,100))\n    objList = [obj1,obj2,obj3,obj4]\n\n    mouse_pos = pygame.mouse.get_pos()\n\n    for event in pygame.event.get():\n        # check for mousebutton\n        if event.type == pygame.MOUSEBUTTONDOWN:\n            mouse_pos = pygame.mouse.get_pos()\n            print('The position of the cursor is', mouse_pos)\n            logging.info('Coordinates:' + str(mouse_pos))\n\n\n            #mouse_pos = pygame.mouse.get_pos()\n            left_click, pressed2, right_click = pygame.mouse.get_pressed() # pressed 1 is left click, pressed 3 is right click\n\n            # Check if the object \"collided\" with the mouse pos and if the left mouse button was pressed\n            check_collision(obj1,mouse_pos,left_click, yellow)\n            check_collision(obj2,mouse_pos,left_click, purple)\n            check_collision(obj3,mouse_pos,left_click, red)\n            check_collision(obj4,mouse_pos,left_click, blue)\n            # for obj in objList:\n            #     check_collision(obj,mouse_pos,left_click,white)\n\n        # escape from program\n        if event.type == KEYDOWN:\n            # Was it the Escape key? If so, stop the loop.\n            if event.key == K_ESCAPE:\n                running = False\n            # elif event.key == K_UP:\n            #     screen = pygame.display.set_mode([screen_width, screen_height],pygame.FULLSCREEN)\n        elif event.type == pygame.QUIT:\n            running = False\n\n    # Fill the background with black\n    screen.fill(black)\n    # Draw circles\n    pygame.draw.circle(screen, purple, (260, 250), 57)\n    pygame.draw.circle(screen, yellow, (75,250), 57)\n    # Draw squares\n    pygame.draw.rect(screen, red, (400,200, 100,100))\n    pygame.draw.rect(screen, blue, (600,200, 125,100))\n\n    # Reset mouse position every loop to avoid problems with touchscreens. Comment this if using a computer\n    #pygame.mouse.set_pos(0,0)\n\n    # Update the display\n    pygame.display.flip()\n\n# Done! Time to quit.\npygame.quit()\n\n\n\n# Check if the object \"collided\" with the mouse pos and if the left mouse button was pressed\n# if circ1.collidepoint(mouse_pos) and left_click:\n#     print('You pressed the circle')\n#     screen.fill(purple)\n#     pygame.display.flip()\n#     effect.play()\n#     time.sleep(1)\n#     pygame.mouse.set_pos(0,0)\n# if rec1.collidepoint(mouse_pos) and left_click:\n#     print('You pressed the square')\n#     screen.fill(red)\n#     pygame.display.flip()\n#     pygame.mouse.set_pos(0,0)\n#     effect.play()\n#     time.sleep(.5)\n# if circ2.collidepoint(mouse_pos) and left_click:\n#     print('You pressed the yellow circle')\n#     screen.fill(yellow)\n#     pygame.display.flip()\n#     pygame.mouse.set_pos(0,0)\n#     effect.play()\n#     time.sleep(.5)\n# if rec2.collidepoint(mouse_pos) and left_click:\n#     print('You pressed the rectangle')\n#     screen.fill(blue)\n#     pygame.display.flip()\n#     pygame.mouse.set_pos(0,0)\n#     effect.play()\n#     time.sleep(.5)","repo_name":"moormanlab/testproject","sub_path":"Mouse_touchscreen ver 1.1.py","file_name":"Mouse_touchscreen ver 1.1.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
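check_collision() above leans entirely on pygame's Rect.collidepoint. A minimal headless check of that call, reusing the geometry of obj3 (illustrative snippet, not part of the original script):

import pygame

pygame.init()
rect = pygame.Rect(400, 200, 100, 100)   # same geometry as obj3
assert rect.collidepoint((450, 250))     # a click inside the square
assert not rect.collidepoint((10, 10))   # a click elsewhere misses
pygame.quit()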
{"seq_id":"74357461510","text":"from django.db.models import Prefetch\n\nfrom hempfieldbaseball.teammanagement.models import Player\nfrom hempfieldbaseball.velocities.models import Velocity\n\n\n# Global for number of Leaderboard players\ntop = 15\n\n\n#\n# IMPROVEMENT LEADERBOARDS\n#\ndef get_velocity_improvement_leaders(position):\n    improvement_leaders = []\n\n    for player in Player.active_players.prefetch_related(\n        Prefetch(\n            \"velocity_records\",\n            queryset=Velocity.objects.filter(position=position),\n        )\n    ).all():\n        velocities = list(player.velocity_records.all())\n        # need at least a baseline and one later reading to measure improvement\n        if len(velocities) > 1:\n            baseline = velocities[0]\n            latest = velocities[-1]\n            improvement = latest.velocity - baseline.velocity\n\n            if improvement >= 0:\n                improvement_leaders.append(\n                    {\n                        \"player\": player,\n                        \"improvement\": improvement,\n                        \"baseline\": baseline,\n                        \"latest\": latest,\n                    }\n                )\n\n    return sorted(improvement_leaders, key=lambda l: l[\"improvement\"], reverse=True)\n\n\ndef get_body_weight_improvement_leaders():\n    improvement_leaders = []\n\n    for player in Player.active_players.prefetch_related(\n        Prefetch(\"bodyweight_records\"),\n    ).all():\n        body_weights = list(player.bodyweight_records.all())\n        if len(body_weights) > 1:\n            baseline = body_weights[0]\n            latest = body_weights[-1]\n            improvement = round(latest.weight - baseline.weight, 1)\n\n            if improvement >= 0:\n                improvement_leaders.append(\n                    {\n                        \"player\": player,\n                        \"improvement\": improvement,\n                        \"baseline\": baseline,\n                        \"latest\": latest,\n                    }\n                )\n\n    return sorted(improvement_leaders, key=lambda l: l[\"improvement\"], reverse=True)\n","repo_name":"dmanning21h/hempfieldbaseball-old","sub_path":"hempfieldbaseball/leaderboards/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"}
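The improvement rule above boils down to "latest reading minus baseline, keep only non-negative deltas". A toy illustration with invented numbers:

records = [78.0, 79.5, 81.0]           # e.g. velocity readings over time
baseline, latest = records[0], records[-1]
improvement = latest - baseline
assert improvement == 3.0 and improvement >= 0  # qualifies for the leaderboard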
{"seq_id":"21934720606","text":"import sys\nimport datetime\n\n\nclass OutputHandler(object):\n    \"\"\"\n    Emma's output handler\n    \"\"\"\n    def __init__(self, print_stdout=False, log_file=None, log_flush=False):\n        self.stdout = sys.stdout\n        self.print_stdout = print_stdout\n        self.log_flush = log_flush\n        sys.stdout = self\n        if log_file:\n            # open() rather than the Python 2 file() builtin\n            self.log_fp = open(log_file, \"a+\")\n        else:\n            self.log_fp = None\n        self.debug = print_stdout or log_file\n\n    def write(self, s):\n        \"\"\"\n        Write s to stdout and/or the timestamped logfile.\n\n        @param s: str\n        \"\"\"\n        if self.print_stdout:\n            self.stdout.write(s)\n            if self.log_flush:\n                self.stdout.flush()\n        if self.log_fp:\n            s = s.strip(\"\\r\\n\")\n            if not s:\n                # do not write empty lines to logfile\n                return\n            timestamp = str(datetime.datetime.now())[0:22]\n            self.log_fp.write(\n                \"%s %s\\n\" % (timestamp, s.replace(\"\\n\", \"\\n \" + (\" \" * len(timestamp)))))\n            if self.log_flush:\n                self.log_fp.flush()\n","repo_name":"fastflo/emma","sub_path":"emmalib/OutputHandler.py","file_name":"OutputHandler.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"27"}
{"seq_id":"10253096934","text":"#!/usr/bin/env python3\nimport argparse\nimport xml.etree.ElementTree as ET\n\nparser = argparse.ArgumentParser(description='Add a distributionManagement repository to a pom.xml.')\nparser.add_argument('pom', type=str)\nparser.add_argument('url', type=str)\n\nargs = parser.parse_args()\n\nET.register_namespace('','http://maven.apache.org/POM/4.0.0')\ntree = ET.parse(args.pom)\nroot = tree.getroot()\n\ndistributionManagement = ET.Element('distributionManagement')\nrepository = ET.Element('repository')\n_id = ET.Element('id')\n_id.text = 'maven'\nname = ET.Element('name')\nname.text = 'maven'\nurl = ET.Element('url')\nurl.text = args.url\nrepository.extend([_id, name, url])\ndistributionManagement.append(repository)\n\nroot.append(distributionManagement)\n\ntree.write(args.pom)\n","repo_name":"MuXiu1997/openapi-generator-demo","sub_path":"scripts/set-maven-repository.py","file_name":"set-maven-repository.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
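For reference, a standalone sketch (with a hypothetical URL) of the XML block the script above appends to the pom; ET.SubElement makes the same construction a little shorter:

import xml.etree.ElementTree as ET

dm = ET.Element('distributionManagement')
repo = ET.SubElement(dm, 'repository')
ET.SubElement(repo, 'id').text = 'maven'
ET.SubElement(repo, 'name').text = 'maven'
ET.SubElement(repo, 'url').text = 'https://example.com/repo'
print(ET.tostring(dm, encoding='unicode'))
# -> <distributionManagement><repository><id>maven</id><name>maven</name>
#    <url>https://example.com/repo</url></repository></distributionManagement>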
{"seq_id":"9590641393","text":"from igraph import *\nfrom igraph.drawing import graph\nimport csv\nimport argparse\nfrom graph_functions import *\nfrom csv_functions import *\nimport matplotlib.pyplot as plt\nfrom table_functions import *\n\n###############################################################\n#                                                             #\n#     Passing command-line arguments in with argparse         #\n#                                                             #\n###############################################################\ndef pair(s):\n    try:\n        a, t = map(str, s.split(','))\n        return a, t\n    except:\n        raise argparse.ArgumentTypeError(\"Pairs must be: attribute,threshold (no space in between)\")\nparser = argparse.ArgumentParser(description=\"STUDENTS GRAPH\")\nparser.add_argument('-n', '--name', help=\"Archive name\", required=True, metavar='', type=str)\nparser.add_argument('-p', '--pairs', help=\"Pairs: attribute, threshold\", required=True, dest=\"pairs\", type=pair, nargs='+')\nparser.add_argument('-tn', '--tableNumber', type=int, help=\"Number of edges on table\", required=True)\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-a', '--all', action='store_true', help='all edges with given attributes')\ngroup.add_argument('-o', '--only', action='store_true', help='only edges of pairs with all given attributes in common')\nargs = parser.parse_args()\n\n# Data structures used:\ndescr = []         # stores the dictionary fields\nperson = dict()    # each person is a dictionary\nstudents = []      # list of people (dictionaries)\nline_count = 0     # number of lines in the csv\ng = Graph()        # graph built from the csv data\ng_colors = []      # edge colors\nv_colors = []      # vertex colors (blue for men, pink for women)\n\n\nif __name__ == '__main__':\n    line_count = import_csv(descr, person, students)\n    graph_info(g, line_count, descr, students, v_colors)\n    if args.only:\n        find_edges_pairs(args.pairs, students, g, g_colors)\n    elif args.all:\n        find_edges_all(args.pairs, students, g, g_colors)\n    else:\n        print(\"choose between -o or -a\")\n\n    # Plot settings:\n    visual_style = {}\n    visual_style[\"vertex_size\"] = 10\n    visual_style[\"vertex_color\"] = v_colors\n    visual_style[\"edge_color\"] = g_colors\n    visual_style[\"vertex_label\"] = g.vs[\"ID\"]\n    visual_style[\"vertex_label_size\"] = 10\n    visual_style[\"vertex_label_dist\"] = 1.5\n    visual_style[\"edge_width\"] = 0.1\n    visual_style[\"layout\"] = \"mds\"\n    visual_style[\"bbox\"] = (3000, 3000)\n    visual_style[\"margin\"] = 10\n    name = args.name+\".pdf\"\n    plot(g, name, **visual_style)\n    plot_ordered(g, args.tableNumber, pairsTOatributes(args.pairs), students)\n\n","repo_name":"shmjade/Students-Graph","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"32180134368","text":"\"\"\"\nFunction(s) for dealing with time.\n\"\"\"\n\ndef str_to_secs(timestr):\n    \"\"\"Converts a time string (e.g. 30s) to a number of seconds (e.g. 30)\n\n    Arguments:\n        timestr {str} -- string representation of the time.\n    Returns:\n        {int} -- number of seconds given by the time string input.\n    \"\"\"\n    length = len(timestr)\n\n    if not timestr[0:length-1].isdigit() and length > 1:\n        # Check if there are any letters before the last char in the input.\n        # If length is 1, then we're checking the empty string, so we have to\n        # skip that case.\n        return -1\n\n    if not timestr[length - 1].isalpha():\n        # Default with no suffix is seconds.\n        return int(timestr)\n    elif timestr[length - 1] == 's':\n        # Suffix of 's' means seconds\n        return int(timestr[0:length-1])\n    elif timestr[length - 1] == 'm':\n        # Suffix of 'm' means minutes, so multiply by 60.\n        return int(timestr[0:length-1]) * 60\n    elif timestr[length - 1] == 'h':\n        # Suffix of 'h' means hours, so multiply by 60*60.\n        return int(timestr[0:length-1]) * 3600\n    return -1\n\n\nif __name__ == '__main__':\n    time_str = '0'\n    time_int = str_to_secs(time_str)\n    print(time_int)\n","repo_name":"koml12/okupy","sub_path":"src/utils/times.py","file_name":"times.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"}
{"seq_id":"75010844550","text":"# This file is part of ArcJail.\n#\n# ArcJail is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ArcJail is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ArcJail. 
If not, see .\n\nfrom entities.constants import WORLD_ENTITY_INDEX\nfrom entities.entity import Entity\nfrom entities.helpers import index_from_pointer\nfrom entities.hooks import EntityCondition, EntityPreHook\nfrom entities import TakeDamageInfo\nfrom listeners.tick import Delay\nfrom memory import make_object\nfrom messages import KeyHintText\nfrom players.constants import HideHudFlags\nfrom players.helpers import get_client_language\n\nfrom controlled_cvars.handlers import int_handler\n\nfrom ..classes.base_player_manager import BasePlayerManager\nfrom ..internal_events import InternalEvent\nfrom ..resource.strings import build_module_strings\n\nfrom . import build_module_config\nfrom .players import player_manager\nfrom .teams import GUARDS_TEAM, PRISONERS_TEAM\n\n\nstrings_module = build_module_strings('damage_hook')\nconfig_manager = build_module_config('damage_hook')\n\nconfig_manager.controlled_cvar(\n int_handler,\n \"protection_hp\",\n default=100,\n description=\"Amount of health used to protect players against damage\",\n)\n\n\nKEYHINT_REFRESH_INTERVAL = 1\nFINISHING_DAMAGE = 1000\nHIDEHUD_PROP = 'm_Local.m_iHideHUD'\n\n\ndef is_world(index):\n if index == WORLD_ENTITY_INDEX:\n return True\n\n if Entity(index).classname != 'player':\n return True\n\n return False\n\n\ndef get_hook(flags, next_hook=(lambda counter, info: True)):\n # Hooks: S = self, W = world, P = prisoners, G = guards\n flags = flags.upper()\n\n def hook(counter, info):\n if 'S' in flags:\n if counter.owner.player.index == info.attacker:\n return next_hook(counter, info)\n\n if 'W' in flags:\n if is_world(info.attacker):\n return next_hook(counter, info)\n\n if (info.attacker != counter.owner.player.index and\n not is_world(info.attacker)):\n\n attacker = player_manager[info.attacker]\n if 'P' in flags:\n if attacker.team == PRISONERS_TEAM:\n return next_hook(counter, info)\n\n if 'G' in flags:\n if attacker.team == GUARDS_TEAM:\n return next_hook(counter, info)\n\n return False\n\n return hook\n\n\nclass ProtectedPlayer:\n class HealthCounter:\n def __init__(self, owner, health, display):\n self.owner = owner\n self.display = display\n if health is None:\n self.health = owner.player.health\n else:\n self.health = health\n\n self.hook_hurt = lambda health_counter, info: True\n self.hook_death = lambda health_counter, info: True\n\n def _hurt(self, info):\n if not self.hook_hurt(self, info):\n return True\n\n self.health -= info.damage\n if self.health <= 0:\n if (self.hook_death(self, info) and\n not self.owner.dead):\n\n self.owner._show_health(hide=True)\n self.owner.dead = True\n\n info.damage = FINISHING_DAMAGE\n\n else:\n self.owner._show_health()\n\n info.damage = 0\n\n return None\n\n def delete(self):\n self.owner.delete_counter(self)\n\n def format_display(self):\n if not self.display:\n return None\n\n return self.display.tokenize(\n amount=int(self.health) if self.health > 0 else '∞')\n\n def __init__(self, player):\n self.player = player\n self._counters = []\n self._pre_protection_health = None\n self._language = get_client_language(player.index)\n self.dead = player.dead\n\n def set_protected(self):\n if self._pre_protection_health is not None:\n return\n\n if self.dead:\n return\n\n self._pre_protection_health = self.player.health\n self.player.health = config_manager['protection_hp']\n\n hidehud = self.player.get_property_int(\n HIDEHUD_PROP) | HideHudFlags.HEALTH\n\n self.player.set_property_int(HIDEHUD_PROP, hidehud)\n\n def unset_protected(self):\n if self._pre_protection_health is None:\n 
return\n\n if self.dead:\n return\n\n self.player.health = self._pre_protection_health\n self._pre_protection_health = None\n\n hidehud = self.player.get_property_int(\n HIDEHUD_PROP) & ~HideHudFlags.HEALTH\n\n self.player.set_property_int(HIDEHUD_PROP, hidehud)\n\n def new_counter(self, health=None, display=False):\n counter = self.HealthCounter(self, health, display)\n self._counters.append(counter)\n return counter\n\n def delete_counter(self, counter):\n if counter not in self._counters:\n return\n\n self._counters.remove(counter)\n if not self._counters:\n self._show_health(hide=True)\n\n def _hurt(self, info):\n if self._pre_protection_health is None:\n return\n\n rs = []\n for counter in self._counters:\n rs.append(counter._hurt(info))\n\n if any(rs) and not self.dead:\n return True\n\n return None\n\n def _spawn(self):\n self._pre_protection_health = None\n self.dead = False\n self._counters = []\n\n def _show_health(self, hide=False):\n if hide:\n KeyHintText(\"\").send(self.player.index)\n return\n\n content = []\n for counter in self._counters:\n counter_str = counter.format_display()\n if counter_str is not None:\n content.append(counter_str.get_string(self._language))\n\n if not content:\n return\n\n KeyHintText('\\n'.join(content)).send(self.player.index)\n\n\nprotected_player_manager = BasePlayerManager(ProtectedPlayer)\n\n\n@InternalEvent('player_created')\ndef on_player_created(player):\n protected_player_manager.create(player)\n\n\n@InternalEvent('player_deleted')\ndef on_player_deleted(player):\n protected_player_manager.delete(player)\n\n\n@EntityPreHook(EntityCondition.is_player, 'on_take_damage')\ndef on_take_damage(args):\n protected_player = protected_player_manager[index_from_pointer(args[0])]\n if protected_player.dead:\n return\n\n info = make_object(TakeDamageInfo, args[1])\n return protected_player._hurt(info)\n\n\n@InternalEvent('player_respawn')\ndef on_player_respawn(player):\n protected_player = protected_player_manager[player.index]\n protected_player._spawn()\n\n\ndelay = None\n\n\n@InternalEvent('load')\ndef on_load():\n def callback():\n for protected_player in protected_player_manager.values():\n if not protected_player.dead:\n protected_player._show_health()\n\n global delay\n delay = Delay(KEYHINT_REFRESH_INTERVAL, callback)\n\n callback()\n\n\n@InternalEvent('unload')\ndef on_unload():\n if delay:\n delay.cancel()\n","repo_name":"KirillMysnik/ArcJail","sub_path":"srcds/addons/source-python/plugins/arcjail/modules/damage_hook.py","file_name":"damage_hook.py","file_ext":"py","file_size_in_byte":7516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"35167955125","text":"import matplotlib.pyplot as plt\nfrom datetime import date\nimport ipyleaflet as ipyl\nimport ipywidgets as ipyw\n#import eodag.api.core.SatImagesAPI as satimagesapi\n#import eodag as dag2\nfrom eodag.api.core import EODataAccessGateway\nimport datetime\n\ndef make_map_rectangle(longitude_center, latitude_center, degrees_from_center=0.05):\n lonmin = longitude_center - degrees_from_center\n lonmax = longitude_center + degrees_from_center\n latmin = latitude_center - degrees_from_center\n latmax = latitude_center + degrees_from_center\n\n extent = {\n 'lonmin': lonmin,\n 'lonmax': lonmax,\n 'latmin': latmin,\n 'latmax': latmax\n }\n return extent\n\ndef subselect(longitude_center, latitude_center, degrees_from_center):\n longmin = longitude_center+degrees_from_center\n latmin = latitude_center+degrees_from_center\n longmax = 
longitude_center-degrees_from_center\n latmax = latitude_center-degrees_from_center\n print(\"{}, {}, {}, {}\".format(longmin, longmax, latmin, latmax))\n return (longmin, longmax, latmin, latmax)\n\ndef no_subselect(extent):\n longmin = extent['lonmin']\n latmin = extent['latmin']\n longmax = extent['lonmax']\n latmax = extent['latmax']\n return longmin, latmin, longmax, latmax\n\ndef main():\n base_dir = '/home/sobloo/hack-tbd/original-sobloo'\n conf_file = base_dir + \"/eodagconf.yml\"\n dag = EODataAccessGateway(user_conf_file_path = conf_file)\n product_type = 'S2_MSI_L1C'\n\n\n # Hungary\n longitude_center = 18.282810\n latitude_center = 46.127194\n# # Sweden\n# longitude_center = 18.330000\n# latitude_center = 59.400000\n degrees_from_center = 0.0035\n extent = make_map_rectangle(longitude_center=longitude_center,\n latitude_center=latitude_center,\n degrees_from_center=degrees_from_center)\n\n dag.set_preferred_provider(provider='airbus-ds')\n #prodTypeList = dag.list_product_types('airbus-ds')\n #print(prodTypeList)\n\n products = dag.search(product_type,startTimeFromAscendingNode='2016-01-17',completionTimeFromAscendingNode='2018-09-20',geometry=extent,cloudCover=1)\n #products = dag.search(product_type)\n for i in range(len(products)):\n try:\n print('{} : {}'.format(i, products[i]))\n #print(products)\n product = products[i]\n xx, yy = product.as_dict()['geometry']['coordinates'][0][4]\n\n #longmin, latmin, longmax, latmax = subselect(longitude_center=xx, latitude_center=yy, degrees_from_center=degrees_from_center)\n longmin, latmin, longmax, latmax = no_subselect(extent=extent)\n\n\n\n VIR = product.get_data(crs='epsg:4326', resolution=0.0001, band='B04', extent=(longmin, latmin, longmax, latmax))\n NIR = product.get_data(crs='epsg:4326', resolution=0.0001, band='B08', extent=(longmin, latmin, longmax, latmax))\n NDVI = (NIR - VIR * 1.) 
/ (NIR + VIR)\n\n plt.imshow(NDVI, cmap='RdYlGn', aspect='auto')\n hms = datetime.datetime.now().strftime('%H%M%S')\n plt.savefig('{}/img/ndvi-{}.png'.format(base_dir, hms))\n except Exception as e:\n print('Exception: {}'.format(e))\n continue\n\nif __name__ == '__main__':\n main()\n","repo_name":"tkivisik/hack-tbd","sub_path":"original-sobloo/nvdi.py","file_name":"nvdi.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"37265512029","text":"#!-*- coding:utf-8 -*-\r\n'''\r\nCreated on 2017年11月10日\r\n\r\n@author: zhang.meng\r\n'''\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport time,threading\r\n\r\nclass RecordingPlayBack(threading.Thread):\r\n def __init__(self,currentTable=None,startLink=None,browserIndex=0):\r\n threading.Thread.__init__(self)\r\n self.currentTable = currentTable\r\n self.startLink = startLink\r\n self.browserIndex = browserIndex\r\n self.driver = None\r\n self.tempWindow = None\r\n self.storeVlaue = dict()\r\n def run(self):\r\n self.playByTable()\r\n \r\n def getJs(self,xpath):\r\n return '''\r\n var evaluator = new XPathEvaluator(); \r\n var result = evaluator.evaluate(\"{0}\", document.documentElement, null,XPathResult.FIRST_ORDERED_NODE_TYPE, null); \r\n resultValue = result.singleNodeValue;\r\n resultValue.style.borderWidth = '2px';\r\n resultValue.style.borderStyle = 'outset';\r\n resultValue.style.borderColor = '#0099FF';\r\n '''.format(xpath.replace(\"\\\"\",\"'\"))\r\n \r\n def playByTable(self):\r\n try:\r\n if self.browserIndex==0:\r\n self.driver = webdriver.Chrome()\r\n elif self.browserIndex==1:\r\n# 'C:/Program Files/Mozilla Firefox/firefox.exe'\r\n self.driver = webdriver.Firefox()\r\n elif self.browserIndex==2:\r\n# \"C:/Program Files/Internet Explorer/iexplore.exe\"\r\n self.driver = webdriver.Ie()\r\n except Exception as e:\r\n print( e)\r\n raise Exception(u\"此浏览器不可用!\")\r\n return\r\n self.driver.maximize_window()\r\n time.sleep(2)\r\n rows = self.currentTable.rowCount()\r\n try:\r\n self.driver.get(self.startLink)\r\n self.driver.implicitly_wait(30)\r\n self.tempWindow = self.driver.window_handles\r\n for row_index in range(rows):\r\n print( row_index)\r\n fieldName=(self.currentTable.cellWidget(row_index,0).currentText())\r\n fieldXpath=(self.currentTable.item(row_index,1).text() if self.currentTable.item(row_index,1) else '')\r\n fieldValue=(self.currentTable.item(row_index,2).text() if self.currentTable.item(row_index,2) else '')\r\n fieldExtension=(self.currentTable.item(row_index,3).text() if self.currentTable.item(row_index,3) else '')\r\n fieldId=(self.currentTable.item(row_index,4).text() if self.currentTable.item(row_index,4) else '')\r\n fieldClass=(self.currentTable.item(row_index,5).text() if self.currentTable.item(row_index,5) else '')\r\n fieldInput=(self.currentTable.item(row_index,6).text() if self.currentTable.item(row_index,6) else '')\r\n fieldIframeType = (self.currentTable.item(row_index,7).text() if self.currentTable.item(row_index,7) else 
'')\r\n# print( fieldName,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput\r\n# print( type(fieldName)\r\n \r\n if fieldExtension==u\"-1\":\r\n self.driver.switch_to.default_content()\r\n elif fieldExtension:\r\n iframeStepOrder = fieldExtension.split(',')\r\n iframeStepType = fieldIframeType.split(',')\r\n try:\r\n self.driver.switch_to.default_content()\r\n except Exception as e:\r\n print(\"链接断开了\")\r\n self.driver.switch_to_window(self.driver.window_handles[0])\r\n self.tempWindow = self.driver.window_handles\r\n wait = WebDriverWait(self.driver, 30)\r\n for typeIndex in range(len(iframeStepType)):\r\n if iframeStepType[typeIndex]=='str':\r\n print( 'str类型')\r\n wait.until(EC.frame_to_be_available_and_switch_to_it(iframeStepOrder[typeIndex]))\r\n elif iframeStepType[typeIndex]=='int':\r\n wait.until(EC.frame_to_be_available_and_switch_to_it(int(iframeStepOrder[typeIndex])))\r\n print( 'int类型')\r\n# self.driver.switch_to_frame(ifm)\r\n \r\n if fieldName==u'弹框':\r\n try:\r\n alert = self.driver.switch_to.alert\r\n if fieldXpath==u'yes':\r\n alert.accept()\r\n else:\r\n alert.dismiss()\r\n continue\r\n except Exception as e:\r\n print( e)\r\n \r\n# self.checkAlert()\r\n \r\n if fieldName==u'变量':\r\n variablevalue = self.storeInputValue(fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput)\r\n self.storeVlaue[str(row_index+1)] = variablevalue\r\n time.sleep(2)\r\n continue\r\n \r\n if fieldName==u'输入':\r\n self.inputValue(fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput)\r\n time.sleep(2)\r\n continue\r\n \r\n if fieldName==u'悬停':\r\n self.hoverOption(fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput)\r\n time.sleep(2)\r\n continue\r\n \r\n if fieldName==u'单击':\r\n self.clickValue(fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput)\r\n time.sleep(2)\r\n \r\n try:\r\n for window in self.driver.window_handles:\r\n self.tempWindow.index(window)\r\n except Exception as e:\r\n try:\r\n self.driver.switch_to_window(window)\r\n self.tempWindow = self.driver.window_handles\r\n except Exception as e:\r\n try:\r\n self.driver.switch_to_window(self.driver.window_handles[0])\r\n self.tempWindow = self.driver.window_handles\r\n except Exception as e:\r\n print (u'还是错')\r\n continue\r\n \r\n if fieldName==u'元素':\r\n self.elementValue(fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput)\r\n time.sleep(2)\r\n continue\r\n \r\n if fieldName==u'右击':\r\n self.rightClickValue(fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput)\r\n time.sleep(2)\r\n continue\r\n \r\n if fieldName==u'双击':\r\n self.doubleClickValue(fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput)\r\n time.sleep(2)\r\n continue\r\n \r\n except Exception as e:\r\n print( e)\r\n self.driver.quit()\r\n finally:\r\n self.driver.quit()\r\n\r\n def inputValue(self,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput):\r\n inputs = self.gainElement(fieldXpath,fieldId,fieldClass)\r\n if fieldValue.startswith('@@@'):\r\n fieldValue = self.storeVlaue.get(fieldValue[3:],'')\r\n \r\n if inputs.tag_name == u'select':\r\n allOptions = inputs.find_elements_by_tag_name(\"option\")\r\n for option in allOptions:\r\n if option.text == fieldValue:\r\n option.click()\r\n else:\r\n try:\r\n# inputs.clear()\r\n# inputs.send_keys(fieldValue)\r\n inputs.send_keys(Keys.CONTROL + \"a\")\r\n inputs.send_keys(Keys.DELETE)\r\n inputs.send_keys(fieldValue)\r\n except Exception as e:\r\n print( e)\r\n \r\n def 
submitValue(self,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput):\r\n submitVal = self.gainElement(fieldXpath,fieldId,fieldClass)\r\n if submitVal.tag_name==u'input':\r\n parentForm = submitVal.find_element_by_xpath('..')\r\n while parentForm.tag_name != u'form':\r\n parentForm = parentForm.find_element_by_xpath('..')\r\n try:\r\n form_target = parentForm.get_attribute('target')\r\n except:\r\n form_target = None\r\n submitVal.submit()\r\n time.sleep(3)\r\n return form_target\r\n elif submitVal.tag_name==u'button' and submitVal.get_attribute('type')==u'button':\r\n submitVal.click()\r\n return\r\n submitVal.submit()\r\n time.sleep(3)\r\n \r\n def clickValue(self,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput):\r\n# print( self.driver.page_source\r\n alink = self.gainClickedElement(fieldXpath,fieldId,fieldClass)\r\n try:\r\n alink.click()\r\n except Exception as e:\r\n self.driver.execute_script(\"arguments[0].click();\", alink)\r\n print( e)\r\n \r\n\r\n def elementValue(self,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput):\r\n try:\r\n if fieldId:\r\n element = WebDriverWait(self.driver,30).until(EC.visibility_of_element_located((By.ID,fieldId)))\r\n elif fieldXpath:\r\n element = WebDriverWait(self.driver,30).until(EC.visibility_of_element_located((By.XPATH,fieldXpath)))\r\n# self.driver.execute_script(self.getJs(fieldXpath))\r\n except Exception as e:\r\n print( e)\r\n getMyValue = element.text\r\n print( getMyValue)\r\n if getMyValue==fieldValue:\r\n print( u'真是厉害,你找到我了!')\r\n time.sleep(3)\r\n \r\n def hoverOption(self,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput):\r\n hoverEle = self.gainElement(fieldXpath,fieldId,fieldClass)\r\n \r\n# self.driver.execute_script(self.getJs(fieldXpath))\r\n ActionChains(self.driver).move_to_element(hoverEle).perform()\r\n time.sleep(2)\r\n \r\n def checkAlert(self):\r\n try:\r\n alert = self.driver.switch_to.alert\r\n alert.accept()\r\n except Exception as e:\r\n print( e)\r\n \r\n def rightClickValue(self,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput):\r\n# print( self.driver.page_source\r\n right_alink = self.gainClickedElement(fieldXpath,fieldId,fieldClass)\r\n try:\r\n ActionChains(self.driver).context_click(right_alink).perform()\r\n time.sleep(2)\r\n except Exception as e:\r\n self.driver.execute_script(\"arguments[0].contextmenu();\", right_alink)\r\n print( e)\r\n \r\n \r\n def doubleClickValue(self,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput):\r\n# print( self.driver.page_source\r\n double_alink = self.gainClickedElement(fieldXpath,fieldId,fieldClass)\r\n try:\r\n ActionChains(self.driver).double_click(double_alink).perform()\r\n except Exception as e:\r\n self.driver.execute_script(\"arguments[0].dblclick();\", double_alink)\r\n print( e)\r\n \r\n def storeInputValue(self,fieldXpath,fieldValue,fieldExtension,fieldId,fieldClass,fieldInput):\r\n element_value = self.gainElement(fieldXpath,fieldId,fieldClass)\r\n try:\r\n return element_value.text\r\n except Exception as e:\r\n return ''\r\n \r\n def gainElement(self,fieldXpath,fieldId,fieldClass):\r\n if fieldId:\r\n indexCount = 0\r\n while True and indexCount<2:\r\n try:\r\n element_value = WebDriverWait(self.driver,30).until(EC.visibility_of_element_located((By.ID,fieldId)))\r\n break\r\n except Exception as e:\r\n indexCount += 1\r\n elif fieldXpath:\r\n indexCount = 0\r\n while True and indexCount<2:\r\n try:\r\n element_value = 
WebDriverWait(self.driver,30).until(EC.visibility_of_element_located((By.XPATH,fieldXpath)))\r\n break\r\n except Exception as e:\r\n indexCount += 1\r\n self.checkLoopNum(indexCount)\r\n elif fieldClass:\r\n indexCount = 0\r\n while True and indexCount<2:\r\n try:\r\n element_value = WebDriverWait(self.driver,30).until(EC.visibility_of_element_located((By.CSS_SELECTOR,fieldClass)))\r\n break\r\n except Exception as e:\r\n indexCount += 1\r\n self.checkLoopNum(indexCount)\r\n# self.driver.execute_script(self.getJs(fieldXpath))\r\n return element_value\r\n \r\n def gainClickedElement(self,fieldXpath,fieldId,fieldClass):\r\n print( u'单击')\r\n if fieldId:\r\n indexCount = 0\r\n while True and indexCount<2:\r\n try:\r\n clicked_element = WebDriverWait(self.driver,30).until(EC.element_to_be_clickable((By.ID,fieldId)))\r\n clicked_element = WebDriverWait(self.driver,30).until(EC.visibility_of_element_located((By.ID,fieldId)))\r\n break\r\n except Exception as e:\r\n indexCount += 1\r\n self.checkLoopNum(indexCount)\r\n elif fieldXpath:\r\n indexCount = 0\r\n while True and indexCount<2:\r\n try:\r\n clicked_element = WebDriverWait(self.driver,30).until(EC.element_to_be_clickable((By.XPATH,fieldXpath)))\r\n clicked_element = WebDriverWait(self.driver,30).until(EC.visibility_of_element_located((By.XPATH,fieldXpath)))\r\n break\r\n except Exception as e:\r\n indexCount += 1\r\n self.checkLoopNum(indexCount)\r\n elif fieldClass:\r\n indexCount = 0\r\n while True and indexCount<2:\r\n try:\r\n clicked_element = WebDriverWait(self.driver,30).until(EC.element_to_be_clickable((By.CSS_SELECTOR,fieldClass)))\r\n clicked_element = WebDriverWait(self.driver,30).until(EC.visibility_of_element_located((By.CSS_SELECTOR,fieldClass)))\r\n break\r\n except Exception as e:\r\n indexCount += 1\r\n self.checkLoopNum(indexCount)\r\n# self.driver.execute_script(self.getJs(fieldXpath))\r\n return clicked_element\r\n \r\n def checkLoopNum(self,indexCount):\r\n if indexCount>=2:\r\n raise Exception(u'找不到元素')\r\n","repo_name":"chenjinzhi1990/zinows","sub_path":"play/playBack.py","file_name":"playBack.py","file_ext":"py","file_size_in_byte":15690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"14060017401","text":"import pandas as pd\nimport numpy as np\nimport json \nimport os \nimport xml.etree.ElementTree as ET\nimport glob\nimport random\nimport string\nimport ast\n\ndef id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.SystemRandom().choices(chars, k=size))\n\n\ndef get_ufo_stalker_data():\n # --- aggregate UFO Stalker data --- #\n if os.path.exists('Data/ufo_stalker.csv'):\n df = pd.read_csv('Data/ufo_stalker.csv', compression='gzip') \n df['urls'] = [ast.literal_eval(u) for u in df['urls'].tolist()]\n return df\n \n if not os.path.exists('Data/Resource/ufo_stalker.json'):\n data = []\n for file in os.listdir('Data/Input/ufo-stalker-json'):\n with open('Data/Input/ufo-stalker-json/{}'.format(file), 'r') as f:\n data.extend(json.load(f)['content'])\n\n with open('Data/Resources/ufo_stalker.json', 'w') as f:\n for d in data: \n f.write(json.dumps(d) + '\\n') \n\n columns = [\n 'city',\n 'country',\n 'detailedDescription',\n 'duration',\n 'latitude',\n 'longitude',\n 'summary',\n 'shape',\n 'urls',\n 'id',\n 'submitted',\n 'occurred' \n ]\n\n valid_formats = {\n 'jpg', 'jpeg', 'png', 'bmp', 'gif', 'tiff'\n }\n\n mapping = {\n 'city' : 'municipality', \n 'detailedDescription' \n : 'description', \n 'id' : 
'event_id',\n 'submitted' : 'sighted_at',\n 'occurred' : 'reported_at'\n }\n\n df = (pd.read_json('Data/Resources/ufo_stalker.json', lines=True)\n .reindex(columns=columns)\n .rename(columns=mapping)\n .drop_duplicates(subset=['event_id'])\n )\n # remove invalid links\n df['urls'] = [\n [j for j in i if j.lower().rsplit('.', 1)[-1] in valid_formats] if i else [] for i in df.urls\n ]\n # country\n df['country'] = df['country'].replace({'US' : 'United States', 'UK' : 'United Kingdom', 'BR' : 'Brazil'})\n df.loc[df['country'].str.len().eq(2), 'country'] = np.nan \n # convert epoch time to datetime\n df['sighted_at'] = pd.to_datetime(df['sighted_at'], errors='coerce', unit='ms')\n df['reported_at'] = pd.to_datetime(df['reported_at'], errors='coerce', unit='ms')\n # fix shape column\n df['shape'] = df['shape'].str.strip().str.replace(r'(?:,\\s*)?N,\\s*A', '').str.replace('Rectagular', 'Rectangular')\n df.loc[df['shape'].str.len().eq(0), 'shape'] = np.nan\n # load caption and object files\n\n if os.path.exists('Data/Resources/cap.txt') and os.path.exists('Data/Resources/obj.txt'):\n cap = pd.read_csv('Data/Resources/cap.txt', usecols=['caption', 'event_id'])\n obj = pd.read_csv('Data/Resources/obj.txt', usecols=['label', 'event_id'])\n # merge event data with obeject and caption data\n df = df.merge(cap.merge(obj, on='event_id', how='outer'), on='event_id', how='left')\n # save to CSV\n df.to_csv('Data/ufo_stalker.csv', compression='gzip', index=False)\n \n return df\n\n\ndef get_british_ufo_data():\n if os.path.exists('Data/ufo_british.csv'):\n return pd.read_csv('Data/ufo_british.csv', compression='gzip') \n\n records = []\n for base_path in glob.glob('Data/Resources/ocr-output/DEFE-*'):\n root = os.path.join(base_path, 'outtxt-clean-tika')\n for file in os.listdir(root):\n try:\n r = ET.parse(os.path.join(root, file)).getroot()\n records.append([tag.text for tag in r[1]])\n except ET.ParseError:\n continue\n\n df = (pd.DataFrame(\n records, \n columns=[\n 'description', 'duration', 'location', 'reported_at', 'sighted_at', 'shape'\n ] \n ).apply(lambda x: x.str.title())\n .replace('\"\"', np.nan)\n )\n df = df.dropna(\n subset=df.columns.difference(['description']).tolist(), \n how='all'\n )\n df['description'] = df['description'].str.strip('Split By Pdf Splitter\\n').str.replace('\\n', ' ')\n df[['sighted_at', 'reported_at']] = df[['sighted_at', 'reported_at']].apply(pd.to_datetime, errors='coerce')\n \n m = df.sighted_at > df.reported_at\n df.loc[m, 'sighted_at'], df.loc[m, 'reported_at'] = df.loc[m, 'reported_at'], df.loc[m, 'sighted_at']\n\n df.to_csv('Data/ufo_british.csv', compression='gzip', index=False)\n\n return df\n\n\ndef get_ufo_awesome_data():\n return pd.read_csv('Data/ufo_awesome.csv', compression='gzip')\n\nif __name__ == '__main__':\n df_list = []\n for x, y in [\n ('UFO Stalker', get_ufo_stalker_data), \n ('UFO British', get_british_ufo_data), \n ('UFO Awesome', get_ufo_awesome_data)]:\n print(f'Loading {x} data...\\t', end='\\r')\n\n df_list.append(y())\n\n print(f'Loading {x} data...\\tDONE')\n\n df = pd.concat(df_list, ignore_index=True).sort_index(axis=1)\n\n print('Generating random IDs...\\t', end='\\r')\n df['event_id'] = [id_generator() if pd.isnull(x) else x for x in df['event_id'].tolist()]\n print('Generating random IDs...\\tDONE', end='\\r')\n\n df.to_csv('Data/ufo_awesome_v2.csv', compression='gzip', 
index=False)\n\n\n","repo_name":"Coldsp33d/UFO-Awesome-v2","sub_path":"aggregator.py","file_name":"aggregator.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"27"} +{"seq_id":"27020770775","text":"import datetime\n\nimport utils\n\n\ndef adjust_count(start_date, end_date):\n return (end_date - start_date).days // 7\n\n\ndef write_assists_gsheet(toggl, workspace_id, sheet_id):\n start_date = datetime.date(2023, 12, 27)\n yesterday_date = start_date + datetime.timedelta(days=20)\n data = utils.get_range_data(workspace_id, start_date, yesterday_date)\n report_data = toggl.getDetailedReport(data)\n\n if 'total_grand' not in report_data:\n print(\"Error: 'total_grand' key is missing in the credentials dictionary.\")\n return\n\n gc = utils.read_service_account()\n sheet = gc.open_by_key(sheet_id).sheet1\n\n existing_projects = sheet.col_values(1)[4:]\n\n new_projects = list(set([entry['project'] for entry in report_data['data']]) - set(existing_projects))\n\n start_row = len(existing_projects) + 5\n\n for project_name in new_projects:\n sheet.update_cell(start_row, 1, project_name)\n start_row += 1\n","repo_name":"artromone/TogglAnalysys","sub_path":"src/archive.py","file_name":"archive.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"23501653780","text":"\t\t\t\t\t###Remove Duplicates from Sorted List II###\n\n# Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.\n# For example,\n# Given 1->2->3->3->4->4->5, return 1->2->5.\n# Given 1->1->1->2->3, return 2->3.\n\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n dummy = curr = ListNode(0)\n dummy.next = head\n \n while head and head.next:\n if head.val == head.next.val:\n while head and head.next and head.val == head.next.val:\n head = head.next\n head = head.next\n curr.next = head\n else:\n curr.next = head\n curr = curr.next\n head = head.next\n return dummy.next \n","repo_name":"tusharkailash-zz/LeetCode-Problems-in-Python-","sub_path":"Linked List/remove_duplicates2.py","file_name":"remove_duplicates2.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"21852793964","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport logging\nimport collections\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.rnn import BasicLSTMCell\n\nfrom ops import gru_cell\nfrom ops import input_ops\nimport subprocess\nimport encoder_manager\nimport senteval\nimport configuration\n\ndef get_zipf_mat(vocab_size):\n m = np.arange(vocab_size)\n m = (np.log(m + 2) - np.log(m + 1)) / np.log(vocab_size + 1)\n return m\n\n\ndef random_orthonormal_initializer(shape, dtype=tf.float32,\n partition_info=None): # pylint: disable=unused-argument\n \"\"\"Variable initializer that produces a random orthonormal matrix.\"\"\"\n if len(shape) != 2 or shape[0] != shape[1]:\n raise ValueError(\"Expecting square shape, got %s\" % shape)\n _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)\n return u\n\ndef pad_up_to(t, max_in_dims, constant_values):\n s = tf.shape(t)\n paddings = [[0, m-s[i]] for (i,m) in enumerate(max_in_dims)]\n return tf.pad(t, paddings, 
'CONSTANT')\n\n\nclass SkipThoughtsModel(object):\n \"\"\"Skip-thoughts model.\"\"\"\n\n def __init__(self, config, mode=\"train\", input_reader=None):\n\n if mode not in [\"train\", \"eval\", \"encode\"]:\n raise ValueError(\"Unrecognized mode: %s\" % mode)\n\n self.config = config\n self.mode = mode\n self.reader_bookcorpus = input_reader if input_reader else tf.TFRecordReader()\n self.reader_visual = input_reader if input_reader else tf.TFRecordReader()\n self.reader_val = input_reader if input_reader else tf.TFRecordReader()\n self.reader_sim = input_reader if input_reader else tf.TFRecordReader()\n self.reader_sim_val = input_reader if input_reader else tf.TFRecordReader()\n self.reader_nn = input_reader if input_reader else tf.TFRecordReader()\n self.reader_nn_val = input_reader if input_reader else tf.TFRecordReader()\n\n # Initializer used for non-recurrent weights.\n self.uniform_initializer = tf.random_uniform_initializer(\n minval=-self.config.uniform_init_scale,\n maxval=self.config.uniform_init_scale)\n\n self.n_video_lstm_step = config.n_video_lstm_step\n self.n_caption_lstm_step = config.n_caption_lstm_step\n \n if self.config.textual == \"SK\":\n name = \"w_embedding/word_embedding\"\n else:\n name = \"word_embedding\"\n\n word_emb = tf.get_variable(\n name = name,\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer,\n trainable = True)\n\n self.word_emb = word_emb\n\n\n\n\n\n if self.config.textual != \"SK\":\n self.word_target = tf.get_variable(\n name = \"word_target\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer,\n trainable = True)\n\n if self.config.video_embedding != \"T\":\n with tf.variable_scope('mapping') as scope:\n self.W1 = tf.get_variable(name='W1',\n shape=[self.config.encoder_dim,self.config.hidden_dim],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n self.b1 = tf.get_variable(name='b1',\n shape=[self.config.hidden_dim],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n self.W2 = tf.get_variable(name='W2',\n shape=[self.config.hidden_dim,self.config.video_encoder_dim],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n self.b2 = tf.get_variable(name='b2',\n shape=[self.config.video_encoder_dim],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n \"\"\"\n with tf.variable_scope('mapping_image') as scope:\n self.W1_image = tf.get_variable(name='W1_image',\n shape=[self.config.video_encoder_dim,self.config.video_encoder_dim],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n self.b1_image = tf.get_variable(name='b1_image',\n shape=[self.config.video_encoder_dim],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n self.W2_image = tf.get_variable(name='W2_image',\n shape=[self.config.video_encoder_dim,self.config.video_encoder_dim],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n self.b2_image = tf.get_variable(name='b2_image',\n shape=[self.config.video_encoder_dim],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n \"\"\"\n\n def feed_foward_NN(self,x):\n if self.config.nb_layer == 1:\n return tf.add(tf.matmul(tf.nn.relu(tf.add(tf.matmul(x, self.W1), self.b1)), self.W2),self.b2)\n elif self.config.nb_layer == 
0:\n return tf.add(tf.matmul(x, self.W1), self.b1)\n\n def feed_foward_NN_image(self,x):\n if self.config.nb_layer == 1:\n return tf.add(tf.matmul(tf.nn.relu(tf.add(tf.matmul(x, self.W1_image), self.b1_image)), self.W2_image),self.b2_image)\n elif self.config.nb_layer == 0:\n return tf.add(tf.matmul(x, self.W1_image), self.b1_image)\n\n def _initialize_gru_cell(self, num_units, trainable):\n\n return gru_cell.LayerNormGRUCell(\n num_units,\n w_initializer=self.uniform_initializer,\n u_initializer=random_orthonormal_initializer,\n b_initializer=tf.constant_initializer(0.0),\n trainable = trainable)\n\n\n def extract_axis_1(self,data, ind):\n\n batch_range = tf.range(tf.shape(data)[0])\n indices = tf.stack([batch_range, ind], axis=1)\n res = tf.gather_nd(data, indices)\n return res\n\n\n def mask_ids(self, data, vocab):\n\n nb_s = len(data)\n ids = np.zeros([nb_s,70])\n mask = np.zeros([nb_s,70])\n\n for i in range(nb_s):\n\n sentence = [w.lower() for w in data[i].split(\" \")]\n\n if self.config.textual == \"SK\":\n id_s = [vocab.get(w, 1) for w in sentence]\n len_s = len(sentence)\n else:\n id_s = []\n len_s = 0\n for w in sentence:\n if w in vocab:\n len_s += 1\n id_s.append(vocab.get(w, -1))\n ids[i][:len_s] = id_s\n mask[i][:len_s] = np.ones([len_s])\n\n return ids,mask\n\n def pearson(self, proj = False):\n\n devA, devB, devS = [], [], []\n with open(self.config.SICK_trial, 'rb') as f:\n for line in f:\n text = line.strip().split('\\t')\n devA.append(text[1].decode('utf8'))\n devB.append(text[2].decode('utf8'))\n devS.append(text[3].decode('utf8'))\n devA, devB, devS = devA[1:], devB[1:], [float(s) for s in devS[1:]]\n\n # Load vocabulary\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(os.path.join(self.config.bin_dir+\"bookcorpus_data_\"+self.config.textual,\"vocab.txt\"), mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n if word not in vocab:\n vocab[word] = i\n\n s1_ids, s1_mask = self.mask_ids(devA, vocab)\n s2_ids, s2_mask = self.mask_ids(devB, vocab)\n human_scores = devS\n\n s1_emb = tf.nn.embedding_lookup(self.word_emb, tf.cast(s1_ids,tf.int32))\n s2_emb = tf.nn.embedding_lookup(self.word_emb, tf.cast(s2_ids,tf.int32))\n encoded_s1 = self.text_encoder(s1_emb, s1_mask, reuse = True)\n encoded_s2 = self.text_encoder(s2_emb, s2_mask, reuse = True)\n\n if proj:\n encoded_s1 = self.feed_foward_NN(encoded_s1)\n encoded_s2 = self.feed_foward_NN(encoded_s2)\n\n s1 = tf.nn.l2_normalize(encoded_s1, 1)\n s2 = tf.nn.l2_normalize(encoded_s2, 1)\n scores = tf.reduce_sum(tf.multiply(s1,s2),1)\n scores = scores - tf.reduce_mean(scores)\n human_scores = tf.cast(human_scores - tf.reduce_mean(human_scores),tf.float32)\n sigma_scores = tf.sqrt(tf.reduce_mean(tf.multiply(scores,scores)))\n sigma_human_scores = tf.sqrt(tf.reduce_mean(tf.multiply(human_scores,human_scores)))\n rho = tf.reduce_mean(tf.multiply(scores,human_scores))/(sigma_scores*sigma_human_scores)\n\n return rho\n\n def cosine(self,x,y):\n s1 = tf.nn.l2_normalize(x, 1)\n s2 = tf.nn.l2_normalize(y, 1)\n return tf.matmul(s1,tf.transpose(s2))\n\n def NN(self, x, k):\n distance = self.cosine(x, x)\n return tf.nn.top_k(distance, k=k).indices\n\n\n def mNNO(self, x, y, K, depth, batch):\n\n nnx = self.NN(x, k=K+1)[:,1:]\n nny = self.NN(y, k=K+1)[:,1:]\n return tf.reduce_sum(tf.multiply(tf.reduce_sum(tf.one_hot(nnx,depth=depth,axis=-1),1), tf.reduce_sum(tf.one_hot(nny,depth=depth,axis=-1),1)))/(batch*K)\n\n def compute_mNNO_dev(self, K):\n\n \n sent = []\n with 
open(os.path.join(os.environ[\"CODE_DIR\"],\"coco_val.en\"), \"r\") as f:\n for line in f:\n sent.append(line.replace(\"\\n\",\"\"))\n sent = sent[:1000]\n\n # Load vocabulary\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(os.path.join(self.config.bin_dir + \"bookcorpus_data_\"+self.config.textual,\"vocab.txt\"), mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n if word not in vocab:\n vocab[word] = i\n\n\n s_ids, s_mask = self.mask_ids(sent, vocab)\n s_emb = tf.nn.embedding_lookup(self.word_emb, tf.cast(s_ids,tf.int32))\n encoded_s = self.text_encoder(s_emb, s_mask, reuse = True)\n \n encoded_im = tf.convert_to_tensor(np.load(os.path.join(os.environ[\"CODE_DIR\"],\"coco_dev_ims.npy\")))\n encoded_im = encoded_im[:1000,:]\n \n #encoded_s = tf.convert_to_tensor(np.load(\"/net/sister/bordes/coco_skipthought_eval.npy\")[:1000,:])\n\n tf.summary.scalar(\"eval/mNNO_10_x_y\" , self.mNNO(encoded_s, encoded_im, 10, 1000, 1000))\n tf.summary.scalar(\"eval/mNNO_10_fx_y\" , self.mNNO(self.feed_foward_NN(encoded_s), encoded_im, 10, 1000, 1000))\n tf.summary.scalar(\"eval/mNNO_10_x_fx\" , self.mNNO(encoded_s, self.feed_foward_NN(encoded_s), 10, 1000, 1000))\n tf.summary.scalar(\"eval/mNNO_10_x_x\" , self.mNNO(encoded_s, encoded_s, 10, 1000, 1000))\n\n\n def build_bookcorpus_inputs(self):\n\n\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_ids = None\n decode_pre_ids = None\n decode_post_ids = None\n encode_mask = tf.placeholder(tf.int8, (None, None), name=\"encode_mask\")\n decode_pre_mask = None\n decode_post_mask = None\n else:\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader_bookcorpus,\n self.config.bookcorpus_pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.bookcorpus_capacity,\n num_reader_threads=self.config.num_input_reader_threads,\n dataset=\"bookcorpus\")\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(self.config.bookcorpus_batch_size)\n encode, decode_pre, decode_post = input_ops.parse_text_batch(\n serialized)\n\n encode_ids = pad_up_to(encode.ids, [self.config.bookcorpus_batch_size, self.config.len_sentence], 0)\n decode_pre_ids = pad_up_to(decode_pre.ids, [self.config.bookcorpus_batch_size, self.config.len_sentence], 0)\n decode_post_ids = pad_up_to(decode_post.ids, [self.config.bookcorpus_batch_size, self.config.len_sentence], 0)\n\n encode_mask = pad_up_to(encode.mask, [self.config.bookcorpus_batch_size, self.config.len_sentence], 0)\n decode_pre_mask = pad_up_to(decode_pre.mask, [self.config.bookcorpus_batch_size, self.config.len_sentence], 0)\n decode_post_mask = pad_up_to(decode_post.mask, [self.config.bookcorpus_batch_size, self.config.len_sentence], 0)\n\n self.encode_ids = encode_ids\n self.decode_pre_ids = decode_pre_ids\n self.decode_post_ids = decode_post_ids\n\n self.encode_mask = encode_mask\n self.decode_pre_mask = decode_pre_mask\n self.decode_post_mask = decode_post_mask\n\n if self.mode == \"encode\":\n self.encode_emb = tf.placeholder(tf.float32, (None, None, self.config.word_embedding_dim), \"encode_emb\")\n self.decode_pre_emb = None\n self.decode_post_emb = None\n else:\n self.decode_pre_emb = tf.nn.embedding_lookup(self.word_emb, self.decode_pre_ids)\n self.decode_post_emb = tf.nn.embedding_lookup(self.word_emb, self.decode_post_ids)\n self.encode_emb = tf.nn.embedding_lookup(self.word_emb, 
self.encode_ids)\n\n\n self.thought_vectors = tf.identity(self.text_encoder(self.encode_emb, self.encode_mask, reuse = (self.config.video_embedding != \"T\")), name = \"thought_vectors\")\n\n def text_encoder(self,encode_emb, encode_mask, reuse ):\n if self.config.textual == \"SK\":\n return self.SK_encoder(encode_emb, encode_mask, reuse = reuse)\n else:\n return self.FS_encoder(encode_emb, encode_mask)\n\n\n def build_MSVD_inputs(self, reader, pattern, batch_size, reuse):\n\n\n if self.mode == \"encode\":\n\n caption = None\n input_MSVD_vector = tf.placeholder(tf.float32, (None, None), name=\"video_vector\")\n video = tf.placeholder(tf.float32, (None, None, self.config.dim_image), name=\"video\")\n video_length = tf.placeholder(tf.float32, (None), name=\"video_length\")\n MSVD_mask = tf.placeholder(tf.int32, (None, None), name=\"caption_mask\")\n MSVD_ids = tf.placeholder(tf.int32, (None, None), name=\"caption_ids\")\n nb_sentences = tf.placeholder(tf.int32, name=\"nb_sentences\")\n MSVD_emb = tf.placeholder(tf.float32, (None, None, self.config.word_embedding_dim), \"caption_emb\")\n\n encoded_caption = self.text_encoder(MSVD_emb, MSVD_mask, reuse = reuse)\n encoded_visual = self.video_encoder(video, video_length, batch_size = batch_size)\n\n else:\n\n input_queue = input_ops.prefetch_input_data(\n reader,\n pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.visual_capacity,\n num_reader_threads=self.config.num_input_reader_threads,\n dataset=\"MSVD\")\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(batch_size)\n caption, video, video_length = input_ops.parse_video_example_batch(serialized)\n video = tf.reshape(video,[batch_size,-1,self.config.dim_image])\n\n MSVD_ids = caption.ids\n MSVD_mask = caption.mask\n\n caption_word_emb = tf.nn.embedding_lookup(self.word_emb, MSVD_ids)\n encoded_caption = self.text_encoder(caption_word_emb, MSVD_mask, reuse = reuse)\n\n caption_W2V_emb = tf.reduce_sum(tf.nn.embedding_lookup(self.w2v_matrix, MSVD_ids),1)\n encoded_visual = self.video_encoder(video, video_length, h_t = caption_W2V_emb, batch_size = batch_size)\n\n if self.config.video_embedding == \"TG\":\n return MSVD_ids, encoded_caption, video, video_length, encoded_visual\n else:\n return encoded_caption, encoded_visual\n\n\n\n\n def build_ranking_text_inputs(self, reader, pattern, batch_size, reuse):\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n reader,\n pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.visual_capacity,\n num_reader_threads=self.config.num_input_reader_threads,\n dataset=\"COCO\")\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(batch_size)\n\n s1, s2 = input_ops.parse_pair_example_batch(serialized)\n\n s1_ids = s1.ids\n s1_mask = s1.mask\n s1_word_emb = tf.nn.embedding_lookup(self.word_emb, s1_ids)\n s2_ids = s2.ids\n s2_mask = s2.mask\n s2_word_emb = tf.nn.embedding_lookup(self.word_emb, s2_ids)\n\n encoded_s1 = self.text_encoder(s1_word_emb, s1_mask, reuse = reuse)\n encoded_s2 = self.text_encoder(s2_word_emb, s2_mask, reuse = True)\n\n\n return encoded_s1, encoded_s2, s2_word_emb, s2_ids, s2_mask\n\n\n\n\n def build_COCO_inputs(self, reader, pattern, batch_size, reuse):\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n reader,\n pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.visual_capacity,\n num_reader_threads=self.config.num_input_reader_threads,\n dataset=\"COCO\")\n\n 
serialized = input_queue.dequeue_many(batch_size)\n\n if (self.config.video_embedding == \"O\"):\n caption, indices = input_ops.parse_HL_batch(serialized)\n indices = tf.cast(indices, tf.int32)\n input_COCO_vector = tf.one_hot(indices[:,0], 80)\n\n if (self.config.video_embedding == \"R\") or (self.config.video_embedding == \"IF\"):\n caption, input_COCO_vector = input_ops.parse_image_example_batch(serialized)\n\n encoded_visual = input_COCO_vector\n\n COCO_ids = caption.ids\n COCO_mask = caption.mask\n caption_word_emb = tf.nn.embedding_lookup(self.word_emb, COCO_ids)\n\n encoded_caption = self.text_encoder(caption_word_emb, COCO_mask, reuse = reuse)\n\n return encoded_caption, encoded_visual\n\n\n\n def SK_encoder(self, encode_emb, encode_mask, reuse):\n\n\n with tf.variable_scope(\"encoder\", reuse = reuse) as scope:\n length = tf.to_int32(tf.reduce_sum(encode_mask, 1), name=\"length\")\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units, trainable = True) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units, trainable = True) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=encode_emb,\n sequence_length=length,\n dtype=tf.float32,\n scope=scope)\n thought_vectors = tf.concat(states, 1)\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim, trainable = True)\n self.h_t, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=encode_emb,\n sequence_length=length,\n dtype=tf.float32,\n scope=scope)\n thought_vectors = state\n\n return thought_vectors\n\n def video_encoder(self, video, video_length, batch_size, h_t = None):\n\n\n video_rep = tf.matmul(tf.diag(tf.cast(1/video_length,tf.float32)),tf.reduce_sum(video,1))\n\n video_rep = tf.identity(video_rep, name=\"vision_vectors\")\n\n return video_rep\n\n\n def _build_decoder(self, name, embeddings, targets, mask, initial_state,\n reuse_logits, reuse_decoder, setting):\n\n with tf.variable_scope(name , reuse = reuse_decoder) as scope:\n # Decoder RNN.\n cell = self._initialize_gru_cell(self.config.encoder_dim, trainable = True)\n\n # Add a padding word at the start of each sentence (to correspond to the\n # prediction of the first word) and remove the last word.\n decoder_input = tf.pad(embeddings[:, :-1, :], [[0, 0], [1, 0], [0, 0]], name=\"input\")\n length = tf.reduce_sum(mask, 1, name=\"length\")\n decoder_output, _ = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=decoder_input,\n sequence_length=length,\n initial_state=initial_state,\n scope=scope)\n\n # Stack batch vertically.\n decoder_output = tf.reshape(decoder_output, [-1, self.config.encoder_dim])\n targets = tf.reshape(targets, [-1])\n weights = tf.to_float(tf.reshape(mask, [-1]))\n\n # Logits.\n with tf.variable_scope(\"logits\", reuse=reuse_logits) as scope:\n logits = tf.contrib.layers.fully_connected(\n inputs=decoder_output,\n num_outputs=self.config.vocab_size,\n activation_fn=None,\n weights_initializer=self.uniform_initializer,\n scope=scope,\n trainable = True)\n\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits)\n batch_loss = tf.reduce_mean(losses * weights)\n tf.losses.add_loss(batch_loss)\n\n return batch_loss\n\n\n\n def FS_encoder(self, encode_emb, encode_mask):\n\n\n if self.mode == \"encode\":\n word_dim = self.config.word_encode_dim\n else:\n 
word_dim = self.config.word_embedding_dim\n\n encode_emb = tf.reshape(encode_emb, [-1, word_dim])\n weights = tf.to_float(tf.reshape(encode_mask, [-1, 1]))\n encode_emb = encode_emb * weights\n seq_len = tf.shape(encode_mask)[-1]\n\n encode_emb = tf.reshape(encode_emb, tf.stack([-1, seq_len, word_dim]))\n thought_vectors = tf.reduce_sum(encode_emb,axis=1)\n return thought_vectors\n\n\n def _build_FS_decoder(self, targets, mask):\n\n multiples = tf.stack([1, self.config.len_sentence, 1])\n\n neg_thought = tf.reshape(tf.tile(tf.expand_dims(self.thought_vectors, 1), multiples),[-1,self.config.encoder_dim])\n negative_word, _, _ = tf.nn.fixed_unigram_candidate_sampler(\n true_classes=tf.ones([self.config.bookcorpus_batch_size*self.config.len_sentence,1],tf.int64),\n num_true=1,\n num_sampled=self.config.text_negative_number * self.config.bookcorpus_batch_size * self.config.len_sentence,\n unique=True,\n range_max=self.config.vocab_size,\n distortion=0.75,\n unigrams=list(get_zipf_mat(self.config.vocab_size)))\n\n negatives = tf.reshape(negative_word, (self.config.bookcorpus_batch_size*self.config.len_sentence, self.config.text_negative_number))\n negative_context = tf.nn.embedding_lookup(self.word_target,negatives)\n negative_sim = tf.einsum(\"ij,ikj->ik\",neg_thought,negative_context)\n tf.summary.histogram(\"debug/negative_sim\", negative_sim)\n\n pos_thought = tf.reshape(tf.tile(tf.expand_dims(self.thought_vectors, 1), multiples),[-1,self.config.encoder_dim])\n positive_context = tf.nn.embedding_lookup(self.word_target,targets)\n positive_context = tf.reshape(positive_context,[-1,self.config.encoder_dim])\n\n positive_sim = tf.matmul(tf.expand_dims(positive_context, 1), tf.expand_dims(pos_thought, 2))[:, 0, 0]\n tf.summary.histogram(\"debug/positive_sim\", positive_sim)\n\n sim_scores = tf.log(tf.sigmoid(positive_sim)) + tf.reduce_sum(tf.log(tf.sigmoid(-negative_sim)), axis=1)\n tf.summary.histogram(\"debug/sim_scores\", sim_scores)\n\n weights = tf.to_float(tf.reshape(mask, [-1]))\n batch_loss = - tf.reduce_mean(tf.multiply(sim_scores, weights))\n return batch_loss\n\n\n def textual_loss(self):\n\n if self.config.textual == \"SK\":\n return self.skip_thought_loss()\n elif (self.config.textual == \"FS\"):\n return self.fast_sent_loss()\n\n\n def kiela_loss(self,s1,s2_word_emb, s2_ids, s2_mask,batch_size, reuse):\n\n loss = self._build_decoder(\"decoder_pre\", s2_word_emb,\n s2_ids, s2_mask,\n s1, reuse_logits = reuse, reuse_decoder = reuse, setting = \"skip_thought\")\n\n return loss\n\n\n def skip_thought_loss(self):\n\n\n loss_pre = self._build_decoder(\"decoder_pre\", self.decode_pre_emb,\n self.decode_pre_ids, self.decode_pre_mask,\n self.thought_vectors, reuse_logits = False, reuse_decoder = False, setting = \"skip_thought\")\n loss_post = self._build_decoder(\"decoder_post\", self.decode_post_emb,\n self.decode_post_ids, self.decode_post_mask,\n self.thought_vectors, reuse_logits = True, reuse_decoder = False, setting = \"skip_thought\")\n loss = loss_pre + loss_post\n return loss\n\n\n def fast_sent_loss(self):\n\n loss_pre = self._build_FS_decoder(self.decode_pre_ids, self.decode_pre_mask)\n loss_post = self._build_FS_decoder(self.decode_post_ids, self.decode_post_mask)\n loss_auto = self._build_FS_decoder(self.encode_ids, self.encode_mask)\n loss = loss_pre + loss_post + loss_auto\n return loss\n\n def ranking_text_loss(self, s1, s2, batch_size, proj = False):\n\n if proj:\n s1 = self.feed_foward_NN(s1)\n s2 = self.feed_foward_NN(s2)\n\n return self.max_margin(s1, s2, batch_size)\n 
\"\"\"\n V = tf.nn.l2_normalize(tf.cast(s1, tf.float32), 1)\n S = tf.nn.l2_normalize(tf.cast(s2, tf.float32), 1)\n A = tf.matmul(S,tf.transpose(V))\n A = tf.maximum(0.,A - tf.multiply(tf.diag_part(A),tf.ones([batch_size,batch_size],tf.float32)) + self.config.gamma_sentences)\n loss = tf.reduce_mean(A)\n return loss\n \"\"\"\n\n def rho_vis(self, vision_vectors, caption_vectors, batch_size, proj = False):\n \n if proj:\n caption_vectors = self.feed_foward_NN(caption_vectors)\n #vision_vectors = self.feed_foward_NN_image(vision_vectors)\n \n S = tf.nn.l2_normalize(tf.cast(caption_vectors, tf.float32), 1)\n V = tf.nn.l2_normalize(tf.cast(vision_vectors, tf.float32), 1)\n #import ipdb\n #ipdb.set_trace()\n scores_V = tf.reshape(tf.matmul(V,tf.transpose(V)),[-1])\n scores_S = tf.reshape(tf.matmul(S,tf.transpose(S)),[-1])\n scores_V = scores_V - tf.reduce_mean(scores_V)\n scores_S = scores_S - tf.reduce_mean(scores_S)\n std_V = tf.sqrt(tf.reduce_mean(tf.multiply(scores_V,scores_V)))\n std_S = tf.sqrt(tf.reduce_mean(tf.multiply(scores_S,scores_S)))\n if self.config.distance == \"rho\":\n rho = - tf.reduce_mean(tf.multiply(scores_V,scores_S))/(std_V*std_S)\n elif self.config.distance == \"L2\":\n rho = tf.reduce_mean(tf.squared_difference(scores_V ,scores_S))\n elif self.config.distance == \"L1\":\n rho = tf.losses.absolute_difference(scores_V,scores_S)\n return rho\n\n\n def max_margin(self, a, b, batch_size, nb_neg=10, margin=0.5):\n\n a = tf.cast(a, tf.float32)\n b = tf.cast(b, tf.float32)\n Aa = a[:-nb_neg, :]\n Ab = b[:-nb_neg, :]\n Ba = a[-nb_neg:, :]\n Bb = b[-nb_neg:, :]\n\n normalized_Aa = tf.nn.l2_normalize(Aa,1)\n normalized_Ab = tf.nn.l2_normalize(Ab,1)\n normalized_Ba = tf.nn.l2_normalize(Ba,1)\n normalized_Bb = tf.nn.l2_normalize(Bb,1)\n #normalized_Aa = Aa \n #normalized_Ab = Ab\n #normalized_Ba = Ba\n #normalized_Bb = Bb\n\n #normalized_b = F.normalize(Ab)\n pos = tf.reduce_sum(normalized_Aa * normalized_Ab, 1) # batch - nb_neg\n neg1 = tf.transpose(tf.matmul(normalized_Ba, tf.transpose(normalized_Ab))) # (batch - nb_neg) X nb_neg\n neg2 = tf.matmul(normalized_Aa, tf.transpose(normalized_Bb)) \n\n M1 = margin - pos[:, None] + neg1 # (batch - nb_neg) X nb_neg\n relu_M1 = tf.maximum(tf.zeros([batch_size-nb_neg,nb_neg],tf.float32), M1)\n\n M2 = margin - pos[:, None] + neg2 # (batch - nb_neg) X nb_neg\n relu_M2 = tf.maximum(tf.zeros([batch_size-nb_neg,nb_neg],tf.float32), M2)\n\n return tf.reduce_mean(0.5 * tf.reduce_sum(relu_M1, 1) + 0.5 * tf.reduce_sum(relu_M2, 1))\n\n def visual_loss(self, vision_vectors, caption_vectors, batch_size):\n\n #vision_vectors = tf.matmul(vision_vectors,self.W)\n #caption_vectors = tf.matmul(caption_vectors,self.W)\n caption_vectors = self.feed_foward_NN(caption_vectors)\n if self.config.rank_distance == \"triplet\":\n return self.max_margin(caption_vectors, vision_vectors, batch_size)\n #V = tf.nn.l2_normalize(tf.cast(vision_vectors, tf.float32), 1)\n #S = tf.nn.l2_normalize(tf.cast(caption_vectors, tf.float32), 1)\n #A = tf.matmul(S,tf.transpose(V))\n #A = tf.maximum(0.,A - tf.multiply(tf.diag_part(A),tf.ones([batch_size,batch_size],tf.float32)) + self.config.gamma_sentences)\n #return tf.reduce_mean(A)\n elif self.config.rank_distance == \"L2\":\n return tf.reduce_mean(tf.squared_difference(caption_vectors, vision_vectors))\n\n\n\n def infer_loss(self, A, B, batch_size, dim, reuse):\n\n with tf.variable_scope(\"infer\", reuse = reuse) as scope:\n W1_infer = tf.get_variable(name='W1_infer',\n shape=[dim,500],\n 
initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n b1_infer = tf.get_variable(name='b1_infer',\n shape=[500],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n W2_infer = tf.get_variable(name='W2_infer',\n shape=[500,2],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n b2_infer = tf.get_variable(name='b2_infer',\n shape=[2],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n\n b_2 = int(batch_size/2)\n A1 = A[:b_2]\n A2 = A[b_2:]\n B1 = B[:b_2]\n B2 = B[b_2:]\n A1B1 = tf.concat([A1,B1],1)\n A1B2 = tf.concat([A1,B2],1)\n A2B1 = tf.concat([A2,B1],1)\n A2B2 = tf.concat([A2,B2],1)\n X = tf.concat([A1B1,A2B2,A1B2,A2B1],0)\n Y = tf.concat([tf.ones(batch_size),tf.zeros(batch_size)],0)\n pred_X = tf.add(tf.matmul(tf.nn.relu(tf.add(tf.matmul(X, W1_infer), b1_infer)), W2_infer), b2_infer)\n Y = tf.one_hot(tf.cast(Y,tf.int32),2)\n\n # Y is already one-hot with the same shape as pred_X, so no extra axis is needed\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = Y, logits= pred_X))\n\n def quick_loss(self, A, B, batch_size, dim, reuse):\n\n \"\"\"\n with tf.variable_scope(\"infer\", reuse = reuse) as scope:\n W1_infer = tf.get_variable(name='W1_infer',\n shape=[dim,500],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n b1_infer = tf.get_variable(name='b1_infer',\n shape=[500],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n W2_infer = tf.get_variable(name='W2_infer',\n shape=[500,2],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n b2_infer = tf.get_variable(name='b2_infer',\n shape=[2],\n initializer=tf.random_uniform_initializer(minval=-0.05,maxval=0.05),\n trainable = True)\n \"\"\"\n\n b_2 = int(batch_size/2)\n B = self.feed_foward_NN(B)\n A1 = A[:b_2]\n A2 = A[b_2:]\n B1 = B[:b_2]\n B2 = B[b_2:]\n A1B1 = tf.expand_dims(tf.reduce_sum(tf.multiply(A1,B1),1),1)\n A1B2 = tf.expand_dims(tf.reduce_sum(tf.multiply(A1,B2),1),1)\n A2B1 = tf.expand_dims(tf.reduce_sum(tf.multiply(A2,B1),1),1)\n A2B2 = tf.expand_dims(tf.reduce_sum(tf.multiply(A2,B2),1),1)\n pred_X = tf.concat([tf.concat([A1B1,A1B2],1),tf.concat([A2B2,A2B1],1),tf.concat([A1B2,A1B1],1),tf.concat([A2B1,A2B2],1)],0)\n Y = tf.concat([tf.ones(batch_size),tf.zeros(batch_size)],0)\n # one-hot the labels so they match the two-column logits, mirroring infer_loss\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = tf.one_hot(tf.cast(Y,tf.int32),2), logits= pred_X))\n\n \"\"\"\n A1A2B1 = tf.concat([A1,A2,B1],1)\n A2A1B2 = tf.concat([A2,A1,B2],1)\n A2A1B1 = tf.concat([A2,A1,B1],1)\n A1A2B2 = tf.concat([A1,A2,B2],1)\n X = tf.concat([A1A2B1, A2A1B2, A2A1B1, A1A2B2],0)\n Y = tf.concat([tf.ones(batch_size),tf.zeros(batch_size)],0)\n pred_X = tf.add(tf.matmul(tf.nn.relu(tf.add(tf.matmul(X, W1_infer), b1_infer)), W2_infer), b2_infer)\n Y = tf.one_hot(tf.cast(Y,tf.int32),2)\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = tf.expand_dims(Y,1), logits= pred_X))\n \"\"\"\n\n def build_multi_loss(self):\n\n multi_loss = 0.\n\n if self.config.lambda_rho > 0:\n multi_loss += self.config.lambda_rho * self.loss_rho\n if self.config.lambda_rho_f > 0:\n multi_loss += self.config.lambda_rho_f * self.loss_rho_f\n if self.config.lambda_text > 0:\n multi_loss += self.config.lambda_text * self.loss_textual\n if self.config.lambda_ranking > 0:\n multi_loss += self.config.lambda_ranking * self.loss_ranking #+ self.config.mu_reg*tf.norm(self.W2)*tf.norm(self.W2)\n if 
self.config.lambda_ranking_text > 0:\n multi_loss += self.config.lambda_ranking_text * self.loss_ranking_text\n if self.config.lambda_ranking_text_f > 0:\n multi_loss += self.config.lambda_ranking_text_f * self.loss_ranking_text_f\n if self.config.lambda_cap2cap > 0:\n multi_loss += self.config.lambda_cap2cap * self.loss_cap2cap\n if self.config.lambda_nn > 0:\n multi_loss += self.config.lambda_nn * self.loss_nn\n if self.config.lambda_infer > 0:\n multi_loss += self.config.lambda_infer * self.loss_infer\n if self.config.lambda_Iinfer > 0:\n multi_loss += self.config.lambda_Iinfer * self.loss_Iinfer\n if self.config.lambda_quick > 0:\n multi_loss += self.config.lambda_quick * self.loss_quick\n return multi_loss\n\n\n\n\n def build_global_step(self):\n\n with tf.variable_scope(tf.get_variable_scope()):\n self.global_step = tf.train.create_global_step()\n\n\n\n def build(self):\n\n if self.config.video_embedding in [\"VG-A\"]:\n caption, visual = self.build_MSVD_inputs(self.reader_visual, self.config.visual_pattern, self.config.visual_batch_size, reuse = False)\n caption_val, visual_val = self.build_MSVD_inputs(self.reader_val, self.config.validation_pattern, self.config.val_batch_size, reuse = True)\n elif self.config.video_embedding in [\"R\",\"IF\"]:\n caption, visual = self.build_COCO_inputs(self.reader_visual, self.config.visual_pattern, self.config.visual_batch_size, reuse = False)\n caption_val, visual_val = self.build_COCO_inputs(self.reader_val, self.config.validation_pattern, self.config.val_batch_size, reuse = True)\n \"\"\"\n if self.config.video_embedding == \"R\":\n visual = visual[:,:self.config.video_encoder_dim]\n visual_val = visual_val[:,:self.config.video_encoder_dim]\n \"\"\"\n if self.config.video_embedding != \"T\":\n s1, s2, s2_word_emb, s2_ids, s2_mask = self.build_ranking_text_inputs(self.reader_sim, self.config.sim_pattern, self.config.sim_batch_size, reuse = True)\n s1_val, s2_val, s2_word_emb_val, s2_ids_val, s2_mask_val = self.build_ranking_text_inputs(self.reader_sim_val, self.config.val_sim_pattern, self.config.sim_val_batch_size, reuse = True)\n\n nn_s1, nn_s2, nn_s2_word_emb, nn_s2_ids, nn_s2_mask = self.build_ranking_text_inputs(self.reader_nn, self.config.nn_pattern, self.config.sim_batch_size, reuse = True)\n nn_s1_val, nn_s2_val, nn_s2_word_emb_val, nn_s2_ids_val, nn_s2_mask_val = self.build_ranking_text_inputs(self.reader_nn_val, self.config.val_nn_pattern, self.config.sim_val_batch_size, reuse = True)\n\n self.build_bookcorpus_inputs()\n\n if self.mode != \"encode\":\n\n if self.config.lambda_ranking_text > 0:\n self.loss_ranking_text_val = self.ranking_text_loss(s1_val, s2_val, self.config.val_batch_size)\n self.loss_ranking_text = self.ranking_text_loss(s1, s2, self.config.visual_batch_size)\n tf.summary.scalar(\"losses_val/ranking_text_val\", self.loss_ranking_text_val)\n tf.summary.scalar(\"losses/ranking_text\", self.loss_ranking_text)\n if self.config.lambda_ranking_text_f > 0:\n self.loss_ranking_text_val_f = self.ranking_text_loss(s1_val, s2_val, self.config.val_batch_size, proj = True)\n self.loss_ranking_text_f = self.ranking_text_loss(s1, s2, self.config.visual_batch_size, proj = True)\n tf.summary.scalar(\"losses_val/ranking_text_f_val\", self.loss_ranking_text_val_f)\n tf.summary.scalar(\"losses/ranking_text_f\", self.loss_ranking_text_f)\n if self.config.lambda_ranking > 0:\n self.loss_ranking_val = self.visual_loss(visual_val, caption_val, self.config.val_batch_size)\n self.loss_ranking = self.visual_loss(visual, caption, 
self.config.visual_batch_size)\n tf.summary.scalar(\"losses_val/ranking_val\", self.loss_ranking_val)\n tf.summary.scalar(\"losses/ranking\", self.loss_ranking)\n if self.config.lambda_rho > 0:\n self.loss_rho = self.rho_vis(visual, caption, self.config.visual_batch_size)\n self.loss_rho_val = self.rho_vis(visual_val, caption_val, self.config.val_batch_size)\n tf.summary.scalar(\"losses/rho\", self.loss_rho)\n tf.summary.scalar(\"losses_val/rho_val\", self.loss_rho_val)\n if self.config.lambda_rho_f > 0:\n self.loss_rho_f = self.rho_vis(visual, caption, self.config.visual_batch_size, proj = True)\n self.loss_rho_f_val = self.rho_vis(visual_val, caption_val, self.config.val_batch_size, proj = True)\n tf.summary.scalar(\"losses/rho_f\", self.loss_rho_f)\n tf.summary.scalar(\"losses_val/rho_f_val\", self.loss_rho_f_val)\n if self.config.lambda_text > 0:\n self.loss_textual = self.textual_loss()\n tf.summary.scalar(\"losses/textual_\", self.loss_textual)\n if self.config.lambda_cap2cap > 0:\n self.loss_cap2cap = self.kiela_loss(s1,s2_word_emb, s2_ids, s2_mask,self.config.visual_batch_size, False)\n self.loss_cap2cap_val = self.kiela_loss(s1_val,s2_word_emb_val, s2_ids_val, s2_mask_val,self.config.val_batch_size, True)\n tf.summary.scalar(\"losses/cap2cap\", self.loss_cap2cap)\n tf.summary.scalar(\"losses_val/cap2cap_val\", self.loss_cap2cap_val)\n if self.config.lambda_nn > 0:\n self.loss_nn_val = self.ranking_text_loss(nn_s1_val, nn_s2_val, self.config.val_batch_size)\n self.loss_nn = self.ranking_text_loss(nn_s1, nn_s2, self.config.visual_batch_size)\n tf.summary.scalar(\"losses_val/nn_val\", self.loss_nn_val)\n tf.summary.scalar(\"losses/nn\", self.loss_nn)\n if self.config.lambda_infer > 0:\n self.loss_infer = self.infer_loss(s1, s2, self.config.visual_batch_size, 2*self.config.encoder_dim, False)\n self.loss_infer_val = self.infer_loss(s1_val, s2_val, self.config.val_batch_size, 2*self.config.encoder_dim, True)\n tf.summary.scalar(\"losses_val/infer_val\", self.loss_infer_val)\n tf.summary.scalar(\"losses/infer\", self.loss_infer)\n if self.config.lambda_Iinfer > 0:\n self.loss_Iinfer = self.infer_loss(caption, visual, self.config.visual_batch_size, self.config.video_encoder_dim+self.config.encoder_dim, False)\n self.loss_Iinfer_val = self.infer_loss(caption_val, visual_val, self.config.val_batch_size, self.config.video_encoder_dim+self.config.encoder_dim, True)\n tf.summary.scalar(\"losses_val/Iinfer_val\", self.loss_Iinfer_val)\n tf.summary.scalar(\"losses/Iinfer\", self.loss_Iinfer)\n if self.config.lambda_quick > 0:\n self.loss_quick = self.quick_loss(visual, caption, self.config.visual_batch_size, 2*self.config.video_encoder_dim+self.config.encoder_dim, False)\n self.loss_quick_val = self.quick_loss(visual_val, caption_val, self.config.val_batch_size, 2*self.config.video_encoder_dim+self.config.encoder_dim, True)\n tf.summary.scalar(\"losses_val/quick_val\", self.loss_quick_val)\n tf.summary.scalar(\"losses/quick\", self.loss_quick)\n\n self.total_loss = self.build_multi_loss()\n\n #rho_val = self.pearson()\n #tf.summary.scalar(\"eval/SICK_valid\" , rho_val)\n #rho_f_val = self.pearson(proj = True)\n #tf.summary.scalar(\"eval/SICK_f_val\" , rho_f_val)\n\n #self.compute_mNNO_dev(K=10)\n\n self.build_global_step()\n #self.f_thought_vectors = tf.identity(tf.concat([self.thought_vectors,tf.add(tf.matmul(tf.nn.relu(tf.add(tf.matmul(self.thought_vectors, self.W1), self.b1)), self.W2),self.b2)],1), name = \"f_thought_vectors\")\n #self.f_thought_vectors = 
tf.identity(tf.concat([self.thought_vectors,tf.add(tf.matmul(self.thought_vectors, self.W1), self.b1)],1), name = \"f_thought_vectors\")\n #self.f_thought_vectors = tf.identity(tf.add(tf.matmul(self.thought_vectors, self.W1), self.b1), name = \"f_thought_vectors\")\n #self.f_thought_vectors = tf.identity(self.thought_vectors, name = \"f_thought_vectors\")\n\n\n\n\n\n","repo_name":"pbordes/multimodal_sentence_rep","sub_path":"skip_thoughts/skip_thoughts_model.py","file_name":"skip_thoughts_model.py","file_ext":"py","file_size_in_byte":40065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31225940252","text":"'''You are given two positive integers A and B. Find the number of pairs\nof positive integers (X,Y) such that 1≤X≤A, 1≤Y≤B and X+Y is even.\n\nInput The first line of the input contains a single integer T denoting\nthe number of test cases. The description of T test cases follows. The\nfirst and only line of each test case contains two space-separated\nintegers A and B. Output For each test case, print a single line\ncontaining one integer ― the number of valid pairs.\n\nConstraints\n1≤T≤1,000\n1≤A,B≤10^9\nSubtasks\nSubtask #1 (10 points): A,B≤10\nSubtask #2 (10 points): A,B≤1,000\nSubtask #3 (80 points): original constraints\n\nExample Input\n4\n1 1\n2 3\n4 6\n8 9\nExample Output\n1\n3\n12\n36\n'''\n\nt=int(input())\na,b=[],[]\nfor y in range(t):\n x=input()\n s=x.split(' ')\n a.append(int(s[0]))\n b.append(int(s[1]))\n\ndef nodd(x): #no of odd numbers from 1 to x\n if x%2==0:\n return int(x/2)\n else:\n return int((x+1)/2)\n\ndef neven(x): #no of even numbers from 1 to x\n if x%2==0:\n return int(x/2)\n else:\n return int((x-1)/2)\n \nfor y in range(0,t):\n print((neven(a[y])*neven(b[y]))+(nodd(a[y])*nodd(b[y])))\n","repo_name":"vcos611/cp","sub_path":"codechef/2020 - December Long/Q2_even_pair_sum.py","file_name":"Q2_even_pair_sum.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"20375206555","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef homomorphic_filter(src, d0=1, rL=0.2, rH=3, c=4, h=2.0, l=0.5):\r\n\r\n #Convert the image to grayscale\r\n gray = src.copy()\r\n if len(src.shape) > 2:#more than 2 dimensions\r\n gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\r\n\r\n #Get the data dimensions\r\n rows = gray.shape[0]\r\n cols = gray.shape[1]\r\n\r\n #Fourier transform\r\n gray_fft = np.fft.fft2(gray) \r\n\r\n #Shift the zero-frequency component to the center of the spectrum\r\n gray_fftshift = np.fft.fftshift(gray_fft)\r\n\r\n #Create an all-zero array with the same shape as gray_fftshift\r\n dst_fftshift = np.zeros_like(gray_fftshift)\r\n\r\n #arange creates evenly spaced arrays; decompose f(x,y)=i(x,y)r(x,y)\r\n M, N = np.meshgrid(np.arange(-cols // 2, cols // 2), np.arange(-rows//2, rows//2))#note: // is integer division\r\n\r\n #Apply the frequency-domain enhancement function to the spectrum (i.e. to dst_fftshift)\r\n D = np.sqrt(M ** 2 + N ** 2)#** 2 squares each element\r\n Z = (rH - rL) * (1 - np.exp(-c * (D ** 2 / d0 ** 2))) + rL\r\n dst_fftshift = Z * gray_fftshift\r\n dst_fftshift = (h - l) * dst_fftshift + l\r\n\r\n #Inverse Fourier transform (back from the frequency domain)\r\n dst_ifftshift = np.fft.ifftshift(dst_fftshift)\r\n dst_ifft = np.fft.ifft2(dst_ifftshift)\r\n\r\n #Take the real part of each element\r\n dst = np.real(dst_ifft)\r\n\r\n #Clip dst to [0, 255]: values below 0 become 0, values above 255 become 255\r\n #uint8 is the standard dtype for storing images (RGB, grayscale, etc.), with range 0-255\r\n dst = np.uint8(np.clip(dst, 0, 255))\r\n return dst\r\n\r\nif __name__ == \"__main__\":\r\n img = cv2.imread('./image/HF.jpg',0)\r\n #Apply the homomorphic filter to the image\r\n img_new = homomorphic_filter(img)\r\n\r\n plt.subplot(211)\r\n plt.axis('off')\r\n plt.title('original image')\r\n
plt.imshow(img, cmap='gray')\r\n\r\n plt.subplot(212)\r\n plt.axis('off')\r\n plt.title('result image')\r\n plt.imshow(img_new, cmap='gray')\r\n\r\n plt.show()\r\n\r\n\r\n\r\n","repo_name":"kieran0625/Programm","sub_path":"Computer-vision/同态滤波增强暗部细节/同态滤波.py","file_name":"同态滤波.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"30011655526","text":"import datetime as dt\nimport json\nimport uuid\nfrom dataclasses import dataclass\n\nimport aioredis\n\nfrom app.domain.model.aggregate import Aggregate\nfrom app.domain.model.event import EventIn, Event\nfrom app.domain.services.util import DateTimeEncoder\nfrom app.infrastructure.config import app_config\nfrom app.infrastructure.log import logger\n\n\n@dataclass\nclass EventStore:\n redis: aioredis.Redis = None\n\n async def init_redis(self):\n if self.redis is None:\n logger.info('Init redis connection in Event Store')\n url = f'redis://{app_config.REDIS_HOST}:{app_config.REDIS_PORT}'\n self.redis = await aioredis.create_redis_pool(url)\n\n def __post_init__(self):\n logger.info('Init EventStore')\n\n @staticmethod\n def _generate_uuid():\n return uuid.uuid4().hex\n\n async def save(self, event: EventIn) -> Event:\n logger.info(f'Saving event in event store : {event}')\n ret = Event(\n created_at=dt.datetime.utcnow(),\n event_id=self._generate_uuid(),\n aggregate_id=event.aggregate_id if event.aggregate_id else self._generate_uuid(),\n aggregate_type=event.aggregate_type,\n event_type=event.event_type,\n event_data=event.event_data\n )\n logger.info('Saving it with hset')\n # Add event in a hash\n await self.redis.hset(ret.event_id, 'json', json.dumps(ret.dict(), cls=DateTimeEncoder))\n logger.info('Adding it to aggregate list')\n # Add event commit ID to the event store of the aggregate\n await self.redis.rpush(ret.aggregate_id, ret.event_id)\n return ret\n\n async def get_aggregate(self, aggregate_id: str) -> Aggregate:\n events = []\n nb_events = await self.redis.llen(aggregate_id)\n events_hash = await self.redis.lrange(aggregate_id, 0, nb_events - 1)\n for hash in events_hash:\n event = await self.redis.hget(hash, 'json')\n event = json.loads(event)\n events.append(event)\n return Aggregate(\n events=events,\n aggregate_id=events[0]['aggregate_id'] if len(events) else '',\n aggregate_type=events[0]['aggregate_type'] if len(events) else ''\n )\n\n async def close(self):\n logger.info('Closing redis Connection for Event Store')\n self.redis.close()\n await self.redis.wait_closed()\n","repo_name":"robinleruth/Microservices-architecture-test","sub_path":"event/app/domain/services/event_store/event_store.py","file_name":"event_store.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"5751104861","text":"import matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\n\nfrom scipy.optimize import brentq\nfrom scipy.interpolate import interp1d\n\ndef plotROC(true, predicted, ttl = None):\n fpr, tpr, threshold = roc_curve(true, predicted, pos_label = 1)\n roc_auc = auc(fpr, tpr)\n\n\n plt.figure()\n lw = 2\n plt.plot(fpr, tpr, color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title(ttl)\n plt.legend(loc=\"lower right\")\n 
plt.show()\n\n\ndef EER_THR(y_test, scores):\n fpr, tpr, threshold = roc_curve(y_test, scores, pos_label=1)\n eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)\n thresh = interp1d(fpr, threshold)(eer)\n\n return eer, thresh\n","repo_name":"anotherotherme/Expert_Analytical_System","sub_path":"src/evaluation_plot.py","file_name":"evaluation_plot.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"73153591753","text":"import time\nimport threading\nfrom .metrics import MetricProvider\nfrom twitter.common.quantity import Amount, Time\n\ntry:\n from twitter.common import log\nexcept ImportError:\n log = None\n\nclass MetricSampler(threading.Thread, MetricProvider):\n \"\"\"\n A thread that periodically samples from a MetricProvider and caches the\n samples.\n \"\"\"\n def __init__(self, metric_registry, period = Amount(1, Time.SECONDS)):\n self._registry = metric_registry\n self._period = period\n self._last_sample = self._registry.sample()\n self._lock = threading.Lock()\n self._shutdown = False\n threading.Thread.__init__(self)\n\n def sample(self):\n with self._lock:\n return self._last_sample\n\n def run(self):\n if log: log.debug('Starting metric sampler.')\n while not self._shutdown:\n time.sleep(self._period.as_(Time.SECONDS))\n new_sample = self._registry.sample()\n with self._lock:\n self._last_sample = new_sample\n\n def shutdown(self):\n if log: log.debug('Shutting down metric sampler.')\n self._shutdown = True\n","repo_name":"foursquare/commons-old","sub_path":"src/python/twitter/common/metrics/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"27936672919","text":"from typing import Any\nfrom pymath2 import Undefined, override, final, complete, FinishSet\nfrom .math_obj import MathObj\nfrom pymath2.builtins.operable import Operable\nfrom pymath2.builtins.derivable import Derivable\nif __debug__:\n\tfrom pymath2 import inloop\nclass ValuedObj(Operable, Derivable):\n\t@override(Operable, Derivable)\n\tasync def __ainit__(self, value: Any = Undefined, **kwargs) -> None:\n\t\tassert inloop()\n\t\tasync with FinishSet() as f:\n\t\t\tf.future(super().__ainit__(**kwargs))\n\t\t\tf.future(self._avalue_setter(value))\n\n\t@final\n\tdef value():\n\t\t@final\n\t\tdef fget(self) -> (Any, Undefined):\n\t\t\tassert not inloop()\n\t\t\treturn complete(self._avalue)\n\t\t@final\n\t\tdef fset(self, val: Any) -> None:\n\t\t\tassert not inloop()\n\t\t\treturn complete(self._avalue_setter(val))\n\t\t@final\n\t\tdef fdel(self) -> None:\n\t\t\tassert not inloop()\n\t\t\treturn complete(self._avalue_deleter())\n\t\treturn locals()\n\tvalue = property(**value())\n\n\t@property\n\tasync def _avalue(self):\n\t\tassert inloop()\n\t\treturn self._value\n\n\tasync def _avalue_setter(self, val: Any) -> None:\n\t\tassert inloop()\n\t\tawait self.__asetattr__('_value', val)\n\n\tasync def _avalue_deleter(self) -> None:\n\t\tassert inloop()\n\t\tawait self._avalue_setter(Undefined)\n\n\t@property\n\t@final\n\tdef hasvalue(self) -> bool:\n\t\tassert not inloop()\n\t\treturn complete(self._ahasvalue)\n\n\t@property\n\tasync def _ahasvalue(self) -> bool:\n\t\tassert inloop()\n\t\treturn await self._avalue is not Undefined #await\n\n\t@override(Derivable)\n\tasync def _aisconst(self, du: 'Variable'):\n\t\tassert inloop()\n\t\treturn not 
await self.__aeq__(du)\n\n\t@final\n\tdef __abs__(self) -> float:\n\t\tassert not inloop()\n\t\treturn complete(self.__aabs__())\n\tasync def __aabs__(self) -> float:\n\t\treturn abs(await self.__afloat__())\n\n\t@final\n\tdef __bool__(self) -> bool: \n\t\tassert not inloop()\n\t\treturn complete(self.__abool__())\n\tasync def __abool__(self) -> bool:\n\t\tassert inloop()\n\t\treturn bool(await self._avalue)\n\n\t@final\n\tdef __int__(self) -> int:\n\t\tassert not inloop()\n\t\treturn complete(self.__aint__())\n\tasync def __aint__(self) -> int:\n\t\tassert inloop()\n\t\treturn int(await self._avalue)\n\n\t@final\n\tdef __float__(self) -> float:\n\t\tassert not inloop()\n\t\treturn complete(self.__afloat__())\n\tasync def __afloat__(self) -> float:\n\t\tassert inloop()\n\t\treturn float(await self._avalue) \n\n\t@final\n\tdef __complex__(self) -> complex:\n\t\tassert not inloop()\n\t\treturn complete(self.__acomplex__())\n\tasync def __acomplex__(self) -> complex:\n\t\tassert inloop()\n\t\treturn complex(await self._avalue)\n\n\t@final\n\tdef __round__(self, digits: int) -> (int, float):\n\t\tassert not inloop()\n\t\treturn complete(self.__around__(digits))\n\n\tasync def __around__(self, digits: int) -> (int, float):\n\t\tassert inloop()\n\t\treturn round(await self.__afloat__(), int(digits))\n\n\t@override(MathObj)\n\tasync def __aeq__(self, other: Any) -> bool:\n\t\tassert inloop()\n\t\tother = self.scrub(other)\n\t\tif not hasattr(other, 'value'):\n\t\t\treturn False\n\t\tasync with FinishSet() as f:\n\t\t\tmyv = f.future(self._avalue)\n\t\t\totv = f.future(other._avalue)\n\t\tif myv.result() == otv.result() and myv.result() is not Undefined:\n\t\t\treturn True\n\t\treturn await super().__aeq__(other)\n\n\t@override(Operable, Derivable)\n\tasync def __astr__(self) -> str:\n\t\tassert inloop()\n\t\tasync with FinishSet() as f: \n\t\t\tvalue = f.future(self._avalue)\n\t\t\thasvalue = f.future(self._ahasvalue)\n\t\tif not hasvalue.result():\n\t\t\treturn self.generic_str('unvalued')\n\t\tstr_attr = await self.get_asyncattr(value.result(), '__str__')\n\t\tassert not isinstance(str_attr, MathObj)\n\t\treturn str(str_attr)\n\n\t@override(Operable, Derivable)\n\tasync def __arepr__(self) -> str:\n\t\tassert inloop()\n\t\tvalue = await self._avalue\n\t\tvalue_attr = await self.get_asyncattr(value)\n\t\tassert not isinstance(value_attr, MathObj)\n\t\tvalue_repr = repr(value_attr)\n\t\treturn '{}({})'.format(self.__class__.__name__, value_repr)\n\n\n\n\n\n\n\n\n\n","repo_name":"sampersand/pymath2","sub_path":"builtins/objs/valued_obj.py","file_name":"valued_obj.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"43117787260","text":"STOP_WORDS = [\n 'a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for', 'from', 'has', 'he',\n 'i', 'in', 'is', 'it', 'its', 'of', 'on', 'that', 'the', 'to', 'were',\n 'will', 'with'\n]\n\npunct = '''!()-[]{};:'\"\\, <>./?@#$%^&*_~'''\n\n\ndef print_word_freq(file):\n \"\"\"Read in `file` and print out the frequency of words in that file.\"\"\"\n file = open(file)\n words = file.read().lower()\n \n\n for ele in punct:\n words = words.replace(ele, \" \")\n \n\n wordslist = words.split()\n \n\n word_count =[]\n\n for w in wordslist:\n if w not in STOP_WORDS:\n word_count.append(w)\n\n \n \n word_frequency = {}\n\n for w in word_count:\n if w in word_frequency:\n word_frequency[w] += 1\n else:\n word_frequency[w] = 1\n \n\n sorted_txt= sorted(word_frequency, key=word_frequency.get, 
reverse=True)\n for num in sorted_txt:\n print(num, word_frequency[num], '*' * word_frequency[num])\n\n\nif __name__ == \"__main__\":\n import argparse\n from pathlib import Path\n\n parser = argparse.ArgumentParser(\n description='Get the word frequency in a text file.')\n parser.add_argument('file', help='file to read')\n args = parser.parse_args()\n\n file = Path(args.file)\n if file.is_file():\n print_word_freq(file)\n else:\n print(f\"{file} does not exist!\")\n exit(1)\n","repo_name":"momentum-team-6/word-frequency-Calixd7","sub_path":"word_frequency.py","file_name":"word_frequency.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"26025357795","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom transformers import BertModel\nfrom .inductive_lp import InductiveLinkPrediction\n\nclass WordEmbeddingsLP(InductiveLinkPrediction):\n \"\"\"Description encoder with pretrained embeddings, obtained from BERT or a\n specified tensor file.\n \"\"\"\n def __init__(self, rel_model, loss_fn, num_relations, regularizer,\n dim=None, encoder_name=None, embeddings=None):\n if not encoder_name and not embeddings:\n raise ValueError('Must provided one of encoder_name or embeddings')\n\n if encoder_name is not None:\n encoder = BertModel.from_pretrained(encoder_name)\n embeddings = encoder.embeddings.word_embeddings\n else:\n emb_tensor = torch.load(embeddings)\n num_embeddings, embedding_dim = emb_tensor.shape\n embeddings = nn.Embedding(num_embeddings, embedding_dim)\n embeddings.weight.data = emb_tensor\n\n if dim is None:\n dim = embeddings.embedding_dim\n\n super().__init__(dim, rel_model, loss_fn, num_relations, regularizer)\n\n self.embeddings = embeddings\n\n def _encode_entity(self, text_tok, text_mask):\n raise NotImplementedError\n\n def _encode_relation(self, text_tok, text_mask):\n raise NotImplementedError\n\n\nclass BOW(WordEmbeddingsLP):\n \"\"\"Bag-of-words (BOW) description encoder, with BERT low-level embeddings.\n \"\"\"\n def _encode_entity(self, text_tok, text_mask=None):\n if text_mask is None:\n text_mask = torch.ones_like(text_tok, dtype=torch.float)\n # Extract average of word embeddings\n embs = self.embeddings(text_tok)\n lengths = torch.sum(text_mask, dim=-1, keepdim=True)\n embs = torch.sum(text_mask.unsqueeze(dim=-1) * embs, dim=1)\n embs = embs / lengths\n\n return embs\n\n def _encode_relation(self, text_tok, text_mask=None):\n if text_mask is None:\n text_mask = torch.ones_like(text_tok, dtype=torch.float)\n # Extract average of word embeddings\n embs = self.embeddings(text_tok)\n lengths = torch.sum(text_mask, dim=-1, keepdim=True)\n embs = torch.sum(text_mask.unsqueeze(dim=-1) * embs, dim=1)\n embs = embs / lengths\n\n return embs\n\n\nclass DKRL(WordEmbeddingsLP):\n \"\"\"Description-Embodied Knowledge Representation Learning (DKRL) with CNN\n encoder, after\n Zuo, Yukun, et al. 
\"Representation learning of knowledge graphs with\n entity attributes and multimedia descriptions.\"\n \"\"\"\n\n def __init__(self, dim, rel_model, loss_fn, num_relations, regularizer,\n encoder_name=None, embeddings=None):\n super().__init__(rel_model, loss_fn, num_relations, regularizer,\n dim, encoder_name, embeddings)\n\n emb_dim = self.embeddings.embedding_dim\n self.conv1 = nn.Conv1d(emb_dim, self.dim, kernel_size=2)\n self.conv2 = nn.Conv1d(self.dim, self.dim, kernel_size=2)\n\n def _encode_entity(self, text_tok, text_mask):\n if text_mask is None:\n text_mask = torch.ones_like(text_tok, dtype=torch.float)\n # Extract word embeddings and mask padding\n embs = self.embeddings(text_tok) * text_mask.unsqueeze(dim=-1)\n\n # Reshape to (N, C, L)\n embs = embs.transpose(1, 2)\n text_mask = text_mask.unsqueeze(1)\n\n # Pass through CNN, adding padding for valid convolutions\n # and masking outputs due to padding\n embs = F.pad(embs, [0, 1])\n embs = self.conv1(embs)\n embs = embs * text_mask\n if embs.shape[2] >= 4:\n kernel_size = 4\n elif embs.shape[2] == 1:\n kernel_size = 1\n else:\n kernel_size = 2\n embs = F.max_pool1d(embs, kernel_size=kernel_size)\n text_mask = F.max_pool1d(text_mask, kernel_size=kernel_size)\n embs = torch.tanh(embs)\n embs = F.pad(embs, [0, 1])\n embs = self.conv2(embs)\n lengths = torch.sum(text_mask, dim=-1)\n embs = torch.sum(embs * text_mask, dim=-1) / lengths\n embs = torch.tanh(embs)\n\n return embs\n\n def _encode_relation(self, text_tok, text_mask):\n if text_mask is None:\n text_mask = torch.ones_like(text_tok, dtype=torch.float)\n # Extract word embeddings and mask padding\n embs = self.embeddings(text_tok) * text_mask.unsqueeze(dim=-1)\n\n # Reshape to (N, C, L)\n embs = embs.transpose(1, 2)\n text_mask = text_mask.unsqueeze(1)\n\n # Pass through CNN, adding padding for valid convolutions\n # and masking outputs due to padding\n embs = F.pad(embs, [0, 1])\n embs = self.conv1(embs)\n embs = embs * text_mask\n if embs.shape[2] >= 4:\n kernel_size = 4\n elif embs.shape[2] == 1:\n kernel_size = 1\n else:\n kernel_size = 2\n embs = F.max_pool1d(embs, kernel_size=kernel_size)\n text_mask = F.max_pool1d(text_mask, kernel_size=kernel_size)\n embs = torch.tanh(embs)\n embs = F.pad(embs, [0, 1])\n embs = self.conv2(embs)\n lengths = torch.sum(text_mask, dim=-1)\n embs = torch.sum(embs * text_mask, dim=-1) / lengths\n embs = torch.tanh(embs)\n\n return embs\n","repo_name":"GenetAsefa/RAILD","sub_path":"models/word_embeddings_lp.py","file_name":"word_embeddings_lp.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"27"} +{"seq_id":"42402720700","text":"#!/usr/bin/python\n# encoding: utf-8\n\nimport sha\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, aliased\n\nfrom sqlalchemy import Column, String, Integer\nfrom sqlalchemy import select\n\nfrom sqlalchemy import and_\n\n\nBase = declarative_base()\nengine = create_engine('sqlite:///data/a.db')\nsession = sessionmaker()\nsession.configure(bind=engine)\n\nclass ModelBase(object):\n \"\"\"# ModelBase: docstring\"\"\"\n\n\n @classmethod\n def in_(cls, attr_name, value_list):\n s = session()\n l = s.query(cls).filter(getattr(cls, attr_name).in_(value_list))\n return l\n\n @classmethod\n def and_in_(cls, attr_name_1, value_list_1, attr_name_2, value_list_2):\n s = session()\n l = s.query(cls).filter(and_(getattr(cls, 
attr_name_1).in_(value_list_1), getattr(cls, attr_name_2).in_(value_list_2)))\n return l\n\n\n @classmethod\n def delete(cls, id):\n s = session()\n n = s.query(cls).get(id)\n s.delete(n)\n s.commit()\n\n @classmethod\n def new(cls, timu, peitu, daan, xueqi, nandu):\n \"\"\"# new: docstring \"\"\"\n n = cls()\n n.timu = timu\n n.peitu = peitu if peitu else ''\n n.daan = daan\n n.xueqi = xueqi\n n.nandu = nandu\n\n n.id = sha.new(''.join([\n n.timu.encode('utf-8'),\n n.peitu.encode('utf-8'),\n n.daan.encode('utf-8'),\n str(n.xueqi),\n str(n.nandu),\n ])).hexdigest()\n s = session()\n s.add(n)\n s.commit()\n return n\n\n @classmethod\n def list_all(cls, ):\n s = session()\n all = s.execute(select([\n cls.id,\n cls.timu,\n cls.peitu,\n cls.daan,\n cls.xueqi,\n cls.nandu,\n ]))\n return all\n\n\nclass Xuanzeti(Base, ModelBase):\n \"\"\"# Xuanzeti: multiple-choice question table\"\"\"\n __tablename__ = \"xuanzeti\"\n id = Column(String, primary_key=True)\n timu = Column(String)\n peitu = Column(String)\n daan = Column(String)\n xueqi = Column(String)\n nandu = Column(Integer)\n\n\nclass Tiankongti(Base, ModelBase):\n \"\"\"# Tiankongti: fill-in-the-blank question table\"\"\"\n __tablename__ = \"tiankongti\"\n id = Column(String, primary_key=True)\n timu = Column(String)\n peitu = Column(String)\n daan = Column(String)\n xueqi = Column(String)\n nandu = Column(Integer)\n\n\n\n\nBase.metadata.create_all(engine)\n","repo_name":"laonger/biology_exam_paper_backend","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"13347377214","text":"class Solution(object):\n def canCompleteCircuit(self, gas, cost):\n \"\"\"\n :type gas: List[int]\n :type cost: List[int]\n :rtype: int\n \"\"\"\n diffs = [gas[i]-cost[i] for i in range(len(gas))]\n least = 0\n min_index = 0\n sum = 0\n for i, diff in enumerate(diffs):\n if sum -*-\n__author__ = 'xiao'\n__date__ = '2019/4/17 1:29 AM'\n\nfrom pyecharts import (\n Pie, Bar, Line, WordCloud, Map, Geo\n)\nfrom pyecharts.charts import (\n bar, map, pie, wordcloud, line, geo\n)\nfrom app.charts.constants import WIDTH, HEIGHT\nfrom pyecharts import Style\nfrom typing import List, Tuple\n\n\ndef bar_ssdd(attr_v1_v2: List[Tuple[str, int, int]], chart_name: str, v1_name: str, v2_name: str) -> bar.Bar:\n \"\"\"\n Create a stacked bar chart\n :param attr_v1_v2: list of the data to display\n :param chart_name: chart title\n :param v1_name: name of data series one\n :param v2_name: name of data series two\n \"\"\"\n style = Style(\n width=WIDTH, height=HEIGHT\n )\n\n attr = [a[0] for a in attr_v1_v2]\n v1 = [v[1] for v in attr_v1_v2]\n v2 = [v[2] for v in attr_v1_v2]\n chart = Bar(chart_name, **style.init_style)\n chart.add(v1_name, attr, v1, is_stack=True)\n chart.add(v2_name, attr, v2, is_stack=True, is_more_utils=True)\n\n return chart\n\n\ndef bar_sssf(attr_v1: List[Tuple[str, int]], chart_name: str, v1_name: str) -> bar.Bar:\n \"\"\"\n Create a bar chart with data zoom\n :param attr_v1: main data\n :param chart_name: chart title\n :param v1_name: name of data series one\n \"\"\"\n style = Style(\n width=WIDTH, height=HEIGHT\n )\n attr = [a[0] for a in attr_v1]\n v1 = [v[1] for v in attr_v1]\n chart = Bar(chart_name, **style.init_style)\n chart.add(v1_name, attr, v1, is_datazoom_show=True, datazoom_type='both',\n datazoom_range=[0, 20])\n\n return chart\n\n\ndef line_ssdd(attr_v1_v2: List[Tuple[str, int, int]], chart_name: str, v1_name: str, v2_name: str) -> line.Line:\n \"\"\"\n Create a stacked line chart\n :param attr_v1_v2: list of the data to display\n :param chart_name: chart title\n :param v1_name: name of data series one\n :param v2_name: name of data series two\n \"\"\"\n 
style = Style(\n width=WIDTH, height=HEIGHT\n )\n attr = [a[0] for a in attr_v1_v2]\n v1 = [v[1] for v in attr_v1_v2]\n v2 = [v[2] for v in attr_v1_v2]\n # chart = Line(chart_name, **style.init_style)\n # chart.add(v1_name, attr, v1, is_label_show=True, is_smooth=True)\n # chart.add(v2_name, attr, v2, is_label_show=True, is_smooth=True)\n\n chart = Line(chart_name, **style.init_style)\n chart.add(v1_name, attr, v1, mark_point=[\"average\"])\n chart.add(v2_name, attr, v2, is_smooth=True, mark_line=[\"max\", \"average\"], is_more_utils=True)\n\n return chart\n\n\ndef geo_qgtd(attr_v1: List[Tuple[str, int]], chart_name: str, v1_name: str) -> geo.Geo:\n \"\"\"\n Create a nationwide (China) geo chart\n :param attr_v1: main data\n :param chart_name: chart title\n :param v1_name: name of data series one\n \"\"\"\n style = Style(\n title_color=\"#fff\",\n title_pos=\"center\",\n width=900,\n height=600,\n background_color='#404a59'\n )\n # chart = Map(chart_name, **style.init_style)\n # chart.add(v1_name, attr, value, maptype='china', is_visualmap=True,\n # visual_text_color='#000')\n chart = Geo(chart_name, \"\", **style.init_style)\n attr, value = chart.cast(attr_v1)\n chart.add(v1_name, attr, value, visual_range=[0, 70000], visual_text_color=\"#fff\", is_legend_show=False,\n symbol_size=15, is_visualmap=True, tooltip_formatter='{b}', label_emphasis_textsize=15,\n label_emphasis_pos='right', type='effectScatter')\n\n return chart\n\n\ndef map_qgtd(attr_v1: List[Tuple[str, int]], chart_name: str, v1_name: str) -> map.Map:\n style = Style(\n width=WIDTH, height=HEIGHT\n )\n chart = Map(chart_name, **style.init_style)\n attr, value = chart.cast(attr_v1)\n chart.add(v1_name, attr, value, maptype='china', is_visualmap=True,\n visual_text_color='#000')\n\n return chart\n\n\ndef pie_yht(attr_v1: List[Tuple[str, int]], chart_name: str, v1_name: str) -> pie.Pie:\n \"\"\"\n Create a pie chart (ring/donut style)\n :param attr_v1: main data\n :param chart_name: chart title\n :param v1_name: name of data series one\n \"\"\"\n style = Style(\n width=WIDTH, height=HEIGHT\n )\n attr = [a[0] for a in attr_v1]\n v1 = [v[1] for v in attr_v1]\n chart = Pie(chart_name, title_pos='center', **style.init_style)\n chart.add(v1_name, attr, v1, radius=[40, 75], label_text_color=None,\n is_label_show=True, legend_orient='vertical', legend_pos='left')\n\n return chart\n\n\ndef wordcloud_zdy(attr_v1: List[Tuple[str, int]], chart_name: str, v1_name: str) -> wordcloud.WordCloud:\n \"\"\"\n Create a word cloud chart\n :param attr_v1: main data\n :param chart_name: chart title\n :param v1_name: name of data series one\n \"\"\"\n style = Style(\n width=1100, height=600\n )\n name = [n[0] for n in attr_v1]\n value = [v[1] for v in attr_v1]\n chart = WordCloud(chart_name, **style.init_style)\n chart.add(v1_name, name, value, word_size_range=[30, 100], shape='diamond')\n\n return chart\n","repo_name":"pipoted/bs","sub_path":"da_flask/app/tool/creat_chart.py","file_name":"creat_chart.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"12272404796","text":"from src.game_state import GameState\nfrom src.units_factory import AllyFactory, EnemyFactory\nfrom src.utils import Team\n\n\nclass Test:\n\n def test_singleton(self):\n a = GameState()\n b = GameState()\n assert id(a) == id(b)\n a.update_coins(Team.ALLY.value, 5)\n assert b.get_ally_coins() == 5\n a.update_coins(Team.ALLY.value, -5)\n\n def test_creating_units(self):\n unit = AllyFactory().get_big_fighter()\n assert GameState().get_ally_warriors_count() == 1\n unit.death()\n assert GameState().get_ally_warriors_count() == 0\n\n def 
test_getting_reward(self):\n GameState().clear()\n unit = AllyFactory().get_big_fighter()\n unit.death()\n assert GameState().get_enemy_coins() == unit.reward\n\n def test_getting_reward_twice(self):\n GameState().clear()\n unit = AllyFactory().get_big_fighter()\n unit.death()\n unit.death()\n assert GameState().get_enemy_coins() == unit.reward\n\n def test_upgrade(self):\n GameState().clear()\n unit = AllyFactory().get_big_fighter()\n print(unit)\n old_hp = unit.hp\n unit.upgrade_attribute('hp', 12)\n assert unit.hp == old_hp + 12\n\n","repo_name":"AlexSaplin/tech_prog_game","sub_path":"tests/test_units.py","file_name":"test_units.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"27978402785","text":"from llama_index import SimpleDirectoryReader,GPTListIndex,GPTVectorStoreIndex,LLMPredictor,PromptHelper,ServiceContext,StorageContext,load_index_from_storage\nfrom llama_index.langchain_helpers.agents import IndexToolConfig, LlamaIndexTool, LlamaToolkit, create_llama_chat_agent\nfrom langchain import OpenAI\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.agents import Tool\nfrom langchain.chains.conversation.memory import ConversationBufferMemory\nimport sys, requests, pandas as pd\nimport os, io\nimport openai\nfrom dotenv import load_dotenv\nopenai.api_key = \"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\"\nos.environ[\"OPENAI_API_KEY\"] = openai.api_key\ndef create_index(path):\n max_input = 4096\n tokens = 200\n chunk_size = 600 #for LLM, we need to define chunk size\n max_chunk_overlap = 20\n \n #define prompt\n promptHelper = PromptHelper(max_input,tokens,max_chunk_overlap,chunk_size_limit=chunk_size)\n \n #define LLM — there could be many models we can use, but in this example, let’s go with OpenAI model\n llmPredictor = LLMPredictor(llm=OpenAI(temperature=0, model_name=\"text-ada-001\",max_tokens=tokens))\n \n #load data — it will take all the .txtx files, if there are more than 1\n docs = SimpleDirectoryReader(path).load_data()\n\n #create vector index\n service_context = ServiceContext.from_defaults(llm_predictor=llmPredictor,prompt_helper=promptHelper)\n \n vectorIndex = GPTVectorStoreIndex.from_documents(documents=docs,service_context=service_context)\n vectorIndex.storage_context.persist(persist_dir = 'ChatGPT')\n\n\nindex_configs= []\nstorage_context = StorageContext.from_defaults(persist_dir = 'D:\\Code\\AAA_github\\DATN_Web\\BotGPT\\ChatGPT_0307_4')\nindex = load_index_from_storage(storage_context)\nquery_engine = index.as_query_engine()\ntool_config = IndexToolConfig(\n query_engine = query_engine,\n name=f\"Chatbot\",\n description=f\"trả lời câu hỏi liên quan về sách, dữ liệu sách như giá tiền, id, thể loại, mô tả, tác giả.\",\n tool_kwargs={\"return_direct\": True}\n)\nindex_configs.append(tool_config)\ntool = LlamaIndexTool.from_tool_config(tool_config)\ntoolkit = LlamaToolkit(index_configs = index_configs)\nmemory = ConversationBufferMemory(memory_key=\"chat_history\")\n# llm=OpenAI(model_name =\"text-davinci-003\" , temperature=0)\nllm=ChatOpenAI(temperature=0)\nagent_chain=create_llama_chat_agent(\n toolkit,\n llm,\n memory=memory,\n verbose=True\n)\n\ndef answerMe(question):\n storage_context = StorageContext.from_defaults(persist_dir = 'D:\\Code\\AAA_github\\DATN_Web\\BotGPT\\ChatGPT_2707_4')\n index = load_index_from_storage(storage_context)\n query_engine = index.as_query_engine()\n response = query_engine.query(question)\n return 
response\n # return agent_chain.run(input=question)\n\ndef chatGPTwithMemory(question):\n return agent_chain.run(input=question)\n\ndef build_storage(data_dir):\n documents = SimpleDirectoryReader(data_dir).load_data()\n\n index = GPTVectorStoreIndex.from_documents(documents)\n index.storage_context.persist()\n return index\n\ndef read_from_storage(persist_dir):\n storage_context = StorageContext.from_defaults(persist_dir=persist_dir)\n return load_index_from_storage(storage_context)\n\ndef adding_data_to_GPT():\n load_dotenv()\n persist_dir = \"./storage\"\n data_dir = \"./data\"\n index = None\n if os.path.exists(persist_dir):\n index = read_from_storage(persist_dir)\n else:\n index = build_storage(data_dir)\n query_engine = index.as_query_engine()\n\n response = query_engine.query(\n \"When did Ran Bar-Zik create his first pull request in CyberArk?\"\n )\n print(response)\n\n# response = answerMe(\"có quyển Trên đường băng không?, nếu có chỉ trả lời số id của quyển sách, nếu không chỉ trả lời -1\")\n# print(response)\n# response = answerMe(\"có quyển sách nào tên là người thương đã cũ không?, nếu có chỉ trả lời id (dạng số) của quyển sách, nếu không chỉ trả lời -1\")\n# print(response)\n# print(agent_chain.run(\"quyển sách đắt nhất\"))\n# response = answerMe(\"giới thiệu quyển đấy\")\n# print(response.output)","repo_name":"ghuioio/DATN_Web","sub_path":"BotGPT/RASA/actions/custom_model.py","file_name":"custom_model.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"33947450919","text":"'''\r\n#1019 홀수와 짝수의 개수\r\nn = int(input())\r\nnum = list()\r\necnt = 0\r\nocnt = 0\r\n\r\nwhile n > 0:\r\n x = int(input())\r\n num.append(x)\r\n if x % 2 == 0:\r\n ecnt += 1\r\n else:\r\n ocnt += 1\r\n n -= 1\r\n\r\nprint(ecnt)\r\nprint(ocnt)\r\n\r\n#1020 짝수와 홀수\r\nx = int(input())\r\n\r\ne = list()\r\nes = 0\r\n\r\no = list()\r\nos = 0\r\n\r\nwhile x > 0:\r\n n = int(input())\r\n if n % 2 == 0:\r\n e.append(n)\r\n else:\r\n o.append(n)\r\n x -= 1\r\n\r\nes = sum(e)\r\nos = sum(o)\r\n\r\nprint(es, os) \r\n \r\n#1030 Graphing\r\nn = int(input())\r\nf = list()\r\nfcnt = 0\r\n\r\nwhile n > 0:\r\n x = int(input())\r\n f.append(x)\r\n fcnt += 1\r\n n -= 1\r\n\r\nfor i in range(fcnt):\r\n print(f[i], '*' * f[i])\r\n'''\r\n\r\n#1026 Black\r\na = list(map(int, input().split()))\r\n\r\nb = [1, 1, 2, 2, 2, 8]\r\nfor i in range(6):\r\n print(b[i] - a[i], end = ' ')\r\n\r\n#1094 파티\r\nl, p = map(int, input().split())\r\nt = l * p\r\n\r\nn = list(map(int, input().split()))\r\n\r\nfor i in range(5):\r\n print(n[i] - t, end = ' ')\r\n\r\n#1139 숫자 슬라이스\r\na , b, c = map(int, input().split())\r\nx = a * b * c\r\n\r\ncnt = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\nwhile x:\r\n cnt[x % 10] += 1 \r\n x //= 10\r\n \r\nfor i in range(10):\r\n print(cnt[i])","repo_name":"Caesar0984/magicalcoding1_Python","sub_path":"28. 리스트.py","file_name":"28. 
리스트.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"22797551478","text":"def calcular_total_consumo(cidade):\n total_residencial = 0\n total_comercial = 0\n total_industrial = 0\n\n for habitante in cidade:\n consumo = habitante['consumo']\n codigo = habitante['codigo']\n\n if codigo == 1:\n total_residencial += consumo\n elif codigo == 2:\n total_comercial += consumo\n elif codigo == 3:\n total_industrial += consumo\n\n return total_residencial, total_comercial, total_industrial\n\n\ndef main():\n num_habitantes = int(input(\"Digite o número de habitantes da cidade: \"))\n valor_kwh = float(input(\"Digite o valor do kWh: \"))\n\n cidade = []\n\n for i in range(num_habitantes):\n consumo = float(input(f\"Digite o consumo do habitante {i + 1}: \"))\n codigo = int(input(f\"Digite o código do consumidor (1-Residencial, 2-Comercial, 3-Industrial) do habitante {i + 1}: \"))\n\n habitante = {'consumo': consumo, 'codigo': codigo}\n cidade.append(habitante)\n\n consumos = [habitante['consumo'] for habitante in cidade]\n maior_consumo = max(consumos)\n menor_consumo = min(consumos)\n media_consumo = sum(consumos) / num_habitantes\n\n total_residencial, total_comercial, total_industrial = calcular_total_consumo(cidade)\n\n print(\"=== Resultados ===\")\n print(\"Maior consumo:\", maior_consumo)\n print(\"Menor consumo:\", menor_consumo)\n print(\"Média de consumo:\", media_consumo)\n print(\"Total de consumo residencial:\", total_residencial)\n print(\"Total de consumo comercial:\", total_comercial)\n print(\"Total de consumo industrial:\", total_industrial)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ANDREOGP33/Programacao-em-Python-do-basico-ao-avancado","sub_path":"seção 06/s06_ex59.py","file_name":"s06_ex59.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18445072374","text":"\"\"\"\nLook at properties of an effective parent independent process.\n\nSample a GTR rate matrix.\nThen construct a constrained parent independent process rate matrix\ndefined by properties of the GTR rate matrix.\nCheck that the mutual information of the parent independent process\nat time t is a lower bound of the mutual information of the\nsampled process.\n\"\"\"\n\nfrom StringIO import StringIO\nimport random\nimport math\n\nimport numpy as np\nimport scipy\nfrom scipy import linalg\n\nimport Form\nimport FormOut\nimport mrate\nimport ctmcmi\nimport msimpl\nimport combobreaker\nimport MatrixUtil\nfrom MatrixUtil import ndot\n\n\ndef get_form():\n form_objects = [\n Form.Integer('nstates', 'number of states', 4, low=2, high=10),\n Form.CheckGroup('partition_options', 'partition options', [\n Form.CheckItem('bipartitioned', 'bipartitioned', True)]),\n ]\n return form_objects\n\ndef get_form_out():\n return FormOut.Report()\n\nclass Accumulate:\n def __init__(self, nstates, simplification):\n self.nstates = nstates\n self.simplification = simplification\n self.counterexample = None\n self.n_too_close = 0\n def __call__(self):\n \"\"\"\n Look for a counterexample.\n \"\"\"\n # Sample a rate matrix.\n # Use a trick by Robert Kern to left and right multiply by diagonals.\n # http://mail.scipy.org/pipermail/numpy-discussion/2007-March/\n # 026809.html\n S = MatrixUtil.sample_pos_sym_matrix(self.nstates)\n v = mrate.sample_distn(self.nstates)\n R = (v**-0.5)[:,np.newaxis] * S * (v**0.5)\n R -= 
np.diag(np.sum(R, axis=1))\n # Construct a parent-independent process\n # with the same max rate and stationary distribution\n # as the sampled process.\n #max_rate = max(-np.diag(R))\n #expected_rate = np.dot(v, -np.diag(R))\n #logical_entropy = np.dot(v, 1-v)\n #randomization_rate = expected_rate / logical_entropy\n Q = self.simplification(R)\n #Q = np.outer(np.ones(self.nstates), v)\n #Q -= np.diag(np.sum(Q, axis=1))\n #Q *= max(np.diag(R) / np.diag(Q))\n # sample a random time\n t = random.expovariate(1)\n # Check that the mutual information of the\n # parent independent process is smaller.\n mi_R = ctmcmi.get_expected_ll_ratio(R, t)\n mi_Q = ctmcmi.get_expected_ll_ratio(Q, t)\n if np.allclose(mi_R, mi_Q):\n self.n_too_close += 1\n return False\n if mi_R < mi_Q:\n out = StringIO()\n print >> out, 'found a counterexample'\n print >> out\n print >> out, 'sampled symmetric matrix S:'\n print >> out, S\n print >> out\n print >> out, 'sampled stationary distribution v:'\n print >> out, v\n print >> out\n print >> out, 'implied rate matrix R:'\n print >> out, R\n print >> out\n print >> out, 'parent independent process Q:'\n print >> out, Q\n print >> out\n print >> out, 'sampled time t:', t\n print >> out\n print >> out, 'mutual information of sampled process:', mi_R\n print >> out, 'mutual information of p.i. process:', mi_Q\n print >> out\n self.counterexample = out.getvalue().rstrip()\n return True\n def __str__(self):\n out = StringIO()\n print >> out, 'iterations where m.i. was too close to call:',\n print >> out, self.n_too_close\n if self.counterexample:\n print >> out, self.counterexample\n else:\n print >> out, 'no counterexample was found'\n return out.getvalue().rstrip()\n\ndef get_response_content(fs):\n # get the user data\n nstates = fs.nstates\n # request a limited amount of time\n nseconds = 4.0\n # get the results\n if fs.bipartitioned:\n simplification = msimpl.get_fast_meta_f81_autobarrier\n else:\n simplification = msimpl.get_fast_f81\n accum = Accumulate(nstates, simplification)\n info = combobreaker.run_callable(accum, nseconds=nseconds)\n return str(info)\n\n","repo_name":"argriffing/xgcode","sub_path":"20120507b.py","file_name":"20120507b.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"27"} +{"seq_id":"1387674875","text":"import copy\nfrom abjad import mathtools\nfrom .Leaf import Leaf\n\n\nclass Chord(Leaf):\n \"\"\"\n Chord.\n\n .. container:: example\n\n >>> chord = abjad.Chord(\"4\")\n >>> abjad.show(chord) # doctest: +SKIP\n\n .. 
docs::\n\n >>> abjad.f(chord)\n 4\n\n \"\"\"\n\n ### CLASS VARIABLES ###\n\n __documentation_section__ = 'Leaves'\n\n __slots__ = (\n '_note_heads',\n )\n\n ### INITIALIZER ###\n\n def __init__(self, *arguments):\n import abjad\n from abjad.ly import drums\n assert len(arguments) in (0, 1, 2)\n self._note_heads = abjad.NoteHeadList(client=self)\n if len(arguments) == 1 and isinstance(arguments[0], str):\n string = '{{ {} }}'.format(arguments[0])\n parsed = abjad.parse(string)\n assert len(parsed) == 1 and isinstance(parsed[0], Leaf)\n arguments = [parsed[0]]\n are_cautionary = []\n are_forced = []\n are_parenthesized = []\n if len(arguments) == 1 and isinstance(arguments[0], Leaf):\n leaf = arguments[0]\n written_pitches = []\n written_duration = leaf.written_duration\n if 'written_pitch' in dir(leaf):\n written_pitches.append(leaf.note_head.written_pitch)\n are_cautionary = [leaf.note_head.is_cautionary]\n are_forced = [leaf.note_head.is_forced]\n are_parenthesized = [leaf.note_head.is_parenthesized]\n elif 'written_pitches' in dir(leaf):\n written_pitches.extend(\n x.written_pitch for x in leaf.note_heads\n )\n are_cautionary = [x.is_cautionary for x in leaf.note_heads]\n are_forced = [x.is_forced for x in leaf.note_heads]\n are_parenthesized = [\n x.is_parenthesized for x in\n leaf.note_heads\n ]\n elif len(arguments) == 2:\n written_pitches, written_duration = arguments\n if isinstance(written_pitches, str):\n written_pitches = [x for x in written_pitches.split() if x]\n elif isinstance(written_pitches, type(self)):\n written_pitches = written_pitches.written_pitches\n elif len(arguments) == 0:\n written_pitches = [0, 4, 7]\n written_duration = abjad.Duration(1, 4)\n else:\n message = 'can not initialize chord from {!r}.'\n message = message.format(arguments)\n raise ValueError(message)\n Leaf.__init__(self, written_duration)\n if not are_cautionary:\n are_cautionary = [None] * len(written_pitches)\n if not are_forced:\n are_forced = [None] * len(written_pitches)\n if not are_parenthesized:\n are_parenthesized = [None] * len(written_pitches)\n for written_pitch, is_cautionary, is_forced, is_parenthesized in zip(\n written_pitches, are_cautionary, are_forced, are_parenthesized):\n if not is_cautionary:\n is_cautionary = None\n if not is_forced:\n is_forced = None\n if not is_parenthesized:\n is_parenthesized = None\n if written_pitch not in drums:\n note_head = abjad.NoteHead(\n written_pitch=written_pitch,\n is_cautionary=is_cautionary,\n is_forced=is_forced,\n is_parenthesized=is_parenthesized,\n )\n else:\n note_head = abjad.DrumNoteHead(\n written_pitch=written_pitch,\n is_cautionary=is_cautionary,\n is_forced=is_forced,\n is_parenthesized=is_parenthesized,\n )\n self._note_heads.append(note_head)\n if len(arguments) == 1 and isinstance(arguments[0], Leaf):\n self._copy_override_and_set_from_leaf(arguments[0])\n\n ### SPECIAL METHODS ###\n\n def __copy__(self, *arguments):\n \"\"\"\n Shallow copies chord.\n\n Returns new chord.\n \"\"\"\n new_chord = Leaf.__copy__(self, *arguments)\n new_chord.note_heads[:] = []\n for note_head in self.note_heads:\n note_head = copy.copy(note_head)\n new_chord.note_heads.append(note_head)\n return new_chord\n\n def __getnewargs__(self):\n \"\"\"\n Gets new chord arguments.\n\n Returns pair.\n \"\"\"\n return self.written_pitches, self.written_duration\n\n ### PRIVATE METHODS ###\n\n def _format_before_slot(self, bundle):\n import abjad\n result = []\n result.append(self._format_grace_body())\n result.append(('comments', bundle.before.comments))\n 
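# when a Tremolo indicator is attached, _format_repeat_tremolo_command() is appended to these before-slot commands\n 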
commands = bundle.before.commands\n if abjad.inspect(self).has_indicator(abjad.Tremolo):\n tremolo_command = self._format_repeat_tremolo_command()\n commands = list(commands)\n commands.append(tremolo_command)\n commands = tuple(commands)\n result.append(('commands', commands))\n result.append(('indicators', bundle.before.indicators))\n result.append(('grob overrides', bundle.grob_overrides))\n result.append(('context settings', bundle.context_settings))\n result.append(('spanners', bundle.before.spanners))\n return result\n\n def _format_close_brackets_slot(self, bundle):\n import abjad\n result = []\n if abjad.inspect(self).has_indicator(abjad.Tremolo):\n brackets_close = ['}']\n result.append([('close brackets', ''), brackets_close])\n return result\n\n def _format_leaf_nucleus(self):\n import abjad\n indent = abjad.LilyPondFormatManager.indent\n result = []\n note_heads = self.note_heads\n if any('\\n' in format(x) for x in note_heads):\n for note_head in note_heads:\n current_format = format(note_head)\n format_list = current_format.split('\\n')\n format_list = [indent + x for x in format_list]\n result.extend(format_list)\n result.insert(0, '<')\n result.append('>')\n result = '\\n'.join(result)\n result += str(self._get_formatted_duration())\n elif abjad.inspect(self).has_indicator(abjad.Tremolo):\n reattack_duration = self._get_tremolo_reattack_duration()\n duration_string = reattack_duration.lilypond_duration_string\n durated_pitches = []\n for note_head in note_heads:\n durated_pitch = format(note_head) + duration_string\n durated_pitches.append(durated_pitch)\n tremolo = abjad.inspect(self).indicator(abjad.Tremolo)\n if tremolo.is_slurred:\n durated_pitches[0] = durated_pitches[0] + r' \\('\n durated_pitches[-1] = durated_pitches[-1] + r' \\)'\n result = ' '.join(durated_pitches)\n else:\n result.extend([format(_) for _ in note_heads])\n result = '<%s>%s' % (\n ' '.join(result),\n self._get_formatted_duration(),\n )\n # single string, but wrapped in list bc contribution\n return ['nucleus', [result]]\n\n def _format_open_brackets_slot(self, bundle):\n import abjad\n result = []\n if abjad.inspect(self).has_indicator(abjad.Tremolo):\n brackets_open = ['{']\n result.append([('open brackets', ''), brackets_open])\n return result\n\n def _format_repeat_tremolo_command(self):\n import abjad\n tremolo = abjad.inspect(self).indicator(abjad.Tremolo)\n reattack_duration = self._get_tremolo_reattack_duration()\n repeat_count = self.written_duration / reattack_duration / 2\n if not mathtools.is_integer_equivalent(repeat_count):\n message = 'can not tremolo duration {} with {} beams.'\n message = message.format(self.written_duration, tremolo.beam_count)\n raise Exception(message)\n repeat_count = int(repeat_count)\n command = r'\\repeat tremolo {}'.format(repeat_count)\n return command\n\n def _get_compact_representation(self):\n return '<{}>{}'.format(\n self._get_summary(),\n self._get_formatted_duration(),\n )\n\n def _get_compact_representation_with_tie(self):\n logical_tie = self._get_logical_tie()\n if 1 < len(logical_tie) and self is not logical_tie[-1]:\n #return '{} ~'.format(self._get_body()[0])\n return '{} ~'.format(self._get_compact_representation())\n else:\n #return self._get_body()[0]\n return self._get_compact_representation()\n\n def _get_sounding_pitches(self):\n import abjad\n if 'sounding pitch' in abjad.inspect(self).indicators(str):\n return self.written_pitches\n else:\n instrument = self._get_effective(abjad.Instrument)\n if instrument:\n sounding_pitch = 
instrument.middle_c_sounding_pitch\n else:\n sounding_pitch = abjad.NamedPitch('C4')\n interval = abjad.NamedPitch('C4') - sounding_pitch\n sounding_pitches = [\n interval.transpose(pitch)\n for pitch in self.written_pitches\n ]\n return tuple(sounding_pitches)\n\n def _get_summary(self):\n return ' '.join([str(x) for x in self.note_heads])\n\n def _get_tremolo_reattack_duration(self):\n import abjad\n tremolos = abjad.inspect(self).indicators(abjad.Tremolo)\n if not tremolos:\n return\n tremolo = tremolos[0]\n exponent = 2 + tremolo.beam_count\n denominator = 2 ** exponent\n reattack_duration = abjad.Duration(1, denominator)\n return reattack_duration\n\n ### PUBLIC PROPERTIES ###\n\n @property\n def note_heads(self):\n r\"\"\"\n Gets note-heads in chord.\n\n .. container:: example\n\n Gets note-heads in chord:\n\n >>> chord = abjad.Chord(\"4\")\n >>> abjad.show(chord) # doctest: +SKIP\n\n >>> abjad.f(chord.note_heads)\n abjad.NoteHeadList(\n [\n abjad.NoteHead(\n written_pitch=abjad.NamedPitch(\"g'\"),\n ),\n abjad.NoteHead(\n written_pitch=abjad.NamedPitch(\"c''\"),\n ),\n abjad.NoteHead(\n written_pitch=abjad.NamedPitch(\"e''\"),\n ),\n ]\n )\n\n .. container:: example\n\n Sets note-heads with pitch names:\n\n >>> chord = abjad.Chord(\"4\")\n >>> abjad.show(chord) # doctest: +SKIP\n\n >>> chord.note_heads = \"c' d' fs'\"\n >>> abjad.show(chord) # doctest: +SKIP\n\n .. docs::\n\n >>> abjad.f(chord)\n 4\n\n .. container:: example\n\n Sets note-heads with pitch numbers:\n\n >>> chord = abjad.Chord(\"4\")\n >>> abjad.show(chord) # doctest: +SKIP\n\n >>> chord.note_heads = [16, 17, 19]\n >>> abjad.show(chord) # doctest: +SKIP\n\n .. docs::\n\n >>> abjad.f(chord)\n 4\n\n Set note-heads with any iterable.\n\n Returns note-head list.\n \"\"\"\n return self._note_heads\n\n @note_heads.setter\n def note_heads(self, note_heads):\n self._note_heads[:] = []\n if isinstance(note_heads, str):\n note_heads = note_heads.split()\n self.note_heads.extend(note_heads)\n\n @property\n def written_duration(self):\n \"\"\"\n Gets written duration of chord.\n\n .. container:: example\n\n Get written duration:\n\n >>> chord = abjad.Chord(\"4\")\n >>> abjad.show(chord) # doctest: +SKIP\n\n >>> chord.written_duration\n Duration(1, 4)\n\n .. container:: example\n\n Set written duration:\n\n >>> chord = abjad.Chord(\"4\")\n >>> abjad.show(chord) # doctest: +SKIP\n\n >>> chord.written_duration = abjad.Duration(1, 16)\n >>> abjad.show(chord) # doctest: +SKIP\n\n Set duration.\n\n Returns duration.\n \"\"\"\n return Leaf.written_duration.fget(self)\n\n @written_duration.setter\n def written_duration(self, argument):\n Leaf.written_duration.fset(self, argument)\n\n @property\n def written_pitches(self):\n \"\"\"\n Written pitches in chord.\n\n .. container:: example\n\n Get written pitches:\n\n >>> chord = abjad.Chord(\"4\")\n >>> abjad.show(chord) # doctest: +SKIP\n\n >>> chord.written_pitches\n PitchSegment(\"g' c'' e''\")\n\n .. container:: example\n\n Set written pitches with pitch names:\n\n >>> chord = abjad.Chord(\"4\")\n >>> abjad.show(chord) # doctest: +SKIP\n\n >>> chord.written_pitches = \"f' b' d''\"\n >>> abjad.show(chord) # doctest: +SKIP\n\n .. 
docs::\n\n >>> abjad.f(chord)\n 4\n\n >>> chord.written_pitches\n PitchSegment(\"f' b' d''\")\n\n Set written pitches with any iterable.\n\n Returns tuple.\n \"\"\"\n import abjad\n return abjad.PitchSegment(\n items=(note_head.written_pitch for note_head in self.note_heads),\n item_class=abjad.NamedPitch,\n )\n\n @written_pitches.setter\n def written_pitches(self, pitches):\n self.note_heads = pitches\n","repo_name":"gsy/gmajor","sub_path":"abjad_demo/env/lib/python3.6/site-packages/abjad/core/Chord.py","file_name":"Chord.py","file_ext":"py","file_size_in_byte":13894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"31103357059","text":"import base64 as b64\r\nimport requests\r\n\r\nfrom functools import wraps\r\n\r\nfrom .Exceptions import *\r\nfrom .Simplificators import *\r\n\r\n\r\ndef token_verificator(func):\r\n @wraps(func)\r\n def inner(*args, **kwargs):\r\n if len(args) == 0 or args[0] == '':\r\n raise BadToken('You must specify a valid token')\r\n else:\r\n return func(*args, **kwargs)\r\n\r\n return inner\r\n\r\n\r\ndef login(username, password, hack_mod=False):\r\n payload = 'data={ \"identifiant\": \"' + str(username) + \\\r\n '\", \"motdepasse\": \"' + str(password) + '\", \"acceptationCharte\": true }'\r\n try:\r\n result = requests.post('https://api.ecoledirecte.com/v3/login.awp', data=payload).json()\r\n except Exception as exception:\r\n result = {'token': ''}\r\n if type(exception).__name__ == \"ConnectionError\":\r\n raise ConnectionError\r\n else:\r\n raise UnknownError('You have a very strange device :)')\r\n finally:\r\n if result['token'] == '':\r\n if not hack_mod:\r\n raise BadCreditentials('Bad username or password')\r\n else:\r\n return False\r\n else:\r\n if not hack_mod:\r\n infos = {\r\n 'token': result['token'],\r\n 'id': result['data']['accounts'][0]['id'],\r\n 'nom': result['data']['accounts'][0]['nom'],\r\n 'prenom': result['data']['accounts'][0]['prenom'],\r\n 'identifiant': result['data']['accounts'][0]['identifiant'],\r\n 'email': result['data']['accounts'][0]['email'],\r\n 'etablissement': result['data']['accounts'][0]['nomEtablissement'],\r\n 'classe_id': result['data']['accounts'][0]['profile']['classe']['id'],\r\n 'ogec': result['data']['accounts'][0]['codeOgec']\r\n }\r\n else:\r\n return True\r\n return infos\r\n\r\n\r\n@token_verificator\r\ndef fetch_number_of_notes(token, user_id):\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/Eleves/' + str(user_id) + '/notes.awp?verbe=get&'\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n return len(reponse['data']['notes'])\r\n\r\n\r\n@token_verificator\r\ndef fetch_notes(token, user_id, simplified=True, periode=None, matiere=None, min_max_moy=False):\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/Eleves/' + str(user_id) + '/notes.awp?verbe=get&'\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n if simplified:\r\n return notesAnalyse(reponse['data']['notes'], periode_notes=periode, matiere=matiere, min_max_moy=min_max_moy)\r\n else:\r\n return reponse['data']['notes']\r\n\r\n\r\n@token_verificator\r\ndef fetch_moyennes(token, user_id, simplified=True, periode=None):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/Eleves/' + str(user_id) + '/notes.awp?verbe=get&'\r\n reponse 
= requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n if simplified:\r\n return moyennesAnalyse(reponse, periode)\r\n else:\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_emploi_du_temps(token, user_id, date_debut='2020-12-21', date_fin='2020-12-27'):\r\n try:\r\n payload = 'data={\"dateDebut\": \"' + date_debut + '\", \"dateFin\": \"' + date_fin + \\\r\n '\", \"avecTrous\": false, \"token\": \"' + token + '\"}'\r\n url = \"https://api.ecoledirecte.com/v3/E/\" + str(user_id) + \"/EmploiDuTemps.awp?verbe=get&\"\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_agenda(token, user_id, date=None, simplified=True):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n if date is None:\r\n url = 'https://api.ecoledirecte.com/v3/Eleves/' + str(user_id) + '/cahierdetexte.awp?verbe=get&'\r\n else:\r\n url = 'https://api.ecoledirecte.com/v3/Eleves/' + str(\r\n user_id) + '/cahierdetexte/' + date + '.awp?verbe=get&'\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n print(reponse)\r\n if simplified and date is not None:\r\n devoir = []\r\n for a in reponse['data']['matieres']:\r\n try:\r\n devoir.append((a['codeMatiere'], b64.b64decode(a['aFaire']['contenu'].encode()),\r\n a['aFaire']['rendreEnLigne']))\r\n except:\r\n continue\r\n\r\n devoirs = {\r\n 'date': reponse['data']['date'],\r\n 'contenus_de_seances': [(a['codeMatiere'], b64.b64decode(a['contenuDeSeance']['contenu'].encode())) for\r\n a in reponse['data']['matieres'] if a['contenuDeSeance']['contenu'] != []],\r\n 'devoirs': devoir,\r\n 'evaluations': [a['codeMatiere'] for a in reponse['data']['matieres'] if a['interrogation']]\r\n }\r\n return devoirs\r\n else:\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token or date')\r\n\r\n\r\n@token_verificator\r\ndef fetch_cloud(token, user_id):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = \"https://api.ecoledirecte.com/v3/cloud/E/\" + str(user_id) + \".awp?verbe=get&\"\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_messages(token, user_id):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = \"https://api.ecoledirecte.com/v3/eleves/\" + str(\r\n user_id) + \"/messages.awp?verbe=getall&typeRecuperation=received&orderBy=date&order=\\\r\n desc&page=0&itemsPerPage=20&onlyRead=&query=&idClasseur=0\"\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_message(token, user_id, message_id, content=True):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/eleves/' + str(user_id) + '/messages/' + str(\r\n message_id) + '.awp?verbe=get&mode=destinataire'\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n if 
content:\r\n content = {\r\n 'sender': reponse['data']['from']['nom'] + ' ' + reponse['data']['from']['prenom'],\r\n 'contenu': b64.b64decode((reponse['data']['content']).encode()),\r\n 'pieces_jointes': [] if reponse['data']['files'] == [] else [(a['id'], a['libelle']) for a in\r\n reponse['data']['files']]\r\n }\r\n return content\r\n else:\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_qcms(token, user_id):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/eleves/' + str(user_id) + '/qcms/0/associations.awp?verbe=get&'\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_workspace(token, user_id):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/eleves/' + str(user_id) + '/espacestravail.awp?verbe=get&='\r\n reponse = requests.post(url, payload).json()\r\n return reponse['data']\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_workspace_topics(token, user_id, workspace_id):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/E/' + str(user_id) + '/espacestravail/' + str(\r\n workspace_id) + '/topics.awp?verbe=get&'\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_workspace_discussio_messages(token, user_id, wordkspace_id, discussion_id, decode=True):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/E/' + str(user_id) + '/espacestravail/' + \\\r\n str(wordkspace_id) + '/topics/' + str(discussion_id) + '/messages.awp?verbe=get&'\r\n reponse = (requests.post(url, payload).json())['data']['messages']\r\n if decode:\r\n for a in reponse:\r\n a['contenu'] = (b64.b64decode(a['contenu'].encode('UTF-8'))).decode('UTF-8')\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_teachers_list(token, classe_id, simplified=True):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/messagerie/contacts/professeurs.awp?verbe=get&idClasse=' + \\\r\n str(classe_id) + '&nom='\r\n reponse = requests.post(url, payload).json()\r\n del reponse['token'], reponse['code'], reponse['host']\r\n if simplified:\r\n infos = [(a['civilite'] + \" \" + a['prenom'] + \" \" + a['particule'] + \" \" + a['nom'], a['id'])\r\n for a in reponse['data']['contacts']]\r\n return infos\r\n else:\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef send_workspace_message(token, user_id, wordkspace_id, discussion_id, message):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\", \"idTopic\": \"' + str(discussion_id) + '\",\\\r\n \"contenu\":\"' + (b64.b64encode((str(message)).encode('UTF-8'))).decode('UTF-8') + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/E/' + str(user_id) + '/espacestravail/' + str(wordkspace_id) + \\\r\n 
'/topics/' + str(discussion_id) + '/messages.awp?verbe=post&'\r\n reponse = requests.post(url, payload).json()\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef get_worspace_users(token, user_id, workspace_id, simplification=True):\r\n try:\r\n payload = 'data={\"token\": \"' + token + '\"}'\r\n url = 'https://api.ecoledirecte.com/v3/E/' + str(user_id) + \\\r\n '/espacestravail/' + str(workspace_id) + '/membres.awp?verbe=get&='\r\n reponse = (requests.post(url, payload).json())['data']['membres']\r\n if simplification:\r\n reponse = [(a['idMembre'], a['nom'], a['prenom'], a['profil']) for a in reponse]\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef fetch_all_workspaces_users(token, user_id):\r\n lst_membres = []\r\n for workspace in fetch_workspace(token, user_id):\r\n for eleve in get_worspace_users(token, user_id, workspace['id']):\r\n lst_membres.append(eleve)\r\n return lst_membres\r\n\r\n\r\n@token_verificator\r\ndef send_message(token, receiver_id, receiver_type, subject, message, files=[]):\r\n files_list = []\r\n for file in files:\r\n directory = _televersement(token, file)\r\n files_list.append(\r\n {\r\n '\"unc\": \"' + directory + '\",\\\r\n \"libelle\": \"\",\\\r\n \"data\": { \"unc\": \"' + directory + '\"},\\\r\n \"code\": 200, \"message\": \"' + directory + '\"'\r\n })\r\n files_liste = (str(files_list)).replace('\\'', '')\r\n try:\r\n payload = 'data={\\\r\n \"message\": {\\\r\n \"groupesDestinataires\": [\\\r\n {\\\r\n \"destinataires\": [\\\r\n {\\\r\n \"civilite\": \"\",\\\r\n \"prenom\": \"\",\\\r\n \"particule\": \"\",\\\r\n \"nom\": \"\",\\\r\n \"sexe\": \"\",\\\r\n \"id\": ' + str(receiver_id) + ',\\\r\n \"type\": \"' + str(receiver_type) + '\",\\\r\n \"matiere\": \"\",\\\r\n \"photo\": \"\",\\\r\n \"telephone\": \"\",\\\r\n \"email\": \"\",\\\r\n \"estBlackList\": false,\\\r\n \"isPP\": false,\\\r\n \"etablissements\": [],\\\r\n \"classe\": {\\\r\n },\\\r\n \"responsable\": {\\\r\n },\\\r\n \"fonction\": {\\\r\n },\\\r\n \"isSelected\": true,\\\r\n \"to_cc_cci\": \"to\"\\\r\n }\\\r\n ],\\\r\n \"selection\": {\\\r\n }\\\r\n }\\\r\n ],\\\r\n \"content\": \"' + (b64.b64encode((str(message)).encode('UTF-8'))).decode('UTF-8') + '\",\\\r\n \"subject\": \"' + str(subject) + '\",\\\r\n \"files\": ' + files_liste + '\\\r\n },\\\r\n \"anneeMessages\": \"\",\\\r\n \"token\": \"' + token + '\"\\\r\n }'\r\n url = 'https://api.ecoledirecte.com/v3/eleves/568/messages.awp?verbe=post&='\r\n reponse = (requests.post(url, payload)).json()\r\n return reponse\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\n@token_verificator\r\ndef _televersement(token, file):\r\n try:\r\n file = {'file': (file, open(file, 'rb'))}\r\n payload = {\r\n 'token': token,\r\n }\r\n url = 'https://api.ecoledirecte.com/v3/televersement.awp'\r\n result = (requests.post(url, payload, files=file).json())['data']['unc']\r\n return result\r\n except ConnectionError:\r\n raise BadToken('You must specify a valid token')\r\n\r\n\r\ndef bruteforce(username, wordlist=None, afficher=False):\r\n with open(wordlist, 'r') as w:\r\n for char in w:\r\n if login(username, char, True):\r\n if afficher:\r\n print('psswd =', char)\r\n return char\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Ecole Directe api by Ismaël 
Gaye')\r\n","repo_name":"4THEEND/ecole_directe_api_python","sub_path":"EcoleDirecte.py","file_name":"EcoleDirecte.py","file_ext":"py","file_size_in_byte":15372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"27"} +{"seq_id":"25279958793","text":"# coding:utf-8\n\n# if needle not in haystack:\n# return -1\n# return haystack.find(needle)\n#当needle 为空 返回0\n\ndef strStr():\n \"\"\"\n :type haystack: str\n :type needle: str\n :rtype: int\n \"\"\"\n needle = ''#//\"ll\"\n haystack = 'hello'\n if needle not in haystack:\n return -1\n print(haystack.find(needle))\n return haystack.find(needle)\nif __name__ == '__main__':\n strStr()\n\n\n# 大神做法:同时解决了str 为空得问题\n# # try:\n# # index = haystack.index(needle)\n# # return index\n# # except:\n# # return -1\n# # 描述\n# # Python index() 方法检测字符串中是否包含子字符串 str ,如果指定 beg(开始) 和 end(结束) 范围,则检查是否包含在指定\n# 范围内,该方法与 python find()方法一样,只不过如果str不在 string中会报一个异常。\n# #\n# # 语法\n# # index()方法语法:\n# #\n# # str.index(str, beg=0, end=len(string))\n# # 参数\n# # str -- 指定检索的字符串\n# # beg -- 开始索引,默认为0。\n# # end -- 结束索引,默认为字符串的长度。\n# # 返回值\n# # 如果包含子字符串返回开始的索引值,否则抛出异常。","repo_name":"wwshadow/leetcode","sub_path":"strstr.py","file_name":"strstr.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"27"} +{"seq_id":"18266440166","text":"from django.forms import ModelForm\nfrom django.db.models import Q\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset\nfrom crispy_forms.bootstrap import Field\nfrom core.forms.base import FormulaireBase\nfrom core.models import PortailDocument, TypePiece\nfrom core.utils.utils_commandes import Commandes\n\n\nclass Formulaire(FormulaireBase, ModelForm):\n class Meta:\n model = PortailDocument\n fields = \"__all__\"\n\n def __init__(self, *args, **kwargs):\n super(Formulaire, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'portail_documents_form'\n self.helper.form_method = 'post'\n self.helper.use_custom_control = False\n\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-md-2'\n self.helper.field_class = 'col-md-10'\n\n # Sélectionne uniquement les activités autorisées\n self.fields[\"type_piece\"].queryset = TypePiece.objects.filter(Q(structure__in=self.request.user.structures.all()) | Q(structure__isnull=True)).order_by(\"nom\")\n\n # Affichage\n self.helper.layout = Layout(\n Commandes(annuler_url=\"{% url 'portail_documents_liste' %}\"),\n Fieldset(\"Généralités\",\n Field(\"titre\"),\n Field(\"texte\"),\n Field(\"couleur_fond\"),\n ),\n Fieldset(\"Document joint\",\n Field(\"document\"),\n ),\n Fieldset(\"Type de pièce associé\",\n Field(\"type_piece\"),\n ),\n Fieldset(\"Structure associée\",\n Field(\"structure\"),\n ),\n )\n","repo_name":"Noethys/Noethysweb","sub_path":"noethysweb/parametrage/forms/portail_documents.py","file_name":"portail_documents.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"40366234689","text":"#This is intended to be a list of weapons\n\n#weapon class\n\nmeleeWeapons = ['sword', 'axe', 'lance', 'bow']\nmagicWeapons = ['light', 'darkness', 'anima']\n\n\nclass Weapon:\n currentID = 0\n def __init__(self, name, weaponClass, weaponType, might, hit, crit, weight, range):\n self.id = Weapon.currentID\n Weapon.currentID += 1 # bump the class-level counter; self.currentID += 1 would only shadow it with an instance attribute\n\n self.name = name\n self.weaponClass = weaponClass #intended to be 
0 or 1, 0 for physical, 1 for magical\n self.weaponType = weaponType # a number 0 to (2 or 3) (see arrays above)\n self.might = might\n self.hit = hit\n self.crit = crit\n self.weight = weight\n self.act_range = range\n\n\n def __repr__(self):\n return \"< Weapon - mt:%s hit:%s crit:%s wt:%s range:%s>\" % (self.might, self.hit, self.crit, self.weight, self.act_range)\n \n\nweaponsList = []\n\n","repo_name":"jenningsjin/FE_RPG","sub_path":"lib/weapons.py","file_name":"weapons.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"29804665237","text":"import re\r\nf1=open('Reg_Num.txt','r')\r\nf2=open('PY_Reg_Num','w')\r\nrule='[L][U][M][1][1][P][Y]\\d{2}'\r\nfor i in f1:\r\n reg=i.rstrip('\\n')\r\n matcher=re.fullmatch(rule,reg)\r\n if matcher is not None:\r\n f2.write(reg)\r\n f2.write('\\n')\r\n\r\n","repo_name":"SagAr633/PYTHON-Works","sub_path":"Regular_Expresion_Examples/Reg_No_check.py","file_name":"Reg_No_check.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"13367692093","text":"from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any, Optional, Sequence\n\nimport torch\nimport lightning.pytorch as pl\nimport lightning.pytorch.callbacks as plc\n\n\nclass PredictionWriter(plc.BasePredictionWriter):\n def __init__(self, output_dir: Path, datasets_names: list[str], filename: str) -> None:\n super().__init__(\"batch_and_epoch\")\n self.output_dir = output_dir\n self.datasets_names = datasets_names\n self.filename = filename\n self.outputs = {}\n\n def write_on_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n prediction: tuple[torch.Tensor, torch.Tensor],\n batch_indices: Sequence[int] | None,\n batch: tuple[torch.Tensor, torch.Tensor],\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n if dataloader_idx not in self.outputs:\n self.outputs[dataloader_idx] = {}\n self.outputs[dataloader_idx][batch_idx] = prediction\n\n def write_on_epoch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n predictions: Sequence[Any],\n batch_indices: Sequence[Any] | None,\n ) -> None:\n for dataloader_idx, dataloader_outputs in self.outputs.items():\n dataloader_scores, dataloader_predictions = [], []\n for batch_idx in sorted(dataloader_outputs.keys()):\n batch_scores, batch_predictions = dataloader_outputs[batch_idx]\n dataloader_scores.append(batch_scores)\n dataloader_predictions.append(batch_predictions)\n dataloader_scores = torch.cat(dataloader_scores, dim=0).tolist()\n dataloader_predictions = torch.cat(dataloader_predictions, dim=0).tolist()\n output_file_path: Path = self.output_dir / self.datasets_names[dataloader_idx] / self.filename\n output_file_path.parent.mkdir(parents=True, exist_ok=True)\n with open(output_file_path, \"w\") as output_file:\n for score, prediction in zip(dataloader_scores, dataloader_predictions):\n output_file.write(f\"{score:.8f},{1 if prediction else 0}\\n\")","repo_name":"ndido98/frcsyn","sub_path":"prediction_writer.py","file_name":"prediction_writer.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36969601365","text":"import unittest\nfrom datetime import datetime\nimport sys\nfrom line_profiler import LineProfiler\nfrom data.db_connector import DbConnection\nfrom data.hdfs 
import HDFSLoader\nfrom data.models import TweetList\nfrom data.user import User\nfrom data.user_repo import HDFSSQLiteUserRepository\nfrom util.cal import unix_timestamp\n\n\nclass TestDbConnection(unittest.TestCase):\n def setUp(self):\n self.conn = DbConnection()\n self.cur = self.conn.get_cursor()\n\n def test_connection(self):\n tables = self.cur.execute(\"\"\"PRAGMA database_list;\"\"\").fetchall()\n self.assertEqual(len(tables), 3)\n\n def test_db_data(self):\n data = self.cur.execute(\"\"\"SELECT tweet_time FROM db.tweets WHERE user_id=12 LIMIT 1;\"\"\").fetchall()\n self.assertEqual(len(data), 1)\n\n def tearDown(self):\n self.cur.close()\n self.conn.close()\n\n\nclass TestHDFSLoader(unittest.TestCase):\n def setUp(self):\n self.loader = HDFSLoader()\n\n def test_loaded_data(self):\n self.assertListEqual(\n list(self.loader.get_tweets(5320502)), # data for @sadjad\n [1177066462, 1179306824, 1180405750, 1180695756, 1228836295, 1228980215, 1229602451]\n )\n\n def tearDown(self):\n del self.loader\n\n\nclass TestTweetList(unittest.TestCase):\n def setUp(self):\n self.tweet_times = [unix_timestamp(datetime(2000, 10, 1, 23, 0, 0)),\n unix_timestamp(datetime(2000, 10, 2, 23, 0, 0)),\n unix_timestamp(datetime(2000, 10, 2, 23, 1, 0)),\n unix_timestamp(datetime(2000, 10, 4, 23, 0, 0)),\n unix_timestamp(datetime(2000, 10, 5, 23, 0, 0)),\n unix_timestamp(datetime(2000, 10, 6, 23, 0, 0)),\n unix_timestamp(datetime(2000, 10, 7, 23, 0, 0))]\n\n def test_sub_view(self):\n tweet_list = TweetList(self.tweet_times)\n sub_list = tweet_list.sublist(start_date=datetime(2000, 10, 2),\n end_date=datetime(2000, 10, 6))\n\n self.assertEqual(len(sub_list), 4)\n daily = sub_list.get_day_tweets(datetime(2000, 10, 2))\n\n self.assertListEqual(list(daily), self.tweet_times[1:3])\n\n def test_intensity(self):\n tweet_list = TweetList([2880, 3240, 3960, 8640, 606600, 607320])\n print(tweet_list.get_periodic_intensity())\n\n def tearDown(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Networks-Learning/broadcast_ref","sub_path":"broadcast/data/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"14838627050","text":"import datetime\nimport os\n\nfrom glitch_this import ImageGlitcher\n\n\ndef glitch_scrapes_directory():\n todays_date_string = str(datetime.date.today())\n dirname = os.path.dirname(__file__)\n\n # path to scraped images\n images_directory = os.path.join(dirname, '../image-scraper/scrapes', todays_date_string)\n\n # create directory to store today's glitched images\n todays_directory = os.path.join(dirname, 'glitched', todays_date_string)\n os.makedirs(todays_directory, exist_ok=True)\n print(os.listdir(images_directory))\n\n glitcher = ImageGlitcher()\n\n for filename in os.listdir(images_directory):\n if filename == '.gitkeep':\n continue\n\n print(f'glitching {filename}')\n glitched_img = glitcher.glitch_image(\n f\"{images_directory}/{filename}\",\n 2,\n color_offset=True)\n glitched_img.save(f\"{todays_directory}/glitched-{filename}\")\n\n\nif __name__ == '__main__':\n glitch_scrapes_directory()\n","repo_name":"steezeburger/miley-virus","sub_path":"packages/image-glitcher/glitch-images.py","file_name":"glitch-images.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"42893713128","text":"import torchreid\nimport torch\nimport numpy as 
np\nimport cv2\nfrom scipy.optimize import linear_sum_assignment\nimport imageio\n\n\ndef linear_assignment(cost_matrix):\n x, y = linear_sum_assignment(cost_matrix)\n return np.array(list(zip(x, y)))\n\n\nextractor = torchreid.utils.FeatureExtractor(\n model_name=\"osnet_x1_0\", model_path=\"./weights/osnet_x1_0.pth.tar\", device=\"cuda\"\n)\n\nvideo1 = cv2.VideoCapture(\n \"/home/mbenencase/projects/daedalus/datasets/multi-camera-tracking/epfl/cam1.mp4\"\n)\nassert video1.isOpened(), \"Could not open video1\"\nvideo2 = cv2.VideoCapture(\n \"/home/mbenencase/projects/daedalus/datasets/multi-camera-tracking/epfl/cam4.mp4\"\n)\nassert video2.isOpened(), \"Could not open video2\"\n\n# Loading yolov5 model\ndetector = torch.hub.load(\"ultralytics/yolov5\", \"yolov5m\")\ndetector.agnostic = True\ndetector.classes = [0]\ndetector.conf = 0.5\n\nnum_frames = int(video1.get(cv2.CAP_PROP_FRAME_COUNT))\n\nCOS_THRES: float = 0.80\nCOLORS = np.random.randint(0, 255, size=(100, 3), dtype=\"uint8\")\n\nvideo = None\nfor idx in range(num_frames):\n # Get frames\n frame1 = video1.read()[1]\n frame2 = video2.read()[1]\n\n # Run object detection\n anno = detector([frame1, frame2])\n\n preds1 = anno.xyxy[0].cpu().numpy()\n preds2 = anno.xyxy[1].cpu().numpy()\n\n cam1_features = []\n cam2_features = []\n for pred in preds1:\n x1, y1, x2, y2, _, _ = np.int0(pred)\n crop = frame1[y1:y2, x1:x2, :]\n\n feat = extractor(crop)[0].cpu().numpy()\n feat = feat / np.linalg.norm(feat)\n cam1_features.append(feat)\n for pred in preds2:\n x1, y1, x2, y2, _, _ = np.int0(pred)\n crop = frame2[y1:y2, x1:x2, :]\n\n feat = extractor(crop)[0].cpu().numpy()\n feat = feat / np.linalg.norm(feat)\n cam2_features.append(feat)\n\n cam1_features = np.array(cam1_features)\n cam2_features = np.array(cam2_features)\n\n sim_matrix = cam1_features @ cam2_features.T\n matched_indices = linear_assignment(-sim_matrix)\n\n for idx, match in enumerate(matched_indices):\n if sim_matrix[match[0], match[1]] < COS_THRES:\n continue\n else:\n # Draw bounding boxes\n x1, y1, x2, y2, _, _ = np.int0(preds1[match[0]])\n cv2.rectangle(frame1, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.putText(\n frame1,\n f\"{idx}\",\n (x1, y1),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (0, 255, 0),\n 2,\n )\n x1, y1, x2, y2, _, _ = np.int0(preds2[match[1]])\n cv2.rectangle(frame2, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.putText(\n frame2, f\"{idx}\", (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2\n )\n\n vis = np.hstack([frame1, frame2])\n\n if video is None:\n H, W, _ = vis.shape\n video = cv2.VideoWriter(\n \"./reid.avi\",\n cv2.VideoWriter_fourcc(*\"MJPG\"),\n 15,\n (W, H),\n True,\n )\n\n video.write(vis)\n\n cv2.namedWindow(\"Vis\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Vis\", vis)\n key = cv2.waitKey(1)\n\n if key == ord(\"q\"):\n break\n\nvideo1.release()\nvideo2.release()\nvideo.release()\ncv2.destroyAllWindows()\n","repo_name":"daedalus-tech/mc-mot","sub_path":"test_torchreid.py","file_name":"test_torchreid.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"28"} +{"seq_id":"27976291147","text":"from functools import lru_cache \nimport collections\nfrom typing import List\nimport sys \nsys.setrecursionlimit(999999) #限制递归的深度\nclass StringHash:\n def __init__(self, s, P = 10**9+7): # keep P an int; a float base loses precision under % (1 << 64) and breaks the hash\n self._P = P \n self.S_hash = [0] * len(s)\n self.pow_arr = [0] * len(s)\n\n self.pow_arr[0] = 1\n for i in range(1,len(s)):\n self.pow_arr[i] = (self.pow_arr[i-1] * self._P) % (1 << 64)\n \n self.S_hash[0] = s[0]\n for i in range(1,len(s)):\n 
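# rolling prefix hash: S_hash[i] = S_hash[i-1]*P + s[i] (mod 2**64); get_hash() then recovers any window in O(1) via pow_arr\n 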
self.S_hash[i] = (self.S_hash[i-1] * self._P + s[i]) % (1 << 64)\n \n def get_hash(self,l,r):\n if l == 0:\n return self.S_hash[r]\n else:\n return (self.S_hash[r] - self.S_hash[l-1] * self.pow_arr[r-l+1]) % (1 << 64)\n\nclass Solution:\n def longestCommonSubpath(self, n: int, paths: List[List[int]]) -> int:\n min_len = 0x7fffffff\n if len(paths) <= 1:\n return 0\n \n for path in paths:\n min_len = min(min_len,len(path))\n #对于每一个path都构建一个字符串hash表\n maps = [StringHash(path) for path in paths]\n def get_hash_set(sub_len,path_idx):\n n = len(paths[path_idx])\n res = set() \n tmp = maps[path_idx]\n for i in range(n):\n if i + sub_len <= n:\n res.add(tmp.get_hash(i,i+sub_len-1))\n else:\n break \n return res\n \n def check(sub_len):\n n = len(paths)\n c = collections.Counter()\n flag = False\n for i in range(n):\n s = get_hash_set(sub_len,i)\n for val in s:\n c[val] += 1\n if c[val] == len(paths):\n flag = True \n break \n return flag\n \n l,r = 1, min_len\n ans = 0\n while l <= r:\n mid = (l + r) // 2 \n if check(mid):\n ans = mid\n l = mid + 1\n else:\n r = mid - 1\n return ans ","repo_name":"Yohager/Leetcode","sub_path":"python版本/5803-LongestCommonSubpath.py","file_name":"5803-LongestCommonSubpath.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"40495362766","text":"import time\nfrom locust import HttpUser, task\nimport json\nimport random\n\nclass QuickstartUser(HttpUser):\n casos = []\n with open('traffic.json') as json_file:\n data = json.load(json_file)\n casos.extend(data)\n\n @task\n def insercion_caso(self):\n time.sleep(1)\n response = self.client.post(\"/add-caso\",json=random.choice(self.casos))\n json_response_dict = response.json()\n print(response.json())\n ","repo_name":"GermanJosePazCordon/Laboratorio_SO1_2S22","sub_path":"Clases/Clase12/code/locust/locust.py","file_name":"locust.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"7047913693","text":"import streamlit as st\nfrom streamlit_chat import message\nfrom langchain import OpenAI\n#from langchain.chains import ConversationChain\nfrom langchain.memory import ConversationSummaryBufferMemory\nfrom langchain import PromptTemplate\nfrom langchain import LLMChain\nimport openai\nimport pinecone\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv('../.env')\n\nOPENAI_API_KEY = os.environ['OPENAI_API_KEY']\nPINECONE_API_KEY = os.environ['PINECONE_API_KEY']\nPINECONE_API_ENV = os.environ['PINECONE_API_ENV']\nPINECONE_INDEX_NAME = os.environ['PINECONE_INDEX_NAME']\nEMBEDDING_MODEL = os.environ['EMBEDDING_MODEL']\n\n# initialize connection to pinecone (get API key at app.pinecone.io)\npinecone.init(\n api_key=PINECONE_API_KEY,\n environment=PINECONE_API_ENV # may be different, check at app.pinecone.io\n)\n# connect to index\nindex = pinecone.Index(PINECONE_INDEX_NAME)\n\n@st.cache_resource\ndef LLM_chain_response():\n prompt = PromptTemplate(\n input_variables=[\"history\", \"input\"],\n template=\"Answer the question based on the context below. If you cannot answer based on the context, or general knowledge of the company Wells Fargo, truthfull answer that you don't know. Use Markdown and text formatting to format your answer. 
\\n\\nCurrent conversation:\\n{history}\\nHuman: {input}\\nAI:\"\n )\n\n llm = OpenAI(\n temperature=0.3,\n openai_api_key=OPENAI_API_KEY,\n model_name=\"text-davinci-003\",\n max_tokens=128\n )\n\n chatgpt_chain = LLMChain(\n llm=llm,\n prompt=prompt,\n memory= ConversationSummaryBufferMemory(llm=llm, max_token_limit=256),\n verbose=True\n )\n return chatgpt_chain\n\n# Define the retrieve function\ndef retrieve(query):\n # retrieve from Pinecone\n res = openai.Embedding.create(input=[query],model=EMBEDDING_MODEL)\n xq = res['data'][0]['embedding']\n\n # get relevant contexts\n pinecone_res = index.query(xq, top_k=10, include_metadata=True)\n contexts = [x['metadata']['chunk_text'] for x in pinecone_res['matches']]\n\n pinecone_contexts = (\n \"\\n\\n---\\n\\n\".join(contexts)\n )\n return pinecone_contexts\n\n# From here down is all the Streamlit UI.\nimage = open(\"Pinecone logo white.png\", \"rb\").read()\nst.image(image)\nst.write(\"### Lord of the Rings - Fellowship of the Ring Demo\")\n\nif \"generated\" not in st.session_state:\n st.session_state[\"generated\"] = []\n\nif \"past\" not in st.session_state:\n st.session_state[\"past\"] = []\n\n\ndef get_text():\n input_text = st.text_input(\"You: \", \"Who is Bilbo Baggins?\", key=\"input\")\n return input_text\n\n# Main function for the Streamlit app\ndef main():\n chatgpt_chain = LLM_chain_response()\n user_input = get_text()\n if user_input:\n with st.spinner(\"Thinking...\"):\n query = user_input\n pinecone_contexts = retrieve(query)\n output = chatgpt_chain.predict(input=query + '\\nContext: ' + pinecone_contexts)\n st.session_state.past.append(user_input)\n st.session_state.generated.append(output)\n\n if st.session_state[\"generated\"]:\n for i in range(len(st.session_state[\"generated\"]) - 1, -1, -1):\n message(st.session_state[\"generated\"][i], key=str(i))\n message(st.session_state[\"past\"][i], is_user=True, key=str(i) + \"_user\", avatar_style=\"shapes\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pinecone-io/genqa-rag-demo","sub_path":"streamlit_app/todo-splade/Snowflake_app_no_splade_local.py","file_name":"Snowflake_app_no_splade_local.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"28"} +{"seq_id":"38065684821","text":"# Python version of Day 1 exercise, count number of\n# increases in a text file\n\n# Read numbers into a list\nnn = [int(x) for x in open('input.txt')]\n\n# Part 1: count up number of increases from the previous value\nincreases = 0\nfor i in range(1, len(nn)):\n if nn[i] > nn[i - 1]:\n increases += 1\nprint(f'Part 1: {increases} increases found')\n\n# Part 2: count up sliding window of 3\nprev = increases = 0\nfor i in range(3, len(nn)+1):\n l = nn[i-3:i]\n if prev > 0 and sum(l) > prev:\n increases += 1\n prev = sum(l)\nprint(f'Part 2: {increases} increases found')\n\n","repo_name":"andreaskaempf/adventofcode2021","sub_path":"day01/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"7115752635","text":"from django.urls import path\nfrom .views import ContactoCreateAPIView, ContactoUpdateAPIView, ContactoDetailAPIView, ContactoDeleteAPIView, DeudaListAPIView, DeudaCreateAPIView, DeudaUpdateAPIView\n\nurlpatterns = [\n path('contact-views/', ContactoDetailAPIView.as_view()),\n path('contact-create/', ContactoCreateAPIView.as_view()),\n path('contact-delete/<int:pk>/', 
ContactoDeleteAPIView.as_view()),\n path('contact-update/<int:pk>/', ContactoUpdateAPIView.as_view()),\n\n path('deuda-views/', DeudaListAPIView.as_view()),\n path('deuda-create/', DeudaCreateAPIView.as_view()),\n path('deuda-update/<int:pk>/', DeudaUpdateAPIView.as_view()),\n\n]\n","repo_name":"MiguelAngel0107/TePago","sub_path":"TePago-Backend/core/apps/contacts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"36974139395","text":"import event_stream\nimport numpy.lib.recfunctions\nimport pathlib\ndirname = pathlib.Path(__file__).resolve().parent\n\nt_window = 2000\n\ndecoder = event_stream.Decoder(dirname / 'media' / 'dvs.es')\nencoder = event_stream.Encoder(dirname / 'media' / 'dvs_filtered.es', decoder.type, decoder.width, decoder.height)\n\nts = numpy.zeros((decoder.width, decoder.height), dtype='<u8')\n\n# note: events are assumed to expose fields (t, x, y, on);\n# keep an event only if a 4-connected neighbour fired within the last t_window\nfor packet in decoder:\n selection = numpy.zeros(len(packet), dtype=bool)\n for index, (t, x, y, on) in enumerate(packet):\n ts[x, y] = t + t_window\n selection[index] = ((x > 0 and ts[x - 1, y] > t)\n or (y > 0 and ts[x, y - 1] > t)\n or (x < decoder.width - 1 and ts[x + 1, y] > t)\n or (y < decoder.height - 1 and ts[x, y + 1] > t))\n encoder.write(packet[selection])\n","repo_name":"neuromorphicsystems/event_stream","sub_path":"examples/dvs_background_filter.py","file_name":"dvs_background_filter.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"2202779678","text":"#!/usr/bin/env python3.2\n\nimport subprocess\nimport os\nimport sys\nimport time\n\ngitPath = '/usr/bin/git'\n\nif len(sys.argv) < 2:\n\tprint(\"Syntax: py git-view.py <path>\")\n\texit(0)\n\npath = sys.argv[1]\nif path[-1] != \"/\":\n\tpath = path + \"/\"\n\ndef callGit (args):\n\tpr = subprocess.Popen([gitPath] + args.split(' '), cwd=path, shell = False, stdout = subprocess.PIPE, stderr = subprocess.PIPE )\n\t(out, error) = pr.communicate()\n\tif len(error) != 0:\n#\t\tprint(\"Error running git \" + args + \":\\n\" + error.decode('UTF-8'))\n\t\treturn None\n\telse:\n\t\treturn out.decode('UTF-8').split('\\n')\n\ndef newCommit (name):\n\tcommit = {}\n\tcommit['name'] = name\n\tcommit['branches'] = set() # set of branches at this commit\n\tcommit['merge'] = [] # list of branch names from a merge\n\tcommit['parents'] = [] # list of parent commits (from merges or just previous commits)\n\tcommit['children'] = [] # list of children commits\n\tcommit['level'] = 0\n\treturn commit\n\n# get branches\nbranchLines = callGit('branch -a')\nactiveBranchNames = []\nfor branchLine in branchLines:\n\tif len(branchLine) == 0:\n\t\tcontinue\n\tbranchName = branchLine[2:]\n\tif branchName.startswith('remotes/origin/HEAD'):\n\t\tcontinue\n\tif branchName.startswith('remotes/'):\n\t\tcontinue\n\tactiveBranchNames.append(branchName)\nbranchNames = set()\nbranchNames.update(activeBranchNames)\n\n# add only the remotes we care about\nremoteBranchNames = []\nfor branchName in activeBranchNames:\n\tremoteBranchNames.append('origin/' + branchName)\nactiveBranchNames.extend(remoteBranchNames)\n\n# get commits\nheadCommits = []\ncommits = {}\ncommitsByDate = {}\nremotes = []\nmerges = []\n\nfor branchName in activeBranchNames:\n\tcount = 0\n\tlastCommit = None\n\tlogLines = callGit('log --date=raw ' + (('-n ' + sys.argv[2] + ' ') if len(sys.argv) == 3 else '') + branchName)\n\tif logLines is None:\n\t\tcontinue # not a valid branch, so ignore it\n\tfor logLine in logLines:\n\t\tif logLine.startswith('commit '):\n\t\t\tname = logLine[7:]\n\t\t\tif name not in commits:\n\t\t\t\tcommit = 
newCommit(name)\n\t\t\t\tcommits[name] = commit\n\t\t\tif lastCommit is None:\t\n\t\t\t\tcommits[name]['branches'].add(branchName)\n\t\t\tlastCommit = name\n\t\telif logLine.startswith('Date'):\n\t\t\tdate = int(logLine[8:-6])\n\t\t\tcommits[lastCommit]['date'] = date\n\t\t\tcommitsByDate[date] = lastCommit\n\t\telif logLine.startswith(' Merge branch \\''):\n\t\t\tfromBranchNameEndIndex = logLine.find('\\'', 18)\n\t\t\tfirstRemote = (logLine[fromBranchNameEndIndex + 2:fromBranchNameEndIndex + 4] == 'of')\n\t\t\tfromBranch = ('origin/' if firstRemote else '') + logLine[18:fromBranchNameEndIndex]\n\t\t\tbranchNames.add(fromBranch)\n\t\t\ttoBranchNameStartIndex = logLine.find('into ')\n\t\t\tif toBranchNameStartIndex != -1:\n\t\t\t\ttoBranch = logLine[5 + toBranchNameStartIndex:]\n\t\t\t\tbranchNames.add(toBranch)\n\t\t\telse:\n\t\t\t\ttoBranch = 'master' # if no to branch is named, it defaults to master\n\t\t\t\t# BUG : this is slightly broken, because it may be either master or origin/master\n\t\t\tmerges.append([lastCommit, fromBranch, toBranch])\n\t\telif logLine.startswith(' Merge pull request'):\n\t\t\tfromBranchNameStartIndex = logLine.rfind(' ') + 1\n\t\t\ttoBranch = ''\n\t\t\tfromBranch = logLine[fromBranchNameStartIndex:]\n\t\t\tbranchNames.add(fromBranch)\n\t\t\tmerges.append([lastCommit, fromBranch, toBranch])\n\n# get parents of each commit\ndummyCommits = {}\nfor commitName in commits:\n\tcommits[commitName]['parents'] = (callGit('rev-list --parents -n 1 ' + commitName)[0].split(' '))[1:]\n\tfor parentCommitName in commits[commitName]['parents']:\n\t\tif parentCommitName in commits and commitName not in commits[parentCommitName]['children']:\n\t\t\tcommits[parentCommitName]['children'].append(commitName)\n\t\telif parentCommitName not in commits and parentCommitName not in dummyCommits:\n\t\t\t# make a dummy commit\n\t\t\tcommit = newCommit(parentCommitName)\n\t\t\tcommit['date'] = min(commitsByDate) - 1\n\t\t\tdummyCommits[parentCommitName] = commit\n\t\t\tcommitsByDate[commit['date']] = parentCommitName\ncommits.update(dummyCommits)\n\n# fill in branch info based on merge comments\nfor merge in merges:\n\tcommitName = merge[0]\n\tfromBranch = merge[1]\n\ttoBranch = merge[2]\n\tif commits[commitName]['parents'][0] in commits:\n\t\tif toBranch != '':\n\t\t\tcommits[commitName]['branches'].add(toBranch)\n\t\t\tcommits[commits[commitName]['parents'][0]]['branches'].add(toBranch)\n\t\telse:\n\t\t\tcommits[commits[commitName]['parents'][0]]['branches'].update(commits[commitName]['branches'])\n\tif commits[commitName]['parents'][1] in commits:\n\t\tif fromBranch != '':\n\t\t\tcommits[commits[commitName]['parents'][1]]['branches'].add(fromBranch)\n\n# propagate the branches to the ancestors\nfor date in sorted(commitsByDate.keys(), reverse=True):\n\tcommitName = commitsByDate[date]\n\tif len(commits[commitName]['parents']) == 1:\n\t\tif commits[commitName]['parents'][0] in commits:\n\t\t\tif len(commits[commits[commitName]['parents'][0]]['children']) == 1:\n\t\t\t\tif len(commits[commits[commitName]['parents'][0]]['branches']) == 0:\n\t\t\t\t\tcommits[commits[commitName]['parents'][0]]['branches'].update(commits[commitName]['branches'])\n\t\t\telif len(commits[commits[commitName]['parents'][0]]['children']) == 2:\n\t\t\t\tfor branchName in commits[commitName]['branches']:\n\t\t\t\t\tif branchName in ['master', 'staging', 'production', 'origin/master', 'origin/staging', 'origin/production']:\n\t\t\t\t\t\tcommits[commits[commitName]['parents'][0]]['branches'].add(branchName)\n\n\t\t# 
noBranchParentName = ''\n\t\t# for parentName in commits[commitName]['parents']:\n\t\t\t# if parentName in commits and branchName in commits[parentName]['branches']:\n\t\t\t\t# noBranchParentName = ''\n\t\t\t\t# break # the branch is already in a parent\n\t\t\t# if parentName in commits and len(commits[parentName]['branches']) == 0:\n\t\t\t\t# if noBranchParentName is not '':\n\t\t\t\t\t# noBranchParentName = ''\n\t\t\t\t\t# break # more than one parent with no branch\n\t\t\t\t# noBranchParentName = parentName\n\t\t# if noBranchParentName is not '':\n\t\t\t# commits[noBranchParentName]['branches'].add(branchName)\n\n# propagate the branches to the descendants\n# for date in sorted(commitsByDate.keys(), reverse=False):\n\t# commitName = commitsByDate[date]\n\t# for branchName in commits[commitName]['branches']:\n\t\t# noBranchChildName = ''\n\t\t# for childName in commits[commitName]['children']:\n\t\t\t# if childName in commits and branchName in commits[childName]['branches']:\n\t\t\t\t# noBranchChildName = ''\n\t\t\t\t# break # the branch is already in a child\n\t\t\t# if childName in commits and len(commits[childName]['branches']) == 0:\n\t\t\t\t# if noBranchChildName is not '':\n\t\t\t\t\t# noBranchChildName = ''\n\t\t\t\t\t# break # more than one child with no branch\n\t\t\t\t# noBranchChildName = childName\n\t\t# if noBranchChildName is not '':\n\t\t\t# commits[noBranchChildName]['branches'].add(branchName)\n\n# get commit levels\nmaxLevel = 0\ncount = 0\nfor date in sorted(commitsByDate.keys(), reverse=True):\n\tcommitName = commitsByDate[date]\n\tcommits[commitName]['level'] = count\n\tcount = count + 1\n\t# for parentCommitName in commits[commitName]['parents']:\n\t\t# if parentCommitName in commits:\n\t\t\t# commits[parentCommitName]['level'] = max(commits[parentCommitName]['level'], commits[commitName]['level'] + 1)\n\tmaxLevel = max(maxLevel, commits[commitName]['level'])\n\n# get node text for each commit\nfor commitName in commits:\n\tcommits[commitName]['nodetext'] = commits[commitName]['name'][:8] + ' ' + str(commits[commitName]['date'])\n\n# print html\nf = open('html/git-view.html', 'w')\nprint('', file = f )\n\n# print graph\nprint('''\n\n\n\n\n\n
\n
\n\n''', file = f)\n\n# print top of table\nprint('', file = f)\n\n","repo_name":"fiddleplum/git-view","sub_path":"node-view.py","file_name":"node-view.py","file_ext":"py","file_size_in_byte":9189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"72419524875","text":"\"\"\"Tests for super-resolution models.\"\"\"\n\nimport numpy as np\nimport pytest\nfrom loguru import logger\nfrom PIL import Image\n\nfrom nos import hub\nfrom nos.models import SuperResolution\nfrom nos.models.super_resolution import SuperResolutionLDM, SuperResolutionSwin2SR\nfrom nos.test.benchmark import run_benchmark\nfrom nos.test.utils import NOS_TEST_IMAGE, PyTestGroup, skip_if_no_torch_cuda\n\n\nMODELS = list(SuperResolution.configs.keys())\nUNIQUE_MODELS = list(SuperResolutionSwin2SR.configs.keys())[:1] + list(SuperResolutionLDM.configs.keys())[:1]\n\n\ndef _test_predict(_model):\n img = Image.open(NOS_TEST_IMAGE)\n img = img.resize((160, 120))\n W, H = img.size\n\n predictions = _model(img)\n assert predictions is not None\n assert isinstance(predictions, np.ndarray)\n assert predictions.dtype == np.uint8\n UH, UW = predictions.shape[-2:]\n assert UW >= W * 2 and UH >= H * 2\n\n predictions = _model([img, img])\n assert predictions is not None\n assert len(predictions) == 2\n assert isinstance(predictions, np.ndarray)\n assert predictions.dtype == np.uint8\n UH, UW = predictions.shape[-2:]\n assert UW >= W * 2 and UH >= H * 2\n\n\n@skip_if_no_torch_cuda\n@pytest.mark.parametrize(\"model_name\", UNIQUE_MODELS)\ndef test_superres_predict_one(model_name):\n \"\"\"Load/infer the first super-resolution model.\"\"\"\n logger.debug(f\"Testing model: {model_name}\")\n spec = hub.load_spec(model_name)\n model = hub.load(spec.name)\n _test_predict(model)\n\n\n@skip_if_no_torch_cuda\n@pytest.mark.benchmark(group=PyTestGroup.HUB)\n@pytest.mark.parametrize(\"model_name\", MODELS)\ndef test_superres_predict_all(model_name):\n \"\"\"Benchmark load/infer all super-resolution models.\"\"\"\n logger.debug(f\"Testing model: {model_name}\")\n spec = hub.load_spec(model_name)\n model = hub.load(spec.name)\n _test_predict(model)\n\n\n@skip_if_no_torch_cuda\n@pytest.mark.benchmark(group=PyTestGroup.MODEL_BENCHMARK)\n@pytest.mark.parametrize(\"model_name\", MODELS)\n@pytest.mark.parametrize(\"img_size\", [(640, 480), (1280, 960)])\ndef test_superres_predict_benchmark(model_name, img_size):\n \"\"\"Benchmark inference for all super-resolution models.\"\"\"\n\n img = Image.open(NOS_TEST_IMAGE)\n img = img.resize(img_size)\n\n logger.debug(f\"Benchmarking model: {model_name}, img_size: {img_size}\")\n spec = hub.load_spec(model_name)\n model = hub.load(spec.name)\n time_ms = run_benchmark(\n lambda: model(img),\n num_iters=100,\n )\n logger.debug(f\"[{model_name}]: {time_ms:.2f} ms / step\")\n","repo_name":"autonomi-ai/nos","sub_path":"tests/models/test_superresolution.py","file_name":"test_superresolution.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"28"} +{"seq_id":"3989168321","text":"from celery import shared_task\nfrom time import sleep\nfrom random import randint\nfrom .models import MessagesTable\n\n\n@shared_task(bind=True)\ndef send_message(self, messenger, user_id, user_id_2, message):\n try:\n sleep(randint(1, 3)) # simulate a sending delay of 1 to 3 seconds\n if randint(1, 10) == 10: # 10% chance of a failed send\n raise Exception\n except Exception as exc:\n raise self.retry(exc=exc, countdown=1, max_retries=5) # on failure, up to 4 more send attempts are made\n MessagesTable.objects.create(messenger=messenger,\n sender_id=user_id,\n recipient_id=user_id_2,\n message=message)\n return \"added message'\" + message + \"' from \" + str(user_id) + \" to \" + str(user_id_2)\n","repo_name":"anisimovih/Message_sending_emulator","sub_path":"message_sender/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"32937097770","text":"shapes = [\n [[1, 1], [1, 1]], # square\n [[1, 1, 1, 1]], # long bar 1\n [[1, 1, 1], [0, 1, 0]], # T-shape 1\n [[1, 1, 0], [0, 1, 1]], # bent shape 1\n [[1, 1, 1], [0, 0, 1]], # L-shape 1\n]\n\ndef place(shape, i, j, A):\n val = 0\n for r in range(len(shape)):\n for c in range(len(shape[0])):\n val += shape[r][c] * A[i + r][j + c]\n return val\n\ndef flip(s):\n h, w = len(s), len(s[0])\n shape = [[0] * w for _ in range(h)]\n for i in range(h):\n for j in range(w):\n shape[i][w - 1 - j] = s[i][j]\n return shape\n\ndef rotate(s):\n h, w = len(s[0]), len(s)\n shape = [[0] * w for _ in range(h)]\n for i in range(h):\n for j in range(w):\n shape[i][j] = s[w - 1 - j][i]\n return shape\n\ndef maximize(shape, n, m, A):\n opt = 0\n for i in range(n - len(shape) + 1):\n for j in range(m - len(shape[0]) + 1):\n opt = max(opt, place(shape, i, j, A))\n return opt\n\ndef get_shape(shape, k):\n if k == 0: return shapes[0]\n elif k == 1: return shapes[1]\n elif k == 3: return shapes[2]\n elif k == 7: return shapes[3]\n elif k == 11: return shapes[4]\n elif k in [9, 15]: return flip(shape)\n else: return rotate(shape)\n\ndef solve(n, m, A):\n opt = 0\n shape = []\n for k in range(19):\n shape = get_shape(shape, k)\n opt = max(opt, maximize(shape, n, m, A))\n return opt\n \nn, m = map(int, input().split())\nA = [list(map(int, input().split())) for _ in range(n)]\nprint(solve(n, m, A))","repo_name":"joonion/boj","sub_path":"Chap.77.어려운문제들/14500.테트로미노/solve.2.py","file_name":"solve.2.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6740715686","text":"# Space Invaders\n# Dan Soloha\n# 11/16/2019\n\nimport math, random\nfrom superwires import games, color\n\ngames.init(screen_width = 640, screen_height = 480, fps = 60)\n\nclass Wrapper(games.Sprite):\n \"\"\"A sprite that can wrap around the screen\"\"\"\n\n def update(self):\n \"\"\"Wrap sprite around screen\"\"\"\n if self.top > games.screen.height:\n self.bottom = 0\n\n if self.bottom < 0:\n self.top = games.screen.height\n\n if self.left > games.screen.width:\n self.right = 0\n\n if self.right < 0:\n self.left = games.screen.width\n\n def die(self):\n \"\"\"Destroy self\"\"\"\n self.destroy()\n\nclass Collider(Wrapper):\n \"\"\"A Wrapper that can collide with another object\"\"\"\n\n def update(self):\n \"\"\"Check for overlapping sprites\"\"\"\n super(Collider, self).update()\n\n if self.overlapping_sprites:\n for sprite in self.overlapping_sprites:\n sprite.die()\n self.die()\n\n def die(self):\n \"\"\"Destroy self and leave behind explosion\"\"\"\n new_explosion = Explosion(x = self.x, y = self.y)\n games.screen.add(new_explosion)\n self.destroy()\n\nclass Ship(Collider):\n \"\"\"A ship object\"\"\"\n image = games.load_image(\"ship.bmp\")\n sound = games.load_sound(\"thrust.wav\")\n ROTATION_STEP = 3\n VELOCITY_STEP = .03\n VELOCITY_MAX = 3\n MISSILE_DELAY 
= 25\n\n def __init__(self, game, x, y):\n \"\"\"Initialize ship sprite\"\"\"\n super(Ship, self).__init__(image = Ship.image, x = x, y = y)\n self.missile_wait = 0\n self.game = game\n\n def fire(self):\n \"\"\"Fire missile if possible\"\"\"\n new_missile = Missile(self.x, self.y, self.angle)\n games.screen.add(new_missile)\n self.missile_wait = Ship.MISSILE_DELAY\n\n def die(self):\n \"\"\"Destroy ship\"\"\"\n super(Ship, self).die()\n\nclass Player(Ship):\n \"\"\"The player's ship\"\"\"\n\n def update(self):\n \"\"\" Rotate and thrust based on keys pressed. \"\"\"\n super(Player, self).update()\n\n # rotate based on left and right arrow keys\n if games.keyboard.is_pressed(games.K_LEFT):\n self.angle -= Ship.ROTATION_STEP\n if games.keyboard.is_pressed(games.K_RIGHT):\n self.angle += Ship.ROTATION_STEP\n\n # apply thrust based on up arrow key\n if games.keyboard.is_pressed(games.K_UP):\n Ship.sound.play()\n\n # change velocity components based on ship's angle\n angle = self.angle * math.pi / 180 # convert to radians\n self.dx += Ship.VELOCITY_STEP * math.sin(angle)\n self.dy += Ship.VELOCITY_STEP * -math.cos(angle)\n\n # if waiting until ship can fire, decrease wait\n if self.missile_wait > 0:\n self.missile_wait -= 1\n\n # fire missile if spacebar pressed\n if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait == 0:\n super(Player, self).fire()\n\n # cap velocity in each direction\n self.dx = min(max(self.dx, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)\n self.dy = min(max(self.dy, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)\n\n def die(self):\n \"\"\"Destroy the ship and end the game\"\"\"\n super(Player, self).die()\n self.game.end()\n\nclass Alien(Ship):\n \"\"\"An alien ship\"\"\"\n VELOCITY_FACTOR = 1\n POINTS = 30\n total = 0\n\n def __init__(self, game, x, y):\n \"\"\"Initialize alien sprite\"\"\"\n super(Alien, self).__init__(\n game = game,\n x = x, y = y)\n\n self.game = game\n Alien.total += 1\n\n def update(self):\n # if waiting until ship can fire, decrease wait\n if self.missile_wait > 0:\n self.missile_wait -= 1\n\n # fire\n if self.missile_wait == 0:\n super(Alien, self).fire()\n\n # turn randomly\n random_angle = random.randint(1, 60)\n if random_angle < 45:\n self.angle += Ship.ROTATION_STEP\n elif random_angle > 15:\n self.angle -= Ship.ROTATION_STEP\n\n # change velocity components based on ship's angle\n angle = self.angle * math.pi / 180 # convert to radians\n self.dx = Alien.VELOCITY_FACTOR * math.sin(angle)\n self.dy = Alien.VELOCITY_FACTOR * -math.cos(angle)\n\n def die(self):\n \"\"\"Destroy alien\"\"\"\n super(Alien, self).die()\n Alien.total -= 1\n self.game.score.value += int(Alien.POINTS)\n self.game.score.right = games.screen.width - 10\n\n # if all aliens are gone, advance to the next level\n if Alien.total == 0:\n self.game.advance()\n\nclass Missile(Collider):\n \"\"\"A missile launched by the player's ship\"\"\"\n image = games.load_image(\"missile.bmp\")\n sound = games.load_sound(\"missile.wav\")\n BUFFER = 40\n VELOCITY_FACTOR = 7\n LIFETIME = 40\n\n def __init__(self, ship_x, ship_y, ship_angle):\n \"\"\"Initialize missile sprite\"\"\"\n Missile.sound.play()\n\n # convert to radians\n angle = ship_angle * math.pi / 180\n\n # calculate missile's starting position\n buffer_x = Missile.BUFFER * math.sin(angle)\n buffer_y = Missile.BUFFER * -math.cos(angle)\n x = ship_x + buffer_x\n y = ship_y + buffer_y\n\n # calculate missile's velocity components\n dx = Missile.VELOCITY_FACTOR * math.sin(angle)\n dy = Missile.VELOCITY_FACTOR * -math.cos(angle)\n\n 
# create the missile\n super(Missile, self).__init__(image=Missile.image,\n x=x, y=y,\n dx=dx, dy=dy)\n\n self.lifetime = Missile.LIFETIME\n\n def update(self):\n \"\"\"Move the missile\"\"\"\n super(Missile, self).update()\n\n # if lifetime is up, destroy the missile\n self.lifetime -= 1\n if self.lifetime == 0:\n self.destroy()\n\nclass Explosion(games.Animation):\n \"\"\"Explosion animation\"\"\"\n sound = games.load_sound(\"explosion.wav\")\n images = [\"explosion1.bmp\",\n \"explosion2.bmp\",\n \"explosion3.bmp\",\n \"explosion4.bmp\",\n \"explosion5.bmp\",\n \"explosion6.bmp\",\n \"explosion7.bmp\",\n \"explosion8.bmp\",\n \"explosion9.bmp\"]\n\n def __init__(self, x, y):\n super(Explosion, self).__init__(images = Explosion.images,\n x = x, y = y,\n repeat_interval = 4, n_repeats = 1,\n is_collideable = False)\n Explosion.sound.play()\n\nclass Game(object):\n \"\"\"The game itself\"\"\"\n\n def __init__(self):\n \"\"\"Initialize Game object\"\"\"\n # set level\n self.level = 0\n\n # load sound for level advance\n self.sound = games.load_sound(\"level.wav\")\n\n # create score\n self.score = games.Text(value=0,\n size=30,\n color=color.white,\n top=5,\n right=games.screen.width - 10,\n is_collideable=False)\n games.screen.add(self.score)\n\n # create player's ship\n self.player = Player(game=self,\n x=games.screen.width / 2,\n y=games.screen.height / 2)\n games.screen.add(self.player)\n\n def play(self):\n \"\"\"Play the game\"\"\"\n # begin theme music\n games.music.load(\"theme.mid\")\n games.music.play(-1)\n\n # load and set background\n nebula_image = games.load_image(\"nebula.jpg\")\n games.screen.background = nebula_image\n\n # advance to level 1\n self.advance()\n\n # start play\n games.screen.mainloop()\n\n def advance(self):\n \"\"\"Advance to the next level\"\"\"\n self.level += 1\n\n # amount of space around ship to preserve when creating aliens\n BUFFER = 150\n\n # create new aliens\n for i in range(self.level):\n # calculate an x and y at least BUFFER distance from ship\n\n # choose minimum distance along x-axis and y-axis\n x_min = random.randrange(BUFFER)\n y_min = BUFFER - x_min\n\n # choose distance along x-axis and y-axis based on minimum distance\n x_distance = random.randrange(x_min, games.screen.width - x_min)\n y_distance = random.randrange(y_min, games.screen.height - y_min)\n\n # calculate location based on distance\n x = self.player.x + x_distance\n y = self.player.y + y_distance\n\n # wrap around screen if necessary\n x %= games.screen.width\n y %= games.screen.height\n\n # create the alien\n new_alien = Alien(game = self,\n x = x, y = y)\n games.screen.add(new_alien)\n\n i += 1\n\n # display level number\n level_message = games.Message(value=\"Level \" + str(self.level),\n size=40,\n color=color.yellow,\n x=games.screen.width / 2,\n y=games.screen.height / 10,\n lifetime=3 * games.screen.fps,\n is_collideable=False)\n games.screen.add(level_message)\n\n # play new level sound, except at first level\n if self.level > 1:\n self.sound.play()\n\n def end(self):\n \"\"\"End the game\"\"\"\n # show 'Game Over' for 5 seconds\n end_message = games.Message(value=\"Game Over\",\n size=90,\n color=color.red,\n x=games.screen.width / 2,\n y=games.screen.height / 2,\n lifetime=5 * games.screen.fps,\n after_death=games.screen.quit,\n is_collideable=False)\n games.screen.add(end_message)\n\ndef main():\n space_invaders = Game()\n 
space_invaders.play()\n\nmain()\n","repo_name":"dsoloha/py","sub_path":"Lab12/spaceInvaders/spaceInvaders-dsoloha.py","file_name":"spaceInvaders-dsoloha.py","file_ext":"py","file_size_in_byte":10236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"32136639682","text":"# Python3,mvs, 2017-09-27\nlist1 = []\n\ndict1 = {}\nfor i in range(0, 128):\n list1.append(i)\n dict1[i] = bytes([i])\n# print(list1)\n# print(bytes(list1))\nfor a in dict1:\n print(str(a) + \" : \" + str(dict1[a])+\"\\n\")\n","repo_name":"mavost/urban_python","sub_path":"Weigend/Exercises/chap04_ASCII.py","file_name":"chap04_ASCII.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"71307723274","text":"import pandas as pd\nimport textacy\nimport os\nimport inspect\nimport sys\n\n\ndef get_cur_path():\n this_file_path = '/'.join(\n os.path.abspath(\n inspect.stack()[0][1]\n ).split('/')[:-1]\n )\n\n os.chdir(this_file_path)\n print('>>', os.getcwd())\n return this_file_path\n\n# ----\ndef get_hsCode_df():\n loc = './../../GeneratedData/HSCodes'\n f_name = 'HS_Code_6digit_data.csv'\n f_path = os.path.join(loc, f_name)\n df = pd.read_csv(f_path)\n return df\n\n\ndef get_KwSource_df():\n loc = './../../GeneratedData/Keywords'\n keys = ['CITES','IUCN_RedList','WWF_HighRisk']\n data_types = ['sc_name','common_names']\n result_dict = {}\n\n for source in keys:\n result_dict[source] = {}\n\n for _type in data_types :\n _loc = os.path.join(loc)\n f_path =os.path.join(\n _loc,\n source + '_' + _type + '.txt'\n )\n tmp_df = pd.read_csv(f_path,index_col=None,header=None)\n\n list_kws = set(list(tmp_df[0]))\n result_dict[source][_type] = list_kws\n # find the HS codes that match anything out here\n\n return result_dict\n\n\ndef processor():\n hsc_df = get_hsCode_df()\n source_kw_dict = get_KwSource_df()\n source_HS_Code_list = {}\n\n for k in source_kw_dict.keys():\n source_HS_Code_list[k] = []\n\n for i,row in hsc_df.iterrows():\n hs_code = row['hscode_6']\n row_scn = row['sc_names']\n row_kws = row['keywords']\n if type(row_scn) == str :\n _type = 'sc_name'\n row_scn = row_scn.split(';')\n row_scn = set(row_scn)\n # Try and see matches which source\n for source, data in source_kw_dict.items():\n _canditates = set(data[_type])\n flag = len(row_scn.intersection( _canditates ))>0\n if flag :\n # Add HS code to flag type\n source_HS_Code_list[source].append(hs_code)\n\n if type(row_kws) == str :\n _type = 'common_names'\n row_kws = row_kws.split(';')\n row_kws = set(row_kws)\n # Try and see matches which source\n for source, data in source_kw_dict.items():\n _canditates = set(data[_type])\n flag = len(row_kws.intersection( _canditates ))>0\n if flag :\n # Add HS code to flag type\n source_HS_Code_list[source].append(hs_code)\n\n op_file_loc = './../../GeneratedData/HSCodes'\n for source,_list in source_HS_Code_list.items():\n f_name = source + '_' + 'HS_Codes.txt'\n f_path = os.path.join(\n op_file_loc,\n f_name\n )\n _list = list(set(_list))\n series_hscodes = pd.Series(_list)\n series_hscodes.to_csv(\n f_path,\n header = False,\n index=False\n )\n print(source_HS_Code_list)\n\n\n\ndef main():\n\n old_path = os.getcwd()\n cur_path = get_cur_path()\n os.chdir(cur_path)\n processor()\n 
os.chdir(old_path)\n\nmain()","repo_name":"ddatta-DAC/WWF_DomainData_v1","sub_path":"src/GenerateKeywords/process_HSCode_kwFlags.py","file_name":"process_HSCode_kwFlags.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"6608547920","text":"\"\"\"\ndatabase - stuff for handling a database\n\"\"\"\nfrom pathlib import Path\nimport sqlite3\n\n\nclass DatabaseError(RuntimeError):\n pass\n\n\nclass Database(object):\n \"\"\"\n wrapper class around sqlite3 to allow neat Python code\n \"\"\"\n\n database_path = None\n connection = None\n\n def __init__(self, database_path: Path | str, must_exist: bool = True):\n \"\"\"\n define a sqlite3 database\n :param database_path: path to the database file\n :param must_exist: whether the database must already exist\n \"\"\"\n database_path = Path(database_path)\n if must_exist and not database_path.is_file():\n raise DatabaseError(f\"database {database_path!r} does not exist\")\n \n super().__init__()\n self.database_path = database_path\n\n def connect(self):\n \"\"\"\n connect to the database\n \"\"\"\n if self.connection is not None:\n raise DatabaseError(f\"already connected to {self.database_path!r}\")\n\n self.connection = sqlite3.connect(self.database_path)\n self.connection.row_factory = sqlite3.Row\n\n def cursor(self):\n \"\"\"\n create a cursor on the database\n :return: database cursor\n \"\"\"\n if self.connection is None:\n raise DatabaseError(f\"not connected to {self.database_path!r}\")\n\n return self.connection.cursor()\n\n def execute(self, command, *args, **kwargs):\n \"\"\"\n execute a command on the database\n :param command: command text\n :param args: arguments to the command\n :param kwargs: keyword arguments to the command\n :return: command result\n \"\"\"\n if self.connection is None:\n raise DatabaseError(f\"not connected to {self.database_path!r}\")\n\n return self.connection.execute(command, *args, **kwargs)\n\n def executemany(self, command, args):\n \"\"\"\n execute a command on the database\n :param command: command text\n :param args: arguments to the command\n :return: command result\n \"\"\"\n if self.connection is None:\n raise DatabaseError(f\"not connected to {self.database_path!r}\")\n\n return self.connection.executemany(command, args)\n\n def executescript(self, script):\n \"\"\"\n execute a command on the database\n :param script: command text\n :return: command result\n \"\"\"\n if self.connection is None:\n raise DatabaseError(f\"not connected to {self.database_path!r}\")\n\n return self.connection.executescript(script)\n\n def disconnect(self, success: bool = True):\n \"\"\"\n disconnect from the database\n :param success: whether processing was successful and should be persisted\n :return:\n \"\"\"\n if self.connection is None:\n raise DatabaseError(f\"not connected to {self.database_path!r}\")\n\n if success:\n self.connection.commit()\n else:\n self.connection.rollback()\n\n self.connection.close()\n self.connection = None\n\n # --- context manager interface ---\n\n def __enter__(self):\n \"\"\"\n create a database context\n :return: a database context - this object\n \"\"\"\n self.connect()\n\n # will handle the database context ourselves\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb) -> bool:\n \"\"\"\n terminate a database context - the arguments tell whether the termination is regular or by exception\n :param exc_type: exception type or None\n :param exc_val: exception value or 
None\n :param exc_tb: exception traceback or None\n :return: whether the exception (if any) was handled\n \"\"\"\n success = exc_type is None\n self.disconnect(success)\n\n # database context terminated, but exception not handled\n return False\n","repo_name":"eric-nieuwland/PET-demo","sub_path":"src/common/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"74691969675","text":"#!/usr/bin/python\nimport\tos, sys, optparse\nsys.path.insert( 0, os.path.abspath(\"libs\"))\n\nimport dep_check\nimport fetch_repos\nimport windows_build\nimport linux_build\n\ndef parse_cmd_args():\n\tif os.name == \"nt\":\n\t\tinstall_dir=\"C:\\\\MaNGOS\"\n\telse:\n\t\tinstall_dir=\"/opt/mangos\"\n\tparser = optparse.OptionParser(version=\"%prog 1.0\")\n\t\n\tparser.add_option(\"--mangos-destdir\", \"--install-dir\", \"--destdir\",\n\t\taction=\"store\",\n\t\tdest=\"mangos_destdir\",\n\t\tdefault=install_dir)\n\n\tparser.add_option(\"--sd2-patch\", \"--patch\", \"--sd2\", \n\t\taction=\"store\",\n\t\tdest=\"sd2_patch\",\n\t\tdefault=\"MaNGOS-8095-ScriptDev2.patch\")\n\n\tparser.add_option(\"--no-build\",\n\t\taction=\"store_false\",\n\tdest=\"build\",\n\tdefault=True)\n\n\tparser.add_option(\"--no-install\",\n\t\taction=\"store_false\",\n\t\tdest=\"install\",\n\t\tdefault=True)\n\n\tparser.add_option(\"--no-fetch\",\n\t\taction=\"store_false\",\n\tdest=\"fetch\",\n\tdefault=True)\n\n\tparser.add_option(\"--no-rebuild\",\n\t\taction=\"store_false\",\n\tdest=\"rebuild\",\n\tdefault=True)\n\n\tparser.add_option(\"--debug\",\n\t\taction=\"store_true\",\n\tdest=\"debug\",\n\tdefault=False)\n\n\t(options, args) = parser.parse_args()\n\treturn options\n\nif __name__ == '__main__':\n\tbuild_dir = os.getcwd()\n\topts = parse_cmd_args()\n\n\tif os.name == \"nt\":\n\t\tdep_check.win32()\n\telse:\n\t\tdep_check.linux()\n\n\tif opts.fetch: fetch_repos.pre_build_fetch(opts)\n\tif os.name == \"nt\":\n\t\tif opts.build: windows_build.make()\n\t\tif opts.install: windows_build.install(opts)\n\telse:\n\t\tif opts.build: linux_build.make(opts)\n\tos.chdir(build_dir)\n\tif opts.fetch: fetch_repos.post_build_fetch()\n\n","repo_name":"audiohacked/audiohacked","sub_path":"MaNGOS_InstallerScripts/make-mangos-server.py","file_name":"make-mangos-server.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"40470595936","text":"import datetime as dt\n\nimport app.util.time_util as time_util\nimport app.service.subs.subs_service as subs_service\nimport app.service.timetable.timetable_service as spbu_service\nfrom app.domain.response import Response\nfrom app.domain.subs_types import *\nfrom app.domain.timetable_types import *\nfrom app.util.time_util import get_week_boundaries\n\n\ndef get_day_events(day, chat_id) -> Response:\n subs = subs_service.get_by_chat_id(chat_id)\n if subs.state == STATE.SAVED_GROUP:\n day = _get_day_events(day, subs.group_id)\n text = _get_text(day)\n return Response(text=text, buttons=[])\n else:\n return Response(text='Для начала требуется пройти регистрацию =). 
Вызови команду /start', buttons=[])\n\n\ndef get_day_events_all(callback):\n subs = subs_service.get_all()\n subs = list(filter(lambda sub: sub.state == STATE.SAVED_GROUP and sub.is_active, subs))\n for sub in subs:\n day = _get_day_events(time_util.get_current_date(), sub.group_id)\n text = _get_text(day)\n callback(sub.chat_id, text)\n\n\ndef _get_day_events(current_date, group_id) -> Day:\n from_date, to_date = get_week_boundaries(current_date)\n days = spbu_service.get_events(group_id, from_date, to_date)\n return next((day for day in days if day.day_date == current_date), None)\n\n\ndef _get_text(day: Day) -> str:\n if day is not None and len(day.events) > 0:\n return _map_day(day)\n else:\n return f'Кажется, пар нет. Можно отдохнуть)'\n\n\ndef _map_day(day: Day) -> str:\n representation = str()\n representation += '\\n\\n' + day.day_string + '\\n\\n'\n for event in day.events:\n representation += \"Предмет: \" + event.subject + \"\\n\"\n representation += \"Преподаватели: \" + event.educators + \"\\n\"\n representation += \"Начало: \" + event.start_datetime.time().isoformat()\n representation += \", Конец: \" + event.end_datetime.time().isoformat()\n representation += \"\\n\\n\"\n return representation\n","repo_name":"germanKoch/spbuTimetableBot","sub_path":"app/usecase/get_events_usecase.py","file_name":"get_events_usecase.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"13046129406","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#Task_1\nmy_list = [92, 94, 88, 91, 87]\ntest_1 = np.array(my_list)\n#Task_2\ntest_2 = np.genfromtxt('test_2.csv', delimiter=',')\n#Task_3\ntest_3 = np.array([87, 85, 72, 90, 92])\ntest_3_fixed = test_3 + 2\n#Task_4\ntotal_grade = (test_1 + test_2 + test_3_fixed) / 3\n# integer division (//) could also be used, but the task statement does not ask for it\nprint('задание 4 ',total_grade)\n#Task_5\ncoin_toss = np.array(['1', '0', '0', '1', '0'])\ncoin_toss_again = np.array([coin_toss, ['0', '0', '1', '1', '1']])\n#Task_6\njeremy_test_2 = test_2[3]\nmanual_adwoa_test_1 = test_1[1:3]\n#Task_7\nstudent_scores = np.array([[92, 94, 88, 91, 87],\n [79, 100, 86, 93, 91],\n [87, 85, 72, 90, 92]])\ntanya_test_3 = student_scores[2, 0]\ncody_test_scores = student_scores[:, 4]\n#Task_8\narray_temperature = np.genfromtxt('temperature_data.csv', delimiter=',')\nprint('Проверка в задании 8', array_temperature)\ntemperature_fixed = array_temperature + 3\nmonday_temperas = temperature_fixed[0, :]\nthursday_friday_morning = temperature_fixed[3:5, 1]\nprint('чт-пт', thursday_friday_morning)\na = temperature_fixed\ntemperature_extremes = sorted(a[(a < 50) | (a > 60)])\n\n\n#Task_9\ndef archimed_spiral():\n n = np.array([n for n in range(0, 97)])\n fi = (n * math.pi) / 12\n x = fi * np.cos(fi)\n y = fi * np.sin(fi)\n x_y_plot = plt.plot(x, y)\n plt.show()\n\ndef pascal_snail():\n n = np.array([n for n in range(0, 25)])\n fi = (n * math.pi) / 12\n x = 2 * np.cos(fi) ** 2 + np.cos(fi)\n y = 2 * np.cos(fi) * np.sin(fi) + np.sin(fi)\n x_y_plot = plt.plot(x, y)\n plt.show()\n\ndef distance(lat_1, lon_1, lat_2, lon_2):\n r = 6371\n mas = np.array([lat_1, lon_1, lat_2, lon_2])\n lat_1, lon_1, lat_2, lon_2 = np.radians(mas)\n formula = 2 * r * np.arcsin(np.sqrt(np.sin((lat_2 - lat_1) / 2) ** 2 + np.cos(lat_1) * np.cos(lat_2) * np.sin((lon_2 - lon_1) / 2) ** 2))\n return np.round(formula, 1)\n\nprint(distance(57.37, 39.51, 57.46, 
40.55))","repo_name":"RomanKuklev/numpy.pandas.ex","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"41229727385","text":"import os\n\nfrom flask import send_file\n\nfrom app.classes.handling.storage import StorageBadRequest\nfrom app.functions import UPLOAD_ABS_DIRECTORY, REST_GUIDE\nfrom app.functions.handling.error_codes import STORAGE_0119\nfrom app.functions.io.response import success\nfrom app.functions.logging.logger import info\nfrom app.functions.utils.file import split_path, extract_resource_name\nfrom app.storages import DataStorage\nfrom app.storages.data_storage import check_resource_extension\n\nDEFAULT_FILE_PERMISSIONS = 0o777\n\n\nclass ServerDataStorage(DataStorage):\n init_arguments = None\n\n def __init__(self, init_arguments: dict):\n self.init_arguments = init_arguments\n\n @staticmethod\n def create_directories(path: str):\n info('server.create_directories: path %s', path)\n path_parts = split_path(path)\n resource_name = extract_resource_name(path)\n info('server.create_directories: resource_name %s', resource_name)\n os.makedirs(UPLOAD_ABS_DIRECTORY, DEFAULT_FILE_PERMISSIONS, True)\n directory_path = ''\n for path_part in path_parts:\n if resource_name not in path_part and '/' not in path_part:\n directory_path += '/' + path_part\n absolute_path = UPLOAD_ABS_DIRECTORY + directory_path\n os.makedirs(absolute_path, DEFAULT_FILE_PERMISSIONS, True)\n return directory_path\n\n def upload(self, path: str, data):\n check_resource_extension(path)\n info('server.upload: path %s', path)\n self.create_directories(path)\n data.save(UPLOAD_ABS_DIRECTORY + path)\n return success(path, 200, 'Success', REST_GUIDE + 'resources')\n\n def download(self, path: str):\n check_resource_extension(path)\n path = UPLOAD_ABS_DIRECTORY + path\n info('server.download: path %s', path)\n if not os.path.isfile(path):\n raise StorageBadRequest('resource does not exist for path ' + path, STORAGE_0119)\n attachment_file_name = extract_resource_name(path)\n return send_file(path, None, True, attachment_file_name)","repo_name":"JetRabbits/backend_test_task","sub_path":"src/app/storages/server/server_data_storage.py","file_name":"server_data_storage.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"11872374806","text":"# Toggle bit\r\n\r\ndef ToggleBit(iNo,iPos):\r\n iMask = 0x00000001\r\n iResult = 0\r\n iMask = iMask << (iPos - 1)\r\n iResult = iNo ^ iMask\r\n return iResult\r\n\r\nif __name__ == \"__main__\":\r\n iValue = int(input(\"Enter Number : \"))\r\n iValue2 = int(input(\"Enter Position of bit : \"))\r\n iRet = ToggleBit(iValue,iValue2)\r\n print(\"Updated Number is : \",iRet)\r\n ","repo_name":"nikbhor/Python","sub_path":"Program115.py","file_name":"Program115.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"2437562869","text":"import random\n\nclass Table:\n def __init__(self, *argv):\n list_of_entries = []\n list_of_pickers = []\n\n index = 0\n for element in argv:\n if isinstance(element[1], list):\n item_range = range(element[1][0],element[1][1]+1)\n else:\n item_range = range(element[1])\n\n if isinstance(element[0], list):\n for internal_element in element[0]:\n list_of_entries.append(internal_element)\n temp = [index for _ in 
item_range]\n list_of_pickers += temp\n index += 1\n else:\n list_of_entries.append(element[0])\n temp = [index for _ in item_range]\n list_of_pickers += temp\n index += 1\n\n \n self.list_of_entries = list_of_entries\n self.list_of_pickers = list_of_pickers\n\n def roll(self):\n index = random.randrange(len(self.list_of_pickers))\n element_index = self.list_of_pickers[index]\n element = self.list_of_entries[element_index]\n if isinstance(element, Table) or isinstance(element, MultiRoll):\n return element.roll()\n return element\n\nclass MultiRoll():\n def __init__(self, value, *argv):\n self.value = value\n self.tables = [argv[x] for x in range(len(argv))]\n\n def roll(self):\n results = []\n for element in self.tables:\n results.append(element.roll())\n return self.value.format(*results)","repo_name":"ChrstphrHll/npc_gen","sub_path":"roll_tables/Table.py","file_name":"Table.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17691326400","text":"import random\n\nimport pygame\nimport math\n\n\nclass Pokeball(pygame.sprite.Sprite):\n def __init__(self, *groups):\n super().__init__(*groups)\n\n self.image = pygame.image.load(\"data/Pokeballs/Pokeball.png\")\n self.image = pygame.transform.scale(self.image, [30, 30])\n self.rect = pygame.Rect(50, 50, 100, 100)\n\n self.rect.x = 840 + random.randint(1, 400)\n self.rect.y = random.randint(0, 449)\n #self.timer = 0\n self.speed = 1 + random.random() * 2\n\n def update(self, *args):\n self.rect.x -= self.speed\n #self.timer += 0.01\n #self.rect.x = math.sin(self.timer) * 100\n\n if self.rect.right < 0:\n self.kill()\n","repo_name":"JonatasFontele/pokemon-pygame","sub_path":"model/pokeball.py","file_name":"pokeball.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"40544500995","text":"#\n# @lc app=leetcode id=1641 lang=python3\n#\n# [1641] Count Sorted Vowel Strings\n#\n\n# @lc code=start\n# TAGS: Dynamic Programming, Math, Backtracking\nclass Solution:\n \"\"\"\n There is a Math solution with O(1) Complexity\n \"\"\"\n # 32 ms, 69.6%. Time and Space: O(N). DFS with memo\n def countVowelStrings(self, n: int) -> int:\n @lru_cache(None)\n def dfs(start=0, length=0):\n if length == n:\n return 1\n rv = 0\n for i in range(start, 5):\n rv += dfs(i, length + 1)\n return rv\n return dfs()\n \n # 24 ms, 96.29%. Time: O(N). Space: O(1). DP\n def countVowelStrings(self, n: int) -> int:\n dp = [1, 1, 1, 1, 1]\n for _ in range(n):\n new_dp = [0, 0, 0, 0, 0]\n curr = 0\n for i in range(5):\n curr += dp[i]\n new_dp[i] = curr\n dp = new_dp\n return dp[-1]\n\n # 24 ms, 96.29%. 
Same as above but cleaner\n def countVowelStrings(self, n: int) -> int:\n dp = [0, 1, 1, 1, 1, 1]\n for _ in range(n):\n for i in range(1, 6):\n dp[i] += dp[i - 1]\n return dp[-1]\n# @lc code=end\n\n","repo_name":"trung-hn/leetcode-solutions","sub_path":"src/1641.count-sorted-vowel-strings.py","file_name":"1641.count-sorted-vowel-strings.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"28"} +{"seq_id":"70025979275","text":"n =int(input())\nlst = []\nfor i in range(n):\n o = [0,0]\n s = input().split()\n o[0] = s[0]\n o[1] = -int(s[1])\n lst.append(o)\nlst.sort(key = lambda x:(x[1],x[0])) # sort by score first, then by name; a lambda with a key tuple works, itemgetter is not required\nfor i in range(n):\n print (lst[i][0],-lst[i][1])","repo_name":"douxiaotian/Courseworks","sub_path":"Foundations of Computer(I)/Homework4/成绩排序.py","file_name":"成绩排序.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6092681156","text":"#!/usr/bin/python3\n'''\nEasy management of AUR local repository\n'''\nimport sys\nimport argparse\nfrom xdg.BaseDirectory import (xdg_data_home, xdg_config_home, xdg_cache_home)\n\nfrom saur import version\nfrom saur import config\nfrom saur import aur\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='saur', description='This is SAUR, the Emperor of AUR/PKGBUILD local repository management.')\n parser.add_argument('--version', action='version', version=f'%(prog)s {version.__version__}', help='print version and exit')\n parser.add_argument('-c','--config', metavar='PATH', type=str, default=xdg_config_home + '/saur/config.ini',\n help='path to config file (default: $XDG_CONFIG_HOME/saur/config.ini)')\n\n subparsers = parser.add_subparsers(required=True, dest='cmd', title='subcommands')\n parser_createdb = subparsers.add_parser('createdb', help='create a new pacman package database')\n parser_rmpkg = subparsers.add_parser('rmpkg', help='remove package(s) from package database')\n parser_fetch = subparsers.add_parser('fetch', help='fetch packages (this overwrites all changes!)')\n parser_fetch.add_argument('-C', '--clear-cache', action='store_true', help='clear the cache before fetching')\n parser_list = subparsers.add_parser('list', help='print list of packages in CONFIG')\n parser_rebuild = subparsers.add_parser('rebuild', help='rebuild a package (or more) and replace it in the repository')\n parser_rebuild.add_argument(dest='packages', metavar='PACKAGE', nargs='+', help='package(s) to be rebuilt')\n parser_sync = subparsers.add_parser('sync', help='build a package (or more) if it is newer than in the repository')\n parser_sync.add_argument('-x', '--exclude', metavar='PACKAGE', nargs='+', help='package(s) that should not be sync\\'d')\n\n args = parser.parse_args ()\n\n Config = config.SaurConfig (args.config)\n Config.parse_config ()\n\n Aur = aur.Aur ()\n\n if args.cmd == 'sync':\n Aur.sync (Config.config(), args.exclude)\n elif args.cmd == 'rebuild':\n pass\n #run_rebuild (conf, args.packages)\n elif args.cmd == 'list':\n Aur.list (Config.config())\n elif args.cmd == 'fetch':\n Aur.fetch (Config.config(), clearcache=args.clear_cache)\n else:\n print(f'Command `{args.cmd}\\' is not implemented!')\n sys.exit(2)\n","repo_name":"hv15/saur","sub_path":"saur.py","file_name":"saur.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"20914686150","text":"from moviepy.editor import VideoFileClip\nimport os\n\n\ndef get_gif_name(file_name):\n gif_names = []\n dir_path = os.getcwd()\n for path in os.scandir(dir_path):\n if path.is_file():\n if file_name in path.name:\n gif_names.append(path.name)\n\n gif_count = str(len(gif_names))\n return f'{file_name}-example-{gif_count}.gif'\n\n\ndef convert_video_to_gif(url):\n gif_name = get_gif_name(\"TikTok\")\n\n try:\n video_clip = VideoFileClip(url)\n video_clip.write_gif(gif_name)\n except OSError as e:\n print(e, 'Not correct url')\n else:\n return os.path.abspath(f'{gif_name}')\n\n\n# for testing\n# convert_video_to_gif(\n# 'https://v16-webapp.tiktok.com/b665ff39ebd310121ded0675f4c8ddc1/62e83ff0/video/tos/useast2a/tos-useast2a-ve-0068c003/d3402df7040545c3aa18a46b1f498864/?a=1988&ch=0&cr=0&dr=0&lr=tiktok_m&cd=0%7C0%7C1%7C0&cv=1&br=4494&bt=2247&btag=80000&cs=0&ds=3&ft=eXd.6H-oMyq8Zlyc1we2N1ioyl7Gb&mime_type=video_mp4&qs=0&rc=PGUzaDVlZmZnZDZpZDk6PEBpanl4dzY6ZmVrZTMzNzczM0A2YGE0X2EzNTYxNi0tXzZfYSNfaWFjcjRva2ZgLS1kMTZzcw%3D%3D&l=20220801150440010192045157203A0737')\n","repo_name":"MykhailoPasiechnyk/Python-Bootcamp-Test-Tasks","sub_path":"task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"35095830674","text":"from src.helpers.experiment import create_tsfresh_dataframe\nfrom src.helpers.diabetes.cega import clarke_error_grid\nfrom src.helpers.diabetes.madex import mean_adjusted_exponent_error\nfrom pycaret.regression import setup, create_model, compare_models, predict_model\nfrom loguru import logger\nimport warnings\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\n# import matplotlib.pyplot as plt\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef run_experiment(train_parameters, unseen_data_parameters):\n\n source_df = create_tsfresh_dataframe(train_parameters)\n clean_df = source_df.drop(\n columns=['start', 'end', 'start_time', 'end_time'])\n\n exp_reg = setup(clean_df,\n target='label',\n feature_selection=True,\n html=False,\n silent=True\n )\n\n best3 = compare_models(\n exclude=['catboost', 'xgboost'],\n sort='RMSE',\n n_select=3,\n # verbose=False\n )\n\n for selected_model in best3:\n output = {}\n model = create_model(selected_model)\n output['model'] = model\n pd = predict_model(model)\n (_, res) = clarke_error_grid(pd['label'], pd['Label'], 'Test')\n output['internal_cga_analysis'] = res\n rmse = np.sqrt(mean_squared_error(pd['label'], pd['Label']))\n rmadex = np.sqrt(mean_adjusted_exponent_error(\n pd['label'], pd['Label']))\n output['internal_rmese'] = rmse\n output['internal_rmadex'] = rmadex\n\n unseen_df = create_tsfresh_dataframe(unseen_data_parameters)\n clean_unseen_df = unseen_df.drop(\n columns=['start', 'end', 'start_time', 'end_time'])\n unseen_pd = predict_model(model, data=clean_unseen_df)\n (_, res) = clarke_error_grid(unseen_pd['label'], unseen_pd['Label'], 'Test')\n output['unseen_cga_analysis'] = res\n rmse = np.sqrt(mean_squared_error(unseen_pd['label'], unseen_pd['Label']))\n rmadex = np.sqrt(mean_adjusted_exponent_error(\n unseen_pd['label'], unseen_pd['Label']))\n output['unseen_rmese'] = rmse\n output['unseen_rmadex'] = rmadex\n logger.info(output)\n\n\nif __name__ == '__main__':\n parameters = {\n 'ohio_no': 559,\n 'scope': 'train',\n 'train_ds_size': 0,\n 'window_size': 6,\n 'prediction_horizon': 1,\n 'minimal_features': False,\n }\n\n test_parameters = {\n 'ohio_no': 559,\n 
'scope': 'test',\n 'train_ds_size': 100000,\n 'window_size': 6,\n 'prediction_horizon': 1,\n 'minimal_features': False,\n }\n\n run_experiment(parameters, test_parameters)\n","repo_name":"spitoglou/diabetes","sub_path":"sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"525475982","text":"import os\n\n\nTEST_SEARCH = {\n 'default': {\n 'index': 'test_reconnectingasia',\n 'serializers': (\n 'search.tests.mocks.MockSerializer',\n 'search.tests.mocks.MockSerializerTwo',\n 'search.tests.mocks.MockSerializerThree',\n ),\n 'connections': {\n 'hosts': [os.getenv('ELASTICSEARCH_TEST_URL', 'http://localhost:9200')],\n 'timeout': 20,\n }\n }\n}\n\nTEST_RQ_QUEUES = {\n 'default': {\n 'URL': os.getenv('REDISTOGO_URL', 'redis://localhost:6379/0'),\n 'ASYNC': False,\n },\n}\n","repo_name":"CSIS-iLab/new-silk-road","sub_path":"search/tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"28"} +{"seq_id":"43387177401","text":"import collections\nfrom linked_list import Node\n\n\nhead = Node(None)\nnode_1 = Node(1)\nnode_2 = Node(2)\nnode_3 = Node(1)\ntail = Node(None)\nhead.next = node_1\nnode_1.next = node_2\nnode_2.next = node_3\nnode_3.next = tail\n\n\n# My\ndef isPalin(input):\n lst = []\n while input is not None:\n lst.append(input.data)\n input = input.next\n\n while len(lst) > 1:\n if lst.pop() != lst.pop(0):\n return False\n \n return True\n\nisPalin(head)\n\n\"\"\"\nI first solved this with a list, but a Python list is a dynamic array, so list.pop(0),\ni.e. removing the front element, shifts the whole list one slot and costs O(n).\nA deque, a doubly linked list, is therefore more effective for the .pop(0) operation: O(1).\n\nIn Python, deque is an object that provides both stack and queue functionality.\n\"\"\"\n# Deque\ndef isPalin(head):\n Deque = collections.deque()\n while head is not None:\n Deque.append(head.data)\n head = head.next\n \n while len(Deque) > 1:\n if Deque.popleft() != Deque.pop():\n return False\n \n return True\n\n\n# Runner\nl1 = ListNode()\n\nclass Solution:\n def isPalindrome(self, head: ListNode) -> bool:\n node = head\n list_tmp = []\n \n if node == False : # an empty linked list is a palindrome, so return True\n return True\n \n while node is not None : # convert the linked list to a list\n list_tmp.append(node.val) \n node = node.next\n \n # print(type(list_tmp)) -> \"list\"\n \n if list_tmp == list_tmp[::-1]: # palindrome check\n return True\n else :\n return False\n\n\n","repo_name":"HyunLee103/Algorithm","sub_path":"ch8_Linked_list/13_palindrome.py","file_name":"13_palindrome.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"33745235475","text":"import pymysql\nfrom sqlalchemy import create_engine\nimport sqlalchemy\nimport pandas as pd\n\ncnx = create_engine('mysql+pymysql://analyst:badsecuritykills@localhost:3306/items')\n\npd.io.sql.execute(\"\"\"CREATE TABLE books( \\nid VARCHAR(40) PRIMARY KEY NOT NULL \\n,author VARCHAR(255) \\n,copies INT)\"\"\", cnx)\n\ndf = pd.DataFrame({\n \"author\": [\"Alice\", \"Bob\", \"Charlie\"],\n \"copies\": [2, \"\", 7, ],},\n index = [1, 2, 3])\n #Notice that one of these has the wrong data 
type!\n\ndf.to_sql(name='books',con=cnx,if_exists='append',index=False)\n\ndf.head()\n\n\ndf.to_sql()","repo_name":"czhaogithub/hello-world","sub_path":"mycode.py","file_name":"mycode.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"28108886712","text":"# Copyright 2015 gRPC authors.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"The Python implementation of the gRPC route guide client.\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\nimport logging\r\nimport random\r\n\r\nimport grpc\r\n\r\nimport greet_pb2\r\nimport greet_pb2_grpc\r\n# import gree_resources\r\n\r\n\r\ndef make_request(mess):\r\n return greet_pb2.EchoRequest(\r\n request=mess\r\n )\r\n\r\n\r\ndef generate_messages(count):\r\n messages = [\r\n make_request(\"First message\" + str(count))\r\n ]\r\n for msg in messages:\r\n print(\"Sending \" + msg.request)\r\n yield msg\r\n\r\n\r\ndef echo_test(stub, count):\r\n responses = stub.EchoBidir(generate_messages(count))\r\n for response in responses:\r\n print(\"reply test\" + str(count) + \" \" + response.reply)\r\n\r\n\r\ndef echo_test_hangup(stub, count):\r\n responses = stub.EchoBidirHangup(generate_messages(count))\r\n for response in responses:\r\n if response.reply == 'quit':\r\n break;\r\n print(\"reply hangup test\" + str(count) + \" \" + response.reply)\r\n\r\ndef run():\r\n # NOTE(gRPC Python Team): .close() is possible on a channel and should be\r\n # used in circumstances in which the with statement does not fit the needs\r\n # of the code.\r\n with grpc.insecure_channel('localhost:5000') as channel:\r\n stub = greet_pb2_grpc.GreeterStub(channel)\r\n\r\n count = 0\r\n\r\n while True:\r\n #echo_test(stub, count)\r\n echo_test_hangup(stub, count)\r\n count = count + 1\r\n\r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig()\r\n run()\r\n","repo_name":"osexpert/GrpcDotNetProblem","sub_path":"PythonClient/PythonClient.py","file_name":"PythonClient.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"28129726057","text":"import time\nstart_time = time.time()\nimport sys\nsys.path.append(\"./test\")\nimport os\nimport configparser\nimport logging\nimport platform\nimport argparse\n\n\n\nimport Test_set\nimport Test_help\nimport Test_option_para\n\noption_para = Test_option_para.Test_option_para()\nconfig = configparser.ConfigParser()\nconfig.read('./test/test.cfg')\nNumber_test_sets = config['Test_sets']['Number_test_sets']\nDefault_test_set = config['Test_sets']['Default_test_set']\nlogging.basicConfig(filename=\"test_log.txt\", filemode=\"w\", format=\"%(message)s\", datefmt=\"%d-%M-%Y %H:%M:%S\", level=logging.DEBUG)\nlogging.info('Time: '+time.strftime(\"%Y-%m-%d %H:%M:%S\", 
logging.info('Time: '+time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\nlogging.info(\"********************************************************************************\")\nlogging.info(\"CPU info : \"+ platform.processor())\nlogging.info(\"OS info : \"+ platform.system() + platform.release() )\nlogging.info(\"Python info : \"+ platform.python_version() )\n\nlogging.info(\"********************************************************************************\")\nlogging.info('Number of test sets: ' + Number_test_sets )\nlogging.info('Input parameter: ' + str(sys.argv)+'\\n' )\n\noptparser = argparse.ArgumentParser(description='Mysticism MUD Auto test tool')\noptparser.add_argument(\"Test_Num\",nargs='?', type=int,help=\"Test script number inside the test set (0 - n)\",\n                    default=-1 )\noptparser.add_argument(\"Test_Set\",nargs='?', type=int,help=\"Test set number (0 - n); the default set is defined by Default_test_set in test.cfg\",\n                    default= int(Default_test_set) )\noptparser.add_argument(\"-v\", \"--version\", help=\"show the version info for the test tool\",\n                    action=\"store_true\")\noptparser.add_argument(\"--logpath\", help=\"--logpath= :Modify the log path/name. (eg:--logpath='./test/log.txt')\",\n                    default='autotest_log.txt')\noptparser.add_argument(\"-a\",\"--all\" ,help=\"Run all test circuits included by test.cfg\" ,\n                    action=\"store_true\")\nsets_list = list()\n\nlogging.info('Test Mysticism MUD setting : ')\n\ntry:\n    args = optparser.parse_args()\nexcept argparse.ArgumentError:\n    os._exit(1)\nif args.version :\n    Test_help.show_version()\n    os._exit(1)\nelif args.all:  # run every test script\n    option_para.set_Run_all_test(1)\n\n\nfor i in range(0,int(Number_test_sets)):\n    PATH_test_set = config['Test_set_'+str(i)]['PATH_test_set_'+str(i)]\n    Name_test_set = config['Test_set_'+str(i)]['Name_test_set_'+str(i)]\n    Info_test_set = config['Test_set_'+str(i)]['Info_test_set_'+str(i)]\n    testset = Test_set.Test_set(PATH_test_set,Name_test_set,Info_test_set,option_para)\n    sets_list.append(testset)\n\n\n\n# run one specific test script\ntest_num = args.Test_Num\ntest_Set_num = args.Test_Set\n\nprint(\"Run the test script : \"+ str(test_num) + \" in test set : \" + str(test_Set_num))\nprint(\"Test set info :\" + sets_list[test_Set_num].print_test_set_info() )\ncfg_name = sets_list[test_Set_num].cfg_is_exit()\nsets_list[test_Set_num].cfg_import(cfg_name)\nprint(\"Test script info :\" + sets_list[test_Set_num].print_test_script_info(test_num) )\nsets_list[test_Set_num].run_script(test_num)\nprint('Log path :' + args.logpath)\nend_time = time.time()\nprint (\"MUD auto test run time : \"+ \"{:.1f}\".format(end_time-start_time) + 's')\nlogging.info(\"MUD auto test run time : \"+ \"{:.1f}\".format(end_time-start_time) + 's')\nprint(\"Finish test!\")\n\ndef run_all_test():\n    print(\"Start all test!\")\n    for set in sets_list:\n        cfg_name = set.cfg_is_exit()\n        set.cfg_import(cfg_name)\n        set.run_all_circuit()\n    print('Log path :' + args.logpath)\n    print(\"Finish all test successfully!\")\n    os._exit(1)\n","repo_name":"tang5722917/Auto_test_tool","sub_path":"ttt_test.py","file_name":"ttt_test.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"5819783347","text":"import numpy as np\nfrom scipy.optimize import newton\n\n\ndef derivative_free_gradient(RFP, RPP, dF, dP, djd_ddp):\n    \"\"\"\n    Function to compute the gradient of the derivative cost function.\n    This cost function has two sets of variables: the free constraints and the segment times.\n\n    derivative_cost_function / free_constraint\n    dJd / ddp\n\n    dJd/ddp = 2dF.T * RFP + 2dP.T * RPP\n\n    
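Here RFP and RPP are the fixed-free and free-free blocks of the stacked cost\n    matrix R = M.T * inv(A).T * Q * inv(A) * M -- a reading consistent with the\n    L = invA * M factorization noted in the collision docstring below -- so this\n    is just the gradient of a quadratic form, evaluated per axis.\n\n    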
[free constraints x 3] matrix form\n \"\"\"\n for i in range(3):\n # gradient of first cost function\n djd_ddp[:, i] = (2 * dF[:, i].T * RFP + 2 * dP[:, i].T * RPP).T\n\n return djd_ddp\n\n\ndef derivative_time_gradient(order, k_r, time, m, c):\n \"\"\"\n Function to compute gradient of derivative cost function\n This cost function has two variables as free constraint and segment time\n\n J = Jd + kT * ( sum(T))\n\n derivative_cost_function / segment_time\n dJd / dT\n\n Jd = (p.T * Q * p) * 1/time**7\n dJd/dT = (p.T * Q * p) * -7/time**8\n\n [ 1 x m ] vector form\n \"\"\"\n dJd_dT = np.zeros(m)\n\n # compute P\n polynomial_r = np.ones(order + 1)\n for i in range(0, k_r):\n polynomial_r = np.polyder(polynomial_r)\n\n p = np.zeros((order + 1, order + 1))\n for j in range(0, order + 1):\n for k in range(j, order + 1):\n # position\n if j <= len(polynomial_r) - 1 and k <= len(polynomial_r) - 1:\n order_t_r = ((order - k_r - j) + (order - k_r - k))\n if j == k:\n p[j, k] = np.power(polynomial_r[j], 2) / (order_t_r + 1)\n else:\n p[j, k] = 2 * polynomial_r[j] * polynomial_r[k] / (order_t_r + 1)\n\n p = p + np.transpose(p)\n p = 0.5 * p\n p = np.matrix(p)\n\n for i in range(m):\n dJd_dT[i] = np.sum(c[i * (order + 1): i * (order + 1) + order + 1, j].T * p * c[i * (order + 1): i * (order + 1) + order + 1, j] for j in range(3))\n dJd_dT[i] = dJd_dT[i] * -8 / time[i]**7\n\n return dJd_dT\n\n\ndef constrained_time(dJd_dT, m):\n \"\"\"\n Constrained Gradient Descent Method\n\n ||a|| : constant\n ex) a1 + a2 + a3 ... + am = 10\n\n normal vector n = < 1, 1, 1 ... 1>\n tangential vector t = gradient J - ( gradient J * n )n\n\n a = a - r * t\n \"\"\"\n # normal vector of time plane\n n = np.ones(m)\n n = n / np.linalg.norm(n)\n\n # tangential vector of time plane\n t = dJd_dT - np.dot(dJd_dT, n) * n\n\n return t\n\n\ndef time_gradient(m):\n \"\"\"\n Function to compute gradient of time cost function\n\n Jt = kt { time[1] ... time[m] }\n dJt/dT = kt { 1 ... 1 }\n\n [ 1 x m ] vector form\n \"\"\"\n dJt_dT = np.ones(m)\n return dJt_dT\n\n\ndef collsion_gradient(L, Lpp, d, m, djc_ddp, obstacle, threshold, dt, free_constraint):\n \"\"\"\n Function to compute gradient of collision cost function\n This has only one variable as free constraint\n\n Sigma[ c(F(T)) * ||V(T)|| * dT ] = sigma[ c(f(t)) * ||v(t)/a|| * a dt ] = sigma[ c(f(t)) * ||v(t)|| * dt ]\n f(t), v(t) is independent on segment time.\n\n Compute gradient for each axis k [ x y z ], apply product and chain rule\n dJc/ddPk = sigma { ||v(t)|| gradient{c for k} T Lpp dt\n + c(f(t))vk(t)/||v(t)||T V Lpp dt\n\n T = [t0 t1 t2 ... 
tN]\n fk(t) = Tpk , f(t) = [fx(t) fy(t) fz(t)]\n vk(t) = TVpk, v(t) = [vx(t) vy(t) vz(t)]\n\n invA * M = L = [Lff Lpp]\n\n c(x) = 1/2e(d(x) - e )^2\n d(x) = sqrt((fx(t) - X)^2 + (fy(t) - Y)^2 + (fz(t) - Z)^2)\n\n gradient(x) c = dC/dx = dC/dd dd/dx\n dC/dd = 1/e {d(x) - e}\n dd/dfx = 1/2 * {(fx-X)^2 + (fy-Y)^2 + (fz-Z)^2}^-0.5 * 2(fx - X)\n\n fx = Tpx = TL[dF dP]\n\n [free constraints x 3] matrix form\n \"\"\"\n # N = np.arange(0, time, dt)\n # N = np.linspace(0, time, n)\n\n # If dt == 1, T = [0]\n T = np.arange(0, 1, dt) # * time_scaling\n\n for i in range(3):\n summation_jacobian = np.zeros(free_constraint)\n\n for j in range(m):\n for t in T:\n P_vector = np.zeros(m * 10)\n V_vector = np.zeros(m * 10)\n P_vector[j * 10: j * 10 + 10] = get_vector(0, t)\n V_vector[j * 10: j * 10 + 10] = get_vector(1, t)\n\n vk = np.zeros(3) # /time_scaling\n fk = np.zeros(3)\n for k in range(3):\n vk[k] = (np.matrix(V_vector) * L * d[:, k]).item(0)\n fk[k] = (np.matrix(P_vector) * L * d[:, k]).item(0)\n norm_velocity = np.linalg.norm((vk[0], vk[1], vk[2]))\n distance = np.linalg.norm((fk[0] - obstacle[0], fk[1] - obstacle[1], fk[2] - obstacle[2]))\n\n VL = np.matrix(V_vector) * Lpp\n PL = np.matrix(P_vector) * Lpp\n if distance <= threshold:\n dc_dd = 1.0 / threshold * (distance - threshold)\n dd_df = 1.0 / distance * (fk[i] - obstacle[i])\n c_f = 1.0 / (2 * threshold) * (distance - threshold) ** 2\n sum1 = norm_velocity * dc_dd * dd_df * PL * dt # not depend on time scaling\n else:\n # distance > threshold\n c_f = 0\n sum1 = 0\n\n if norm_velocity == 0:\n # vk is also 0\n sum2 = 0\n else:\n sum2 = VL * c_f * vk[i] / norm_velocity * dt # not depend on time scaling\n\n summation_jacobian = sum1 + sum2 + summation_jacobian\n\n # gradient of second cost function\n djc_ddp[:, i] = summation_jacobian.T\n\n return djc_ddp\n\n\ndef endpoint_free_gradient(dJe_ddp, L, Lpp, d, m, pep, vep, lamda1, lamda2):\n \"\"\"\n Function to compute gradient of end-point cost function relative to free constraints\n\n dJe_ddp = lamda1 * 2 * (ptep - pep) * P * Lpp + lamda2 * 2 * (vtep - vep) * V * Lpp\n\n [free constraints x 3] matrix form\n \"\"\"\n for i in range(3):\n P_vector = np.zeros(m * 10)\n V_vector = np.zeros(m * 10)\n P_vector[(m - 1) * 10:] = get_vector(0, 1)\n V_vector[(m - 1) * 10:] = get_vector(1, 1)\n\n ptep = (np.matrix(P_vector) * L * d[:, i]).item(0)\n vtep = (np.matrix(V_vector) * L * d[:, i]).item(0)\n\n dJe_ddp[i] = lamda1 * 2 * (ptep - pep[i]) * P_vector * Lpp + lamda2 * 2 * (vtep - vep[i]) * V_vector * Lpp\n return dJe_ddp\n\n\ndef maxt(t, coefficient):\n length = len(coefficient)\n poly = np.sum(coefficient[i] * t**(length - 1 - i) for i in range(length))\n return poly\n\n\ndef soft_constraint_gradient(p, dJs_ddp, L, Lpp, d, m, vel_max, acc_max, free_constraint):\n \"\"\"\n Function to compute gradient of soft-constraint cost function relative to free constraints\n\n For k axis,\n dJe_ddp = Sigma[ exp(vel_norm - vel_max) / vel_norm * vk * V * Lpp + exp(acc_norm - acc_max) /acc_norm * ak * A * Lpp ]\n\n [free constraints x 3] matrix form\n \"\"\"\n\n v_actual = np.zeros(3) # / time_scaling\n a_actual = np.zeros(3) # / time_scaling**2\n\n v_coefficient = np.zeros((10, 3))\n a_coefficient = np.zeros((10, 3))\n j_coefficient = np.zeros((10, 3))\n\n summation_jacobian = np.zeros((free_constraint, 3))\n summation_jacobian = np.matrix(summation_jacobian)\n\n for i in range(m):\n V_vector = np.zeros(m * 10)\n A_vector = np.zeros(m * 10)\n\n for j in range(3):\n v_coefficient[:, j] = p[i * 10: i * 10 + 10, j] * 
get_vector(1, 1)\n            a_coefficient[:, j] = p[i * 10: i * 10 + 10, j] * get_vector(2, 1)\n            j_coefficient[:, j] = p[i * 10: i * 10 + 10, j] * get_vector(3, 1)\n\n        v_norm_dot = np.sum(np.poly1d(v_coefficient[:, i]) * np.poly1d(a_coefficient[:, i]) for i in range(3))  # / v_norm\n        v_norm_dot = np.array(v_norm_dot)\n\n        a_norm_dot = np.sum(np.poly1d(a_coefficient[:, i]) * np.poly1d(j_coefficient[:, i]) for i in range(3))  # / a_norm\n        a_norm_dot = np.array(a_norm_dot)\n\n        vel_t = newton(maxt, 0.5, args=(v_norm_dot,))\n        acc_t = newton(maxt, 0.5, args=(a_norm_dot,))\n\n        V_vector[i * 10: i * 10 + 10] = get_vector(0, vel_t)\n        A_vector[i * 10: i * 10 + 10] = get_vector(1, acc_t)\n\n        for j in range(3):\n            v_actual[j] = (np.matrix(V_vector) * L * d[:, j]).item(0)  # / time_scaling\n            a_actual[j] = (np.matrix(A_vector) * L * d[:, j]).item(0)  # / time_scaling**2\n\n        vel_norm = np.linalg.norm(v_actual)\n        acc_norm = np.linalg.norm(a_actual)\n\n        VL = np.matrix(V_vector) * Lpp\n        AL = np.matrix(A_vector) * Lpp\n\n        for j in range(3):\n            summation_jacobian[:, j] = summation_jacobian[:, j] + np.exp(vel_norm - vel_max) / vel_norm * v_actual[j] * VL + \\\n                                       np.exp(acc_norm - acc_max) / acc_norm * a_actual[j] * AL\n\n    for j in range(3):\n        dJs_ddp[:, j] = summation_jacobian[:, j]\n\n    return dJs_ddp\n\n\ndef get_vector(k, t):\n    \"\"\"\n    k-th derivative of the order-9 polynomial basis, evaluated at time t\n    \"\"\"\n    order = 9\n    compute_mat = np.eye(order + 1)\n    values = np.zeros(order + 1)\n    for j in range(0, order + 1):\n        tempCoeffs = compute_mat[j, :]\n        for i in range(0, k):\n            tempCoeffs = np.polyder(tempCoeffs)\n        values[j] = np.polyval(tempCoeffs, t)\n    return values\n\n\n","repo_name":"juanmed/riseq_uav","sub_path":"riseq_trajectory/scripts/minimum_snap_trajectory/compute_gradient.py","file_name":"compute_gradient.py","file_ext":"py","file_size_in_byte":9154,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"13286550844","text":"import os\nimport time\n\neletkor: int = 0\nborszin: int = 0\n\nwhile(eletkor == 0 and borszin == 0):\n    eletkor = input(\"\")\n    if(eletkor.replace(\"-\",\"\").isdigit()):\n        eletkor = int(eletkor)\n        if(eletkor <= 6):\n            print(\" 0-6 child\")\n            break\n        elif(eletkor <= 18):\n            print(\" 7-18 student\")\n        elif(eletkor < 65):\n            print(\" 19-65 worker\")\n        elif(eletkor >= 65):\n            print(\" 65- pensioner\")\n    else:\n        print(\"that was not a number\")\n        eletkor = 0  # keep prompting until a number is entered\n    time.sleep(2)\n    os.system(\"cls\")\n\n","repo_name":"Vojsfor/python","sub_path":"ciklá/fel 06/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16057654433","text":"from sklearn.ensemble import RandomForestClassifier\nimport pickle\nfrom pathlib import Path\nimport pandas as pd\n\nclass TrainModel(object):\n    def __init__(self, model_path:Path, data_path:Path, params_name:str, data_name:str, model_name:str):\n        self.model_path = model_path\n        self.data_path = data_path\n        self.model_params = self.parse_params(params_name)\n        self.data = None\n        self.model = None\n        self.data_name = data_name\n        self.model_name = model_name\n    def get_best_params(self, lparams):\n        params = {}\n        max = -1\n        for trial in lparams:\n            if sum(trial.values) > max:\n                max = sum(trial.values)\n                params = trial.params\n        return params\n    def drop_rp(self):\n        self.data.drop(['relative_sequence_position'], axis = 1, inplace = True)\n    def parse_params(self, param_name:str):\n        with open(self.model_path/param_name, 'rb') as f:\n            params = pickle.load(f)\n
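        # keep only the best-scoring trial; each saved entry is assumed to expose\n        # .values and .params, as an Optuna study's trials do\n        params = 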
self.get_best_params(params)\n        return params\n    def get_data(self):\n        with open(self.data_path/self.data_name, 'rb') as f:\n            data = pickle.load(f)\n        self.data = data\n    def extract_df(self, df):\n        return df.drop(['label'], axis = 1), df['label']\n    def train_model(self):\n        trainx, trainy = self.extract_df(self.data)\n        rf = RandomForestClassifier(n_estimators = self.model_params.get('n_estimators'), criterion = self.model_params.get('criterion'), \n                                    max_features = self.model_params.get('max_features'), min_samples_split = self.model_params.get('min_samples_split'),\n                                    min_samples_leaf = self.model_params.get('min_samples_leaf'), random_state= 42, n_jobs = -1)\n        rf.fit(trainx, trainy)\n        self.model = rf\n    def write_model(self):\n        with open(self.model_path/self.model_name, 'wb') as f:\n            pickle.dump(self.model, f)\n","repo_name":"b0bf15h/dsa4266_wooper","sub_path":"TrainModel.py","file_name":"TrainModel.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"724404257","text":"from gensim.models import Doc2Vec\nfrom gensim.test.test_doc2vec import ConcatenatedDoc2Vec\nfrom author_style.author_style_emb import transform_doc\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom collections import defaultdict\nimport numpy as np\nimport os\nfrom sklearn.preprocessing import normalize\n\nfrom functools import partial\n\n__all__ = [\n    \"AuthorFeatures\",\n    \"AuthorInferVecFeatures\",\n    \"Author2VecFeatures\",\n    \"TfidfEmbeddingVectorizer\",\n    \"MeanEmbeddingVectorizer\",\n]\n\n\nclass AuthorFeatures(BaseEstimator, TransformerMixin):\n    def __init__(self):\n        self.num_features_ = 519\n\n    def get_feature_names(self):\n        return np.array(\n            [\"Author_{}\".format(author) for author in range(self.num_features_)]\n        )\n\n    def make_feature_vec(self, author):\n        feature_vec = np.zeros((self.num_features_,))\n        author_id = int(author.split(\"_\")[-1])\n        feature_vec[author_id] = 1\n        return feature_vec\n\n    def fit(self, documents, y=None):\n        return self\n\n    def transform(self, documents):\n        doc_feature_vecs = np.zeros((len(documents), self.num_features_))\n\n        for i, doc in enumerate(documents):\n            # Print a status message every 200 doc\n            if i % 200 == 0.0:\n                print(\"Document %d of %d\" % (i, len(documents)))\n\n            doc_feature_vecs[i] = self.make_feature_vec(doc.author2vec_id)\n\n        return doc_feature_vecs\n\n\nclass Author2VecFeatures(BaseEstimator, TransformerMixin):\n    def __init__(self, model_dir=None, model_name=None, norm=None, dtype=np.float32):\n        print(\"Model dir, model name\", model_dir, model_name)\n        self.model_dir = model_dir\n        self.model_name = model_name\n        self.dtype = dtype\n        self.norm = norm\n\n    def get_feature_names(self):\n        return np.array([\"author_emb_\" + str(i) for i in range(self.num_features_)])\n\n    def make_feature_vec(self, vec_id):\n        try:\n            feature_vec = self.model_.docvecs[vec_id]\n        except KeyError as e:\n            print(e)\n            print(\"Could not find doc vec for {}\".format(vec_id))\n            feature_vec = np.zeros((self.num_features_,), dtype=self.dtype)\n\n        return feature_vec\n\n    def fit(self, documents, y=None):\n        print(\"Here===> {}\".format(self.model_name))\n        if self.model_name:\n            print(\"Loading Vectors\")\n            model_data = os.path.join(\n                self.model_dir, \"model_%s.doc2vec\" % self.model_name\n            )\n            if self.model_name == \"dbow_dmm\" or self.model_name == \"dbow_dmc\":\n                m1 = os.path.join(\n                    self.model_dir, \"model_%s.doc2vec\" % 
self.model_name.split(\"_\")[0]\n                )\n                m2 = os.path.join(\n                    self.model_dir, \"model_%s.doc2vec\" % self.model_name.split(\"_\")[1]\n                )\n                model1 = Doc2Vec.load(m1)\n                model2 = Doc2Vec.load(m2)\n                self.model_ = ConcatenatedDoc2Vec([model1, model2])\n                self.num_features_ = model1.wv.syn0.shape[1] + model2.wv.syn0.shape[1]\n            else:\n                self.model_ = Doc2Vec.load(model_data)\n                self.num_features_ = self.model_.wv.syn0.shape[1]\n            print(self.num_features_)\n            print(\"Done Loading vectors\")\n        else:\n            print(\"No model name given: \", self.model_name)\n            raise OSError(\"Model does not exist\")\n\n        return self\n\n    def transform(self, documents):\n        doc_feature_vecs = np.zeros(\n            (len(documents), self.num_features_), dtype=self.dtype\n        )\n\n        for i, doc in enumerate(documents):\n            # Print a status message every 200 doc\n            if i % 200 == 0.0:\n                print(\"Document %d of %d\" % (i, len(documents)))\n            #\n            # print(\"Document %s\" % doc.book_id)\n            doc_feature_vecs[i] = self.make_feature_vec(doc.author2vec_id)\n\n        if self.norm:\n            print(\"Vectors normalized\")\n            doc_feature_vecs = normalize(doc_feature_vecs, norm=self.norm)\n\n        return doc_feature_vecs\n\n\nclass AuthorInferVecFeatures(Author2VecFeatures):\n    def __init__(\n        self, step, model_dir=None, model_name=None, norm=None, dtype=np.float32\n    ):\n        super(AuthorInferVecFeatures, self).__init__(\n            model_dir=model_dir, model_name=model_name, norm=norm, dtype=dtype\n        )\n        self.step = step\n\n    def make_feature_vec(self, content):\n        n_grams = transform_doc(content, n=3, step=self.step)\n        feature_vec = self.model_.infer_vector(n_grams)\n        return feature_vec\n\n    def transform(self, documents):\n        doc_feature_vecs = np.zeros((len(documents), self.num_features_))\n\n        for i, doc in enumerate(documents):\n            # Print a status message every 200 doc\n            if i % 200 == 0.0:\n                print(\"Document %d of %d\" % (i, len(documents)))\n\n            doc_feature_vecs[i] = self.make_feature_vec(doc.content)\n\n        return doc_feature_vecs\n\n\n###REF: https://github.com/erogol/QuoraDQBaseline/blob/master/utils.py\n\n\nclass MeanEmbeddingVectorizer(Author2VecFeatures):\n    def __init__(\n        self, step, model_dir=None, model_name=None, norm=None, dtype=np.float32\n    ):\n        super(MeanEmbeddingVectorizer, self).__init__(\n            model_dir=model_dir, model_name=model_name, norm=norm, dtype=dtype\n        )\n        self.step = step\n\n    def get_feature_names(self):\n        return np.array([\"mean_emb_\" + str(i) for i in range(self.num_features_)])\n\n    def make_feature_vec(self, content):\n\n        if content:\n            return np.mean(\n                [\n                    self.model_[w]\n                    for w in transform_doc(content, n=3, step=self.step)\n                    if w in self.model_\n                ],\n                axis=0,\n            )\n        else:\n            print(\"Empty content\")\n            return np.zeros((self.num_features_,), dtype=self.dtype)\n\n    def transform(self, documents):\n        doc_feature_vecs = np.zeros(\n            (len(documents), self.num_features_), dtype=self.dtype\n        )\n\n        for i, doc in enumerate(documents):\n            # Print a status message every 200 doc\n            if i % 200 == 0.0:\n                print(\"Document %d of %d\" % (i, len(documents)))\n            #\n            # print(\"Document %s\" % doc.book_id)\n            doc_feature_vecs[i] = self.make_feature_vec(doc.content)\n\n        if self.norm:\n            print(\"Vectors normalized\")\n            doc_feature_vecs = normalize(doc_feature_vecs, norm=self.norm)\n\n        return doc_feature_vecs\n\n\nclass TfidfEmbeddingVectorizer(Author2VecFeatures):\n    def __init__(\n        self, step, model_dir=None, model_name=None, norm=None, dtype=np.float32\n    ):\n        super(TfidfEmbeddingVectorizer, self).__init__(\n            model_dir=model_dir, model_name=model_name, norm=norm, dtype=dtype\n        )\n        self.step = step\n        self.word2weight = None\n\n
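    # the synthetic \"tfidf_emb_<i>\" names below exist only so sklearn pipelines\n    # can introspect this transformer; they carry no linguistic meaning\n    def 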
get_feature_names(self):\n        return np.array([\"tfidf_emb_\" + str(i) for i in range(self.num_features_)])\n\n    def fit(self, X, y=None):\n        from . import type_ngram_analyze\n\n        super().fit(X, y)\n        analyzer_fx = partial(type_ngram_analyze, step=self.step)\n        tfidf = TfidfVectorizer(analyzer=analyzer_fx, lowercase=False)\n        tfidf.fit(X)\n        # if a word was never seen - it must be at least as infrequent\n        # as any of the known words - so the default idf is the max of\n        # known idf's\n        max_idf = max(tfidf.idf_)\n        self.max_idf = max_idf\n        print(\"Max idf:{}\".format(max_idf))\n        self.word2weight = {w: tfidf.idf_[i] for w, i in tfidf.vocabulary_.items()}\n\n        return self\n\n    def make_feature_vec(self, content):\n        if content:\n\n            return np.mean(\n                [\n                    self.model_[w] * self.word2weight.get(w, self.max_idf)\n                    for w in transform_doc(content, n=3, step=self.step)\n                    if w in self.model_\n                ],\n                axis=0,\n            )\n        else:\n            print(\"Empty content\")\n            return np.zeros((self.num_features_,), dtype=self.dtype)\n\n    def transform(self, documents):\n        doc_feature_vecs = np.zeros(\n            (len(documents), self.num_features_), dtype=self.dtype\n        )\n\n        for i, doc in enumerate(documents):\n            # Print a status message every 200 doc\n            if i % 200 == 0.0:\n                print(\"Document %d of %d\" % (i, len(documents)))\n            #\n            # print(\"Document %s\" % doc.book_id)\n            doc_feature_vecs[i] = self.make_feature_vec(doc.content)\n\n        if self.norm:\n            print(\"Vectors normalized\")\n            doc_feature_vecs = normalize(doc_feature_vecs, norm=self.norm)\n\n        return doc_feature_vecs\n","repo_name":"sjmaharjan/author2vec","sub_path":"author2vec/author2vec/features/author2vec.py","file_name":"author2vec.py","file_ext":"py","file_size_in_byte":8911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"18573681202","text":"#!/usr/bin/env python3\nimport requests\nfrom json import dumps\nfrom sys import argv\n\ndef signup():\n    data = {\n        'username': 'Quickcall', 'password': 'Quickcall',\n    }\n    header = {'Content-Type': 'application/json'}\n    res = requests.post('http://qcall.feranmi.tech/api/v1/auth/signup', headers=header, json=data)\n    print(res)\n    print(dumps(res.json(), indent=4))\n\ndef signin():\n    data = {\n        'username': 'Quickcall', 'password': 'Quickcall',\n    }\n    header = {'Content-Type': 'application/json'}\n    res = requests.post('http://qcall.feranmi.tech/api/v1/auth/signin', headers=header, json=data)\n    print(res.headers.get('Authorization'))\n    print(dumps(res.json(), indent=4))\n    return res.headers.get('Authorization')\n\n\ndef profile(token):\n    data = {\n        'firstName': 'Wale', 'lastName': 'Adenuga', 'phoneNo1': '08160969769', 'age': '22',\n        'gender': 'male', \"nameOfEmerContact\": 'badaru basirah',\n        \"relationship\": 'wifey',\n        \"emergencyPhoneNo\": \"0909xxxxxxx\",\n        'email': 'oluwaferanmialausa2001@gmail.com', 'phoneNo2': '07019302484'\n    }\n    header = {'Content-Type': 'application/json', 'authorization': token}\n    res = requests.post('http://qcall.feranmi.tech/api/v1/profile/create', headers=header, json=data)\n    print(dumps(res.json(), indent=4))\n    return res.headers.get('Authorization')\n\ndef medical(token):\n    data = {\n        'bloodType': 'A+', 'genotype': 'AS', 'famDocContact': '020200000', 'medEmerContact': ['0200000'],\n        'allergies': ['dust'], 'chronicConditions': ['asthma']\n    }\n    header = {'Content-Type': 'application/json', 'authorization': token}\n\n    res = requests.post('http://qcall.feranmi.tech/api/v1/profile/medical_information/submit', headers=header, json=data)\n
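    # hedged addition: res.raise_for_status() (standard requests API) would fail\n    # fast on a 4xx/5xx here instead of surfacing later as a JSON decode error\n    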
print(res)\n    print(dumps(res.json(), indent=4))\n\n    print('\\nGet user Medical information\\n')\n    res = requests.get('http://qcall.feranmi.tech/api/v1/profile/medical_information', headers=header)\n    print(res)\n    print(dumps(res.json(), indent=4))\n\n    print('\\n Update user Medical information\\n')\n    data = {\n        'allergies': ['dust', 'pollen'], \"chronicConditions\": [\"diabetes\", \"hypertension\"], 'medEmerContact': ['0200']\n    }\n    res = requests.patch('http://qcall.feranmi.tech/api/v1/profile/medical_information/update', headers=header, json=data)\n    print(res)\n    print(dumps(res.json(), indent=4))\n\ndef update_profile(token):\n    print('\\n Update user profile\\n')\n    data = {\n        'firstName': 'Walex', 'age': '44', \"emergencyPhoneNo\": \"0909xxxxxxx\"\n    }\n    header = {'Content-Type': 'application/json', 'authorization': token}\n    res = requests.patch('http://qcall.feranmi.tech/api/v1/profile/basic_information/update', headers=header, json=data)\n    print(dumps(res.json(), indent=4))\n\n    res = requests.get('http://qcall.feranmi.tech/api/v1/profile/basic_information', headers=header)\n    print(dumps(res.json(), indent=4))\n\n\nif __name__ == \"__main__\":\n    signup()\n    token = signin()\n    profile(token)\n    medical(token)\n    update_profile(token)\n","repo_name":"Alausa2001/QuickCall","sub_path":"backend/api/functional_testing/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"39979288952","text":"from openerp.osv import orm, fields\nfrom openerp.tools.translate import _\n\n\nclass wzd_massive_price_change(orm.TransientModel):\n    _name = \"wzd.massive_price_change\"\n\n    _columns = {\n        'name': fields.selection(\n            (('mku', 'MarkUp'), ('fix', 'Fix Price')),\n            'Standard Price MarkUp Type'),\n        'price_type': fields.selection(\n            (('sale', 'Sale'), ('cost', 'Cost')),\n            'Price Type'),\n        'value': fields.float('Value', help=\"Insert a fixed price, or a value from 0 to 100 to mark up the old price\"),\n    }\n\n    def change(self, cr, uid, ids, context={}):\n        wzd = self.browse(cr, uid, ids[0], context)\n\n        # check that the user has the required authorization\n\n        if wzd.price_type == 'sale':\n            if not self.pool['res.groups'].user_in_group(cr, uid, uid, 'product_bom.group_sell_price', context):\n                raise orm.except_orm(_(\"You don't have Permission!\"), _(\"You must be in group 'Show Sell Price'\"))\n        if wzd.price_type == 'cost':\n            if not self.pool['res.groups'].user_in_group(cr, uid, uid, 'product_bom.group_cost_price', context):\n                raise orm.except_orm(_(\"You don't have Permission!\"), _(\"You must be in group 'Show Cost Price'\"))\n\n        if wzd.price_type == 'sale':\n            if wzd.name == 'fix':\n                self.pool['product.product'].write(cr, uid, context['active_ids'], {'list_price': wzd.value}, context)\n            else:\n                product_obj = self.pool['product.product']\n                for ids in context['active_ids']:\n                    product = product_obj.browse(cr, uid, ids, context)\n                    new_price = product.list_price + ((product.list_price * wzd.value) / 100.00)\n                    product_obj.write(cr, uid, [ids, ], {'list_price': new_price}, context)\n        else:\n            if wzd.name == 'fix':\n                self.pool['product.product'].write(cr, uid, context['active_ids'], {'standard_price': wzd.value}, context)\n            else:\n                product_obj = self.pool['product.product']\n                for ids in context['active_ids']:\n                    product = product_obj.browse(cr, uid, ids, context)\n                    new_price = product.standard_price + ((product.standard_price * wzd.value) / 100.00)\n
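                    # markup branch: new_price = old_price * (1 + value / 100); the 'fix' branch above writes wzd.value as-is\n                    product_obj.write(cr, uid, [ids, ], {'standard_price': new_price}, 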
context)\n\n return {'type': 'ir.actions.act_window_close'}\n\n","repo_name":"elmerjc/barcode_and_extras","sub_path":"massive_price_change/wizard/wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"32771802569","text":"import os\nfrom scipy import ndimage\nimport numpy as np\nimport pandas as pd\nimport SimpleITK as sitk\nfrom pprint import pprint\n#\nfrom keras.models import load_model\nfrom keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n#\nfrom data import get_data\nfrom utils import generate_sitk_obj_from_npy_array, threshold, get_spacing, calculate_metrics, save_candidate_roi, multi_prediction\nfrom plot_images import plot_images\n\ndef path_helper(folder, file_tail):\n return \"{}/{}/{}/{}_{}_{}.nrrd\".format(MASTER_FOLDER, dataset, folder, dataset, patient_id, file_tail)\n\n# TODO: deal with empty predictions, for localization\n\n# LOCALIZATION or SEGMENTATION\nTASK = \"SEGMENTATION\"\n\nRUN = \"112\"\nNAME = \"focal-tversky-loss-0.0005-augment-rt-maastro\"\nSAVE_CSV = True\nprint(\"{} test run # {}\".format(TASK, RUN))\n\nif TASK == \"LOCALIZATION\":\n IMAGE_SHAPE = (80, 96, 96)\n SAVE_CANDIDATES = True\n CROP_SHAPE = (64, 160, 160) # to save ROI images for segmentation model\n MASTER_FOLDER = \"/mnt/aertslab/USERS/Ahmed/0_FINAL_SEGMENTAION_DATA\"\n IMAGE_INTERPOLATED_RESIZED_FOLDER = \"5_image_interpolated_resized\"\n IMAGE_INTERPOLATED_ROI_PR_FOLDER = \"11_image_interpolated_roi_pr/{}_{}\".format(RUN, NAME)\nelif TASK == \"SEGMENTATION\":\n IMAGE_SHAPE = (64, 160, 160)\n SAVE_CANDIDATES = False # always false\n\n\nMULTI_PREDICTION = False\nMODEL_TO_USE = \"_final\" # \"\" or \"_final\"\n\n# metric-specific\nHAUSDORFF_PERCENT = 95\nOVERLAP_TOLERANCE = 5\nSURFACE_DICE_TOLERANCE = 6\n\n# get data\ndata = get_data(\"test\", IMAGE_SHAPE, TASK, MULTI_PREDICTION)\n\n# folder should already exist from training run\ndir_name = \"/output/{}_{}\".format(RUN, NAME)\n\n# initiate vars\nresults = []\nno_results = []\n\n# load model\nmodel = os.path.join(dir_name, \"{}{}.h5\".format(RUN, MODEL_TO_USE))\noriginal_model = load_model(model, custom_objects={'InstanceNormalization': InstanceNormalization})\n\nfor patient in data:\n #### VARIABLES\n patient_id = patient[\"patient_id\"]\n dataset = patient[\"dataset\"]\n # formatted (cropped & reshaped) if MULTI_PREDICTION = False\n # not cropped or reshaped if MULTI_PREDICTION = True\n image = patient[\"image\"]\n # original size\n image_sitk_obj = patient[\"image_sitk_obj\"]\n label_sitk_obj = patient[\"label_sitk_obj\"]\n spacing = get_spacing(image_sitk_obj)\n\n #### PREDICT\n if MULTI_PREDICTION:\n label_prediction = multi_prediction(image, original_model, IMAGE_SHAPE)\n label_prediction = threshold(np.squeeze(label_prediction), 4.5)\n else:\n label_prediction = original_model.predict(image.reshape(1,*image.shape))\n label_prediction = threshold(np.squeeze(label_prediction)) # 0.5\n\n\n\n # if there are voxels predicted:\n if label_prediction[label_prediction==1].sum() > 0:\n\n # save model output as nrrd\n # this will pad the prediction to match the size of the originals\n # for localization, 80, 96, 96 => 84, 108, 108\n # for segmentation, 64, 160, 160 => 76, 196, 196\n pred_sitk_obj = generate_sitk_obj_from_npy_array(\n image_sitk_obj,\n label_prediction,\n True,\n os.path.join(dir_name, \"{}_{}_prediction.nrrd\".format(dataset, patient_id)))\n\n # get arrays from data\n 
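# note: sitk.GetArrayFromImage returns a NumPy array indexed [z, y, x], the\n        # reverse of SimpleITK's (x, y, z) image convention\n        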
image_arr_org = sitk.GetArrayFromImage(image_sitk_obj)\n        label_arr_org = sitk.GetArrayFromImage(label_sitk_obj)\n        # get arrays from prediction\n        pred_arr_org = sitk.GetArrayFromImage(pred_sitk_obj)\n\n        # metrics\n        result, dice, bbox_metrics = calculate_metrics(patient_id, spacing, label_arr_org, pred_arr_org, HAUSDORFF_PERCENT, OVERLAP_TOLERANCE, SURFACE_DICE_TOLERANCE)\n        # append\n        results.append(result)\n\n        # plot 5x3 views\n        plot_images(dataset,\n                    patient_id,\n                    image_arr_org,\n                    label_arr_org,\n                    pred_arr_org,\n                    dir_name,\n                    True,\n                    bbox_metrics,\n                    dice)\n        print(\"{} done. dice :: {}\".format(patient_id, result[\"dice\"]))\n\n\n        # extract ROI from image_interpolated_resized\n        if SAVE_CANDIDATES:\n            # create folder\n            dir = \"{}/{}/{}\".format(MASTER_FOLDER, dataset, IMAGE_INTERPOLATED_ROI_PR_FOLDER)\n            if not os.path.exists(dir):\n                os.mkdir(dir)\n                print(\"directory {} created\".format(dir))\n            # save candidates\n            save_candidate_roi(bbox_metrics,\n                               spacing,\n                               path_helper(IMAGE_INTERPOLATED_RESIZED_FOLDER, \"image_interpolated_resized_raw_xx\"),\n                               CROP_SHAPE,\n                               \"{}/{}_{}_{}\".format(dir, dataset, patient_id, \"image_interpolated_roi_raw_pr<>.nrrd\"))\n\n    else:\n        no_results.append(patient_id)\n        # temporary for segmentation task\n        if TASK == \"SEGMENTATION\":\n            result = {}\n            result[\"patient_id\"] = patient_id\n            result[\"precision\"] = 0\n            result[\"recall\"] = 0\n            result[\"jaccard\"] = 0\n            result[\"dice\"] = 0\n            result[\"segmentation_score\"] = 0\n            result[\"x_distance\"] = 0\n            result[\"y_distance\"] = 0\n            result[\"z_distance\"] = 0\n            result[\"distance\"] = 0\n            result[\"average_surface_distance_gt_to_pr\"] = 0\n            result[\"average_surface_distance_pr_to_gt\"] = 0\n            result[\"robust_hausdorff\"] = 0\n            result[\"overlap_fraction_gt_with_pr\"] = 0\n            result[\"overlap_fraction_pr_with_gt\"] = 0\n            result[\"surface_dice\"] = 0\n            for axes in [\"X\", \"Y\", \"Z\"]:\n                for location in [\"min\", \"center\", \"max\", \"length\"]:\n                    result[\"prediction_{}_{}\".format(axes, location)] = 0\n            results.append(result)\n\n\nprint(\"no results :: \", no_results)\n\n# populate df\nif SAVE_CSV:\n    df = pd.DataFrame.from_dict(results)\n    df.to_csv(os.path.join(dir_name, \"{}_{}.csv\".format(RUN, NAME)))\n","repo_name":"ahmedhosny/3d-unetplusplus","sub_path":"files/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"13167773895","text":"import sys\r\nimport struct\r\nimport codecs\r\nfrom functools import reduce\r\nfrom PIL import Image\r\n\r\n# pack/unpack helpers; the extracted source kept only a garbled p8/pb16 pair,\r\n# so p16/u16/u32 are reconstructed here to match their uses below (assuming\r\n# little-endian, as Game Boy pointers are)\r\np8 = lambda x: struct.pack(\"B\", x)\r\np16 = lambda x: struct.pack(\"<H\", x)\r\npb16 = lambda x: struct.pack(\">H\", x)\r\nu16 = lambda x: struct.unpack(\"<H\", x)[0]\r\nu32 = lambda x: struct.unpack(\"<I\", x)[0]\r\n\r\ndata = b\"\"\r\ndata_patched = b\"\"\r\ndef patch(offset, patch_data):\r\n    global data, data_patched\r\n    if data_patched == b\"\": data_patched = data\r\n    data_patched = data_patched[:offset] + patch_data + data_patched[offset + len(patch_data):]\r\n\r\ndef gb_checksum1(data):\r\n    checksum = 0\r\n    for i in range(0x134, 0x14D):\r\n        checksum += (data[i] + 1)\r\n    checksum = 0x100 - (checksum % 0x100)\r\n    return checksum\r\n\r\ndef gb_checksum2(data):\r\n    checksum = 0\r\n    for i in range(len(data)):\r\n        if i == 0x14E or i == 0x14F: continue\r\n        checksum += data[i]\r\n    checksum %= 0x10000\r\n    return checksum\r\n\r\ndef decompress_data(arg):\r\n    d = data\r\n    if type(arg) == int:\r\n        offset = arg\r\n    else:\r\n        d = arg\r\n        offset = 0\r\n\r\n    ptr = offset\r\n    tiles = []\r\n    tile_ptr = ptr + ((d[ptr] & 0b11111100) >> 2) + 2\r\n    cnt = d[ptr] + 1\r\n    ptr += 1\r\n    for c in 
range(cnt):\r\n if c % 4 == 0:\r\n a = d[ptr]\r\n ptr += 1\r\n\r\n flag = ((a & 0b11000000) >> 6) & 0b11\r\n a = ((a << 2) + flag) & 0xFF\r\n if flag == 0:\r\n tiles.append(b\"\\x00\"*0x10)\r\n elif flag == 1:\r\n tiles.append(d[tile_ptr:tile_ptr+0x10])\r\n tile_ptr += 0x10\r\n else:\r\n b = u16(d[tile_ptr:tile_ptr+2])\r\n tile_ptr += 2\r\n tile = b\"\"\r\n for _ in range(0x10):\r\n if b & 0x8000 == 0:\r\n tile += p8(0x00)\r\n else:\r\n tile += p8(d[tile_ptr])\r\n tile_ptr += 1\r\n b <<= 1\r\n\r\n if flag == 3:\r\n tile += b\"\\x00\"\r\n for i in range(0x0F):\r\n tile = tile[:i+2] + p8(tile[i] ^ tile[i+2]) + tile[i+3:]\r\n tiles.append(tile[:0x10])\r\n return tiles, tile_ptr - offset\r\n\r\ndef compress_data(tiles):\r\n compressed_data = b\"\"\r\n compressed_tile = b\"\"\r\n result_flag = 0\r\n\r\n n_tile = len(tiles) // 0x10\r\n compressed_data += p8(n_tile - 1)\r\n for i in range(n_tile):\r\n flag = 0\r\n result_tile = b\"\"\r\n tile = tiles[i*0x10:i*0x10+0x10] + b\"\\x00\"\r\n if tile[:0x10] != b\"\\x00\"*0x10:\r\n xor_tile = tile[:2]\r\n for j in range(0x0F):\r\n xor_tile += p8(tile[j] ^ tile[j+2])\r\n xor_tile = xor_tile[:0x10]\r\n tile = tile[:0x10]\r\n\r\n bit1 = 0\r\n result_tile1 = b\"\"\r\n for j in range(len(tile)):\r\n bit1 <<= 1\r\n if tile[j] != 0:\r\n result_tile1 += p8(tile[j])\r\n bit1 |= 1\r\n\r\n bit2 = 0\r\n result_tile2 = b\"\"\r\n for j in range(len(xor_tile)):\r\n bit2 <<= 1\r\n if xor_tile[j] != 0:\r\n result_tile2 += p8(xor_tile[j])\r\n bit2 |= 1\r\n\r\n if len(result_tile1) <= len(result_tile2):\r\n if len(result_tile1) + 2 < 0x10:\r\n flag = 2\r\n result_tile = p16(bit1) + result_tile1\r\n else:\r\n if len(result_tile2) + 2 < 0x10:\r\n flag = 3\r\n result_tile = p16(bit2) + result_tile2\r\n \r\n if flag == 0:\r\n flag = 1\r\n result_tile = tile\r\n\r\n result_flag |= flag\r\n if i % 4 == 3:\r\n compressed_data += p8(result_flag)\r\n result_flag = 0\r\n elif i == n_tile - 1:\r\n result_flag <<= (2 * (3 - (i % 4)))\r\n compressed_data += p8(result_flag)\r\n\r\n result_flag <<= 2\r\n compressed_tile += result_tile\r\n\r\n compressed_data += compressed_tile\r\n return compressed_data\r\n\r\ndef get_title(img):\r\n X = 160\r\n Y = 144\r\n img = img.crop( (48, 40, 48+X, 40+Y) )\r\n\r\n pal = dict(map(lambda x: (x[1], x[0]), enumerate(sorted(list(set(img.getdata())), reverse=True))))\r\n\r\n img_2bpp = [ [0]*X for _ in range(Y) ]\r\n for i in range(Y):\r\n for j in range(X):\r\n p = img.getpixel((j, i))\r\n t = list(map(lambda x: abs(x[0]-p[0]), pal))\r\n img_2bpp[i][j] = t.index(min(t))\r\n\r\n tile_idx = 1\r\n tiles_k = {\"0\"*64: 0}\r\n tiles = b\"\\x00\"*16\r\n tilemap = []\r\n for i in range(Y // 8):\r\n for j in range(X // 8):\r\n a = \"\"\r\n for k in range(8):\r\n for l in range(8):\r\n a += str(img_2bpp[i*8+k][j*8+l])\r\n \r\n if (a in tiles_k) == False:\r\n tiles_k[a] = tile_idx\r\n tilemap.append(tile_idx)\r\n tile_idx += 1\r\n\r\n lb = 0\r\n hb = 0\r\n for m in range(len(a)):\r\n color = int(a[m])\r\n lb |= ((color & 0b10) >> 1) << (7 - (m % 8))\r\n hb |= (color & 0b01) << (7 - (m % 8))\r\n if m % 8 == 7:\r\n tiles += p8(hb) + p8(lb)\r\n lb = 0\r\n hb = 0\r\n else:\r\n tilemap.append(tiles_k[a])\r\n\r\n return tiles, b\"\".join(map(lambda x: p8((x + 0x80) & 0xFF), tilemap))\r\n\r\ndef get_sgb_border(img):\r\n TILE1_X = 64\r\n TILE1_Y = 192\r\n TILE1_W = 32\r\n TILE1_H = 16\r\n TILE2_X = 96\r\n TILE2_Y = 192\r\n TILE2_W = 96\r\n TILE2_H = 32\r\n TILE3_X = 136\r\n TILE3_Y = 184\r\n TILE3_W = 16\r\n TILE3_H = 8\r\n TILE4_X = 160\r\n TILE4_Y = 184\r\n TILE4_W = 
16\r\n TILE4_H = 8\r\n\r\n tile1_img = img.crop( (TILE1_X, TILE1_Y, TILE1_X+TILE1_W, TILE1_Y+TILE1_H) )\r\n tile2_img = img.crop( (TILE2_X, TILE2_Y, TILE2_X+TILE2_W, TILE2_Y+TILE2_H) )\r\n tile3_img = img.crop( (TILE3_X, TILE3_Y, TILE3_X+TILE3_W, TILE3_Y+TILE3_H) )\r\n tile4_img = img.crop( (TILE4_X, TILE4_Y, TILE4_X+TILE4_W, TILE4_Y+TILE4_H) )\r\n\r\n tile1_idxs = [ [16, 17, 18, 19], [32, 33, 34, 35] ]\r\n tile2_idxs = [ [ x for x in range(52, 64) ], [ x for x in range(68, 80) ], [ x for x in range(84, 96) ], [ x for x in range(100, 112) ] ]\r\n tile3_idxs = [ [41, 42] ]\r\n tile4_idxs = [ [44, 45] ]\r\n \r\n t, _ = decompress_data(0xDF53)\r\n sgb_border_tiles = [ t[i]+t[i+1] for i in range(0, len(t), 2) ]\r\n sgb_border_tiles[64] = b\"\\x00\\xFF\"*8 + b\"\\xFF\"*16\r\n\r\n const_palette = [14, 4, 3, 2, 15]\r\n pal = dict([ (color, const_palette[i]) for i, color in enumerate(sorted(list(set(list(tile1_img.getdata()) + list(tile2_img.getdata()))))) ])\r\n\r\n def insert_new_tile(w, h, tile_img, tile_idxs):\r\n for i in range(h // 8):\r\n for j in range(w // 8):\r\n a = \"\"\r\n for y in range(8):\r\n for x in range(8):\r\n p = tile_img.getpixel(((j*8)+x, (i*8)+y))\r\n a += hex(pal[p])[2:]\r\n b1 = 0\r\n b2 = 0\r\n b3 = 0\r\n b4 = 0\r\n tile1 = b\"\"\r\n tile2 = b\"\"\r\n for m in range(len(a)):\r\n color = int(a[m], 16)\r\n b1 |= (color & 0b0001) << (7 - (m % 8))\r\n b2 |= ((color & 0b0010) >> 1) << (7 - (m % 8))\r\n b3 |= ((color & 0b0100) >> 2) << (7 - (m % 8))\r\n b4 |= ((color & 0b1000) >> 3) << (7 - (m % 8))\r\n if m % 8 == 7:\r\n tile1 += p8(b1) + p8(b2)\r\n tile2 += p8(b3) + p8(b4)\r\n b1 = 0\r\n b2 = 0\r\n b3 = 0\r\n b4 = 0\r\n sgb_border_tiles[tile_idxs[i][j]] = tile1 + tile2\r\n\r\n insert_new_tile(TILE1_W, TILE1_H, tile1_img, tile1_idxs)\r\n insert_new_tile(TILE2_W, TILE2_H, tile2_img, tile2_idxs)\r\n insert_new_tile(TILE3_W, TILE3_H, tile3_img, tile3_idxs)\r\n insert_new_tile(TILE4_W, TILE4_H, tile4_img, tile4_idxs)\r\n\r\n sgb_border_tiles = [ sgb_border_tiles[i // 2][:0x10] if i % 2 == 0 else sgb_border_tiles[i // 2][0x10:] for i in range(len(sgb_border_tiles) * 2) ]\r\n return b\"\".join(sgb_border_tiles)\r\n\r\ndef get_case_title(img, all_tiles=None):\r\n W = 8 * 8\r\n H = 8 * 8\r\n img = img.crop( (48, 56, 48+W, 56+H) )\r\n\r\n pal = dict(map(lambda x: (x[1], x[0]), enumerate(sorted(list(set(img.getdata())), reverse=True))))\r\n\r\n img_2bpp = [ [0]*W for _ in range(H) ]\r\n for i in range(H):\r\n for j in range(W):\r\n p = img.getpixel((j, i))\r\n t = list(map(lambda x: abs(x[0]-p[0]), pal))\r\n img_2bpp[i][j] = t.index(min(t))\r\n\r\n tiles_k = {}\r\n tiles = []\r\n tilemap = []\r\n for i in range(H // 8):\r\n tilemap_w = []\r\n for j in range(W // 8):\r\n a = \"\"\r\n for k in range(8):\r\n for l in range(8):\r\n a += str(img_2bpp[i*8+k][j*8+l])\r\n\r\n if (a in tiles_k) == False:\r\n lb = 0\r\n hb = 0\r\n tile = b\"\"\r\n for m in range(len(a)):\r\n color = int(a[m])\r\n lb |= ((color & 0b10) >> 1) << (7 - (m % 8))\r\n hb |= (color & 0b01) << (7 - (m % 8))\r\n if m % 8 == 7:\r\n tile += p8(hb) + p8(lb)\r\n lb = 0\r\n hb = 0\r\n tiles.append(tile)\r\n tiles_k[a] = tile\r\n\r\n if all_tiles != None:\r\n if tile == b\"\\xFF\"*0x10: tilemap_w.append(0x88)\r\n else: tilemap_w.append(all_tiles.index(tile))\r\n else:\r\n if all_tiles != None:\r\n if tile == b\"\\xFF\"*0x10: tilemap_w.append(0x88)\r\n else: tilemap_w.append(all_tiles.index(tiles_k[a]))\r\n tilemap.append(tilemap_w)\r\n return tiles, tilemap\r\n\r\n\r\nJUMP_OP = p8(0xC3)\r\nCALL_OP = 
p8(0xCD)\r\n\r\nNEW_FONT_OFFSET = 0x40000\r\n\r\ndata = open(sys.argv[1], \"rb\").read()\r\ndata = data + b\"\\x00\"*(0x80000-len(data))\r\n\r\nfor bank in range(0x01, 0x20):\r\n patch((bank * 0x4000) + 0x3FFE, p8(bank))\r\n\r\nfont = open(\"galmuri.fnt\", \"rb\").read()\r\npatch(NEW_FONT_OFFSET, font)\r\n\r\ndef get_font(char):\r\n hangul = codecs.open(u\"완성형.txt\", \"r\", \"UTF-16\").read()\r\n idx = hangul.find(char)\r\n if idx == -1:\r\n idx = \":SeHwa\".find(char)\r\n makers_tiles = [\r\n b\"\\x00\\x00\\x18\\x18\\x18\\x18\\x00\\x00\\x00\\x00\\x18\\x18\\x18\\x18\\x00\\x00\",\r\n b\"\\x3C\\x3C\\x66\\x66\\x70\\x70\\x3C\\x3C\\x0E\\x0E\\x66\\x66\\x3C\\x3C\\x00\\x00\",\r\n b\"\\x00\\x00\\x3C\\x3C\\x66\\x66\\x66\\x66\\x7E\\x7E\\x60\\x60\\x3E\\x3E\\x00\\x00\",\r\n b\"\\x66\\x66\\x66\\x66\\x66\\x66\\x7E\\x7E\\x66\\x66\\x66\\x66\\x66\\x66\\x00\\x00\",\r\n b\"\\x00\\x00\\x00\\x00\\x63\\x63\\x6B\\x6B\\x6B\\x6B\\x7F\\x7F\\x36\\x36\\x00\\x00\",\r\n b\"\\x00\\x00\\x00\\x00\\x1E\\x1E\\x36\\x36\\x36\\x36\\x36\\x36\\x1F\\x1F\\x00\\x00\"\r\n ]\r\n return makers_tiles[idx]\r\n return font[idx*0x10:idx*0x10+0x10]\r\n\r\ntitle_tiledata, title_tilemap = get_title(Image.open(\"title.png\"))\r\npatch(0x83AC, compress_data(title_tiledata))\r\npatch(0x62C4, title_tilemap)\r\n\r\nsgb_border_tiledata = get_sgb_border(Image.open(\"title.png\"))\r\npatch(0xDF53, compress_data(sgb_border_tiledata))\r\npatch(0xEE83, b\"\\x01\\x07\")\r\n\r\nmain_text = [ [\" 수 사 시 작 \", \" 수 사 재 개 \", \" 환 경 설 정 \"], [\"메시지 표시속도 \", \" 빠 르 게 \", \" 보 통 \", \" 느 리 게 \", \" \", \" \", \" \"], [\" 패스워드 입력\"], [\" 패 스 워 드\", \" \"] ]\r\nmain_text_xy = [ [(4, 4), (4, 9), (4, 14)], [(5, 3), (4, 7), (4, 10), (4, 13), (3, 12), (13, 16), (4, 3)], [(4, 2)], [(6, 4), (6, 3)] ]\r\n\r\nmain_tiledata, _ = decompress_data(0xA604)\r\nmain_tilemap, _ = decompress_data(0x3DB12)\r\nmain_tilemap2, _ = decompress_data(0x3DC00)\r\nmain_tilemap3, _ = decompress_data(0x3D7D5)\r\nmain_tilemap4, _ = decompress_data(0x1B30)\r\nfor i in range(7):\r\n main_tiledata[0x09+i] = b\"\\x00\"*0x10\r\nfor i in range(6):\r\n main_tiledata[0x1A+i] = b\"\\x00\"*0x10\r\n\r\nmain_text_chrs = \"\".join(sorted(set(\"\".join(map(lambda x: x.replace(\" \", \"\"), \"\".join(map(lambda x: \"\".join(x), main_text)))))))\r\ncnt = len(main_tiledata)\r\nfor i in range(len(main_text_chrs)):\r\n if 0x4B+i >= cnt:\r\n main_tiledata.append(get_font(main_text_chrs[i]))\r\n else: main_tiledata[0x4B+i] = get_font(main_text_chrs[i])\r\n\r\nmain_text10_start_idx = len(main_tiledata)\r\nfor i in range(len(main_text[1][0])+1):\r\n t1 = b\"\"\r\n t2 = b\"\"\r\n t3 = b\"\"\r\n if i == 0 or main_text[1][0][i-1] == \" \": t1 = b\"\\x00\"*0x10\r\n else: t1 = get_font(main_text[1][0][i-1])\r\n if i == len(main_text[1][0]) or main_text[1][0][i] == \" \": t2 = b\"\\x00\"*0x10\r\n else: t2 = get_font(main_text[1][0][i])\r\n for j in range(0x10):\r\n t3 += p8(((t1[j] << 4) & 0xFF) | ((t2[j] >> 4) & 0b1111))\r\n main_tiledata.append(t3)\r\npatch(0x17200, compress_data(b\"\".join(main_tiledata)))\r\n\r\ntilemap = [ b\"\".join(main_tilemap), b\"\".join(main_tilemap2), b\"\".join(main_tilemap3), b\"\".join(main_tilemap4) ]\r\nfor i in range(len(main_text)):\r\n for j in range(len(main_text[i])):\r\n x = main_text_xy[i][j][0]\r\n y = main_text_xy[i][j][1]\r\n idx = y * 0x14 + x\r\n for k in range(len(main_text[i][j])):\r\n tm = 0x4B + main_text_chrs.find(main_text[i][j][k])\r\n if main_text[i][j][k] == \" \": tm = 0x8C\r\n if i == 1 and j == 0: tm = main_text10_start_idx + k\r\n tilemap[i] = tilemap[i][:idx+k] 
+ p8(tm) + tilemap[i][idx+k+1:]\r\npatch(0x3DB12, compress_data(tilemap[0]))\r\npatch(0x3DC00, compress_data(tilemap[1]))\r\npatch(0x3D7D5, compress_data(tilemap[2]))\r\npatch(0x1B30, compress_data(tilemap[3]))\r\n\r\nevidence_tiledata, _ = decompress_data(0x9BED)\r\nevidence_tiledata[9] = b\"\\x00\"*0x10\r\nevidence_tiledata[14] = b\"\\x00\"*0x10\r\nevidence_tiledata[15] = b\"\\x00\"*0x10\r\nevidence_text = \"사건정리\"\r\nfor i in range(len(evidence_text)):\r\n evidence_tiledata[i+10] = get_font(evidence_text[i])\r\npatch(0x9BED, compress_data(b\"\".join(evidence_tiledata)))\r\n\r\nprofile_point_tiledata, n = decompress_data(0x9892)\r\npoint_text = \"범인추리\"\r\nfor i in range(len(point_text)):\r\n profile_point_tiledata[i] = get_font(point_text[i])\r\nprofile_text = \"인물소개\"\r\nfor i in range(6):\r\n if i < len(profile_text):\r\n profile_point_tiledata[i+8] = get_font(profile_text[i])\r\n else: profile_point_tiledata[i] = b\"\\x00\"*0x10\r\npatch(0x9892, compress_data(b\"\".join(profile_point_tiledata)))\r\n\r\nprofile_tilemap_offset = 0x36C1\r\npatch(profile_tilemap_offset+0xD, p8(0x8C))\r\npatch(profile_tilemap_offset+0x14+0xD, b\"\\x8C\\x48\\x49\\x4A\\x4B\\x8C\\x8C\")\r\n\r\npoint_tilemap_offset = 0x23D5\r\npatch(point_tilemap_offset, b\"\\x8C\\x40\\x41\\x42\\x43\\x8C\\x8C\\x8C\")\r\n\r\ncase12_text = [ [\"총 간식 비용 \", \"주스 \", \"과자 \"], [\"말았습니다 \", \"폐를 끼쳤습니다\", \"사과드리기 \", \"죽겠습니다 \", \" \", \" \"] ]\r\ncase12_text_xy = [ [(0, 1), (0, 2), (0, 4)], [(0, 3), (0, 5), (0, 7), (0, 9), (4, 6), (4, 8)] ]\r\ncase12_text_chrs = \"\".join(sorted(set(\"\".join(map(lambda x: x.replace(\" \", \"\"), \"\".join(map(lambda x: \"\".join(x), case12_text)))))))\r\n\r\ncase12_tiledata, _ = decompress_data(0x30ABB)\r\nfor i in range(26):\r\n if i < len(case12_text_chrs):\r\n case12_tiledata[4+i] = get_font(case12_text_chrs[i])\r\n else: case12_tiledata[4+i] = b\"\\x00\"*0x10\r\npatch(0x30ABB, compress_data(b\"\".join(case12_tiledata)))\r\n\r\ncase1_tilemap, _ = decompress_data(0x30A1C)\r\ncase2_tilemap, _ = decompress_data(0x30F2D)\r\ntilemap = [ b\"\".join(case1_tilemap), b\"\".join(case2_tilemap) ]\r\nfor i in range(len(case12_text)):\r\n for j in range(len(case12_text[i])):\r\n x = case12_text_xy[i][j][0]\r\n y = case12_text_xy[i][j][1]\r\n idx = y * 0x14 + x\r\n for k in range(len(case12_text[i][j])):\r\n tm = 4 + case12_text_chrs.find(case12_text[i][j][k])\r\n if case12_text[i][j][k] == \" \": tm = 0x00\r\n tilemap[i] = tilemap[i][:idx+k] + p8(tm) + tilemap[i][idx+k+1:]\r\npatch(0x30A1C, compress_data(tilemap[0]))\r\npatch(0x30F2D, compress_data(tilemap[1]))\r\n\r\ncase_title_tiledata, n = decompress_data(0x9DAC)\r\nfor i in [31, 32, 36, 37, 46, 47, 59, 60, 72, 73, 81, 82, 88, 89, 94, 95]:\r\n case_title_tiledata[i] = b\"\\x00\"*0x10\r\n\r\ncase1_title_tiles, _ = get_case_title(Image.open(\"case1_title.bmp\"))\r\ncase2_title_tiles, _ = get_case_title(Image.open(\"case2_title.bmp\"))\r\ncase3_title_tiles, _ = get_case_title(Image.open(\"case3_title.bmp\"))\r\ncase_title_tiles = sorted(list(set(case1_title_tiles + case2_title_tiles + case3_title_tiles)))\r\nfor i in range(len(case_title_tiles)):\r\n if 123+i < len(case_title_tiledata):\r\n case_title_tiledata[123+i] = case_title_tiles[i]\r\n else: case_title_tiledata.append(case_title_tiles[i])\r\npatch(0x9DAC, compress_data(b\"\".join(case_title_tiledata)))\r\n\r\ncase1_title_tilemap_offset = 0x30000\r\ncase2_title_tilemap_offset = 0x30168\r\ncase3_title_tilemap_offset = 0x302D0\r\n\r\n_, case1_tilemap = get_case_title(Image.open(\"case1_title.bmp\"), 
case_title_tiles)\r\nfor i in range(8):\r\n for j in range(8):\r\n tm_idx = ((7*0x14)+6) + (i*0x14)+j\r\n patch(case1_title_tilemap_offset+tm_idx, p8((0xFB+case1_tilemap[i][j]) & 0xFF))\r\n\r\n_, case2_tilemap = get_case_title(Image.open(\"case2_title.bmp\"), case_title_tiles)\r\nfor i in range(8):\r\n for j in range(8):\r\n tm_idx = ((7*0x14)+6) + (i*0x14)+j\r\n patch(case2_title_tilemap_offset+tm_idx, p8((0xFB+case2_tilemap[i][j]) & 0xFF))\r\n\r\n_, case3_tilemap = get_case_title(Image.open(\"case3_title.bmp\"), case_title_tiles)\r\nfor i in range(8):\r\n for j in range(8):\r\n tm_idx = ((7*0x14)+6) + (i*0x14)+j\r\n patch(case3_title_tilemap_offset+tm_idx, p8((0xFB+case3_tilemap[i][j]) & 0xFF))\r\n\r\n\r\ntype_text = [\"번역 종류 선택 \", \" 더 빙 판 \", \" 자 막 판 \", \"그래픽:피씨\", \"번역 :가각\", \"패치 :SeHwa\"]\r\ntype_text_xy = [(5, 3), (4, 8), (4, 12), (2, 15), (2, 16), (2, 17)]\r\ntype_text_chrs = \"\".join(sorted(set(\"\".join(map(lambda x: x.replace(\" \", \"\"), type_text)))))\r\n\r\nmain_tiledata, _ = decompress_data(0xA604)\r\ntype_tiledata = main_tiledata[:0x26]\r\nfor i in range(7):\r\n type_tiledata[0x09+i] = b\"\\x00\"*0x10\r\nfor i in range(6):\r\n type_tiledata[0x1A+i] = b\"\\x00\"*0x10\r\n\r\nfor i in range(len(type_text_chrs)):\r\n type_tiledata.append(get_font(type_text_chrs[i]))\r\ntype_text0_start_idx = len(type_tiledata)\r\nfor i in range(len(type_text[0])+1):\r\n t1 = b\"\"\r\n t2 = b\"\"\r\n t3 = b\"\"\r\n if i == 0 or type_text[0][i-1] == \" \": t1 = b\"\\x00\"*0x10\r\n else: t1 = get_font(type_text[0][i-1])\r\n if i == len(type_text[0]) or type_text[0][i] == \" \": t2 = b\"\\x00\"*0x10\r\n else: t2 = get_font(type_text[0][i])\r\n for j in range(0x10):\r\n t3 += p8(((t1[j] << 4) & 0xFF) | ((t2[j] >> 4) & 0b1111))\r\n type_tiledata.append(t3)\r\n\r\nmain_tilemap, _ = decompress_data(0x3DC00)\r\ntype_tilemap = b\"\".join(main_tilemap)\r\nfor j in range(len(type_text)):\r\n x = type_text_xy[j][0]\r\n y = type_text_xy[j][1]\r\n idx = y * 0x14 + x\r\n for k in range(len(type_text[j])):\r\n tm = 0x26 + type_text_chrs.find(type_text[j][k])\r\n if type_text[j][k] == \" \": tm = 0x00\r\n if j == 0: tm = type_text0_start_idx + k\r\n type_tilemap = type_tilemap[:idx+k] + p8(tm) + type_tilemap[idx+k+1:]\r\nc1 = compress_data(b\"\".join(type_tiledata))\r\nc2 = compress_data(type_tilemap)\r\npatch(0x3FB00, compress_data(b\"\".join(type_tiledata)))\r\npatch(0x3FE00, compress_data(type_tilemap))\r\n\r\n\r\nPATCH_CODE0_OFFSET = 0x61\r\nPATCH_CODE1_BANK = 0x0F\r\nPATCH_CODE1_ADDR = 0x7A00\r\nPATCH_CODE1_OFFSET = (PATCH_CODE1_BANK * 0x4000) + (PATCH_CODE1_ADDR - 0x4000)\r\nPATCH_CODE_NEWBANK_BANK = 0x16\r\nPATCH_CODE_NEWBANK_ADDR = 0x7500\r\nPATCH_CODE_NEWBANK_OFFSET = (PATCH_CODE_NEWBANK_BANK * 0x4000) + (PATCH_CODE_NEWBANK_ADDR - 0x4000)\r\nPATCH_NEWBANK_HOOK_OFFSET_TABLE_ADDR = 0x7F00\r\nPATCH_NEWBANK_HOOK_OFFSET_TABLE_OFFSET = (PATCH_CODE_NEWBANK_BANK * 0x4000) + (PATCH_NEWBANK_HOOK_OFFSET_TABLE_ADDR - 0x4000)\r\n\r\npatch_code = open(\"output.obj\", \"rb\").read().split(b\"\\x88\\x88\\x88\\x88\")\r\npatch_code0 = patch_code[0].split(b\"\\x77\\x77\\x77\\x77\")\r\npatch_code1 = patch_code[1].split(b\"\\x77\\x77\\x77\\x77\")\r\n\r\nnewbank_offsets_idx = patch_code[len(patch_code)-1].find(b\"\\x99\\x99\\x99\\x99\")\r\nnewbank_offsets_data = patch_code[len(patch_code)-1][:newbank_offsets_idx]\r\npatch_newbank_offsets = [ u32(newbank_offsets_data[i*4:i*4+4]) for i in range(len(newbank_offsets_data) // 4) ]\r\npatch_newbank_addrs = b\"\".join(map(lambda x: p16(x % 0x4000 + 0x4000) if x >= 0x4000 and 
x != 0xFFFF else p16(x), patch_newbank_offsets))\r\npatch(PATCH_NEWBANK_HOOK_OFFSET_TABLE_OFFSET, patch_newbank_addrs)\r\n\r\npatch_code_newbank = patch_code[len(patch_code)-1][newbank_offsets_idx+4:]\r\nprint(\"Code0 end address : \" + hex(PATCH_CODE0_OFFSET + len(b\"\".join(patch_code0))))\r\nprint(\"Code1 end address : \" + hex(PATCH_CODE1_ADDR + len(b\"\".join(patch_code1))))\r\nprint(\"Code_newbank end address : \" + hex(PATCH_CODE_NEWBANK_ADDR + len(patch_code_newbank)))\r\npatch(PATCH_CODE0_OFFSET, b\"\".join(patch_code0))\r\npatch(PATCH_CODE1_OFFSET, b\"\".join(patch_code1))\r\npatch(PATCH_CODE_NEWBANK_OFFSET, patch_code_newbank)\r\npatch_code_addr = PATCH_CODE0_OFFSET\r\nfor i in range(len(patch_newbank_offsets)):\r\n patch(patch_newbank_offsets[i], CALL_OP + p16(patch_code_addr))\r\n\r\narr = codecs.open(u\"korean.tbl\", \"rb\", \"utf8\").read().split(\"\\n\")\r\nkor_tables = {}\r\nfor i in range(len(arr)):\r\n t = arr[i].split(\"=\")\r\n kor_tables[t[1]] = t[0]\r\ncode_tables = {}\r\nfor i in range(len(arr)):\r\n a = arr[i].split(\"=\")\r\n code_tables[int(a[0], 16)] = a[1]\r\n\r\ndef str2code(str):\r\n global kor_tables\r\n\r\n code = b\"\"\r\n for i in range(len(str)):\r\n code += bytes.fromhex(kor_tables[str[i]])\r\n return code\r\ndef code2str(code, length):\r\n global code_tables\r\n\r\n i = 0\r\n string = \"\"\r\n while True:\r\n if code[i] >= 0xE0 and code[i] < 0xF0:\r\n c = code[i] * 0x100 + code[i+1]\r\n i += 2\r\n else:\r\n c = code[i]\r\n i += 1\r\n string += code_tables[c]\r\n if len(string) >= length: break\r\n return string\r\n\r\nkor_tables[\"’\"] = \"20\"\r\nkor_tables[\"”\"] = \"21\"\r\ncode_tables[0x20] = \"’\"\r\ncode_tables[0x21] = \"”\"\r\npatch(0x9140, b\"\\x30\\x30\\x30\\x30\\x10\\x10\\x20\\x20\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\")\r\npatch(0x9150, b\"\\x6C\\x6C\\x6C\\x6C\\x24\\x24\\x48\\x48\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\")\r\n\r\n\r\ndef create_text(translated_text_data, tables, data, ptr):\r\n F9_nargs = [0, 2, 0, 1, 0, 0, 0, 1, 2, 2, 0, 1, 1, 1, 0, 0, 0, 2, 1, 0, 2, 2, 0, 2, 2, 1, 2, 0, 0]\r\n start_text = 0\r\n translated_idx = 0\r\n cur_line_cnt = 0\r\n result = b\"\"\r\n\r\n def append_text():\r\n if translated_text_data[translated_idx] == \"\": return b\"\"\r\n r = str2code(translated_text_data[translated_idx])\r\n t = 1\r\n while True:\r\n if data[ptr-t] == 0xA4:\r\n r += p8(0xA4)\r\n t += 1\r\n else: break\r\n return r\r\n\r\n while True:\r\n if data[ptr] == 0xF7:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xF7)\r\n translated_idx += 1\r\n cur_line_cnt = 0\r\n elif data[ptr] == 0xF8:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xF8) + p8(data[ptr+1])\r\n translated_idx += 1\r\n ptr += 1\r\n elif data[ptr] == 0xF9:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xF9) + p8(data[ptr+1])\r\n n_arg = F9_nargs[data[ptr+1]]\r\n result += b\"\".join(map(lambda x: p8(x), data[ptr+2:ptr+2+n_arg]))\r\n translated_idx += 1\r\n ptr += n_arg\r\n ptr += 1\r\n elif data[ptr] == 0xFA:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xFA)\r\n translated_idx += 2\r\n cur_line_cnt = 0\r\n elif data[ptr] == 0xFB:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xFB) + p8(data[ptr+1])\r\n translated_idx += 1\r\n ptr += 1\r\n elif data[ptr] == 0xFC:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xFC) + 
p8(data[ptr+1])\r\n translated_idx += 1\r\n ptr += 1\r\n elif data[ptr] == 0xFD:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xFD)\r\n translated_idx += 1\r\n cur_line_cnt += 1\r\n if cur_line_cnt >= 3:\r\n cur_line_cnt = 0\r\n translated_idx += 1\r\n elif data[ptr] == 0xFE:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xFE)\r\n translated_idx += 1\r\n elif data[ptr] == 0xFF:\r\n if start_text == 1:\r\n result += append_text()\r\n start_text = 0\r\n result += p8(0xFF)\r\n translated_idx += 2\r\n cur_line_cnt = 0\r\n break\r\n else:\r\n if start_text == 0:\r\n t = 0\r\n while True:\r\n if data[ptr+t] == 0xA4:\r\n result += p8(0xA4)\r\n t += 1\r\n else: break\r\n start_text = 1\r\n ptr += 1\r\n ptr += 1\r\n return result, translated_text_data[translated_idx:], ptr\r\n\r\nbank_text_offset = [0x4000]*0x20\r\nbank_text_offset[0x12] = 0x5300\r\n\r\ndef patch_text(bank, offset, text):\r\n patch(offset, p16(bank_text_offset[bank]))\r\n patch(bank * 0x4000 + (bank_text_offset[bank] - 0x4000), text)\r\n bank_text_offset[bank] += len(text)\r\n\r\ntranslated_text_data = codecs.open(u\"translated_text.txt\", \"rb\", \"utf8\").read().split(\"\\r\\n\")\r\ntranslated_text_data2 = codecs.open(u\"translated_text2.txt\", \"rb\", \"utf8\").read().split(\"\\r\\n\")\r\ntext_set_offset_addrs = [(0x3DC1, 0x3DB9, 0x3DBD), (0x2719, 0x271E, 0x2722), (0x2809, 0x280D, 0x2811), (0x0, 0x29E7, 0x29EB), (0x0, 0x2AAF, 0x2AB3)]\r\ntext_offsets = [(0x3DD9, 1), (0x2D68A, 2), (0x2D754, 2), (0x2D80E, 2), (0x2D856, 2)]\r\nfor i in range(len(text_offsets)):\r\n ptr = text_offsets[i][0]\r\n count = text_offsets[i][1]\r\n result_all = b\"\"\r\n for j in range(count):\r\n _, translated_text_data2, _ = create_text(translated_text_data2, kor_tables, data, ptr)\r\n result, translated_text_data, ptr = create_text(translated_text_data, kor_tables, data, ptr)\r\n result_all += result\r\n\r\n offset_bank = text_set_offset_addrs[i][0]\r\n offset_low = text_set_offset_addrs[i][1]\r\n offset_high = text_set_offset_addrs[i][2]\r\n if offset_bank != 0:\r\n patch(offset_bank, p8(0x12))\r\n patch(offset_low, p16(bank_text_offset[0x12])[:1])\r\n patch(offset_high, p16(bank_text_offset[0x12])[1:])\r\n\r\n patch(0x12 * 0x4000 + (bank_text_offset[0x12] - 0x4000), result_all)\r\n bank_text_offset[0x12] += len(result_all)\r\n\r\ntext_ptr_table_offsets = [(0x29F3, 5), (0x48E5, 30), (0x4BF6, 10), (0x22A8A, 68), (0x23C38, 7), (0x25B16, 17), (0x2AB05, 19), (0x2C000, 18), (0x2C945, 12), (0x2D05E, 10), (0x2D4E4, 5), (0x2D9A9, 19), (0x2F4C9, 50)]\r\nptr_tables = []\r\nfor i in range(len(text_ptr_table_offsets)):\r\n offset = text_ptr_table_offsets[i][0]\r\n count = text_ptr_table_offsets[i][1]\r\n last_addr = 0\r\n for j in range(count):\r\n bank = offset // 0x4000\r\n addr = u16(data[offset+(j*2):offset+(j*2)+2])\r\n ptr_tables.append((bank, addr))\r\n if addr == 0x0 or addr == 0xffff: continue\r\n if last_addr < addr: last_addr = addr\r\n for j in range(count):\r\n result_all = b\"\"\r\n result2_all = b\"\"\r\n bank = offset // 0x4000\r\n addr = u16(data[offset+(j*2):offset+(j*2)+2])\r\n addr_orig = addr\r\n if addr == 0xffff:\r\n patch(offset+(j*2), p16(0xffff))\r\n patch(offset+(count*2)+(j*2), p16(0xffff))\r\n continue\r\n if addr == 0x0: continue\r\n if addr >= 0x4000: addr -= 0x4000\r\n ptr = bank * 0x4000 + addr\r\n while True:\r\n result, translated_text_data, end_ptr = create_text(translated_text_data, kor_tables, data, ptr)\r\n result2, translated_text_data2, _ = 
create_text(translated_text_data2, kor_tables, data, ptr)\r\n if end_ptr == 0x2BEF9 or end_ptr == 0x2F4C9: # hack\r\n result_all += result\r\n result2_all += result2\r\n break\r\n if addr_orig != last_addr:\r\n find = -1\r\n for k in range(len(ptr_tables)):\r\n bank = ptr_tables[k][0]\r\n addr = ptr_tables[k][1]\r\n if addr == int(not not bank) * 0x4000 + (end_ptr % 0x4000):\r\n find = 1\r\n break\r\n if find == -1:\r\n result_all += result\r\n result2_all += result2\r\n ptr = end_ptr\r\n continue\r\n result_all += result\r\n result2_all += result2\r\n break\r\n\r\n if i == 0 or i == 2 or i == 4:\r\n patch_text(0x12, offset+(j*2), result_all)\r\n patch_text(0x12, offset+(count*2)+(j*2), result2_all)\r\n elif i == 1:\r\n f = 0\r\n char_len = 0\r\n tmp = result_all[:-1]\r\n while True:\r\n if tmp[-1] != 0xA4: break\r\n tmp = tmp[:-1]\r\n if result_all[0] == 0xFB: tmp = tmp[2:]\r\n for k in range(len(tmp)):\r\n if f == 1:\r\n f = 0\r\n continue\r\n if tmp[k] >= 0xE0 and tmp[k] <= 0xE9: f = 1\r\n char_len += 1\r\n tmp = tmp + b\"\\xA4\"*(8 - char_len)\r\n if result_all[0] == 0xFB: result_all = result_all[:2] + tmp\r\n else: result_all = tmp\r\n result_all += b\"\\xFF\"\r\n\r\n patch_text(0x12, offset+(j*2), result_all)\r\n\r\n f = 0\r\n char_len = 0\r\n tmp = result2_all[:-1]\r\n while True:\r\n if tmp[-1] != 0xA4: break\r\n tmp = tmp[:-1]\r\n if result2_all[0] == 0xFB: tmp = tmp[2:]\r\n for k in range(len(tmp)):\r\n if f == 1:\r\n f = 0\r\n continue\r\n if tmp[k] >= 0xE0 and tmp[k] <= 0xE9: f = 1\r\n char_len += 1\r\n tmp = tmp + b\"\\xA4\"*(8 - char_len)\r\n if result2_all[0] == 0xFB: result2_all = result2_all[:2] + tmp\r\n else: result2_all = tmp\r\n result2_all += b\"\\xFF\"\r\n\r\n patch_text(0x12, offset+(count*2)+6+(j*2), result2_all)\r\n elif i == 3:\r\n patch_text(0x13, offset+(j*2), result_all)\r\n patch_text(0x13, offset+(count*2)+(j*2), result2_all)\r\n elif i == 5:\r\n patch_text(0x14, offset+(j*2), result_all)\r\n patch_text(0x15, offset+(count*2)+(j*2), result2_all)\r\n elif i == 6:\r\n patch_text(0x16, offset+(j*2), result_all)\r\n patch_text(0x16, offset+(count*2)+(j*2), result2_all)\r\n elif i == 7:\r\n patch_text(0x19, offset+(j*2), result_all)\r\n patch_text(0x19, offset+(count*2)+(j*2), result2_all)\r\n elif i == 8:\r\n patch_text(0x17, offset+(j*2), result_all)\r\n patch_text(0x17, offset+(count*2)+(j*2), result2_all)\r\n elif i == 9:\r\n patch_text(0x18, offset+(j*2), result_all)\r\n patch_text(0x18, offset+(count*2)+(j*2), result2_all)\r\n elif i == 10:\r\n patch_text(0x18, offset+(j*2), result_all)\r\n patch_text(0x18, offset+(count*2)+(j*2), result2_all)\r\n elif i == 11:\r\n patch_text(0x17, offset+(j*2), result_all)\r\n patch_text(0x18, offset+(count*2)+(j*2), result2_all)\r\n elif i == 12:\r\n patch_text(0x19, offset+(j*2), result_all)\r\n patch_text(0x19, offset+(count*2)+(j*2), result2_all)\r\n if i == 1:\r\n patch(offset+(count*2), p16(offset+(count*2)+6+0x00))\r\n patch(offset+(count*2)+2, p16(offset+(count*2)+6+0x14))\r\n patch(offset+(count*2)+4, p16(offset+(count*2)+6+0x28))\r\n\r\n patch(0x1A18, p16(bank_text_offset[0x12]))\r\n patch(0x1AA2, p16(bank_text_offset[0x12]))\r\n patch(0x3A1A, p8(p16(bank_text_offset[0x12])[0]))\r\n patch(0x3A1E, p8(p16(bank_text_offset[0x12])[1]))\r\n patch(0x3B74, p8(p16(bank_text_offset[0x12])[0]))\r\n patch(0x3B78, p8(p16(bank_text_offset[0x12])[1]))\r\n patch(0x3B87, p8(0x12))\r\n patch(0x3994, p8(0x12))\r\n patch(0x12 * 0x4000 + (bank_text_offset[0x12] - 0x4000), b\"\\xA4\"*8 + b\"\\xFF\")\r\n bank_text_offset[0x12] += 
9\r\n elif i == 2:\r\n patch(0x391F, p8(0x12))\r\n elif i == 3:\r\n patch(0x16CA, p8(0x13))\r\n elif i == 4:\r\n patch(0x1B0C, p8(0x12))\r\n elif i == 5:\r\n patch(0x2125, b\"\\x00\"*2)\r\n\r\nTEXT_LOC_TABLE_OFFSET = 0x2D42\r\n\r\nfor i in range(4):\r\n bank = data[TEXT_LOC_TABLE_OFFSET+(i*3)]\r\n addr = u16(data[TEXT_LOC_TABLE_OFFSET+(i*3)+1:TEXT_LOC_TABLE_OFFSET+(i*3)+3])\r\n offset = bank * 0x4000 + (addr - 0x4000)\r\n count = (u16(data[offset:offset+2]) - addr) // 2\r\n ptr_tables = []\r\n last_addr = 0\r\n for j in range(count):\r\n addr = u16(data[offset+(j*2):offset+(j*2)+2])\r\n ptr_tables.append(addr)\r\n if last_addr < addr: last_addr = addr\r\n for j in range(count):\r\n result_all = b\"\"\r\n result2_all = b\"\"\r\n addr = ptr_tables[j]\r\n ptr = bank * 0x4000 + (addr - 0x4000)\r\n while True:\r\n result, translated_text_data, end_ptr = create_text(translated_text_data, kor_tables, data, ptr)\r\n result2, translated_text_data2, _ = create_text(translated_text_data2, kor_tables, data, ptr)\r\n if i == 2 and j == 62: # hack\r\n if end_ptr == 0x3A4EC:\r\n result_all += result\r\n result2_all += result2\r\n ptr = end_ptr\r\n continue\r\n\r\n if ptr_tables[j] != last_addr:\r\n find = -1\r\n for k in range(count):\r\n addr = ptr_tables[k]\r\n if addr == int(not not bank) * 0x4000 + (end_ptr % 0x4000):\r\n find = 1\r\n break\r\n if find == -1:\r\n result_all += result\r\n result2_all += result2\r\n ptr = end_ptr\r\n continue\r\n result_all += result\r\n result2_all += result2\r\n break\r\n\r\n if i == 0:\r\n patch_text(0x1A, offset+(j*2), result_all)\r\n patch_text(0x1B, offset+(count*2)+(j*2), result2_all)\r\n elif i == 1:\r\n if j < 27:\r\n patch_text(0x14, offset+(j*2), result_all)\r\n else:\r\n patch_text(0x1E, offset+(j*2), result_all)\r\n\r\n if j < 27:\r\n patch_text(0x15, offset+(count*2)+(j*2), result2_all)\r\n else:\r\n patch_text(0x1F, offset+(count*2)+(j*2), result2_all)\r\n elif i == 2:\r\n patch_text(0x1C, offset+(j*2), result_all)\r\n patch_text(0x1D, offset+(count*2)+(j*2), result2_all)\r\n elif i == 3:\r\n patch_text(0x12, offset+(j*2), result_all)\r\n patch_text(0x12, offset+(count*2)+(j*2), result2_all)\r\n\r\nhidden_scenario_text_offsets = [0x3C433, 0x3C485, 0x3C539, 0x3C53D, 0x3C575, 0x3C5B3]\r\nnew_ptr_table_offset = 0x3FFA0\r\nlast_addr = 0x3C5F8\r\n\r\npatch(0x3C41F, p8(0x13))\r\nfor i in range(len(hidden_scenario_text_offsets)):\r\n ptr = hidden_scenario_text_offsets[i]\r\n result_all = b\"\"\r\n result2_all = b\"\"\r\n while True:\r\n result, translated_text_data, _ = create_text(translated_text_data, kor_tables, data, ptr)\r\n result2, translated_text_data2, ptr = create_text(translated_text_data2, kor_tables, data, ptr)\r\n result_all += result\r\n result2_all += result2\r\n if ptr == last_addr: break\r\n\r\n patch_text(0x13, new_ptr_table_offset+(i*2), result_all)\r\n patch_text(0x13, new_ptr_table_offset+(len(hidden_scenario_text_offsets)*2)+(i*2), result2_all)\r\n\r\nhidden_scenario_ptr_table_offsets = [(0x3C3E0, 4)]\r\nnew_ptr_table_offset = 0x3FFC0\r\nfor i in range(len(hidden_scenario_ptr_table_offsets)):\r\n offset = hidden_scenario_ptr_table_offsets[i][0]\r\n count = hidden_scenario_ptr_table_offsets[i][1]\r\n for j in range(count):\r\n result_all = b\"\"\r\n result2_all = b\"\"\r\n bank = offset // 0x4000\r\n addr = u16(data[offset+(j*2):offset+(j*2)+2])\r\n addr_orig = addr\r\n if addr >= 0x4000: addr -= 0x4000\r\n ptr = bank * 0x4000 + addr\r\n while True:\r\n result, translated_text_data, _ = create_text(translated_text_data, kor_tables, data, 
ptr)\r\n result2, translated_text_data2, ptr = create_text(translated_text_data2, kor_tables, data, ptr)\r\n result_all += result\r\n result2_all += result2\r\n if ptr == last_addr: break\r\n\r\n patch_text(0x1A, new_ptr_table_offset+(j*2), result_all)\r\n patch_text(0x1A, new_ptr_table_offset+(count*2)+(j*2), result2_all)\r\n\r\nfor i in range(0x12, 0x20):\r\n print(\"bank%X end address : %04x\" % (i, bank_text_offset[i]))\r\n\r\npatch(0x8EE0, b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x30\\x30\\x10\\x10\\x20\\x20\")\r\npatch(0x4032, str2code(\"인물소개 사건정리\") + b\"\\xFD\" + str2code(\"패스워드\"))\r\n\r\n\r\npatch(0x148, b\"\\x04\")\r\npatch(0x14D, p8(gb_checksum1(data_patched)))\r\npatch(0x14E, pb16(gb_checksum2(data_patched)))\r\n\r\nopen(\"output.gb\", \"wb\").write(data_patched)\r\n","repo_name":"SeHwa/detective-conan-2-gb-hangul-patch","sub_path":"patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":38194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"32416337816","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 8 11:30:25 2019\n\n@author: luke\n\"\"\"\n\nfrom keras.layers import Input,LSTM,Dense,Embedding\nfrom keras.models import Model\n#from keras.callbacks import ModelCheckpoint\n\nimport pandas as pd\nimport numpy as np\nimport re\n\nimport pickle\n\nEMBED_HIDDEN_SIZE = 50\n\ndef create_model(input_length, n_input,n_output,n_units):\n #训练阶段\n #encoder\n encoder_input = Input(shape = (input_length,))\n embedded_input = Embedding(n_input, EMBED_HIDDEN_SIZE)(encoder_input)\n #encoder输入维度n_input为每个时间步的输入xt的维度,这里是用来one-hot的英文字符数\n encoder = LSTM(n_units, return_state=True)\n #n_units为LSTM单元中每个门的神经元的个数,return_state设为True时才会返回最后时刻的状态h,c\n _,encoder_h,encoder_c = encoder(embedded_input)\n encoder_state = [encoder_h,encoder_c]\n #保留下来encoder的末状态作为decoder的初始状态\n\n #decoder\n decoder_input = Input(shape = (None, n_output))\n #decoder的输入维度为中文字符数\n decoder = LSTM(n_units,return_sequences=True, return_state=True)\n #训练模型时需要decoder的输出序列来与结果对比优化,故return_sequences也要设为True\n decoder_output, _, _ = decoder(decoder_input,initial_state=encoder_state)\n #在训练阶段只需要用到decoder的输出序列,不需要用最终状态h.c\n decoder_dense = Dense(n_output,activation='softmax')\n decoder_output = decoder_dense(decoder_output)\n #输出序列经过全连接层得到结果\n\n #生成的训练模型\n model = Model([encoder_input,decoder_input],decoder_output)\n #第一个参数为训练模型的输入,包含了encoder和decoder的输入,第二个参数为模型的输出,包含了decoder的输出\n\n #推理阶段,用于预测过程\n #推断模型—encoder\n encoder_infer = Model(encoder_input,encoder_state)\n\n #推断模型-decoder\n decoder_state_input_h = Input(shape=(n_units,))\n decoder_state_input_c = Input(shape=(n_units,))\n decoder_state_input = [decoder_state_input_h, decoder_state_input_c]#上个时刻的状态h,c\n\n decoder_infer_output, decoder_infer_state_h, decoder_infer_state_c = decoder(decoder_input,initial_state=decoder_state_input)\n decoder_infer_state = [decoder_infer_state_h, decoder_infer_state_c]#当前时刻得到的状态\n decoder_infer_output = decoder_dense(decoder_infer_output)#当前时刻的输出\n decoder_infer = Model([decoder_input]+decoder_state_input,[decoder_infer_output]+decoder_infer_state)\n\n return model, encoder_infer, decoder_infer\n\ndef build_covab(articles):\n d = {} # {'word' : num}\n for article in articles:\n for word in article:\n if word in d:\n d[word] += 1\n else:\n d[word] = 1\n s = sorted(d.items(), key=lambda x:x[1], reverse=True) #降序\n\n# 计算最佳词典长度\n# s_n = [x[1] for x in s]\n# s_n = s_n[1:] #去掉\n# s_n = np.asarray(s_n)\n# 
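[Editor's sketch] The ROM-patching record that ends just above leans on one recurring piece of arithmetic: converting a (bank, CPU address) pair into a flat file offset via `bank * 0x4000 + (addr - 0x4000)`. A minimal illustration of that mapping follows; `rom_offset` is a hypothetical name re-derived from context, and `p16` is a guess at the little-endian helper the script appears to use, not code taken from patch.py.

def rom_offset(bank: int, addr: int) -> int:
    # Switchable ROM banks are visible to the CPU at 0x4000-0x7FFF, so a
    # banked pointer is rebased by subtracting 0x4000 before adding the
    # bank's position in the file; bank 0 (0x0000-0x3FFF) maps directly.
    if bank == 0 or addr < 0x4000:
        return addr
    return bank * 0x4000 + (addr - 0x4000)

def p16(value: int) -> bytes:
    # Little-endian 16-bit encoding, matching how the patcher splits
    # pointers into low/high bytes when rewriting pointer tables.
    return bytes([value & 0xFF, (value >> 8) & 0xFF])

assert rom_offset(0x12, 0x5300) == 0x12 * 0x4000 + 0x1300
assert p16(0x5300) == b"\x00\x53"

This is also why the script keeps a per-bank `bank_text_offset`: each translated string is appended at the next free in-bank address, and the pointer tables are rewritten with the new little-endian addresses.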
print(\"s_n.mean\",s_n.mean())\n# print(s_n[int(len(s_n)*0.3)]) #取出所有词中频率最高的词的前30%,DEBUG_MODE中出现了2次\n# print(int(len(s_n)*0.3)) #9066 #VOCAB_SIZE = 9000\n\n s_w = [x[0] for x in s]\n word2int = {v : k + 1 for k, v in enumerate(s_w)}\n int2word = {k + 1 : v for k, v in enumerate(s_w)}\n return word2int, int2word\n\ndef clean_str_to_list(string):\n #string = re.sub(r\"[^A-Za-z0-9(),!?\\']\", \" \", string)\n string = re.sub(r\"n\\'t\", \" n\\'\", string) #don't -> do n't(do not)\n string = re.sub(r\"\\'s\", \" \\'s\", string) #It's -> It 's(It is or It has)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string) #I've -> I 've(I have)\n string = re.sub(r\"\\'re\", \" \\'re\", string) #You're -> You 're(You are)\n string = re.sub(r\"\\'d\", \" \\'d\", string) #I'd (like to) -> I 'd(I had)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string) #I'll -> I 'll(I will)\n string = re.sub(r\"\\.\", \" . \", string) #',' -> ' , '\n string = re.sub(r\",\", \" , \", string) #',' -> ' , '\n string = re.sub(r\"!\", \" ! \", string) #'!' -> ' ! '\n string = re.sub(r\"\\(\", \" ( \", string) #'(' -> ' ( '\n string = re.sub(r\"\\)\", \" ) \", string) #')' -> ' ) '\n string = re.sub(r\"\\?\", \" ? \", string) #'?' -> ' ? '\n sentense=[]\n for word in string.split(\" \"):\n if word.strip():\n sentense.append(word)\n return sentense\n\nN_UNITS = 256\nBATCH_SIZE = 64\nEPOCH = 100\nNUM_SAMPLES = 10000\n\ndata_path = 'data/cmn.txt'\n\ndf = pd.read_table(data_path,header=None).iloc[:NUM_SAMPLES,:,]\ndf.columns=['inputs','targets']\n\ndf['targets'] = df['targets'].apply(lambda x: '\\t'+x+'\\n')\n\ninput_texts = df.inputs.values.tolist()\ntarget_texts = df.targets.values.tolist()\n\ninput_words = []\nfor i in range(len(input_texts)):\n input_words.append(clean_str_to_list(input_texts[i]))\n\ntarget_texts = [i.strip() for i in target_texts]\n#print(input_texts)\n#print(target_texts)\n\n#print(input_words)\n\n#input_characters = sorted(list(set(df.inputs.unique().sum())))\ntarget_characters = sorted(list(set(df.targets.unique().sum())))\n\nINUPT_LENGTH = max([len(i) for i in input_words])\nOUTPUT_LENGTH = max([len(i) for i in target_texts])\n\nOUTPUT_FEATURE_LENGTH = len(target_characters)\n\nencoder_input = np.zeros((NUM_SAMPLES,INUPT_LENGTH))\ndecoder_input = np.zeros((NUM_SAMPLES,OUTPUT_LENGTH,OUTPUT_FEATURE_LENGTH))\ndecoder_output = np.zeros((NUM_SAMPLES,OUTPUT_LENGTH,OUTPUT_FEATURE_LENGTH))\n\n#input_dict = dict((char,index + 1) for index,char in enumerate(input_characters))\n#print(input_dict)\n#input_dict_reverse = dict((index + 1,char) for index,char in enumerate(input_characters))\ninput_dict, input_dict_reverse = build_covab(input_words)\n#print(input_dict[:10])\n#print(input_dict_reverse[:10])\n\ntarget_dict = {char:index for index,char in enumerate(target_characters)}\n#print(target_dict)\ntarget_dict_reverse = {index:char for index,char in enumerate(target_characters)}\n\nINPUT_FEATURE_LENGTH = len(input_dict) + 1\n\nprint(\"OUTPUT_LENGTH=\" , OUTPUT_LENGTH)\nprint(\"OUTPUT_FEATURE_LENGTH=\" , OUTPUT_FEATURE_LENGTH)\nprint(len(input_dict))\nprint(len(target_dict))\n\nwith open(\"output_length.pkl\", \"wb\") as f:\n pickle.dump([INUPT_LENGTH, OUTPUT_LENGTH], f)\n\nwith open(\"input_dict.pkl\", \"wb\") as f:\n pickle.dump(input_dict, f)\n\nwith open(\"input_dict_reverse.pkl\", \"wb\") as f:\n pickle.dump(input_dict_reverse, f)\n\nwith open(\"target_dict.pkl\", \"wb\") as f:\n pickle.dump(target_dict, f)\n\nwith open(\"target_dict_reverse.pkl\", \"wb\") as f:\n pickle.dump(target_dict_reverse, f)\n\nfor seq_index,seq 
in enumerate(input_words):\n for char_index, char in enumerate(seq):\n encoder_input[seq_index,char_index] = input_dict[char]\n\nfor seq_index,seq in enumerate(target_texts):\n for char_index,char in enumerate(seq):\n decoder_input[seq_index,char_index,target_dict[char]] = 1.0\n if char_index > 0:\n decoder_output[seq_index,char_index-1,target_dict[char]] = 1.0\n\n#print(''.join([input_dict_reverse[np.argmax(i)] for i in encoder_input[0] if max(i) !=0]))\n\n#print(''.join([target_dict_reverse[np.argmax(i)] for i in decoder_output[0] if max(i) !=0]))\n\n#print(''.join([target_dict_reverse[np.argmax(i)] for i in decoder_input[0] if max(i) !=0]))\n\nmodel_train, encoder_infer, decoder_infer = \\\n create_model(INUPT_LENGTH, INPUT_FEATURE_LENGTH, OUTPUT_FEATURE_LENGTH, N_UNITS)\n\nmodel_train.compile(optimizer='rmsprop', loss='categorical_crossentropy')\n\nmodel_train.summary()\n\nencoder_infer.summary()\n\ndecoder_infer.summary()\n\nmodel_train.fit([encoder_input,decoder_input],decoder_output,batch_size=BATCH_SIZE,\n epochs=EPOCH,validation_split=0.2)\nmodel_train.save(\"translate.h5\")\nencoder_infer.save(\"encoder_infer.h5\")\ndecoder_infer.save(\"decoder_infer.h5\")\n","repo_name":"moneypi/python_study","sub_path":"NLP/seq2seq/seq2seq_train.py","file_name":"seq2seq_train.py","file_ext":"py","file_size_in_byte":7770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"9867028227","text":"from threading import Thread\r\nfrom threading import Condition\r\nfrom Socket import Socket\r\nimport Threaduri_Sender as ts\r\n\r\nclass FormatareFisier:\r\n cale_fisier=''\r\n dimensiune_sir=2\r\n numar_secventa=0\r\n coada_pachete=[]\r\n text=''\r\n\r\n @staticmethod\r\n #functie pentru citirea fisierului\r\n def read_file(cale):\r\n\r\n #deschidem fisierul folosind calea\r\n file=open(cale)\r\n\r\n #salvam textul intr-o variabila\r\n text=file.read()\r\n\r\n #inchidem fisierul\r\n file.close()\r\n\r\n #salvam textul\r\n FormatareFisier.text=text\r\n return text\r\n\r\n @staticmethod\r\n #functie care imparte textul in mai multe bucati\r\n def split_file(text):\r\n\r\n #variabile\r\n dim=FormatareFisier.dimensiune_sir\r\n coada_pachete=[]\r\n\r\n\r\n for i in range(0, len(text), dim):\r\n #daca nu se imparte exact dimensiunea fisierului cu dimensiunea pachetului facem ultimul sir cu cate caractere au mai ramas\r\n if(i+dim>len(text)):\r\n sir=text[i:len(text)]\r\n else:\r\n sir=text[i:i+dim]\r\n\r\n #salvam sirul in coada de pachete pe pozitia k\r\n coada_pachete.append(sir)\r\n\r\n return coada_pachete\r\n\r\n @staticmethod\r\n #functie care realizeaza formatarea pachetelor (nr. 
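[Editor's sketch] The Keras script above saves `encoder_infer` and `decoder_infer` but never shows how they are driven at prediction time. Below is a hedged sketch of the usual greedy decoding loop for such an encoder-decoder pair, assuming the script's `'\t'` start / `'\n'` stop conventions and its `target_dict` / `target_dict_reverse` mappings; `decode_sequence` is an illustrative name, not a function from the file.

import numpy as np

def decode_sequence(source, encoder_infer, decoder_infer,
                    target_dict, target_dict_reverse,
                    n_output, max_len):
    # Encode once; the encoder's final [h, c] seeds the decoder state.
    state = encoder_infer.predict(source)
    target_seq = np.zeros((1, 1, n_output))
    target_seq[0, 0, target_dict['\t']] = 1.0   # start-of-sequence token
    decoded = ''
    for _ in range(max_len):
        output, h, c = decoder_infer.predict([target_seq] + state)
        char_index = int(np.argmax(output[0, -1, :]))
        char = target_dict_reverse[char_index]
        if char == '\n':                        # stop token
            break
        decoded += char
        # Feed the prediction back in as the next one-hot input.
        target_seq = np.zeros((1, 1, n_output))
        target_seq[0, 0, char_index] = 1.0
        state = [h, c]
    return decoded

One caveat worth flagging in the original: it wraps each target in `'\t'…'\n'` and then calls `.strip()` on it, which removes both markers again. If that is not intentional, a loop like the one above would never see its stop token.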
secventa + sir caractere)\r\n def format_file(text):\r\n\r\n #apelam functia de citire fisier\r\n coada_pachete=FormatareFisier.split_file(text)\r\n FormatareFisier.coada_pachete=coada_pachete\r\n\r\n #vom folosi un vector intermediar pentru a edita coada de pachete\r\n i=1\r\n vector=[]\r\n for k in coada_pachete:\r\n #adaugam un separator pentru a fi mai usoara separarea de catre reciever\r\n sir=(str)(i)+'|'+k\r\n vector.append(sir)\r\n #salvam coada de pachete formatata\r\n return vector\r\n\r\n @staticmethod\r\n #functie care adauga cozii de pachete un pachet de inceput si unul de sfarsit\r\n def add_ends(f):\r\n if f==1:\r\n #pachetul de inceput va contine numarul de pachete de transmis si cuvantul START\r\n p='START'+'|'+(str)(len(FormatareFisier.coada_pachete))\r\n else:\r\n #pachetul de final va contine cuvantul STOP si numarul de caractere transmise\r\n p='STOP'+'|'+(str)(len(FormatareFisier.text))\r\n\r\n #adaugam pachetele de start si stop la coada de pachete pentru a fi transmise\r\n return p\r\n\r\n\r\n @staticmethod\r\n def siruri_egale(s1, s2):\r\n # verific daca cele 2 siruri au lungimi egale\r\n if (len(s1) != len(s2)):\r\n # in caz contrar inseamna ca ele nu pot fi identice\r\n return False\r\n # parcurg cele 2 siruri si verific daca sunt la fel\r\n for c in range(0, len(s1)):\r\n if s1[c] != s2[c]:\r\n # cele 2 siruri au simboluri diferite pe aceeasi pozitie-> nu sunt egale\r\n return False\r\n # sirurile sunt identice\r\n return True\r\n\r\nclass Thread_Prelucrare(Thread):\r\n # creez o variabila de conditie pentru sincronizarea thread-ului de citire\r\n stare_citire = Condition()\r\n coada_pachete = [] # coada ce va contine continutul fisierelor prelucrate\r\n\r\n def __init__(self):\r\n # apelez constructor din clasa parinte\r\n super(Thread_Prelucrare, self).__init__()\r\n\r\n # metoda run\r\n def run(self):\r\n # astept la infinit\r\n while True:\r\n # primesc lock\r\n Thread_Prelucrare.stare_citire.acquire()\r\n cale = FormatareFisier.cale_fisier\r\n # retin calea pentru impachetarea pachetelor de start si stop\r\n sir = FormatareFisier.read_file(cale)\r\n # prelucrez continutul fisierului\r\n Thread_Prelucrare.coada_pachete = FormatareFisier.format_file(sir)\r\n # prelucrez si pun in coada pachetul de start\r\n s = FormatareFisier.add_ends(1)\r\n # adaug pachetul de start\r\n Thread_Prelucrare.coada_pachete = [s] + Thread_Prelucrare.coada_pachete\r\n # dupa ce am pus toate pachetele corespunzatoare, adaug pachetul de stop\r\n s = FormatareFisier.add_ends(2)\r\n # adaug pachetul de stop\r\n Thread_Prelucrare.coada_pachete = Thread_Prelucrare.coada_pachete + [s]\r\n # verific daca am conexiunea pe socket deschisa\r\n if Socket.flag:\r\n # anunt thread-ul de trimitere ca poate sa isi inceapa treaba\r\n ts.Thread_Trimitere.stare_trimitere.acquire()\r\n ts.Thread_Trimitere.stare_trimitere.notify()\r\n # eliberare lock\r\n ts.Thread_Trimitere.stare_trimitere.release()\r\n # eliberare lock\r\n Thread_Prelucrare.stare_citire.release()\r\n\r\n\r\n","repo_name":"RobertChi/ProiectRC","sub_path":"FormatareFisier.py","file_name":"FormatareFisier.py","file_ext":"py","file_size_in_byte":4796,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"25367862620","text":"from django.contrib import admin\nfrom django.urls import path\nimport patient.views as patient_views\nfrom django.views.generic import TemplateView\napp_name = 'patient'\n\nurlpatterns = [\n path('/Checkout/', patient_views.checkout, name='checkout'),\n 
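[Editor's sketch] The Romanian `FormatareFisier` class above frames a file as `seq|payload` packets bracketed by `START|packet_count` and `STOP|character_count` markers. One detail worth flagging: `format_file` initialises `i = 1` but never increments it, so every packet appears to carry sequence number 1. The compact sketch below shows the presumably intended numbering; `frame_text` is an illustrative helper, not part of the class.

def frame_text(text: str, chunk: int = 2):
    # 1-based seq|payload packets, bracketed by START/STOP markers.
    packets = [f"{i + 1}|{text[p:p + chunk]}"
               for i, p in enumerate(range(0, len(text), chunk))]
    return [f"START|{len(packets)}"] + packets + [f"STOP|{len(text)}"]

assert frame_text("abcde") == ["START|3", "1|ab", "2|cd", "3|de", "STOP|5"]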
path('/History/', patient_views.history, name='history'),\n path('/Pharmacy/', patient_views.show_products, name='show_products'),\n path('/Cart/', patient_views.show_cart, name='show_cart'),\n path('pharmacy-info/', patient_views.test_function, name='show-pharmacy-info'),\n path('signup/', patient_views.signup, name='patient_signup'),\n path('/', patient_views.load_patient, name='home'),\n path('/search_result/', patient_views.search_result, name='search_result'),\n path('/take_appointment//', patient_views.take_appointment, name='take_appointment'),\n path('/show_appointments/', patient_views.show_appointments, name='show_appointments'),\n path('/', patient_views.load_patient, name='home'),\n path('/show_doc_profile//', patient_views.show_doctor_profile,name = 'show_doctor_profile'),\n path('/show_profile/', patient_views.show_profile, name='show_profile'),\n path('/show_prescription//', patient_views.show_prescription, name='show_prescription'),\n\n]\n","repo_name":"nafiz09/DoctorZone","sub_path":"patient/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"43481817067","text":"import cartopy.crs as ccrs\nimport copy\nfrom datacube.utils.cog import write_cog\nimport logging\nfrom flask import request as rq\nimport flask\nimport json\nimport metpy\nimport numpy as np\nimport os\nimport pathlib\nfrom pyproj import CRS\nimport xarray as xr\nimport uuid as ud\nimport zipfile\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass GoesProvider(object):\n\n def __init__(self, dataset, config):\n \"\"\"\n Initialize object\n\n :param provider_def: provider definition\n\n :returns: pygeoapi.providers.base.BaseProvider\n \"\"\"\n self.config = config\n self.DATASET_FOLDER = config['datasets'][dataset]['provider']['data_source']\n self.dir_root=self.DATASET_FOLDER\n self.ps_cov= {\n \"domain\": {\n \"axes\": {\n \"t\": {\n \"values\": [\n ]\n },\n \"x\": {\n \"values\": [\n ]\n },\n \"y\": {\n \"values\": [\n ]\n },\n },\n \"domainType\": \"PointSeries\",\n \"referencing\": [\n {\n \"coordinates\": [\n \"y\",\n \"x\"\n ],\n \"system\": {\n \"id\": \"http://www.opengis.net/def/crs/OGC/1.3/CRS84\",\n \"type\": \"GeographicCRS\"\n }\n },\n {\n \"coordinates\": [\n \"t\"\n ],\n \"system\": {\n \"calendar\": \"Gregorian\",\n \"type\": \"TemporalRS\"\n }\n }\n ],\n \"type\": \"Domain\"\n },\n \"parameters\": {\n \"p1\": {\n \"attrs\": {\n },\n \"description\": {\n \"en\": \"\"\n },\n \"observedProperty\": {\n \"label\": {\n \"en\": \"\"\n }\n },\n \"unit\": {\n \"label\": {\n \"en\": \"\"\n },\n \"symbol\": {\n \"type\": \"\",\n \"value\": \"\"\n }\n }\n }\n },\n \"ranges\": {\n \"p1\": {\n \"axisNames\": [\n ],\n \"dataType\": \"float\",\n \"shape\": [\n ],\n \"type\": \"NdArray\",\n \"values\": [\n ]\n }\n },\n \"type\": \"Coverage\"\n }\n self.area_cov= {\n \"domain\": {\n \"axes\": {\n \"t\": {\n \"values\": [\n ]\n },\n \"x\": {\n \"values\": [\n ]\n },\n \"y\": {\n \"values\": [\n ]\n },\n },\n \"domainType\": \"Grid\",\n \"referencing\": [\n {\n \"coordinates\": [\n \"y\",\n \"x\"\n ],\n \"system\": {\n \"id\": \"http://www.opengis.net/def/crs/OGC/1.3/CRS84\",\n \"type\": \"GeographicCRS\"\n }\n },\n {\n \"coordinates\": [\n \"t\"\n ],\n \"system\": {\n \"calendar\": \"Gregorian\",\n \"type\": \"TemporalRS\"\n }\n }\n ],\n \"type\": \"Domain\"\n },\n \"parameters\": {\n \"p1\": {\n \"attrs\": {\n },\n \"description\": {\n \"en\": \"\"\n },\n \"observedProperty\": {\n 
\"label\": {\n \"en\": \"\"\n }\n },\n \"unit\": {\n \"label\": {\n \"en\": \"\"\n },\n \"symbol\": {\n \"type\": \"\",\n \"value\": \"\"\n }\n }\n }\n },\n \"ranges\": {\n \"p1\": {\n \"axisNames\": [\n ],\n \"dataType\": \"float\",\n \"shape\": [\n ],\n \"type\": \"NdArray\",\n \"values\": [\n ]\n }\n },\n \"type\": \"Coverage\"\n }\n\n\n def query(self, dataset, qtype, coords, time_range, z_value, params, instance, outputFormat):\n self.uuid=str(ud.uuid4().hex)\n zarr_ds = self.config['datasets'][dataset]['provider']['data_source']+'/zarr'\n ds = xr.open_zarr(zarr_ds)\n if qtype=='point':\n output, output_boolean = self.get_position_data(ds,coords,qtype,params,time_range,outputFormat)\n return output, output_boolean\n if qtype=='polygon':\n output, output_boolean = self.get_polygon_data(ds,coords,qtype,params,time_range,outputFormat)\n return output, output_boolean\n\n\n def get_position_data(self,ds,coords,qtype,params,time_range,outputFormat):\n lon = coords[0] # longitude of interest\n lat = coords[1] # latitude of interest\n output = ds[params]\n output = output.sel(x=lon,y=lat, method='nearest')\n j_output = output.to_dict()\n if outputFormat=='CoverageJSON':\n j_cov = self.to_covjson(j_output,qtype,lat,lon)\n return json.dumps(j_cov, indent=4, sort_keys=True, default=str).replace('NaN', 'null'), 'no_delete'\n\n\n def get_polygon_data(self,ds,coords,qtype,params,time_range,outputFormat):\n geometries=[];coord_list=list()\n output=ds[params]\n if len(coords) == 5:\n coords_clip=[[coords[0][0],coords[0][1]],[coords[1][0],coords[1][1]],[coords[2][0]-1,coords[2][1]],[coords[3][0]-1,coords[3][1]],[coords[4][0],coords[4][1]]]\n else:\n coords_clip=coords\n geometries.append({'type':'Polygon', 'coordinates':[coords_clip]}) \n output=output.rio.write_crs(4326)\n output=output.rio.clip(geometries,output.rio.crs)\n start_date=time_range[0]\n end_date=time_range[1]\n output=output.sel({'time':slice(start_date,end_date)})\n j_output = output.to_dict()\n if outputFormat=='CoverageJSON':\n j_cov = self.to_covjson(j_output,qtype)\n return json.dumps(j_cov, indent=4, sort_keys=True, default=str).replace('NaN', 'null'), 'no_delete'\n if outputFormat==\"COGeotiff\":\n f_location,zip_bool=export_geotiff(self,output)\n if zip_bool==False:\n return flask.send_from_directory(self.dir_root,self.uuid+'.tif',as_attachment=True), self.dir_root+'/'+self.uuid+'.tif'\n if zip_bool==True:\n root=self.dir_root+'/temp_dir/'\n zip_file=f_location.split('/')[-1]+'.zip'\n return flask.send_from_directory(root,zip_file,as_attachment=True), 'no_delete'\n if outputFormat==\"NetCDF\":\n for data_vars in output.data_vars:\n del output[data_vars].attrs['grid_mapping']\n conversion=output.to_netcdf(self.dir_root+'/output-'+self.uuid+'.nc')\n return flask.send_from_directory(self.dir_root,'output-'+self.uuid+'.nc',as_attachment=True), self.dir_root+'/output-'+self.uuid+'.nc'\n\n\n\n\n\n def to_covjson(self,j_output,qtype):\n if qtype == 'point':\n cov = self.ps_cov\n if qtype=='polygon':\n cov = self.area_cov\n new_output=copy.deepcopy(cov)\n new_output['domain']['axes']['t']['values']=copy.deepcopy(j_output['coords']['time']['data'])\n new_output['domain']['axes']['x']['values']=copy.deepcopy(j_output['coords']['x']['data'])\n new_output['domain']['axes']['y']['values']=copy.deepcopy(j_output['coords']['y']['data'])\n for p in j_output['data_vars']:\n new_output['parameters'][p]={}\n new_output['parameters'][p]=copy.deepcopy(new_output['parameters']['p1'])\n new_output['parameters'][p]['description']=[p]\n try:\n 
new_output['parameters'][p]['observedProperty']['label']['en']=p\n new_output['parameters'][p]['unit']['label']['en']=p\n new_output['parameters'][p]['unit']['symbol']={'value': copy.deepcopy(j_output['data_vars'][p]['attrs']['units'])}\n except:\n pass\n new_output['ranges'][p]=copy.deepcopy(new_output['ranges']['p1'])\n if qtype=='point': \n new_output['ranges'][p]['values']=copy.deepcopy(j_output['data_vars'][p]['data'])\n new_output['ranges'][p]['shape']=[np.array(j_output['data_vars'][p]['data']).shape[0],1,1]\n if qtype=='polygon':\n new_output['ranges'][p]['values']=copy.deepcopy(np.array(j_output['data_vars'][p]['data']).flatten().tolist())\n new_output['ranges'][p]['shape']=np.array(j_output['data_vars'][p]['data']).shape\n new_output['ranges'][p]['axisNames']=['t','y','x']\n del new_output['parameters']['p1']\n del new_output['ranges']['p1']\n return new_output\n\n\ndef export_geotiff(self,output):\n dim_tracker={}\n zip_bool=False\n for dims in output.dims:\n if 'time' in dims:\n if output.dims[dims] == 1:\n output=output.sel({dims: output[dims].values[0]})\n else:\n dim_tracker[dims]=output[dims].values.tolist()\n #for forecast time selection I need to use np.timedelta64\n if len(dim_tracker)==0:\n f_location=self.dir_root+'/'+self.uuid+'.tif'\n output_array=output.to_array()\n output_array=output_array.compute()\n df=write_cog(output_array,fname=f_location)\n else:\n f_location=self.dir_root+'/temp_dir/'+self.uuid\n os.makedirs(f_location)\n if len(dim_tracker.keys())==1:\n if 'time' in list(dim_tracker.keys())[0]:\n for element in dim_tracker[list(dim_tracker.keys())[0]]:\n sample=output.sel({'time':element})\n sample_array=sample.to_array()\n sample_array=sample_array.compute()\n fname=str(element)+'.tif'\n w_location=f_location+'/'+fname\n df=write_cog(sample_array,fname=w_location)\n #create zip\n zip_bool=True\n base_path = pathlib.Path(f_location+'/')\n with zipfile.ZipFile(f_location+'.zip', mode='w') as z:\n for f_name in base_path.iterdir():\n z.write(f_name)\n return f_location, zip_bool\n\n","repo_name":"ShaneMill1/NCPP_EDR_API","sub_path":"data-api/EDR/provider/goes.py","file_name":"goes.py","file_ext":"py","file_size_in_byte":11238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"71898376394","text":"\"\"\"Adding n-n table to users and roles and adding link field to loja\n\nRevision ID: 05c71b4c3d54\nRevises: 11795a3e7b4f\nCreate Date: 2016-08-19 10:31:41.862580\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '05c71b4c3d54'\ndown_revision = '11795a3e7b4f'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
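[Editor's sketch] In the GOES provider above, the non-obvious step in `to_covjson` is flattening the (t, y, x) data cube into the flat `values` list of a CoverageJSON `NdArray` range. The sketch below (with made-up data) shows why `axisNames` plus `shape` are enough to reindex that flat list: `flatten()` uses C order, so the last axis varies fastest.

import numpy as np

cube = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)  # (t, y, x)
ranges_entry = {
    "type": "NdArray",
    "dataType": "float",
    "axisNames": ["t", "y", "x"],
    "shape": list(cube.shape),
    "values": cube.flatten().tolist(),  # C-order, numpy's default
}
# Any (t, y, x) sample can be recovered from the flat list:
t, y, x = 1, 2, 3
flat_index = (t * cube.shape[1] + y) * cube.shape[2] + x
assert ranges_entry["values"][flat_index] == cube[t, y, x]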
###\n op.create_table('microondas',\n sa.Column('preco', sa.Float(), nullable=True),\n sa.Column('marca', sa.String(), nullable=True),\n sa.Column('modelo', sa.String(), nullable=True),\n sa.Column('link_loja', sa.String(), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('capacidade', sa.Integer(), nullable=True),\n sa.Column('loja_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['loja_id'], ['lojas.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('roles_users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('role_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('lojas', sa.Column('link', sa.String(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('lojas', 'link')\n op.drop_table('roles_users')\n op.drop_table('microondas')\n ### end Alembic commands ###\n","repo_name":"yrachid/casa-planner","sub_path":"migrations/versions/05c71b4c3d54_adding_n_n_table_to_users_and_roles_and_.py","file_name":"05c71b4c3d54_adding_n_n_table_to_users_and_roles_and_.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"6907232073","text":"__metaclass__ = type\n__all__ = [\n 'DistroSeriesDifferenceJob',\n ]\n\nfrom zope.component import getUtility\nfrom zope.interface import (\n implementer,\n provider,\n )\n\nfrom lp.registry.interfaces.distroseriesdifference import (\n IDistroSeriesDifferenceSource,\n )\nfrom lp.registry.interfaces.pocket import PackagePublishingPocket\nfrom lp.registry.model.distroseries import DistroSeries\nfrom lp.registry.model.distroseriesdifference import DistroSeriesDifference\nfrom lp.registry.model.sourcepackagename import SourcePackageName\nfrom lp.services.config import config\nfrom lp.services.database import bulk\nfrom lp.services.database.interfaces import (\n IMasterStore,\n IStore,\n )\nfrom lp.services.job.model.job import Job\nfrom lp.soyuz.interfaces.distributionjob import (\n DistributionJobType,\n IDistroSeriesDifferenceJob,\n IDistroSeriesDifferenceJobSource,\n )\nfrom lp.soyuz.interfaces.packageset import IPackagesetSet\nfrom lp.soyuz.interfaces.publishing import active_publishing_status\nfrom lp.soyuz.model.distributionjob import (\n DistributionJob,\n DistributionJobDerived,\n )\nfrom lp.soyuz.model.publishing import SourcePackagePublishingHistory\n\n\ndef make_metadata(sourcepackagename_id, parent_series_id):\n \"\"\"Return JSON metadata for a job on `sourcepackagename_id`.\"\"\"\n return {\n 'sourcepackagename': sourcepackagename_id,\n 'parent_series': parent_series_id,\n }\n\n\ndef create_job(derived_series, sourcepackagename, parent_series):\n \"\"\"Create a `DistroSeriesDifferenceJob` for a given source package.\n\n :param derived_series: A `DistroSeries` that is assumed to be derived\n from another one.\n :param sourcepackagename: The `SourcePackageName` whose publication\n history has changed.\n :param parent_series: A `DistroSeries` that is a parent of\n `derived_series`. 
The difference is between the versions of\n `sourcepackagename` in `parent_series` and `derived_series`.\n \"\"\"\n db_job = DistributionJob(\n distribution=derived_series.distribution, distroseries=derived_series,\n job_type=DistributionJobType.DISTROSERIESDIFFERENCE,\n metadata=make_metadata(sourcepackagename.id, parent_series.id))\n IMasterStore(DistributionJob).add(db_job)\n job = DistroSeriesDifferenceJob(db_job)\n job.celeryRunOnCommit()\n return job\n\n\ndef create_multiple_jobs(derived_series, parent_series):\n \"\"\"Create `DistroSeriesDifferenceJob`s between parent and derived series.\n\n :param derived_series: A `DistroSeries` that is assumed to be derived\n from another one.\n :param parent_series: A `DistroSeries` that is a parent of\n `derived_series`.\n :return: A list of newly-created `DistributionJob` ids.\n \"\"\"\n store = IStore(SourcePackagePublishingHistory)\n spn_ids = store.find(\n SourcePackagePublishingHistory.sourcepackagenameID,\n SourcePackagePublishingHistory.distroseries == derived_series.id,\n SourcePackagePublishingHistory.status.is_in(active_publishing_status))\n spn_ids = list(spn_ids)\n\n if len(spn_ids) == 0:\n return []\n\n job_ids = Job.createMultiple(store, len(spn_ids))\n return bulk.create(\n (DistributionJob.distribution, DistributionJob.distroseries,\n DistributionJob.job_type, DistributionJob.job_id,\n DistributionJob.metadata),\n [(derived_series.distribution, derived_series,\n DistributionJobType.DISTROSERIESDIFFERENCE, job_id,\n make_metadata(spn_id, parent_series.id))\n for job_id, spn_id in zip(job_ids, spn_ids)],\n get_primary_keys=True)\n\n\ndef find_waiting_jobs(derived_series, sourcepackagename, parent_series):\n \"\"\"Look for pending `DistroSeriesDifference` jobs on a package.\"\"\"\n # Look for identical pending jobs. This compares directly on\n # the metadata string. It's fragile, but this is only an\n # optimization. It's not actually disastrous to create\n # redundant jobs occasionally.\n json_metadata = make_metadata(sourcepackagename.id, parent_series.id)\n\n # Use master store because we don't like outdated information\n # here.\n store = IMasterStore(DistributionJob)\n\n candidates = store.find(\n DistributionJob,\n DistributionJob.job_type ==\n DistributionJobType.DISTROSERIESDIFFERENCE,\n DistributionJob.distroseries == derived_series,\n DistributionJob.metadata == json_metadata,\n DistributionJob.job_id.is_in(Job.ready_jobs))\n\n return [\n job\n for job in candidates\n if job.metadata[\"parent_series\"] == parent_series.id]\n\n\ndef may_require_job(derived_series, sourcepackagename, parent_series):\n \"\"\"Might publishing this package require a new job?\n\n Use this to determine whether to create a new\n `DistroSeriesDifferenceJob`. 
The answer may possibly be\n conservatively wrong: the check is really only to save the job\n runner some unnecessary work, but we don't expect a bit of\n unnecessary work to be a big problem.\n \"\"\"\n if parent_series.distribution == derived_series.distribution:\n # Differences within a distribution are not tracked.\n return False\n existing_jobs = find_waiting_jobs(\n derived_series, sourcepackagename, parent_series)\n return len(existing_jobs) == 0\n\n\ndef has_package(distroseries, sourcepackagename):\n \"\"\"Does `distroseries` have the given source package?\"\"\"\n return not distroseries.getPublishedSources(\n sourcepackagename, include_pending=True).is_empty()\n\n\n@implementer(IDistroSeriesDifferenceJob)\n@provider(IDistroSeriesDifferenceJobSource)\nclass DistroSeriesDifferenceJob(DistributionJobDerived):\n \"\"\"A `Job` type for creating/updating `DistroSeriesDifference`s.\"\"\"\n\n class_job_type = DistributionJobType.DISTROSERIESDIFFERENCE\n\n config = config.IDistroSeriesDifferenceJobSource\n\n @classmethod\n def createForPackagePublication(cls, derived_series, sourcepackagename,\n pocket):\n \"\"\"See `IDistroSeriesDifferenceJobSource`.\"\"\"\n # -backports and -proposed are not really part of a standard\n # distribution's packages so we're ignoring them here. They can\n # always be manually synced by the users if necessary, in the\n # rare occasions that they require them.\n ignored_pockets = [\n PackagePublishingPocket.BACKPORTS,\n PackagePublishingPocket.PROPOSED,\n ]\n if pocket in ignored_pockets:\n return\n\n # Create jobs for DSDs between the derived_series' parents and\n # the derived_series itself.\n parent_series_jobs = [\n create_job(derived_series, sourcepackagename, parent)\n for parent in derived_series.getParentSeries()\n if may_require_job(derived_series, sourcepackagename, parent)]\n\n # Create jobs for DSDs between the derived_series and its\n # children.\n derived_series_jobs = [\n create_job(child, sourcepackagename, derived_series)\n for child in derived_series.getDerivedSeries()\n if may_require_job(child, sourcepackagename, derived_series)]\n\n return parent_series_jobs + derived_series_jobs\n\n @classmethod\n def createForSPPHs(cls, spphs):\n \"\"\"See `IDistroSeriesDifferenceJobSource`.\"\"\"\n # XXX JeroenVermeulen 2011-08-25, bug=834499: This won't do for\n # some of the mass deletions we're planning to support.\n # Optimize.\n for spph in spphs:\n if spph.archive.is_main:\n cls.createForPackagePublication(\n spph.distroseries,\n spph.sourcepackagerelease.sourcepackagename, spph.pocket)\n\n @classmethod\n def massCreateForSeries(cls, derived_series):\n \"\"\"See `IDistroSeriesDifferenceJobSource`.\"\"\"\n for parent_series in derived_series.getParentSeries():\n create_multiple_jobs(derived_series, parent_series)\n\n @classmethod\n def getPendingJobsForDifferences(cls, derived_series,\n distroseriesdifferences):\n \"\"\"See `IDistroSeriesDifferenceJobSource`.\"\"\"\n jobs = IStore(DistributionJob).find(\n DistributionJob,\n DistributionJob.job_type == cls.class_job_type,\n Job.id == DistributionJob.job_id,\n Job._status.is_in(Job.PENDING_STATUSES),\n DistributionJob.distroseries == derived_series)\n\n parent_series_ids = set(\n dsd.parent_series.id for dsd in distroseriesdifferences)\n keyed_dsds = dict(\n (dsd.source_package_name.id, dsd)\n for dsd in distroseriesdifferences)\n jobs_by_dsd = {}\n for job in jobs:\n if job.metadata[\"parent_series\"] not in parent_series_ids:\n continue\n dsd = keyed_dsds.get(job.metadata[\"sourcepackagename\"])\n 
if dsd is not None:\n jobs_by_dsd.setdefault(dsd, []).append(cls(job))\n return jobs_by_dsd\n\n def __repr__(self):\n \"\"\"Returns an informative representation of the job.\"\"\"\n parts = \"%s for \" % self.__class__.__name__\n name = self.sourcepackagename\n if not name:\n parts += \"no package name (!)\"\n else:\n parts += \"package %s\" % name\n parts += \" from %s to %s\" % (self.parent_series.name,\n self.derived_series.name)\n return \"<%s>\" % parts\n\n @property\n def sourcepackagename(self):\n return SourcePackageName.get(self.metadata['sourcepackagename'])\n\n @property\n def derived_series(self):\n return self.distroseries\n\n @property\n def parent_series(self):\n parent_id = self.metadata['parent_series']\n return IStore(DistroSeries).get(DistroSeries, parent_id)\n\n def passesPackagesetFilter(self):\n \"\"\"Is this package of interest as far as packagesets are concerned?\n\n If the parent series has packagesets, then packages that are\n missing in the derived series are only of interest if they are\n in a packageset that the derived series also has.\n \"\"\"\n derived_series = self.derived_series\n parent_series = self.parent_series\n\n sourcepackagename = self.sourcepackagename\n if has_package(derived_series, sourcepackagename):\n return True\n if not has_package(parent_series, sourcepackagename):\n return True\n packagesetset = getUtility(IPackagesetSet)\n if packagesetset.getBySeries(parent_series).is_empty():\n # Parent series does not have packagesets, as would be the\n # case for e.g. Debian. In that case, don't filter.\n return True\n parent_sets = packagesetset.setsIncludingSource(\n sourcepackagename, distroseries=parent_series)\n for parent_set in parent_sets:\n for related_set in parent_set.relatedSets():\n if related_set.distroseries == derived_series:\n return True\n return False\n\n def getMatchingDSD(self):\n \"\"\"Find an existing `DistroSeriesDifference` for this difference.\"\"\"\n spn_id = self.metadata[\"sourcepackagename\"]\n parent_id = self.metadata[\"parent_series\"]\n store = IMasterStore(DistroSeriesDifference)\n search = store.find(\n DistroSeriesDifference,\n DistroSeriesDifference.derived_series == self.derived_series,\n DistroSeriesDifference.parent_series_id == parent_id,\n DistroSeriesDifference.source_package_name_id == spn_id)\n return search.one()\n\n def run(self):\n \"\"\"See `IRunnableJob`.\"\"\"\n if not self.passesPackagesetFilter():\n return\n\n ds_diff = self.getMatchingDSD()\n if ds_diff is None:\n getUtility(IDistroSeriesDifferenceSource).new(\n self.distroseries, self.sourcepackagename, self.parent_series)\n else:\n ds_diff.update()\n","repo_name":"pombredanne/launchpad-3","sub_path":"lib/lp/soyuz/model/distroseriesdifferencejob.py","file_name":"distroseriesdifferencejob.py","file_ext":"py","file_size_in_byte":12040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"11251625111","text":"from math import log2\nfrom textwrap import wrap\n\nfrom PySide2.QtCore import Qt\nfrom PySide2.QtGui import QIcon\nfrom PySide2.QtWidgets import QVBoxLayout, QWidget, QTableWidget, QLabel, QPushButton, QGridLayout, QLineEdit, QTableWidgetItem, \\\n QHeaderView\nfrom pyperclip import copy\n\nfrom utilities.ManageLng import ManageLng\nfrom utilities.PopupWindow import PopupWindow\nfrom utilities.Validator import is_empty, is_correct_network_address, is_correct_endpoint_numbers_per_network, is_correct_prefix\n\n\nclass VlsmCalculation(QWidget):\n def __init__(self):\n 
super(VlsmCalculation, self).__init__()\n\n # Use language settings\n self.ml = ManageLng()\n\n # App attributes\n self.network_ip = None\n self.prefix = None\n self.network_hosts = None\n self.length_of_subnets = []\n self.subnets = []\n\n main_layout = QVBoxLayout()\n main_layout.setSpacing(10)\n main_layout.setAlignment(Qt.AlignTop)\n self.setLayout(main_layout)\n\n top_bar = QGridLayout()\n\n # Left, top, right and bottom margins\n top_bar.setContentsMargins(40, 15, 40, 15)\n top_bar.setHorizontalSpacing(40)\n main_layout.addLayout(top_bar)\n\n self.starting_network_address_label = QLabel(self.ml.get_tr_text(\"tab_vlsm_starting_net\"))\n self.endpoint_numbers_per_network_label = QLabel(self.ml.get_tr_text(\"tab_vlsm_endpoint_nums\"))\n self.starting_network_prefix_label = QLabel(self.ml.get_tr_text(\"tab_vlsm_starting_net_prefix\"))\n\n self.starting_network_address_input = QLineEdit()\n self.starting_network_address_input.returnPressed.connect(self.calculation_action)\n\n self.starting_network_prefix_input = QLineEdit()\n self.starting_network_prefix_input.returnPressed.connect(self.calculation_action)\n\n self.endpoint_numbers_per_network_input = QLineEdit()\n self.endpoint_numbers_per_network_input.returnPressed.connect(self.calculation_action)\n\n top_bar.addWidget(self.starting_network_address_label, 0, 0)\n top_bar.addWidget(self.starting_network_address_input, 0, 1)\n\n top_bar.addWidget(self.starting_network_prefix_label, 1, 0)\n top_bar.addWidget(self.starting_network_prefix_input, 1, 1)\n\n top_bar.addWidget(self.endpoint_numbers_per_network_label, 2, 0)\n top_bar.addWidget(self.endpoint_numbers_per_network_input, 2, 1)\n\n self.calculation_button = QPushButton(self.ml.get_tr_text(\"tab_vlsm_calc_btn\"))\n self.calculation_button.setIcon(QIcon(\"static/images/get_info.png\"))\n self.calculation_button.clicked.connect(self.calculation_action)\n main_layout.addWidget(self.calculation_button, alignment=Qt.AlignCenter)\n\n self.table = QTableWidget()\n self.table.setColumnCount(6)\n self.table.itemDoubleClicked.connect(copy_text_action)\n\n # Set table header labels\n self.table_column_names = [self.ml.get_tr_text(\"table_column_network_add\"),\n self.ml.get_tr_text(\"table_column_ip_range\"),\n self.ml.get_tr_text(\"table_column_broadcast_add\"),\n self.ml.get_tr_text(\"table_column_subnet_mask\"),\n self.ml.get_tr_text(\"table_column_prefix\"),\n self.ml.get_tr_text(\"table_column_addressable_host\")]\n\n self.table.setHorizontalHeaderLabels(self.table_column_names)\n\n # Automatic resizing of the columns to the content\n self.table.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)\n self.table.horizontalHeader().setSectionResizeMode(1, QHeaderView.Stretch)\n self.table.horizontalHeader().setSectionResizeMode(2, QHeaderView.Stretch)\n\n # Fixed height of table rows\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Fixed)\n\n # Set table text align of vertical header\n self.table.verticalHeader().setDefaultAlignment(Qt.AlignCenter)\n\n main_layout.addWidget(self.table)\n\n def check_input(self):\n\n # If the starting network address is empty\n if is_empty(self.starting_network_address_input.text()):\n PopupWindow(\"warning\",\n self.ml.get_tr_text(\"tab_vlsm_warning01\"),\n self.starting_network_address_input)\n return False\n else:\n\n # If the starting network address is incorrect\n if not is_correct_network_address(self.starting_network_address_input.text()):\n PopupWindow(\"warning\",\n self.ml.get_tr_text(\"tab_vlsm_warning02\"),\n 
self.starting_network_address_input)\n return False\n\n # If endpoint numbers are empty\n if is_empty(self.endpoint_numbers_per_network_input.text()):\n PopupWindow(\"warning\",\n self.ml.get_tr_text(\"tab_vlsm_warning03\"),\n self.endpoint_numbers_per_network_input)\n return False\n else:\n\n # If endpoint numbers are incorrect\n if not is_correct_endpoint_numbers_per_network(self.endpoint_numbers_per_network_input.text()):\n PopupWindow(\"warning\",\n self.ml.get_tr_text(\"tab_vlsm_warning04\"),\n self.endpoint_numbers_per_network_input)\n return False\n\n # If prefix is incorrect\n self.prefix = self.starting_network_prefix_input.text().replace(\"/\", \"\").replace(\"\\\\\", \"\")\n if not is_correct_prefix(self.prefix):\n PopupWindow(\"warning\",\n self.ml.get_tr_text(\"tab_vlsm_warning05\"),\n self.starting_network_prefix_input)\n return False\n return True\n\n def inject_data_to_table(self):\n row = 0\n for subnet in self.subnets:\n self.table.insertRow(row)\n column = 0\n for i in subnet.items():\n value = str(i[1])\n self.table.setItem(row, column, TableItem(value))\n column += 1\n row += 1\n\n def storing_input_data(self):\n self.network_ip = self.starting_network_address_input.text()\n self.network_hosts = self.endpoint_numbers_per_network_input.text()\n\n def calculation_action(self):\n if self.check_input():\n # Stores user-specified data\n self.storing_input_data()\n\n # Resets table before injects data\n self.table.setRowCount(0)\n\n self.processing()\n self.inject_data_to_table()\n\n # Clears lists to next calculation\n self.length_of_subnets.clear()\n self.subnets.clear()\n\n def inject_data_to_dict(self):\n for network in self.length_of_subnets:\n hostbits = int(log2(network))\n prefix = 32 - hostbits\n mask = get_ip_from_32bit_format((\"0\" * hostbits).rjust(32, \"1\"))\n\n self.subnets.append({self.table_column_names[0]: self.network_ip,\n self.table_column_names[1]: f\"{get_first_addressable_ip(self.network_ip)} - \"\n f\"{get_last_addressable_ip(self.network_ip, mask)}\",\n self.table_column_names[2]: get_broadcast_ip(self.network_ip, mask),\n self.table_column_names[3]: mask,\n self.table_column_names[4]: f\"/{prefix}\",\n self.table_column_names[5]: pow(2, hostbits) - 2})\n\n self.network_ip = get_next_network_ip(self.network_ip, mask)\n\n def processing(self):\n\n # User-specified hosts are converted to the nearest 2 powers\n for hosts in self.network_hosts.split(\",\"):\n if int(hosts) > 0:\n hosts = int(hosts) + 2\n self.length_of_subnets.append(power_bit_length(int(hosts)))\n\n # The largest host network will be pre-sorting\n self.length_of_subnets.sort(reverse=True)\n sum_all_hosts = sum(self.length_of_subnets)\n\n if is_empty(self.prefix):\n first_octet = int(self.network_ip.split(\".\")[0])\n\n # Determining what could be the default mask based on network address\n if 1 <= first_octet < 128:\n if sum_all_hosts <= pow(2, 24):\n self.inject_data_to_dict()\n else:\n PopupWindow(\"warning\",\n self.ml.get_tr_text(\"tab_vlsm_warning06\"),\n self.endpoint_numbers_per_network_input)\n\n elif 128 <= first_octet < 192:\n if sum_all_hosts <= pow(2, 16):\n self.inject_data_to_dict()\n else:\n PopupWindow(\"warning\",\n self.ml.get_tr_text(\"tab_vlsm_warning07\"),\n self.endpoint_numbers_per_network_input)\n\n elif 192 <= first_octet < 224:\n if sum_all_hosts <= pow(2, 8):\n self.inject_data_to_dict()\n else:\n PopupWindow(\"warning\",\n self.ml.get_tr_text(\"tab_vlsm_warning08\"),\n self.endpoint_numbers_per_network_input)\n else:\n if sum_all_hosts <= pow(2, 32 - 
int(self.prefix)):\n self.inject_data_to_dict()\n else:\n s1 = self.ml.get_tr_text(\"tab_vlsm_warning09a\")\n s2 = self.ml.get_tr_text(\"tab_vlsm_warning09b\")\n PopupWindow(\"warning\",\n f\"{s1} /{self.prefix} {s2}\",\n self.endpoint_numbers_per_network_input)\n\n def re_translate_ui(self, lang):\n self.ml = ManageLng(lang)\n\n self.starting_network_address_label.setText(self.ml.get_tr_text(\"tab_vlsm_starting_net\"))\n self.endpoint_numbers_per_network_label.setText(self.ml.get_tr_text(\"tab_vlsm_endpoint_nums\"))\n self.starting_network_prefix_label.setText(self.ml.get_tr_text(\"tab_vlsm_starting_net_prefix\"))\n self.calculation_button.setText(self.ml.get_tr_text(\"tab_vlsm_calc_btn\"))\n\n self.table_column_names = [self.ml.get_tr_text(\"table_column_network_add\"),\n self.ml.get_tr_text(\"table_column_ip_range\"),\n self.ml.get_tr_text(\"table_column_broadcast_add\"),\n self.ml.get_tr_text(\"table_column_subnet_mask\"),\n self.ml.get_tr_text(\"table_column_prefix\"),\n self.ml.get_tr_text(\"table_column_addressable_host\")]\n\n self.table.setHorizontalHeaderLabels(self.table_column_names)\n\n\nclass TableItem(QTableWidgetItem):\n def __init__(self, text):\n super(TableItem, self).__init__(text)\n self.setText(text)\n self.setTextAlignment(Qt.AlignCenter)\n self.setFlags(Qt.ItemIsEnabled)\n\n\n# Copying content of cell to clipboard\ndef copy_text_action(item):\n copy(item.text())\n\n\n# A function that converts the entered number to the nearest 2 powers\ndef power_bit_length(x):\n return 2 ** (x - 1).bit_length()\n\n\ndef get_mask_from_prefix(prefix):\n subnet_mask_dec = \"\"\n for octet in wrap((\"0\" * (32 - prefix)).rjust(32, \"1\"), 8):\n subnet_mask_dec += f\"{int(octet, 2)}.\"\n return subnet_mask_dec[:-1]\n\n\n# Returns a 32 bit format from an IP address\ndef get_32bit_format(ip_address):\n format_32bit = \"\"\n for octet in ip_address.split(\".\"):\n format_32bit += f'{bin(int(octet)).replace(\"0b\", \"\").rjust(8, \"0\")}'\n return format_32bit\n\n\n# Returns a decimal IP address from a 32 bit format\ndef get_ip_from_32bit_format(format_32bit):\n ip_dec = \"\"\n for octet in wrap(format_32bit, 8):\n ip_dec += f\"{int(octet, 2)}.\"\n return ip_dec[:-1]\n\n\ndef get_first_addressable_ip(network_ip):\n first_addressable_ip_bin_32bit = bin(int(get_32bit_format(network_ip), 2) +\n int(\"1\", 2)).replace(\"0b\", \"\").rjust(32, \"0\")\n return get_ip_from_32bit_format(first_addressable_ip_bin_32bit)\n\n\ndef get_last_addressable_ip(network_ip, mask):\n broadcast_ip_32bit = get_32bit_format(get_broadcast_ip(network_ip, mask))\n last_addressable_ip_bin_32bit = bin(int(broadcast_ip_32bit, 2) -\n int(\"1\", 2)).replace(\"0b\", \"\").rjust(32, \"0\")\n return get_ip_from_32bit_format(last_addressable_ip_bin_32bit)\n\n\ndef get_broadcast_ip(network_ip, mask):\n broadcast_ip_32bit = f\"{get_32bit_format(network_ip)[:-get_32bit_format(mask).count('0')]}\" \\\n f\"{'1' * get_32bit_format(mask).count('0')}\"\n return get_ip_from_32bit_format(broadcast_ip_32bit)\n\n\ndef get_next_network_ip(network_ip, mask):\n broadcast_ip_32bit = get_32bit_format(get_broadcast_ip(network_ip, mask))\n next_network_ip_32bit = bin(int(broadcast_ip_32bit, 2) +\n int(\"1\", 2)).replace(\"0b\", \"\").rjust(32, \"0\")\n return 
get_ip_from_32bit_format(next_network_ip_32bit)\n","repo_name":"antarn88/NetworkAssistant","sub_path":"modules/VlsmCalculation.py","file_name":"VlsmCalculation.py","file_ext":"py","file_size_in_byte":13192,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"30968735998","text":"# pylint: disable=protected-access\nfrom sqlalchemy.orm import Session\n\nfrom fastmsa.repo import SqlAlchemyRepository\nfrom tests.app.domain.aggregates import Product\nfrom tests.app.domain.models import Batch, OrderLine\nfrom tests.integration import (\n insert_allocation,\n insert_batch,\n insert_order_line,\n insert_product,\n)\n\n\ndef test_repository_can_save_a_batch(session: Session) -> None:\n batch = Batch(\"batch1\", \"RUSTY-SOAPDISH\", 100, eta=None)\n product = Product(batch.sku, [batch])\n\n repo = SqlAlchemyRepository(Product, session)\n repo.add(product)\n session.commit()\n\n rows = list(\n session.execute(\"SELECT reference, sku, _purchased_quantity, eta FROM batch\")\n )\n assert rows == [(\"batch1\", \"RUSTY-SOAPDISH\", 100, None)]\n\n\ndef test_repository_can_retrieve_a_batch_with_allocations(session: Session) -> None:\n orderline_id = insert_order_line(session)\n batch1_id = insert_batch(session, \"batch1\", \"GENERIC-SOFA\")\n insert_product(session, \"GENERIC-SOFA\")\n insert_batch(session, \"batch2\", \"GENERIC-TABLE\")\n insert_product(session, \"GENERIC-TABLE\")\n insert_allocation(session, orderline_id, batch1_id)\n\n repo = SqlAlchemyRepository(Product, session)\n product = repo.get(\"GENERIC-SOFA\")\n retrieved = product.items[0] if product else None\n\n expected = Batch(\"batch1\", \"GENERIC-SOFA\", 100, eta=None)\n assert retrieved == expected # Batch.__eq__ only compares reference\n assert retrieved.sku == expected.sku\n assert retrieved._purchased_quantity == expected._purchased_quantity\n assert retrieved._allocations == {\n OrderLine(\"order1\", \"GENERIC-SOFA\", 12),\n }\n","repo_name":"2021-msa-study/fastmsa","sub_path":"tests/integration/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"392390839","text":"import myparser\nfrom collections.abc import Iterable\n\ndef build_xml(obj,depth = 0):\n res = \"\"\n space = '\\t' * depth\n\n if isinstance(obj,dict):\n for key,value in obj.items():\n res += f\"{space}<{key.replace('<','<').replace('>','>')}>\\n{space}\\t{build_xml(value,depth + 1)}\\n{space}','>')}>\\n\"\n elif isinstance(obj,list):\n for value in obj:\n res += f\"{space}\\n{space}\\t{build_xml(value, depth + 1)}\\n{space}\\n\"\n else:\n res = f\"{obj}\"\n if obj is None:\n res =\"\"\n return res\n\n\ndef task1(file=\"out.json\"):\n obj = None\n with open(file) as json:\n obj = myparser.loads(json.read())\n return build_xml(obj)\n\n\ndef task2(file=\"out.json\"):\n from dict2xml import dict2xml\n from json import loads\n with open(file) as j:\n return dict2xml(loads(j.read()))\n\n\ndef task3(file=\"out.json\"):\n import re\n from dict2xml import dict2xml\n\n example = {'pair_id': 2460032073, 'subject': 'Дискретная математика (базовый уровень)', 'subject_id': 43649, 'note': \"\", 'type': 'Практические занятия', 'time_start': '10:00', 'time_end': '11:30', 'teacher_id': 100054, 'teacher_name': 'Поляков Владимир Иванович', 'room': '1324 (бывш. 
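[Editor's sketch] The VLSM calculator above derives the subnet mask, host range, broadcast and next network by round-tripping addresses through 32-character bit strings. The same relationships can be cross-checked against the standard-library `ipaddress` module; this is a verification sketch, not code from the module above.

import ipaddress

net = ipaddress.ip_network("192.168.1.0/26")
assert str(net.netmask) == "255.255.255.192"
assert str(net.network_address + 1) == "192.168.1.1"      # first usable host
assert str(net.broadcast_address - 1) == "192.168.1.62"   # last usable host
assert str(net.broadcast_address) == "192.168.1.63"
# get_next_network_ip() above computes broadcast + 1:
assert str(net.broadcast_address + 1) == "192.168.1.64"
assert net.num_addresses - 2 == 62                        # addressable hosts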
369а)', 'building': 'Кронверкский пр., д.49, лит.А', 'format': 'Очно - дистанционный ', 'work_type': 'Практические занятия', 'work_type_id': 3, 'group': 'ДМ БАЗ 3.5', 'flow_type_id': 2, 'flow_id': 19987, 'zoom_url': \"\", 'zoom_password': \"\", 'zoom_info': \"\", 'bld_id': 13, 'format_id': 2, 'main_bld_id': 13}\n    data = []\n    with open(file) as json:\n        value = json.read().replace(\"null\",'\"\"')\n        lessons_count = value.count(\"pair_id\")\n        for i in range(lessons_count):\n            data.append({})\n            for key in example:\n                if isinstance(example[key],str):\n                    data[-1][key] = eval(list(re.finditer(f'\"{key}\":\\s*(\"[^\"]*\")',value))[i].group(1),{'__builtins__': {}})\n                if isinstance(example[key],int):\n                    data[-1][key] = eval(list(re.finditer('\"'+key+'\":\\s*([^,\\}]+)[\\},]',value))[i].group(1),{'__builtins__': {}})\n    return dict2xml(data)\n\n\ndef task4(file=\"out.json\"):\n    import pandas as pd\n\n    with open(file, encoding='utf-8') as inputfile:\n        df = pd.read_json(inputfile)\n\n    df.to_csv('out.csv', encoding='utf-8', index=False)\n\n\n\nif __name__ == \"__main__\":\n    import timeit\n    print(timeit.timeit(task1, number=100)/100)\n    print(timeit.timeit(task2, number=100)/100)\n    print(timeit.timeit(task3, number=100)/100)\n    task4()\n","repo_name":"Slonser/ITMO_labs","sub_path":"cs/labs/lab4/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"39687978097","text":"import pygame as pg\nfrom time import sleep\nimport random\nfrom pygame.locals import *\n\n\ndef near(pos: list, system=[[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]):\n    count = 0\n    for i in system:\n        if cells[(pos[0] + i[0]) % len(cells)][(pos[1] + i[1]) % len(cells[0])]:\n            count += 1\n    return count\n\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nsize_of_cell = 5\nsl = 0\nlive = (2, 3)\nborn = (3,)\n\n\nroot = pg.display.set_mode((1200, 600))\n\ncells = [[random.choice([0, 1]) for j in range(root.get_width() // size_of_cell)] for i in\n         range(root.get_height() // size_of_cell * 2)]\n\nif __name__ == '__main__':\n    while True:\n        # Fill the screen with white\n        root.fill(WHITE)\n\n        for i in pg.event.get():\n            if i.type == QUIT:\n                quit()\n\n        cells2 = [[0 for j in range(len(cells[0]))] for i in range(len(cells))]\n\n        for i in range(0, len(cells)):\n            for j in range(0, len(cells[i])):\n                pg.draw.rect(root, (255 * cells[i][j], 255 * cells[i][j], 255 * cells[i][j]),\n                             [i * size_of_cell, j * size_of_cell, size_of_cell, size_of_cell])\n                # If the cell is alive\n                if cells[i][j]:\n                    # If the number of live neighbours is not 2 or 3\n                    if near([i, j]) not in live:\n                        cells2[i][j] = 0\n                        continue\n                    # Otherwise\n                    cells2[i][j] = 1\n                    continue\n                # If the cell is dead and it has 3 neighbours\n                if near([i, j]) in born:\n                    cells2[i][j] = 1\n                    continue\n                # Otherwise\n                cells2[i][j] = 0\n        pg.display.update()\n\n        cells = cells2\n        sleep(sl * 0.1)\n","repo_name":"skozlovtsev/ml_labs","sub_path":"game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"25015375866","text":"import numpy as np\n\nfrom HelperClass2.NeuralNet_3_0 import *\n\n# Roll all our parameters dictionary into a single vector satisfying our specific required shape.\ndef dictionary_to_vector(dict_params):\n    keys = []\n    count = 0\n    for key in [\"W1\", \"B1\", \"W2\", \"B2\", \"W3\", \"B3\"]:\n        \n        # flatten parameter\n        new_vector = 
np.reshape(dict_params[key], (-1,1))\n        keys = keys + [key]*new_vector.shape[0] # -> [\"W1\",\"W1\",...\"b1\",\"b1\",...\"W2\"...]\n        \n        if count == 0:\n            theta = new_vector\n        else: #np.concatenate\n            theta = np.concatenate((theta, new_vector), axis=0)\n        count = count + 1\n    \n    return theta, keys\n\n# roll all grad values into one vector, the same shape as dictionary_to_vector()\ndef gradients_to_vector(gradients):\n    count = 0\n    for key in [\"dW1\", \"dB1\", \"dW2\", \"dB2\", \"dW3\", \"dB3\"]:\n        # flatten parameter\n        new_vector = np.reshape(gradients[key], (-1,1))\n        \n        if count == 0:\n            d_theta = new_vector\n        else:\n            d_theta = np.concatenate((d_theta, new_vector), axis=0)\n        count = count + 1\n    \n    return d_theta\n\n# Unroll all our parameters dictionary from a single vector satisfying our specific required shape.\ndef vector_to_dictionary(theta, layer_dims):\n    dict_params = {}\n    L = 4 # the number of layers in the network\n    start = 0\n    end = 0\n    for l in range(1,L):\n        end += layer_dims[l]*layer_dims[l-1]\n        dict_params[\"W\" + str(l)] = theta[start:end].reshape((layer_dims[l-1],layer_dims[l]))\n        start = end\n        end += layer_dims[l]*1\n        dict_params[\"B\" + str(l)] = theta[start:end].reshape((1,layer_dims[l]))\n        start = end\n    #end for\n    return dict_params\n\n# cross entropy: -Y*lnA\ndef CalculateLoss(net, dict_Param, X, Y, count, ):\n    net.wb1.W = dict_Param[\"W1\"]\n    net.wb1.B = dict_Param[\"B1\"]\n    net.wb2.W = dict_Param[\"W2\"]\n    net.wb2.B = dict_Param[\"B2\"]\n    net.wb3.W = dict_Param[\"W3\"]\n    net.wb3.B = dict_Param[\"B3\"]\n    net.forward(X)\n    p = Y * np.log(net.output)\n    Loss = -np.sum(p) / count\n    return Loss\n\n\nif __name__ == '__main__':\n\n    n_input = 7\n    n_hidden1 = 16\n    n_hidden2 = 12\n    n_output = 10\n    eta = 0.2\n    eps = 0.01\n    batch_size = 128\n    max_epoch = 40\n\n    hp = HyperParameters_3_0(n_input, n_hidden1, n_hidden2, n_output, eta, max_epoch, batch_size, eps, NetType.MultipleClassifier, InitialMethod.Xavier)\n    net = NeuralNet_3_0(hp, \"MNIST_gradient_check\")\n    dict_Param = {\"W1\": net.wb1.W, \"B1\": net.wb1.B, \"W2\": net.wb2.W, \"B2\": net.wb2.B, \"W3\": net.wb3.W, \"B3\": net.wb3.B}\n\n    layer_dims = [n_input, n_hidden1, n_hidden2, n_output]\n    n_example = 2\n    x = np.random.randn(n_example, n_input)\n    #y = np.array([1,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,1,0,0,0,0, 0,0,1,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,1,0,0, 0,0,0,0,0,0,0,0,0,1]).reshape(-1,n_example)\n    #y = np.array([1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0]).reshape(-1,n_example)\n    y = np.array([1,0,0,0,0,0,0,0,0,0]).reshape(1,-1)\n    \n    net.forward(x)\n    net.backward(x, y)\n    dict_Grads = {\"dW1\": net.wb1.dW, \"dB1\": net.wb1.dB, \"dW2\": net.wb2.dW, \"dB2\": net.wb2.dB, \"dW3\": net.wb3.dW, \"dB3\": net.wb3.dB}\n\n    J_theta, keys = dictionary_to_vector(dict_Param)\n    d_theta_real = gradients_to_vector(dict_Grads)\n\n    n = J_theta.shape[0]\n    J_plus = np.zeros((n,1))\n    J_minus = np.zeros((n,1))\n    d_theta_approx = np.zeros((n,1))\n\n    # for each of the parameters in the w,b array\n    for i in range(n):\n        J_theta_plus = np.copy(J_theta)\n        J_theta_plus[i][0] = J_theta[i][0] + eps\n        # multi-class cross-entropy loss\n        J_plus[i] = CalculateLoss(net, vector_to_dictionary(J_theta_plus, layer_dims), x, y, n_example)\n\n        J_theta_minus = np.copy(J_theta)\n        J_theta_minus[i][0] = J_theta[i][0] - eps\n        J_minus[i] = CalculateLoss(net, vector_to_dictionary(J_theta_minus, layer_dims), x, y, n_example)\n\n        d_theta_approx[i] = (J_plus[i] - J_minus[i]) / (2 * eps)\n    # end for\n    numerator = np.linalg.norm(d_theta_real - d_theta_approx) ####np.linalg.norm is the L2 norm\n    denominator = 
np.linalg.norm(d_theta_approx) + np.linalg.norm(d_theta_real)\n difference = numerator / denominator\n print('diference ={}'.format(difference))\n if difference<1e-7:\n print(\"NO mistake.\")\n elif difference<1e-4:\n print(\"Acceptable, but a little bit high.\")\n elif difference<1e-2:\n print(\"May has a mistake, you need check code!\")\n else:\n print(\"HAS A MISTAKE!!!\")\n \n\n","repo_name":"microsoft/ai-edu","sub_path":"基础教程/A2-神经网络基本原理/第5步 - 非线性分类/src/ch12-MultipleLayerNetwork/Level2_GradientCheck.py","file_name":"Level2_GradientCheck.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":12705,"dataset":"github-code","pt":"28"} +{"seq_id":"73362582154","text":"import requests\nimport threading\nimport datetime\n\ndef sendRequests(url):\n print(\"Sending request to: \", url)\n response = requests.get(url)\n if not response.status_code == 200:\n print(\"Request failed..!\")\n\ndef on_success(r):\n if r.status_code == 200:\n print('Post succeed: {}'.format(r))\n else:\n print('Post failed: {}'.format(r))\n\ndef on_error(ex):\n print('Post requests failed: {}'.format(ex))\n\ndef send(url):\n print(\"Sending requests to {} at {}: \".format(url, datetime.datetime.now()))\n threading.Timer(10800.0, send, args=[url]).start()\n requests.get(url)\n print(\"Finished at: {}!\".format(datetime.datetime.now()))\n\nif __name__== \"__main__\":\n print(\"Running main!!\")\n send('http://127.0.0.1:56733/links')\n send('http://127.0.0.1:56733/price')\n send('http://127.0.0.1:56733/sold')\n send('http://127.0.0.1:56733/visning')\n send('http://127.0.0.1:56733/clean')\n","repo_name":"Soumya117/finnScanTimer","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"15152972380","text":"# %%\nimport numpy as np\nfrom scipy.io import wavfile\nimport audio_dspy as adsp\nimport matplotlib.pyplot as plt\n\n# %%\nfs, x = wavfile.read('spring-reverb-impulse.wav')\nx = x[2325:200000] / 2**15\n# sample rate: 44100\n\n# %%\nplt.plot(x)\n\n# %%\nfreqs, peaks = adsp.find_freqs(x, fs, above=30, thresh=-2, frac_off=0.01, plot=True)\nplt.xlim(20, 20000)\nprint(len(freqs))\n\n# %%\ntaus = np.zeros_like(freqs)\ntaus[:5] = adsp.find_decay_rates(freqs[:5], x[:int(fs*2.0)], fs, 30, thresh=-18, plot=False)\ntaus[5:60] = adsp.find_decay_rates(freqs[5:60], x[:int(fs*2.5)], fs, 30, thresh=-22, plot=False)\ntaus[60:] = adsp.find_decay_rates(freqs[60:], x[:int(fs*3.5)], fs, 30, thresh=-20, plot=False)\n\n# %%\namps = adsp.find_complex_amplitudes (freqs, taus, int(len(x) * 0.65), x, fs)\n\n# %%\ny = adsp.generate_modal_signal(amps, freqs, taus, len(amps), len(x), fs)\n\n# %%\nplt.plot(y * np.max(np.abs(x)))\nplt.plot(x)\n\n# %%\nX = adsp.normalize(np.fft.rfft (x))\nY = adsp.normalize(np.fft.rfft (y))\nf = np.linspace (0, fs/2, num=len(X))\nplt.semilogx (f, 20 * np.log10 (np.abs (X)))\nplt.semilogx (f, 20 * np.log10 (np.abs (Y)))\nplt.xlim(20, 20000)\n\n# %%\ny_wav = (y * 2**15).astype(np.int16)\nwavfile.write('impulse-render.wav', fs, y_wav)\n\n# %%\ndef print_array(arr):\n for x in arr:\n print(f'{x}f,')\n\n# %%\nprint_array(freqs)\n\n# %%\nprint_array(taus)\n\n# %%\nprint_array(amps.real)\n\n# %%\nprint_array(amps.imag)\n\n# 
%%\n","repo_name":"Chowdhury-DSP/chowdsp_utils","sub_path":"examples/ModalSpringReverb/impulse_analysis.py","file_name":"impulse_analysis.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"28"} +{"seq_id":"15134080421","text":"# My Rating = 3\n# https://leetcode.com/problems/add-two-numbers/\n# You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.\n# You may assume the two numbers do not contain any leading zero, except the number 0 itself.\n# Example:\n# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n# Output: 7 -> 0 -> 8\n# Explanation: 342 + 465 = 807.\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n carr=0\n pre=None\n while True:\n if l1==None:\n num1=0\n else:\n num1=l1.val\n if l2==None:\n num2=0\n else:\n num2=l2.val\n tempSum=num1+num2+carr\n carr=tempSum//10\n tempSum=tempSum%10\n if carr==0 and tempSum==0 and l1==None and l2==None:\n break\n if pre!=None:\n pre.next = ListNode(tempSum)\n pre=pre.next\n else:\n head=ListNode(tempSum)\n pre=head\n if l1!=None:\n l1=l1.next\n if l2!=None:\n l2=l2.next\n return head\n ","repo_name":"Htrams/Leetcode","sub_path":"add_two_numbers_in_linkedList.py","file_name":"add_two_numbers_in_linkedList.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16486574768","text":"import numpy as np\nfrom intersection import Intersection as it\nimport svg\n\nclass Line:\n def __init__(self):\n self.points = [[None, None],[None, None]]\n self.vector = [None, None]\n self.degree = 0\n self.radian = 0\n self.a = 0\n self.b = 0\n self.function = \"y = {0}x + {1}\".format(self.a,self.b)\n\n def set_2points(self, point1, point2):\n self.points = [point1, point2]\n arr_p1 = np.array(point1)\n arr_p2 = np.array(point2)\n arr_vec = arr_p2 - arr_p1\n self.vector = arr_vec.tolist()\n self.radian = np.arctan(arr_vec[1]/arr_vec[0])\n self.degree = np.rad2deg(self.radian)\n self.a = np.tan(self.radian)\n self.b = point1[1] - self.a * point1[0]\n self.function = \"y = {0}x + {1}\".format(self.a,self.b)\n \n def set_angle_point(self, angle, point, is_radian = False):\n if is_radian == False:\n angle = np.deg2rad(angle)\n point2 = [point[0] + np.cos(angle), point[1] + np.sin(angle)]\n self.set_2points(point, point2)\n\n def set_vector_point(self, vector, point):\n point2 = [point[0] + vector[0], point[1] + vector[1]]\n self.set_2points(point, point2)\n \n def get_angle2lines(self, line, is_radian = False):\n angle_1 = self.radian\n angle_2 = line.radian\n angle_A = abs(angle_1 - angle_2)\n angle_B = np.pi - angle_A\n if angle_A < angle_B:\n angle = [angle_A, angle_B]\n else:\n angle = [angle_B, angle_A]\n if is_radian == False:\n angle = np.rad2deg(angle).tolist()\n return angle\n\n def x_point(self, line):\n point1 = self.points[0]\n point2 = line.points[0]\n vector1 = self.vector\n vector2 = line.vector\n x = it(point1, vector1, point2, vector2)\n return x\n\n def is_on_line(self, point, threshold = 0.00001):\n a = np.tan(self.radian)\n b = self.points[0][1] - a * self.points[0][0]\n if abs(point[1] -(a * point[0] + b)) > threshold:\n return False\n 
else:\n return True\n\n def extention_x(self, terminal_x2, terminal_x1 = None):\n if terminal_x1 == None:\n terminal_x1=self.points[0][0]\n point1=[terminal_x1, self.a * terminal_x1 + self.b]\n point2=[terminal_x2, self.a * terminal_x2 + self.b]\n self.set_2points(point1, point2)\n \n def extention_y(self, terminal_y2, terminal_y1 = None):\n if terminal_y1 == None:\n terminal_y1=self.points[0][1]\n point1=[(terminal_y1-self.b)/self.a, terminal_y1]\n point2=[(terminal_y2-self.b)/self.a, terminal_y2]\n self.set_2points(point1, point2)\n\n def extention_Length(self, length):\n point1 = self.points[0]\n point2 = [length*np.cos(self.radian)+self.points[0][0], length*np.sin(self.radian)+self.points[0][1]]\n self.set_2points(point1,point2)\n \n def extention_xy(self,top_left,bottom_right,):\n (terminal_x1, terminal_y1) = top_left\n (terminal_x2, terminal_y2) = bottom_right\n self.extention_x(terminal_x2,terminal_x1)\n self.extention_y(terminal_y2,terminal_y1)\n\n def write_svg(self, svg, stroke=\"black\", stroke_width=1):\n x1=self.points[0][0]\n y1=self.points[0][1] \n x2=self.points[1][0]\n y2=self.points[1][1]\n svg.create_line(x1,y1,x2,y2,stroke=stroke,stroke_width=stroke_width)","repo_name":"McQotoCovayashi/svg","sub_path":"line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"39051793259","text":"import os\nimport sys\nimport subprocess\nfrom .main import get_vars\n\n## Inserting echos\n## Inserts echos (in a secondary temporaroy file) corresponding to assignments, function calls and function definitions\n##\n## @param filename Name/location of original file that was parsed\n## @param data Insert data that was generated from bashListener.py on passed onto this in *main* \ndef inserter(filename,data):\n\n\t# print(len(data))\n\tdata = sorted(data, key = lambda tup: (tup[0][0], tup[0][1]), reverse= True)\n\twith open(filename) as fh:\n\t\tcontent = fh.readlines()\n\n\tfor (line_no, col), text in data:\n\t\tif col != -1 : \n\t\t\tcontent[line_no-1] = content[line_no-1][0:col] + text + content[line_no-1][col:]\n\t\telse : \n\t\t\tif \"for\" in content[line_no-1]:\n\t\t\t\tline_no+=1\n\t\t\tcontent.insert(line_no, text)\n\t\n\twith open(f\"temp_{filename}\",\"w\") as fh:\n\t\tfh.truncate(0)\n\t\tfh.writelines(content)\n\n## Execute the subprocess\n## Funciton defined to execute a subprocess to run the temporary secondary file generated\ndef execute(cmd):\n\tprocess = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)\n\tstdout, stderr = process.communicate()\n\t# print(stdout)\n\t# print(stderr)\n\treturn stdout\n\n\n\nif __name__=='__main__':\n\t# a list of tuples that has the line number and the \n\t# name of the variable to be printed\n\t# line number is the one after which the echo has to be inserted\n\t# data = [(13,'ans'),(7,'i')]\n\tdata = get_vars('input.sh')\n\tprint(data)\n\tinserter('input.sh',data)\n\texecute([\"./temp_input.sh\",\"10\"])","repo_name":"gurnoor6/Bash-De-Bug","sub_path":"utils/inserter.py","file_name":"inserter.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"26346210323","text":"import time\nimport numpy as np\nfrom ben.helper_IO import load_list_from_file,write_list_to_file\nfrom ben.helper_location_and_bearing import *\n\n\nclass Bot(object):\n\n\n def __init__(self,R,calib=False):\n self.grabbed=False\n 
self.R=R\n self.motors=R.motors\n self.location=R.location\n if calib:\n # This code can be used to calibrate the robot. This calibration information can then be accesed through\n # a Calibration object\n\n calibrate(R,5,100,graduation=5,samples=8,drive_turn=\"drive\",write=True)\n calibrate(R,5,100,graduation=5,samples=8,drive_turn=\"turn\",write=True)\n\n self.constants=Calibration()\n\n def see(self,*args,**kwargs):\n return self.R.see(*args,**kwargs)\n\n def drive(self,dist,speed=100,t=0.1):\n\n average_speed=self.constants.d_averages[speed]\n\n self.R.motors[0].m0.power = speed\n self.R.motors[0].m1.power = speed\n try:\n time.sleep(dist/average_speed)\n except:\n print (dist,average_speed)\n time.sleep((abs(dist/average_speed)))\n self.R.motors[0].m0.power = 0\n self.R.motors[0].m1.power = 0\n time.sleep(t)\n\n def turn(self,angle_right,speed=50,t=0.1):\n\n average_speed = self.constants.t_averages[speed]\n\n\n time.sleep(t)\n if angle_right<0:\n angle_right=abs(angle_right)\n speed=-speed\n\n self.R.motors[0].m0.power = speed\n self.R.motors[0].m1.power = -speed\n\n time.sleep(angle_right/average_speed)\n self.R.motors[0].m0.power = 0\n self.R.motors[0].m1.power = 0\n time.sleep(t)\n\n def drive_raw(self,speed, seconds):\n self.R.motors[0].m0.power = speed\n self.R.motors[0].m1.power = speed\n time.sleep(seconds)\n self.R.motors[0].m0.power = 0\n self.R.motors[0].m1.power = 0\n\n def turn_raw(self,speed, seconds):\n self.R.motors[0].m0.power = speed\n self.R.motors[0].m1.power = -speed\n time.sleep(seconds)\n self.R.motors[0].m0.power = 0\n self.R.motors[0].m1.power = 0\n\n def grab(self):\n self.grabbed=self.R.grab()\n\n def release(self):\n self.R.release()\n self.grabbed=False\n\n @property\n def heading(self):\n return self.R.heading\n\nclass Calibration(object):\n\n def __init__(self,drive_dict=None,turn_dict=None):\n if drive_dict and turn_dict:\n self.drive_dict=drive_dict\n self.turn_dict=turn_dict\n\n else:\n self.drive_dict,self.turn_dict=load_calibrations()\n\n for i in self.drive_dict.keys():\n self.drive_dict[i]=self.reject_outliers(self.drive_dict[i])\n\n for i in self.turn_dict.keys():\n self.turn_dict[i] = self.reject_outliers(self.turn_dict[i])\n\n self.update_averages()\n\n def update_averages(self):\n self.update_outliers()\n self.d_averages = {key: np.average(values) for (key, values) in self.drive_dict.iteritems()}\n self.t_averages = {key: np.average(values) for (key, values) in self.turn_dict.iteritems()}\n\n def update_outliers(self):\n\n for key,value in self.drive_dict.iteritems():\n self.drive_dict[key]=self.reject_outliers(value)\n\n for key, value in self.turn_dict.iteritems():\n self.turn_dict[key] = self.reject_outliers(value)\n\n def update_data(self,data,driveturn=\"drive\"):\n if driveturn==\"drive\":\n for key,value in data.iteritems():\n l=len(value)\n self.drive_dict[key].append()\n del self.drive_dict[key][:l]\n elif driveturn==\"turn\":\n for key,value in data.iteritems():\n l=len(value)\n self.turn[key].append()\n del self.turn[key][:l]\n self.update_averages()\n\n\n\n def reject_outliers(self,data):\n\n std=np.std(data)\n mean=np.mean(data)\n low=lambda x:x>mean-2*std\n high=lambda x:x\\\"\")\n print(\"getalarm \\t\\t: displays list of alarms\")\n print(\"\\tUsage \\t: getalarm\")\n print(\"deletealarm \\t\\t: delete alarm from list of alarms\")\n print(\"\\tUsage \\t: deletealarm \")\n print(\"stop \\t\\t: stop alarm\")\n print(\"\\tUsage \\t: stop\")\n print(\"help \\t\\t: displays list of commands\")\n print(\"\\tUsage \\t: help\")\n 
print(\"exit \\t\\t: exit application\")\n print(\"\\tUsage \\t: exit\")\n\ndef Set_alarm(cmd) :\n try:\n tm = re.match('(((0[0-9])|(1[0-2])):[0-5][0-9]:[0-5][0-9](A|P)M)$', cmd[1])\n if tm is not None:\n fo = open(\"writeData.csv\", \"a\")\n fo.writelines(cmd[1] +\" \" + cmd[2] + \"\\n\")\n fo.close()\n else:\n print(\"Time Is Invalid\")\n \n except ValueError:\n pass\n\n\ndef Get_alarm() :\n fo = open(\"writeData.csv\", \"r\")\n linenu = 1\n for lines in fo:\n print(\"%d. %s\" %(linenu ,lines))\n linenu += 1\n fo.close()\n\ndef Delete_alarm(cmd) :\n flag = 0\n infile = open('writeData.csv','r').readlines()\n with open('writeData.csv','w') as outfile:\n for index,line in enumerate(infile):\n if index != (int(cmd[1],10) - 1):\n outfile.write(line)\n else:\n flag = 1\n if flag == 0:\n print(\"Invalid Line Number\")\n \n outfile.close()\n\ndef compare():\n global stop_flag\n now = datetime.datetime.now()\n d_today = now.strftime(\"%I:%M:%S%p\")\n \n fo = open(\"writeData.csv\", \"r\")\n for lines in fo:\n if lines.find(d_today) != -1:\n mixer.music.play()\n stop_flag = 1\n fo.close()\n if stop_flag != 0:\n stop_flag += 1\n if stop_flag == 30:\n stop_flag = 0\n mixer.music.stop()\n threading.Timer(1, compare).start()\n\n\nprint(\"This Program Is For Alarm Operations..............................\")\nhelp()\nmixer.init()\nmixer.music.load(\"mysong.mp3\")\nthreading.Timer(1, compare).start()\n#compare()\n\nwhile 1:\n x = input(\"->\")\n cmd = re.split(\"\\s\" , x)\n if(cmd[0] == \"help\"):\n if(len(cmd) == 1):\n help()\n else:\n print(\"Invalid Arguments\")\n \n elif(cmd[0] == \"setalarm\"):\n if(len(cmd) == 3):\n Set_alarm(cmd)\n else:\n print(\"Invalid Arguments\")\n \n elif(cmd[0] == \"getalarm\"):\n if(len(cmd) == 1):\n Get_alarm()\n else:\n print(\"Invalid Arguments\")\n \n elif(cmd[0] == \"deletealarm\"):\n if(len(cmd) == 2):\n Delete_alarm(cmd)\n else:\n print(\"Invalid Arguments\")\n\n elif(cmd[0] == \"exit\"):\n if(len(cmd) == 1):\n print(\"Application is sutting down....\")\n mixer.music.stop()\n break\n else:\n print(\"Invalid Arguments\")\n\n elif(cmd[0] == \"stop\"):\n if(len(cmd) == 1):\n mixer.music.stop()\n break\n else:\n print(\"Invalid Arguments\")\n\n elif(cmd[0] == \"\"):\n ()\n \n else :\n print(\"Invalid Command\") \n \n","repo_name":"pooja-k/java-Tasks","sub_path":"Thread.py","file_name":"Thread.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"42461639780","text":"import os\nimport json\nfrom locust import HttpUser, task, between, TaskSet\nfrom locustfiles.validators import User, is_success, is_throttled, is_message_response\nfrom websocket import create_connection\nimport random\n\nclass SendThenReceiveMessage(TaskSet):\n user_1 = User(1, \"eb622f9ac993c621391de3418bc18f19cb563a61\")\n user_2 = User(6, \"df3b32643068fb94041e54bb316957476d265beb\")\n users = [user_1, user_2]\n sender = None\n receiver = None\n\n statements = [\n 'Hello', \n 'How are you?', \n 'This chat is nice',\n ]\n \n @task\n def send_and_receive(self):\n self.init_chat()\n self.send_message()\n self.receive_message()\n self.disconnect()\n self.client.cookies.clear()\n\n def init_chat(self):\n uri = os.environ[\"CHAT_SOCKET\"]\n self.ws = create_connection(uri)\n self.sender = random.choice(self.users)\n self.receiver = random.choice([user for user in self.users if user != self.sender])\n\n init_message = {\n \"type\": \"init\",\n \"sender\": self.sender.id,\n \"receiver\": self.receiver.id,\n 
}\n init_message_json = json.dumps(init_message)\n self.ws.send(init_message_json)\n response_obj = json.loads(self.ws.recv())\n if is_throttled(response_obj): return\n assert is_success(response_obj), \"Chat initialization failed.\"\n\n def send_message(self):\n statement = random.choice(self.statements)\n message = {\n \"type\": \"message\",\n \"sender\": self.sender.id,\n \"receiver\": self.receiver.id,\n \"body\": statement,\n \"token\": self.sender.token,\n }\n message_json = json.dumps(message)\n self.ws.send(message_json)\n response_obj = json.loads(self.ws.recv())\n if is_throttled(response_obj): return\n assert is_message_response(response_obj), \"Sent text was not a message.\"\n\n def receive_message(self):\n response_obj = json.loads(self.ws.recv())\n if is_throttled(response_obj): return\n assert is_message_response(response_obj), \"Received text was not a message.\"\n\n def disconnect(self):\n self.ws.close()\n \nclass MessagingUser(HttpUser):\n tasks = [SendThenReceiveMessage]\n wait_time = between(5, 15)","repo_name":"LeaveyLabs/mist-backend","sub_path":"locustfiles/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16576189590","text":"#!/usr/bin/env python3\ndef make_graph(text):\n graph = {}\n edges = []\n for line in text.split('\\n'):\n val = -1\n v0 = v1 = None\n for i in range(len(line)):\n char = line[i]\n if char in ' ->':\n continue\n\n if ord('0') <= ord(char) <= ord('9'):\n val = int(line[i:])\n break\n\n if ord('A') <= ord(char) <= ord('Z'):\n if v0 is None:\n v0 = ord(char) - ord('A')\n elif v1 is None:\n v1 = ord(char) - ord('A')\n\n if val != -1 and v0 is not None and v1 is not None:\n if v0 not in graph:\n graph[v0] = dict()\n if v1 not in graph:\n graph[v1] = dict()\n graph[v0][v1] = val\n edges.append((val, v0, v1))\n\n return graph, edges\n\n\ndef make_array(text):\n arr = []\n for i in text:\n if ord('A') <= ord(i) <= ord('Z'):\n arr.append(ord(i) - ord('A'))\n return arr\n\n\ndef dijkstra(graph, start, end):\n SIZE = 8\n MAX = 1000\n dist = [MAX] * SIZE\n to_reach = [MAX] * SIZE\n dist[start] = 0\n reached = set([start])\n src = start\n\n while len(reached) != SIZE:\n for dst in graph[src]:\n if dst in reached:\n to_reach[dst] = MAX\n else:\n to_reach[dst] = min(to_reach[dst], graph[src][dst])\n dst = min(range(len(to_reach)), key=to_reach.__getitem__)\n\n for i in graph[src]:\n newdist = dist[src] + graph[src][i]\n if newdist < dist[i]:\n dist[i] = newdist\n\n if src == end:\n for v in dist:\n print('-' if v == MAX else v, end=' ')\n print()\n return\n\n reached.add(dst)\n to_reach[dst] = MAX\n src = dst\n\n\ndef dap_sp(graph, top, end):\n SIZE = 8\n MAX = 1000\n dist = [MAX] * SIZE\n dist[top[0]] = 0\n\n for src in top:\n for dst in graph[src]:\n newdist = dist[src] + graph[src][dst]\n if newdist < dist[dst]:\n dist[dst] = newdist\n if src == end:\n for v in dist:\n print('-' if v == MAX else v, end=' ')\n print()\n return\n\n\ndef bellman_ford(graph, start):\n SIZE = 8\n MAX = 1000\n dist = [MAX] * SIZE\n dist[start] = 0\n\n for time in range(3):\n for src in graph:\n for dst in graph[src]:\n newdist = dist[src] + graph[src][dst]\n if newdist < dist[dst]:\n dist[dst] = newdist\n\n for v in dist:\n print('-' if v == MAX else v, end=' ')\n print()\n\n\nq1 = '''\n A->E 2\n B->A 37\n B->C 14\n B->E 41\n C->D 10\n D->H 4\n F->B 32\n F->C 38\n F->E 79\n G->C 65\n G->F 19\n G->H 4\n H->C 57\n'''\nq1_start = 'G'\nq1_end = 
'C'\n\nq2 = '''\n    A->E 31\n    A->F 1\n    B->A 21\n    B->F 16\n    C->B 22\n    C->F 41\n    C->G 5\n    D->C 35\n    D->G 49\n    D->H 50\n    F->E 31\n    G->F 34\n    G->H 2\n'''\nq2_top = ' D C G H B A F E'\nq2_end = 'A'\n\nq3 = '''\n    A->E 5\n    B->A 20\n    B->C 3\n    D->C 21\n    F->A 50\n    F->B 24\n    F->E 7\n    F->C 1\n    G->F 34\n    G->C 32\n    H->D 18\n    H->C 37\n    H->G 10\n'''\nq3_start = 'H'\n\ndijkstra(make_graph(q1)[0],\n         ord(q1_start) - ord('A'),\n         ord(q1_end) - ord('A'))\n\ndap_sp(make_graph(q2)[0],\n       make_array(q2_top),\n       ord(q2_end) - ord('A'))\n\nbellman_ford(make_graph(q3)[0],\n             ord(q3_start) - ord('A'))\n","repo_name":"hghwng/mooc-algs2","sub_path":"quiz/2-sp.py","file_name":"2-sp.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"11349562546","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport pandas as pd\nfrom PIL import Image\nimport h5py as h5\n\n\n# Read RGB\n\n# Read Depth\ndepth_file = h5.File('2_cam_1_0.h5', \"r\")\ndepth_frames = np.array(depth_file.get(\n    \"/depth\"),\n    dtype=float)\n\n# Read CSV\nlabels = pd.read_csv(\"2_cam_1_0_labels.csv\")\n\n# Create a Rectangle patch\nx, y = (labels['x1']+labels['x2'])/2, (labels['y1']+labels['y2'])/2\nx, y = labels['x1'], labels['y1']\nwidth, height = (labels['x2'] - labels['x1'], labels['y2']-labels['y1'])\n\nfor i in range(len(labels)):\n    frame_id = int(labels['frame'][i][-2:])\n\n    if frame_id < 9:\n        im = np.array(Image.open('frames/2_cam_1_0_0%d.png' % (frame_id+1)), dtype=np.uint8)\n    else:\n        im = np.array(Image.open('frames/2_cam_1_0_%d.png' % (frame_id+1)), dtype=np.uint8)\n\n    fig, ax = plt.subplots(2, figsize=(50, 50))\n\n    # Display the image\n    ax[0].imshow(im)\n    ax[1].imshow(depth_frames[frame_id])\n\n    rect = patches.Rectangle((x[i], y[i]), width[i], height[i], linewidth=1, edgecolor='r',\n                             facecolor='none')\n    rect2 = patches.Rectangle((x[i] + 50*(np.abs(x[i]-424)/424), y[i] + 75*(np.abs(y[i]-240)/240)), width[i] / 1.25, height[i] / 1.36, linewidth=1, edgecolor='r',\n                              facecolor='none')\n\n    # Add the patch to the Axes\n    ax[0].add_patch(rect)\n    ax[1].add_patch(rect2)\n    plt.title(\"Frame %i\" %frame_id)\n    plt.show()\n","repo_name":"CristinaMelicio/peopledetection","sub_path":"scripts/analytics/plot_labels.py","file_name":"plot_labels.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"23748119555","text":"import numpy as np\nimport time\nimport serial\n\n# Message I want to send\nmsg_on = \"y\"\n\n# The message has to be encoded\nmsg_enc = str.encode(msg_on)\n\n# serial.Serial lets us open the desired COM port\nser = serial.Serial(\"/dev/rfcomm0\", baudrate = 115200, timeout = 1)\ntime.sleep(1)\n\nwhile True:\n    try:\n        ser.write(msg_enc)\n        print(\"Message sent\")\n        time.sleep(10)\n    except KeyboardInterrupt:\n        break\n\nser.close()","repo_name":"Maratripa/Proyecto-IRB2001","sub_path":"proyecto/act2/serialcom.py","file_name":"serialcom.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"15182863656","text":"def convert(number):\n    #define variable raindrops as a string\n    raindrops = ''\n\n    if number % 3 == 0:\n        raindrops += 'Pling'\n\n    if number % 5 == 0:\n        raindrops += 'Plang'\n\n    if number % 7 == 0:\n        raindrops += 'Plong'\n\n    return str(raindrops or 
number)\n\n","repo_name":"ajedrych/exercism.io-python","sub_path":"raindrops/raindrops.py","file_name":"raindrops.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"} +{"seq_id":"14626983312","text":"from flask import Blueprint, render_template, redirect, url_for, flash, session, request\n\nfrom yumroad.extensions import db, login_manager\nfrom yumroad.models import User, Store\nfrom yumroad.forms import SignupForm, LoginForm\nfrom yumroad.email import send_welcome_message\n\nfrom flask_login import login_user, current_user, login_required, logout_user\n\nuser_bp = Blueprint('user', __name__)\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(user_id)\n\n@login_manager.unauthorized_handler\ndef unauthorized():\n session['after_login'] = request.url\n flash('You need to login', 'warning')\n return redirect(url_for('user.login'))\n\n@user_bp.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n flash('You are already logged in', 'warning')\n return redirect(url_for('products.index'))\n form = SignupForm()\n if form.validate_on_submit():\n # create a user\n user = User.create(form.email.data, form.password.data)\n db.session.add(user)\n store = Store(name=form.store_name.data, user=user) # may need another db.session.add(store)\n db.session.commit()\n login_user(user)\n send_welcome_message(user)\n flash(\"Registered successfully\", \"success\")\n return redirect(url_for('products.index'))\n return render_template('users/register.html', form=form)\n\n@user_bp.route('/login', methods=['GET', 'POST'])\ndef login():\n\n if current_user.is_authenticated:\n flash('You are already logged in', 'warning')\n return redirect(url_for('products.index'))\n\n form = LoginForm()\n if form.validate_on_submit():\n # log in user\n user = User.query.filter_by(email=form.email.data).one()\n login_user(user)\n flash('Logged in successfully', 'success')\n return redirect(session.get('after_login') or url_for('products.index'))\n return render_template('users/login.html', form=form)\n\n@user_bp.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('products.index'))\n","repo_name":"DentonPaul/yumroad-app","sub_path":"yumroad/blueprints/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"21592426188","text":"\"\"\"w6- Leia uma distância em KM e a quantidade de litros de gasolina consumidos por\n um carro em eum percurso, calcule o consumo de Km/L e escreva uma mensagem de acordo\n com a tabela abaixo.\n CONSUMO Km/l MENSAGEM\n menor que 8 Venda o carro!\n entre 8 e 11 Econômico!\n maior que 12 Super econômico!\n \"\"\"\n\nkm = float(input(\"Insira uma distancia em quilômetros: \"))\nli = float(input(\"Quants litros de gasolina são consumida nessa distancia? 
\"))\n\nif km / li < 8:\n print(\"Venda o carro!\")\nelif km / li >= 8 and km / li < 12:\n print(\"Econômico!\")\nelse:\n print(\"Super econômico!\")\n","repo_name":"DanielMalheiros/geekuniversity_programacao_em_python_essencial","sub_path":"Exercicios/secao05_estruturas_logicas_e_condicionais/exercicio26.py","file_name":"exercicio26.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"1726534084","text":"## Given a sorted array arr containing n elements with possibly duplicate elements, the task is to find indexes of first and last occurrences of an element x in the given array.\n\ndef firstLast(arr, n, x):\n first_index = -1\n last_index = -1\n\n for i in range(0, n):\n if arr[i] == x:\n first_index = i\n break\n\n for i in range(n-1, -1, -1):\n if arr[i] == x:\n last_index = i\n break\n return first_index, last_index\n\narr = [1,3,5,5,5,5,67,123,125]\nn = 9\nx = 5\nfirstLast(arr, n, x)","repo_name":"vanisingh-24/Intro-To-Python","sub_path":"DSA_450_Questions/SearchingAndSorting/001_FirstAndLastOccurrenceOfX.py","file_name":"001_FirstAndLastOccurrenceOfX.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"37113586224","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\n\n\nclass Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n super().__init__((0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def do_3d_projection(self, renderer=None):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n\n return np.min(zs)\n\n\ndef plotCoordinateSystem(\n ax,\n T,\n scale=1,\n arrowSize=10,\n offsetScale=1.1,\n originText=None,\n xText=None,\n yText=None,\n zText=None,\n):\n # Here we create the arrows:\n arrow_prop_dict = dict(\n mutation_scale=arrowSize, arrowstyle=\"-|>\", shrinkA=0, shrinkB=0\n )\n\n origin = T[:3, 3]\n X = scale * T[:3, 0]\n Y = scale * T[:3, 1]\n Z = scale * T[:3, 2]\n\n XArrow = Arrow3D(\n [origin[0], origin[0] + X[0]],\n [origin[1], origin[1] + X[1]],\n [origin[2], origin[2] + X[2]],\n **arrow_prop_dict,\n color=\"r\"\n )\n ax.add_artist(XArrow)\n\n YArrow = Arrow3D(\n [origin[0], origin[0] + Y[0]],\n [origin[1], origin[1] + Y[1]],\n [origin[2], origin[2] + Y[2]],\n **arrow_prop_dict,\n color=\"b\"\n )\n ax.add_artist(YArrow)\n\n ZArrow = Arrow3D(\n [origin[0], origin[0] + Z[0]],\n [origin[1], origin[1] + Z[1]],\n [origin[2], origin[2] + Z[2]],\n **arrow_prop_dict,\n color=\"g\"\n )\n ax.add_artist(ZArrow)\n\n if originText is not None:\n offsetPos = origin - (offsetScale - 1) * (X + Y + Z)\n ax.text(\n (offsetPos[0]),\n (offsetPos[1]),\n (offsetPos[2]),\n \"$\" + str(originText) + \"$\",\n )\n if xText is not None:\n ax.text(\n (origin[0] + X[0]) * offsetScale,\n (origin[1] + X[1]) * offsetScale,\n (origin[2] + X[2]) * offsetScale,\n r\"$x$\",\n )\n if yText is not None:\n ax.text(\n (origin[0] + Y[0]) * offsetScale,\n (origin[1] + Y[1]) * offsetScale,\n (origin[2] + Y[2]) * offsetScale,\n r\"$y$\",\n )\n if zText is not None:\n ax.text(\n (origin[0] + Z[0]) * offsetScale,\n (origin[1] + Z[1]) * offsetScale,\n (origin[2] + Z[2]) * offsetScale,\n r\"$z$\",\n 
)\n","repo_name":"markuswnuk91/trackDLO","sub_path":"src/visualization/plotCoordinateSystems.py","file_name":"plotCoordinateSystems.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"42660067797","text":"import mfgeo, numpy as np\nfrom mfgeo.noise import oneoverf, autoregressive\nfrom mfgeo import figurateur\nimport time\nfrom matplotlib import pyplot as plt\nimport path\n\n\nn = 10000\nrngs = [np.random.default_rng(seed=mu) for mu in range(10000)]\n\ngenerator = autoregressive\n\nangle_steps = 100\nangle = np.linspace(1/angle_steps, np.pi/2, angle_steps)\n\nadjusted_alpha = 0.5\nsigma_ref = np.array([generator.sequence(n, 0, rng) for rng in rngs])\nslope_base = 1/np.tan(angle)\nref = np.array(mfgeo.g1_distant(sigma_ref, slope_base/adjusted_alpha))\n\n\nfor beta in np.linspace(-0.99, 0.99, 3):\n\tprint(f'b : {beta}')\n\tt0 = time.time()\n\tsigmas = np.array([generator.sequence(n, beta, rng) for rng in rngs])\n\n\tdef tested(a):\n\t\treturn mfgeo.g1_distant(sigmas, slope_base/a)\n\n\tdef dist(a):\n\t\treturn np.sum((tested(a) - ref)**2)\n\n\tdef diff(a):\n\t\treturn dist(a+0.05) - dist(a)\n\n\tdef fall():\n\t\ta = 0.5\n\t\td = diff(a)\n\t\twhile np.abs(d)>0.05:\n\t\t\td = diff(a)\n\t\t\ta -= 0.05*d\n\n\t\tt1 = time.time()\n\t\tprint('d :', d)\n\t\tprint('a :', a)\n\t\tprint('time :', t1-t0, flush=True)\n\t\treturn a, dist(a)\n\n\ta, d = fall()\n\n\tplt.plot(angle, tested(a), label=f'b={beta:.2f}, a={a:.2f}, diff={d:.2f}')\n\n\nplt.plot(angle, ref, label=f'b={0:.2f} (ref). a = {adjusted_alpha:.2f}', linewidth=4)\nplt.legend()\n\nplt.tight_layout()\nplt.savefig(path.out(f'fit_to_b0_{generator.name}.png'), dpi=200)\nplt.show()\n","repo_name":"trnciii/extinction","sub_path":"test/fit_to_b0.py","file_name":"fit_to_b0.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"34207716578","text":"from typing import List \r\nclass Solution:\r\n def setZeroes(self, matrix: List[List[int]]) -> None:\r\n \"\"\"\r\n Do not return anything, modify matrix in-place instead.\r\n 之前做过,发现(i,j)为零设置[0,j],[i,0]为零,然后寻找所有的[0,x]把所有的x列设置为0,同理这是所有的[y,0]设置所有的y为0.\r\n 主要考察观察能力观察\r\n \"\"\"\r\n const_m=len(matrix)\r\n const_n=len(matrix[0])\r\n first_col_zero=not all(matrix[i][0] for i in range(const_m) )\r\n \r\n first_row_zero=not all([ matrix[0][j] for j in range(const_n) ] ) \r\n \r\n for i in range(const_m):\r\n for j in range(const_n):\r\n if matrix[i][j]==0:\r\n matrix[i][0]=0\r\n matrix[0][j]=0\r\n for i in range(1,const_m):\r\n if matrix[i][0]==0:\r\n for j in range(const_n):\r\n matrix[i][j]=0\r\n for j in range(1,const_n):\r\n if matrix[0][j]==0:\r\n for i in range(const_m):\r\n matrix[i][j]=0\r\n if first_col_zero : # 注意需要做一些保留\r\n for i in range(const_m):\r\n matrix[i][0]=0\r\n if first_row_zero:\r\n for j in range(const_n):\r\n matrix[0][j]=0\r\n return \r\n \r\nif __name__ == \"__main__\":\r\n instance=Solution()\r\n # instance.setZeroes([[0,1,2,0],[3,4,5,2],[1,3,1,5]])\r\n # instance.setZeroes([[0,1,2,0],[3,4,5,2],[1,3,1,5]])\r\n instance.setZeroes([[1,0,3]])\r\n\r\n \r\n ","repo_name":"wanbiguizhao/leetcode","sub_path":"questionbank/73. Set Matrix Zeroes.py","file_name":"73. 
Set Matrix Zeroes.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17450095190","text":"\"\"\"\nQuestion:\nhttps://leetcode.com/problems/remove-nth-node-from-end-of-list/description/\n\"\"\"\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution:\n def removeNthFromEnd(self, head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n looker, ptr = head, None\n\n for i in range(n):\n looker = looker.next\n while looker:\n looker, ptr = looker.next, ptr.next if ptr else head\n if ptr:\n ptr.next = ptr.next.next\n\n return head if ptr else head.next\n","repo_name":"Alan32Liu/LeetBox","sub_path":"19. Remove Nth Node From End of List.py","file_name":"19. Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"71089560714","text":"\"\"\"\nContains different endpoints and their functionality\n\nCreated by: Jettin Joy\nCreated on: 06/12/2021\n\"\"\"\n\n\nimport json\nimport uuid\nfrom flask import jsonify, request\nfrom datetime import datetime\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom utils.helper_util import format_create_config\nfrom YaraUseCase.models import YaraUseCaseAPI, Steps\nfrom YaraUseCase import app, db\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"This endpoint can be used to check whether API is up and running\"\"\"\n data = {\"Status\": \"API is up and running\"}\n response = app.response_class(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n return response\n\n\n@app.route(\"/create\", methods=['POST'])\ndef create_pipeline_steps():\n \"\"\"Create a CircleCI configuration for the specified repo\n Parameters:\n organization (string): Organization name in which repo\n is associated with\n repo (string): Repository for which configuration to be generated\n Returns:\n config_steps (dict): The configuration steps for the repo in\n \"\"\"\n status = {}\n if request.method == 'POST':\n try:\n created_data = YaraUseCaseAPI(\n **format_create_config(request.get_json())\n )\n db.session.add(created_data)\n db.session.commit()\n status = jsonify({\"Status\": \"Created data successfully\"}), 201\n except (KeyError, SQLAlchemyError) as err:\n db.session.rollback()\n status = jsonify({\"Status\": \"Operation cannot be completed\"}), 500\n print(str(err))\n return status\n\n\n@app.route(\"/retrieve/all\", methods=['GET'])\ndef retrieve():\n \"\"\"Retrieve the config data for all the repo in organization\"\"\"\n try:\n \n result_set = YaraUseCaseAPI.query.filter_by(outdated=\"NO\",\n verified=\"NO\", status=\"PENDING\").all()\n output_result_set = list()\n for result in result_set:\n r_dict = dict()\n r_dict[\"organizarion\"] = result.organization\n r_dict['repository'] = result.repo\n r_dict['conf'] = result.pipeline_steps\n r_dict['udated_date_time'] = result.updated_date_time\n r_dict['created_date_time'] = result.created_date_time\n r_dict['created_by'] = result.created_by\n r_dict['updated_by'] = result.updated_by\n r_dict['status'] = result.status\n output_result_set.append(r_dict)\n return jsonify({\"All Repo details\": output_result_set}),200\n except (AttributeError, KeyError, SQLAlchemyError) as e_rr:\n print(str(e_rr))\n return jsonify({\"Status\": \"Something went 
wrong\"}),500\n\n@app.route(\"/retrieve/\")\ndef retrieve_repo(repo):\n \"\"\"Retrieve conf for a repo\"\"\"\n try:\n result_set = YaraUseCaseAPI.query.filter_by(outdated=\"NO\", \n repo=repo, verified=\"NO\", status=\"PENDING\").first()\n return jsonify({\n \"organization\": result_set.organization,\n \"conf\": result_set.pipeline_steps,\n \"status\": result_set.status,\n \"created_by\": result_set.created_by,\n \"updated_by\": result_set.updated_by,\n })\n except (AttributeError, KeyError, SQLAlchemyError) as e_rr:\n print(str(e_rr))\n return jsonify({\"Status\":\"Conf not found for the repo\"}), 404\n\n\n@app.route(\"/update/\", methods=[\"PUT\"])\ndef update_repo_conf(repo):\n \"\"\"Update conf for a repo\"\"\"\n try:\n existing_conf = YaraUseCaseAPI.query.filter_by(outdated=\"NO\",\n repo=repo).order_by(YaraUseCaseAPI.updated_date_time.desc()).first()\n existing_conf.conf = request.get_json().get('conf')\n existing_conf.updated_by = request.get_json().get('user')\n existing_conf.updated_date_time = datetime.now().strftime(\"'%Y-%m-%d %H:%M:%S'\")\n existing_conf.status = \"PENDING\"\n existing_conf.verified = \"NO\"\n db.session.commit()\n return jsonify({\"Status\": \"Updated Successfullt\"}), 201\n except (AttributeError, KeyError, SQLAlchemyError) as e_rr:\n db.session.rollback()\n return jsonify({\"Status\": \"Error Occured\", \"code\": str(e_rr)}), 500\n\n\n@app.route(\"/delete/\", methods=[\"DELETE\"])\ndef delete_repo_conf(repo):\n \"\"\"Delete the conf for the repo\"\"\"\n try:\n YaraUseCaseAPI.query.filter_by(outdated=\"NO\",\n repo=repo).delete(synchronize_session='fetch')\n db.session.commit()\n return jsonify({\"Status\": \"Deletion Completed\"}), 201\n except (AttributeError, KeyError, SQLAlchemyError) as e_rr:\n db.session.rollback()\n return jsonify({\"Status\": \"Error occured\", \"Code\": str(e_rr)}), 500\n\n\n@app.route(\"/statupdate/\", methods=[\"PATCH\"])\ndef patch_repo(repo):\n \"\"\"Patch the repo details\"\"\"\n try:\n existing_conf = YaraUseCaseAPI.query.filter_by(outdated=\"NO\",\n repo=repo,\n verified=\"NO\", status=\"PENDING\").order_by(YaraUseCaseAPI.updated_date_time.desc()).first()\n existing_conf.outdated = \"YES\"\n existing_conf.verfied = \"YES\"\n existing_conf.status = request.get_json().get('Status')\n existing_conf.updated_by = request.get_json().get('user')\n existing_conf.updated_date_time = datetime.now().strftime(\"'%Y-%m-%d %H:%M:%S'\")\n db.session.commit()\n return jsonify({\"Status\": \"Success\"}), 201\n except (AttributeError, KeyError, SQLAlchemyError) as e_rr:\n db.session.rollback()\n return jsonify({\"Status\": \"Error occured\", \"Code\": str(e_rr)}), 500\n\n@app.route(\"/steps\", methods=[\"POST\"])\ndef get_mand_steps():\n \"\"\"get the mandatory steps for check\"\"\"\n try:\n data = request.get_json()\n print(data)\n steps = data.get('steps')\n id = str(uuid.uuid4())\n steps = {\"MandSteps\": steps}\n c_date_time = datetime.now().strftime(\"'%Y-%m-%d %H:%M:%S'\")\n m_steps = Steps(id=id, mand_steps = steps, created_date_time=c_date_time)\n db.session.add(m_steps)\n db.session.commit()\n return jsonify({\"Status\": \"Success\"}), 201\n except (AttributeError, KeyError) as e_rr:\n db.session.rollback()\n return jsonify({\"Status\": str(e_rr)}), 500\n\n@app.route('/getsteps')\ndef retrieve_mand_steps():\n \"\"\"retrieve the mandatory steps for a repo\"\"\"\n try:\n m_steps = Steps.query.filter().order_by(Steps.created_date_time.desc()).first()\n steps = m_steps.mand_steps\n return jsonify(steps), 200\n except (AttributeError, 
KeyError, SQLAlchemyError) as e_rr:\n return {\"Status\": str(e_rr)}, 404\n\n@app.route('/report')\ndef get_repo_report():\n \"\"\"Get the repos report\"\"\"\n try:\n result_set = YaraUseCaseAPI.query.filter(YaraUseCaseAPI.status.in_((\"COMPLAINT\", \n \"NON-COMPLAINT\"))).order_by(YaraUseCaseAPI.updated_date_time.desc()).all()\n output_result_set = []\n for result in result_set:\n r_dict = dict()\n r_dict['repo'] = result.repo\n r_dict['status'] = result.status\n output_result_set.append(r_dict)\n return jsonify({\"Repo Report\": output_result_set}),200\n except (AttributeError, KeyError, SQLAlchemyError) as e_rr:\n return jsonify({\"Status\": str(e_rr)}), 404\n","repo_name":"Jettinjoy25021994/YaraUseCase","sub_path":"api1/YaraUseCase/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"21063905665","text":"from django.conf.urls.defaults import include, patterns\nfrom django.contrib import admin\nfrom django.conf import settings\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Uncomment this for admin:\n (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n (r'^admin/(.*)', admin.site.root),\n (r'^dates/(?P\\d{8})-(?P\\d{8})/?', 'geo.views.dates'),\n # (r'^upload/?', 'geo.views.upload'),\n (r'^track/(?P[0-9]+)/?$', 'geo.views.show_track'),\n (r'^track/(?P[0-9]+).kml/?$', 'geo.views.kml_track'),\n (r'^getphotos/(?P[0-9]+)/?$', 'geo.views.get_photos'),\n (r'^os-track/(?P[0-9]+)/?$', 'geo.views.show_os_track'),\n (r'^os-route/?$', 'geo.views.os_route'),\n (r'^btracks/?$', 'geo.views.between_tracks'),\n (r'^geotag/?$', 'geo.views.geo_tag'),\n (r'^fauth/?$', 'geo.views.callback'),\n (r'^/?$', 'geo.views.home_page'),\n (r'index', 'geo.views.home_page'),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n )\n","repo_name":"richbs/tracked","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"2687213706","text":"# Pythong - Pong in Python\n\nimport pygame, sys\nfrom pygame.locals import *\n\ndef line_line_intersect(x1, y1, x2, y2, x3, y3, x4, y4):\n # Taken from http://paulbourke.net/geometry/lineline2d/\n # Denominator for ua and ub are the same, so store this calculation\n d = float((y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1))\n # n_a and n_b are calculated as seperate values for readability\n n_a = float((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3))\n n_b = float((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3))\n # Make sure there is not a division by zero - this also indicates that\n # the lines are parallel. \n # If n_a and n_b were both equal to zero the lines would be on top of each \n # other (coincidental). This check is not done because it is not \n # necessary for this implementation (the parallel check accounts for this).\n if d == 0:\n return False\n # Calculate the intermediate fractional point that the lines potentially intersect.\n ua = n_a / d\n ub = n_b / d\n # The fractional point will be between 0 and 1 inclusive if the lines\n # intersect. If the fractional calculation is larger than 1 or smaller\n # than 0 the lines would need to be longer to intersect.\n if ua >= 0. and ua <= 1. and ub >= 0. 
and ub <= 1.:\n return [x1 + (ua * (x2 - x1)), y1 + (ua * (y2 - y1))]\n return False\n\ndef resetBall(dir):\n ballRect.topleft = (640/2 - 10, 480/2 - 10)\n ballMotion[1][1] *= -1\n ballMotion[0][0] = 3\n ballMotion[0][1] = 3\n \ndef p2move():\n paddlespeed = 5\n if difficulty > 3:\n paddlespeed *= 2 ** (difficulty - 3)\n \n ypos = ballRect.centery\n xpos = ballRect.centerx\n \n ymagnitude = ballMotion[0][1]\n yvector = ballMotion[1][1] \n \n xmagnitude = ballMotion[0][0]\n \n # simulate ball's motion to the p1 paddle, assuming return\n if difficulty >= 3 and ballMotion[1][0] == -1:\n steps = (ballRect.left - p1paddleRect.right) // xmagnitude\n for i in range(steps):\n ypos += ymagnitude * yvector\n if (yvector == -1 and ypos < ymagnitude) or (yvector == 1 and ypos + ymagnitude > window.get_height()):\n yvector *= -1\n xpos = p1paddleRect.right + 1\n ymagnitude += 1\n xmagnitude += 1\n \n if difficulty >= 2:\n steps = (p2paddleRect.left - (xpos + ballRect.width/2)) // xmagnitude\n for i in range(steps):\n ypos += ymagnitude * yvector\n if (yvector == -1 and ypos < ymagnitude) or (yvector == 1 and ypos + ymagnitude > window.get_height()):\n yvector *= -1\n \n '''\n # every loop is one frame of motion\n while distance > 0:\n # simulate horizontal motion\n distance -= ballMotion[0][0]\n\n # simulate vertical motion\n ypos += ballMotion[0][1] * yvector\n\n # discern when yvector changes\n if (yvector == -1 and ypos < ballMotion[0][1]) or (yvector == 1 and ypos + ballMotion[0][1] > window.get_height()):\n yvector *= -1\n '''\n # print \"Predicted YPos: \" + str(ypos)\n if p2paddleRect.centery < ypos:\n motion = [paddlespeed, ypos - p2paddleRect.centery]\n motion.sort()\n motion = motion[0]\n elif p2paddleRect.centery > ypos:\n motion = [paddlespeed * -1, ypos - p2paddleRect.centery]\n motion.sort()\n motion = motion[1]\n else:\n motion = 0\n return motion\n\ndef moveBall():\n motionx = ballMotion[0][0] * ballMotion[1][0]\n motiony = ballMotion[0][1] * ballMotion[1][1]\n\n ballRect.move_ip(motionx, motiony)\n \npygame.init()\nclock = pygame.time.Clock()\n\nwindow = pygame.display.set_mode((640,480))\npygame.display.set_caption('Pythong')\n\nred = pygame.Color(255,0,0)\nblue = pygame.Color(0,0,255)\ngreen = pygame.Color(0,255,0)\n\nmousex, mousey = 0, 0\n\nfont = pygame.font.Font('freesansbold.ttf', 32)\nmsg = \"Pythong\"\n\np1pos = [32, 480/2 - 50]\np2pos = [window.get_width() - 20 - 32, 480/2 - 50]\nballpos = [640/2 - 10, 480/2 - 10]\n\np1paddleRect = pygame.Rect(p1pos[0], p1pos[1], 20, 100)\np2paddleRect = pygame.Rect(p2pos[0], p2pos[1], 20, 100)\nballRect = pygame.Rect(ballpos[0], ballpos[1], 20, 20)\n\nballMotion = ([3, 3], [1, 1])\n\np1score = 0\np2score = 0\n\n# 1: no prediction, just movement towards ball\n# 2: opponent predicts when the ball is moving toward them\n# 3: opponent predicts constantly\n# opponent's speed limit doubles for every level above 3 (5 pixels per tick at 3, 10 at 4, 20 at 5, etc)\ndifficulty = 2\n\nwhile True:\n window.fill((255,255,255))\n\n pygame.draw.rect(window, red, p1paddleRect)\n pygame.draw.rect(window, blue, p2paddleRect)\n pygame.draw.rect(window, green, ballRect)\n\n msg = \"Difficulty: \" + str(difficulty)\n msgSurface = font.render(msg, False, (160,160,160))\n p1scoretext = font.render(\"P1: \" + str(p1score), False, (60,60,60))\n p2scoretext = font.render(\"P2: \" + str(p2score), False, (60,60,60))\n\n window.blit(msgSurface, (window.get_width() // 2 - msgSurface.get_width() // 2, 20))\n window.blit(p1scoretext, (20,20))\n window.blit(p2scoretext, 
(window.get_width() - p2scoretext.get_width() - 20, 20))\n # may need rectSurface for it to work properly\n\n # process events\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n pygame.event.post(pygame.event.Event(QUIT))\n if event.key == K_UP:\n difficulty += 1\n if event.key == K_DOWN:\n if difficulty > 1:\n difficulty -= 1\n\n # control paddles \n '''\n key = pygame.key.get_pressed()\n if key[K_UP]:\n p1paddleRect.move_ip(0, -5)\n if key[K_DOWN]:\n p1paddleRect.move_ip(0, 5)\n '''\n mousepos = pygame.mouse.get_pos()\n p1paddleRect.centery = mousepos[1]\n # p1paddleRect.centery = ballRect.centery\n\n #move p2 paddle\n p2paddleRect.move_ip(0, p2move())\n\n # move ball\n ballx, bally = ballRect.centerx, ballRect.centery\n moveBall()\n\n if ballRect.bottom >= window.get_height() or ballRect.top < 0:\n ballMotion[1][1] *= -1\n\n # paddle collision testing\n ballEdgeL = pygame.Rect(ballRect.topleft, (1, 20))\n ballEdgeR = pygame.Rect(ballRect.topright, (1, 20))\n # if ballEdgeL.colliderect(p1paddleRect.topright, (1, 100)) or ballEdgeR.colliderect(p2paddleRect.topleft, (1, 100)):\n # if ballRect.colliderect(p1paddleRect.topright, (1, 100)) or ballRect.colliderect(p2paddleRect.topleft, (1, 100)):\n \n #left paddle collision test\n intersect = line_line_intersect(\n ballx - ballRect.width/2, bally, \n ballRect.left, ballRect.centery, \n p1paddleRect.right, p1paddleRect.top, \n p1paddleRect.right, p1paddleRect.bottom\n )\n if intersect:\n ballRect.centery = intersect[1]\n ballRect.left = p1paddleRect.right + 1\n ballMotion[1][0] *= -1\n ballMotion[0][0] += 1\n ballMotion[0][1] += 1\n \n #right paddle collision test\n intersect = line_line_intersect(\n ballx + ballRect.width/2, bally, \n ballRect.right, ballRect.centery, \n p2paddleRect.left, p2paddleRect.top, \n p2paddleRect.left, p2paddleRect.bottom\n )\n if intersect: # or ballRect.colliderect(p2paddleRect):\n ballRect.centery = intersect[1]\n ballRect.right = p2paddleRect.left - 1\n ballMotion[1][0] *= -1\n ballMotion[0][0] += 1\n ballMotion[0][1] += 1\n \n # point scored\n if ballRect.right >= window.get_width():\n # p1 scored a point\n p1score += 1\n resetBall(-1)\n if ballRect.left < 0:\n # p2 scored a point\n p2score += 1\n resetBall(1)\n \n pygame.display.update()\n clock.tick(30)","repo_name":"Goluxas/pythong","sub_path":"pythong.py","file_name":"pythong.py","file_ext":"py","file_size_in_byte":7798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"10641307064","text":"#Import handlers\nfrom handlers.handle_endturn import handle_endturn\nfrom handlers.handle_game_start import handle_game_start\n\n#Add routes and handlers to the dict {\"route_key\": handler_function}\nroute_dic = {\n \"endturn\": handle_endturn,\n \"startgame\": handle_game_start\n}\n\ndef routes(route_key, table, event, connection_id, apig_management_client):\n action = route_dic[route_key]\n action(table, event, connection_id, apig_management_client)\n","repo_name":"jtschuwirth/PeopleSort-Party-Backend","sub_path":"handlers/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16351138730","text":"import wx\nimport wx.lib.mixins.listctrl as listmix\n \n########################################################################\nclass EditableListCtrl(wx.ListCtrl, 
listmix.TextEditMixin):\n ''' TextEditMixin allows any column to be edited. '''\n \n #----------------------------------------------------------------------\n def __init__(self, parent, ID=wx.ID_ANY, pos=wx.DefaultPosition,\n size=wx.DefaultSize, style=0):\n \"\"\"Constructor\"\"\"\n wx.ListCtrl.__init__(self, parent, ID, pos, size, style)\n listmix.TextEditMixin.__init__(self)\n \n########################################################################\nclass MyPanel(wx.Panel):\n \"\"\"\"\"\"\n \n #----------------------------------------------------------------------\n def __init__(self, parent):\n \"\"\"Constructor\"\"\"\n wx.Panel.__init__(self, parent)\n \n rows = [(\"Ford\", \"Taurus\", \"1996\", \"Blue\"),\n (\"Nissan\", \"370Z\", \"2010\", \"Green\"),\n (\"Porche\", \"911\", \"2009\", \"Red\")\n ]\n self.list_ctrl = EditableListCtrl(self, style=wx.LC_REPORT)\n \n self.list_ctrl.InsertColumn(0, \"Make\")\n self.list_ctrl.InsertColumn(1, \"Model\")\n self.list_ctrl.InsertColumn(2, \"Year\")\n self.list_ctrl.InsertColumn(3, \"Color\")\n \n index = 0\n for row in rows:\n self.list_ctrl.InsertStringItem(index, row[0])\n self.list_ctrl.SetStringItem(index, 1, row[1])\n self.list_ctrl.SetStringItem(index, 2, row[2])\n self.list_ctrl.SetStringItem(index, 3, row[3])\n index += 1\n \n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.list_ctrl, 0, wx.ALL|wx.EXPAND, 5)\n self.SetSizer(sizer)\n \n \n########################################################################\nclass MyFrame(wx.Frame):\n \"\"\"\"\"\"\n \n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n wx.Frame.__init__(self, None, wx.ID_ANY, \"Editable List Control\")\n panel = MyPanel(self)\n self.Show()\n \n#----------------------------------------------------------------------\nif __name__ == \"__main__\":\n app = wx.App(False)\n frame = MyFrame()\n app.MainLoop()","repo_name":"sguillia/UrbanBeacon","sub_path":"wxpy/web/web8.py","file_name":"web8.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"72004040714","text":"import numpy as np\nimport scipy.optimize as sopt\n\ndef ogd_projection_scipy(u,T,num_users):\n fun= lambda x: np.sum(np.power(x-u,2))\n lc1=sopt.LinearConstraint(np.eye(num_users),0,1)\n lc2=sopt.LinearConstraint(np.ones(num_users),1,1)\n bnd=sopt.Bounds(0,1)\n x0=np.array([1./num_users]*num_users)\n res=sopt.minimize(fun,x0,constraints=[lc1,lc2],bounds=bnd)\n return res.x\n\ndef madow_sampling(p,k):\n N = len(p)\n y = np.zeros(N)\n P = p.cumsum() ### cumulative probability\n # print('P',P)\n U=np.random.rand() ### uniform random number in [0,1]\n Us = np.arange(k)+U \n # print('Us: ', Us)\n idxs = np.searchsorted(P,Us)\n # print('idxs: ', idxs)\n y[idxs]=1\n return y","repo_name":"AtivJoshi/nofra","sub_path":"scheduling/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"4712533529","text":"import discord\r\nfrom discord.ext import commands\r\nimport traceback\r\nimport asyncio\r\nimport sys\r\nimport Cogs.Checks as k\r\n\r\n\r\nclass CommandErrorHandler(commands.Cog, name=\"Command Backend Helper\"):\r\n \"\"\"The backend helper for the Pogmas Base.\"\"\"\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n @commands.Cog.listener()\r\n async def on_command_error(self, ctx, error):\r\n \"\"\"The event 
triggered when an error is raised while invoking a command.\r\n Parameters\r\n ------------\r\n ctx: commands.Context\r\n The context used for command invocation.\r\n error: commands.CommandError\r\n The Exception raised.\r\n \"\"\"\r\n\r\n # This allows us to call the default error handler at any time\r\n async def ee():\r\n try:\r\n cog_name = ctx.cog.qualified_name\r\n except Exception as e:\r\n cog_name = \"None\"\r\n print(f'Ignoring exception in command {ctx.command}:', file=sys.stderr)\r\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)\r\n py_error = traceback.format_exception(type(error), error, error.__traceback__)\r\n py_error = ''.join(py_error)\r\n channel = self.bot.get_channel(767416350552490025)\r\n await channel.send(f'An error occured with command `{ctx.command}` in cog `{cog_name}`.\\\r\n \\n The command was invoked in <#{ctx.channel.id}> by `{ctx.author}`.\\nThe server this was invoked in was `{ctx.guild}`. \\nJumplink to command execution: {ctx.message.jump_url} . \\nException:')\r\n await channel.send(f'```py\\n{py_error}```')\r\n embed = discord.Embed(title=\"⚠ An error occurred.\", colour=discord.Colour.red(), description=\"An unexpected error has occured, this should never happen. I have sent details to mesub#0556.\")\r\n await ctx.send(embed=embed)\r\n\r\n # This prevents any commands with local handlers being handled here in on_command_error.\r\n if hasattr(ctx.command, 'on_error'):\r\n return\r\n\r\n # This prevents any cogs with an overwritten cog_command_error being handled here.\r\n cog = ctx.cog\r\n if cog:\r\n if cog._get_overridden_method(cog.cog_command_error) is not None:\r\n return\r\n\r\n ignored = (commands.CommandNotFound)\r\n\r\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\r\n # If nothing is found. We keep the exception passed to on_command_error.\r\n error = getattr(error, 'original', error)\r\n\r\n # Anything in ignored will return and prevent anything happening.\r\n if isinstance(error, ignored):\r\n return\r\n\r\n # For this error example we check to see where it came from...\r\n if isinstance(error, discord.ext.commands.errors.MissingAnyRole):\r\n if ctx.command.qualified_name in (\"say\", \"dm\"): # Check if the command being invoked is in the list\r\n await ctx.send('Command execution failed: You do not have any authorised roles.\\\r\n \\nIf you think this is a mistake please contact `mesub#0556`.')\r\n\r\n elif isinstance(error, discord.ext.commands.errors.MemberNotFound):\r\n await ctx.send(\"Command execution failed: I can't find that member in this server. Make sure the ID is correct.\")\r\n\r\n elif isinstance(error, discord.ext.commands.errors.NotOwner):\r\n if ctx.command.qualified_name in ('jishaku py'):\r\n await ctx.send(\"Evaluation of **python** code can only be executed by the bot owner.\")\r\n else:\r\n await ctx.send(\"Command execution failed: You are not the bot owner. (10 points for finding this command though.)\")\r\n\r\n elif isinstance(error, discord.ext.commands.DisabledCommand):\r\n await ctx.send(\"Command execution failed: Command is disabled and cannot be run, sorry.\")\r\n\r\n elif isinstance(error, discord.ext.commands.CommandOnCooldown):\r\n await ctx.send(f\"Woah there {ctx.author.name}, you're going too fast. 
Try again in: {int(ctx.command.get_cooldown_retry_after(ctx))}s.\")\r\n\r\n elif isinstance(error, discord.ext.commands.MaxConcurrencyReached):\r\n if ctx.command.qualified_name in ('cut'):\r\n await ctx.send(\"Command execution failed: All my hands are busy right now; I can only like `3` cuts per server at any one time!\")\r\n\r\n elif isinstance(error, discord.ext.commands.BadArgument):\r\n if ctx.command.qualified_name in (\"say\"):\r\n await ctx.send(\"Command execution failed: Channel not found.\")\r\n\r\n elif isinstance(error, discord.ext.commands.errors.TooManyArguments):\r\n cmd = self.bot.get_command('jishaku py')\r\n if ctx.command == cmd:\r\n await ctx.send(\"It's either: \\n`eval`\\n`jsk py` or\\n`jishaku py`\\nOkay?\")\r\n\r\n elif isinstance(error, discord.ext.commands.MissingRequiredArgument):\r\n await ctx.send(\"Command execution failed: Argument is missing! Correct usage:\")\r\n await ctx.send_help(ctx.command)\r\n\r\n elif isinstance(error, discord.ext.commands.CommandError):\r\n if ctx.command.qualified_name in (\"say\", \"dm\"):\r\n await ctx.send(\"This command doesn't exist. Did you mean `uptime`?\")\r\n elif ctx.command.qualified_name in (\"ttt\", \"ghost\"):\r\n await ctx.send(\"Command execution failed: You are not in the `lvl2` (Boosters or channel members) group of users authorised to use this command.\\\r\n \\nIf you think this is a mistake please contact mesub#0556.\")\r\n\r\n elif isinstance(error, discord.Forbidden):\r\n await ctx.send(\"Command execution failed: I can't do whatever you wanted me to do because I do not have permissions. Give me permissions and try again.\")\r\n\r\n elif isinstance(error, discord.NotFound):\r\n await ctx.send(\"Command execution failed: I can't seem to find that. Ensure the ID is correct and try again.\")\r\n\r\n elif isinstance(error, asyncio.TimeoutError):\r\n await ctx.message.add_reaction('⌛')\r\n else:\r\n # All other Errors not returned come here. 
And we can just print the default TraceBack.\r\n await ee()\r\n\r\n @commands.Cog.listener()\r\n async def on_error(self, error):\r\n print('Ignoring exception here:', file=sys.stderr)\r\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)\r\n py_error = traceback.format_exception(type(error), error, error.__traceback__)\r\n py_error = ''.join(py_error)\r\n channel = self.bot.get_channel(767416350552490025)\r\n await channel.send(f'An error occurred.\\\r\n \\nException:')\r\n await channel.send(f'```py\\n{py_error}```')\r\n\r\n @commands.check(k.lvl5)\r\n @commands.command(name='ee', description=\"For internal testing only.\")\r\n async def force_error(self, ctx):\r\n await ee() #it won't work\r\n\r\ndef setup(bot):\r\n bot.add_cog(CommandErrorHandler(bot))\r\n","repo_name":"mesub7/pogmas","sub_path":"Cogs/Error.py","file_name":"Error.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"17393325422","text":"# The program works for a matrix of any size (not only 3 rows as in the task, although the algorithm could probably be better)\n\nimport sys, itertools\n\ndata = sys.stdin\n\ntestCounter = int(data.readline())\nrows, columns = data.readline().split()\nrows, columns = int(rows), int(columns) # Something is wrong with the input - a problem with the second test \n\nfor test in range(testCounter):\n array = []\n for lines in range(rows):\n array.append(data.readline().split())\n\n band = []\n\n band.append(array[0])\n for number in range(1, rows - 1):\n band.append(array[number][-1])\n arrayReversed = array[-1]\n arrayReversed.reverse()\n band.append(arrayReversed)\n for number in range(2, rows):\n band.append(array[-number][0])\n bandFlatten = list(\n itertools.chain(*band)) # Remember that when using itertools.chain(*data) there MUST BE AN ASTERISK before data!\n\n # print(bandFlatten)\n\n firstElement = bandFlatten[0]\n del bandFlatten[0]\n bandFlatten.append(firstElement)\n\n bandFlatten.reverse() # And now alternately - I iterate - first the mid elements - however many there are, then I take one full list, then the mid elements again\n # and again one full list (and I reversed it to start from the beginning)\n\n # print(bandFlatten)\n\n try:\n for iter in range(1, len(array) - 1):\n array[iter][0] = bandFlatten[iter - 1]\n except:\n pass\n\n if columns == 2:\n array[-1] = bandFlatten[len(array) - 2:len(array[0])]\n else:\n array[-1] = bandFlatten[len(array)-2:len(array[0])+1]\n\n counter = 2\n try:\n for iter in range(len(array) + int(len(array[0]) / 2), len(bandFlatten) - len(array[0])):\n # print(iter)\n # print(bandFlatten[iter])\n array[-counter][-1] = bandFlatten[iter]\n counter += 1\n except:\n pass\n\n array[0] = bandFlatten[len(bandFlatten)-len(array[0]):len(bandFlatten)]\n array[0].reverse()\n\nfor line in array:\n for element in line:\n print(element,end= \" \")\n print()\n","repo_name":"andrimation/SPOJ_PL","sub_path":"Latwe/Tabelki_liczb.py","file_name":"Tabelki_liczb.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"29131718859","text":"import matplotlib.pyplot as plt\nimport math\nimport os\n\nfrom measurements import LatencyMeasurement\n\ncomperator_ref_voltage = 0.2\n\n# path to store measurements\nmeasurement_base_path = \"/home/tim/Bachelorarbeit/Messungen/\"\n\n# 15 degree\n\nmeasurement_path_15degree = measurement_base_path + 
f\"distance_{comperator_ref_voltage}V_15degree/\"\nfiles_15degree = os.listdir(measurement_path_15degree)\nfiles_15degree = sorted(files_15degree)\n\nif \"measurement.txt\" in files_15degree:\n files_15degree.remove(\"measurement.txt\")\n\nprint(files_15degree)\n\nx_distance_15degree = []\ny_reliability_udp_15_degree = []\npayload_size_bytes = 0\ninterval_us = 0\ndistance_cm = 0\nfor file_name in files_15degree:\n l = LatencyMeasurement()\n result = l.read_measurement_from_file(measurement_path_15degree + file_name)\n assert result != None, \"parsing error\"\n\n assert (payload_size_bytes == 0 or payload_size_bytes == l.get_payload_size()), \"measurements with different payload sizes\"\n assert (interval_us == 0 or interval_us == l.get_interval_us()), \"measurements with different interval\"\n payload_size_bytes = l.get_payload_size()\n interval_us = l.get_interval_us()\n distance_cm = l.get_distance_cm()\n\n y_reliability_udp_15_degree.append(l.get_reliability_udp())\n\n x_distance_15degree.append(l.get_distance_cm())\n\n# 120 degree\n\nmeasurement_path_120degree = measurement_base_path + f\"distance_{comperator_ref_voltage}V_120degree/\"\nfiles_120degree = os.listdir(measurement_path_120degree)\nfiles_120degree = sorted(files_120degree)\n\nif \"measurement.txt\" in files_120degree:\n files_120degree.remove(\"measurement.txt\")\n\nx_distance_120degree = []\ny_reliability_udp_120_degree = []\nfor file_name in files_120degree:\n l = LatencyMeasurement()\n result = l.read_measurement_from_file(measurement_path_120degree + file_name)\n assert result != None, \"parsing error\"\n\n assert (payload_size_bytes == 0 or payload_size_bytes == l.get_payload_size()), \"measurements with different payload sizes\"\n assert (interval_us == 0 or interval_us == l.get_interval_us()), \"measurements with different interval\"\n payload_size_bytes = l.get_payload_size()\n interval_us = l.get_interval_us()\n distance_cm = l.get_distance_cm()\n\n y_reliability_udp_120_degree.append(l.get_reliability_udp())\n\n x_distance_120degree.append(l.get_distance_cm())\n\n\n# plot\n# fig, axis = plt.subplots()\nplt.xlabel(\"Distance [in cm]\")\nplt.ylabel(\"Packet Delivery Rate [0..1]\")\n\nplt.plot(\n x_distance_15degree,\n y_reliability_udp_15_degree,\n label=\"15 degree\",\n marker='.'\n)\n\nplt.plot(\n x_distance_120degree,\n y_reliability_udp_120_degree,\n label=\"120 degree\",\n marker='.'\n)\n\nplt.title(f\"UDP Packet Delivery Rate for different distances and LED emitting angles\\nat an interval of {interval_us / 1000} ms, {payload_size_bytes} bytes payload and {comperator_ref_voltage} V comperator referenz voltage\")\n# legend doku https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.legend.html#matplotlib-axes-axes-legend\nplt.legend(loc=0) # best is 0, 7 is upper right corner\n# axis.legend()\n# place outside of plot\n# axis.legend(bbox_to_anchor=(1,1), loc=\"upper left\")\nplt.grid()\n\nplt.show()\n","repo_name":"timrdmr/ba_ip_over_vlc","sub_path":"measurements/plots/reliability_and_distance.py","file_name":"reliability_and_distance.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"74088238155","text":"from bsb.morphologies import Morphology, Branch\nfrom bsb._encoding import EncodedLabels\nfrom bsb.exceptions import MorphologyRepositoryError, MissingMorphologyError\nfrom bsb.storage.interfaces import (\n MorphologyRepository as IMorphologyRepository,\n StoredMorphology,\n)\nfrom .resource import 
Resource, handles_handles, HANDLED\nimport numpy as np\nimport json\nimport itertools\n\n_root = \"/morphologies\"\n\n\nclass MetaEncoder(json.JSONEncoder):\n \"\"\"\n Encodes morphology metadata to JSON\n \"\"\"\n\n def default(self, o):\n if isinstance(o, np.ndarray):\n arr = o.tolist()\n arr.append(\"__ndarray__\")\n return arr\n else:\n super().default(o)\n\n\ndef meta_object_hook(obj):\n for k, v in obj.items():\n if isinstance(v, list) and v[-1] == \"__ndarray__\":\n v.pop()\n obj[k] = np.array(v)\n return obj\n\n\nclass MorphologyRepository(Resource, IMorphologyRepository):\n def __init__(self, engine):\n super().__init__(engine, _root)\n\n def select(self, *selectors):\n if not selectors:\n return []\n all_loaders = self.all()\n selected = []\n for selector in selectors:\n selector.validate(all_loaders)\n selected.extend(filter(selector.pick, all_loaders))\n return selected\n\n @handles_handles(\"r\")\n def preload(self, name, meta=None, handle=HANDLED):\n return StoredMorphology(\n name,\n self._make_loader(name, meta),\n meta if meta is not None else self.get_meta(name, handle=handle),\n )\n\n def _make_loader(self, name, meta):\n def loader():\n return self.load(name, preloaded_meta=meta)\n\n return loader\n\n @handles_handles(\"r\")\n def get_meta(self, name, handle=HANDLED):\n all_meta = self.get_all_meta(handle=handle)\n try:\n meta = all_meta[name]\n except KeyError:\n raise MissingMorphologyError(\n f\"`{self._engine.root}` contains no morphology named `{name}`.\"\n ) from None\n return meta\n\n @handles_handles(\"r\")\n def get_all_meta(self, handle=HANDLED):\n if \"morphology_meta\" not in handle:\n return {}\n return json.loads(handle[\"morphology_meta\"][()], object_hook=meta_object_hook)\n\n @handles_handles(\"a\")\n def update_all_meta(self, meta, handle=HANDLED):\n all_meta = self.get_all_meta(handle=handle)\n all_meta.update(meta)\n self.set_all_meta(all_meta, handle=handle)\n\n @handles_handles(\"a\")\n def set_all_meta(self, all_meta, handle=HANDLED):\n if \"morphology_meta\" in handle:\n del handle[\"morphology_meta\"]\n handle.create_dataset(\n \"morphology_meta\", data=json.dumps(all_meta, cls=MetaEncoder)\n )\n\n @handles_handles(\"r\")\n def all(self, handle=HANDLED):\n meta = self.get_all_meta(handle=handle)\n return [\n self.preload(name, meta=meta[name], handle=handle) for name in self.keys()\n ]\n\n @handles_handles(\"r\")\n def has(self, name, handle=HANDLED):\n return f\"{self._path}/{name}\" in handle\n\n @handles_handles(\"r\")\n def load(self, name, preloaded_meta=None, handle=HANDLED):\n try:\n root = handle[f\"{self._path}/{name}/\"]\n except Exception:\n raise MissingMorphologyError(\n f\"`{self._engine.root}` contains no morphology named `{name}`.\"\n ) from None\n data = root[\"data\"][()]\n points = data[:, :3].copy()\n radii = data[:, 3].copy()\n # Turns the forced JSON str keys back into ints\n labelsets = {\n int(k): v for k, v in json.loads(root[\"data\"].attrs[\"labels\"]).items()\n }\n labels = EncodedLabels(\n len(points), buffer=data[:, 4].astype(int), labels=labelsets\n )\n prop_names = root[\"data\"].attrs[\"properties\"]\n props = dict(zip(prop_names, np.rollaxis(data[:, 5:], 1)))\n parents = {-1: None}\n branch_id = itertools.count()\n roots = []\n ptr = 0\n for nptr, p in root[\"graph\"][()]:\n radii[ptr:nptr]\n labels[ptr:nptr]\n {k: v[ptr:nptr] for k, v in props.items()}\n branch = Branch(\n points[ptr:nptr],\n radii[ptr:nptr],\n labels[ptr:nptr],\n {k: v[ptr:nptr] for k, v in props.items()},\n )\n parent = parents.get(p, None)\n 
parents[next(branch_id)] = branch\n if parent:\n parent.attach_child(branch)\n else:\n roots.append(branch)\n ptr = nptr\n if preloaded_meta is None:\n meta = self.get_meta(name, handle=handle)\n else:\n meta = preloaded_meta\n morpho = Morphology(roots, meta, shared_buffers=(points, radii, labels, props))\n assert morpho._check_shared(), \"Morpho read with unshareable buffers\"\n return morpho\n\n @handles_handles(\"a\")\n def save(self, name, morphology, overwrite=False, update_meta=True, handle=HANDLED):\n me = handle[self._path]\n if self.has(name):\n if overwrite:\n self.remove(name)\n else:\n root = self._engine.root\n raise MorphologyRepositoryError(\n f\"A morphology called '{name}' already exists in `{root}`.\"\n )\n root = me.create_group(name)\n # Optimizing a morphology goes through the same steps as what is required\n # to save it to disk; plus, now the user's object is optimized :)\n morphology.optimize()\n branches = morphology.branches\n n_prop = len(morphology._shared._prop)\n data = np.empty((len(morphology), 5 + n_prop))\n data[:, :3] = morphology._shared._points\n data[:, 3] = morphology._shared._radii\n data[:, 4] = morphology._shared._labels\n for i, prop in enumerate(morphology._shared._prop.values()):\n data[:, 5 + i] = prop\n dds = root.create_dataset(\"data\", data=data)\n dds.attrs[\"labels\"] = json.dumps(\n {k: list(v) for k, v in morphology._shared._labels.labels.items()}\n )\n dds.attrs[\"properties\"] = [*morphology._shared._prop.keys()]\n graph = np.empty((len(branches), 2))\n parents = {None: -1}\n ptr = 0\n for i, branch in enumerate(morphology.branches):\n ptr += len(branch)\n graph[i, 0] = ptr\n graph[i, 1] = parents[branch.parent]\n parents[branch] = i\n root.create_dataset(\"graph\", data=graph, dtype=int)\n morphology.meta[\"name\"] = name\n if len(morphology._shared._points):\n morphology.meta[\"ldc\"] = np.min(morphology._shared._points, axis=0)\n morphology.meta[\"mdc\"] = np.max(morphology._shared._points, axis=0)\n else:\n morphology.meta[\"ldc\"] = morphology.meta[\"mdc\"] = np.nan\n if update_meta:\n all_meta = self.get_all_meta(handle=handle)\n all_meta[name] = morphology.meta\n self.set_all_meta(all_meta)\n return StoredMorphology(name, lambda: morphology, morphology.meta)\n\n @handles_handles(\"a\")\n def remove(self, name, handle=HANDLED):\n try:\n del handle[f\"{self._path}/{name}\"]\n except KeyError:\n raise MorphologyRepositoryError(f\"'{name}' doesn't exist.\") from None\n","repo_name":"dbbs-lab/bsb-hdf5","sub_path":"bsb_hdf5/morphology_repository.py","file_name":"morphology_repository.py","file_ext":"py","file_size_in_byte":7462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"69881452237","text":"import serial\r\nimport time\r\nimport speech_recognition as sr\r\n\r\nr=sr.Recognizer()\r\nser=serial.Serial(\"com5\",9600)\r\nwhile True:\r\n \r\n with sr.Microphone() as source:\r\n print(\"listening...\")\r\n audio=r.listen(source,phrase_time_limit=5)\r\n try:\r\n text=r.recognize_google(audio)\r\n print(format(text))\r\n except:\r\n print(\"uncomprehensive\")\r\n text=\"nothing\"\r\n if (\"lights\" in text):\r\n try:\r\n ser.open()\r\n time.sleep(2.5)\r\n except:\r\n pass\r\n ser.write(b\"1\")\r\n ser.close()\r\n 
\r\n\r\n","repo_name":"marr02092/Voice-Commanded-Light-Switch","sub_path":"python-EncenderLuz.py","file_name":"python-EncenderLuz.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19873938436","text":"from setuptools import setup, find_packages\n\nDESCRIPTION = 'Transforms HAML to Tornado templates or Underscore.js'\n\nwith open('README.rst') as f:\n LONG_DESCRIPTION = f.read()\n\nVERSION = '0.1.0'\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n]\n\nsetup(name='haiku',\n version=VERSION,\n packages=find_packages(),\n author='Stanislav Vishnevskiy',\n author_email='vishnevskiy@gmail.com',\n url='https://github.com/vishnevskiy/haiku',\n license='MIT',\n include_package_data=True,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n platforms=['any'],\n classifiers=CLASSIFIERS,\n test_suite='tests',\n)","repo_name":"vishnevskiy/haiku","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"40840788516","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport model_utils\nfrom CONSTANTS import *\n\ndef plot_matching_results(img, part_matches_map, all_joint_detections_map, pred_pafs):\n C,S = ['g','r','y','w','c','m','k'], ['o','+','h','.','s','P','p','*','H','x','D','d','8']\n \n fig, axes = plt.subplots(1,len(SKELETON), figsize=(70,5))\n for i, ax in enumerate(axes.flat): \n ax.axis('off')\n part_pair = SKELETON[i]\n part_pair_tuple = (part_pair[0], part_pair[1])\n \n if(len(part_matches_map[part_pair_tuple])):\n ax.text(10,10, keypoint_labels[part_pair[0]]+'->'+keypoint_labels[part_pair[1]], va='top', color=\"white\", fontsize=12)\n ax.imshow(img)\n\n px, py = pred_pafs[(2*i)].numpy(), pred_pafs[(2*i)+1].numpy()\n mask = np.logical_or(px, py)\n ax.imshow(mask, 'jet', interpolation='none', alpha=0.5)\n\n\n for matched_pt_pair in part_matches_map[part_pair_tuple]:\n pts = np.array([matched_pt_pair[0], matched_pt_pair[1]])\n ax.plot(pts[:,0], pts[:,1], C[np.random.randint(0,len(C))]+S[np.random.randint(0,len(S))], markersize=12) \n ax.plot(pts[:,0], pts[:,1], 'w-', linewidth=2)\n ax.text(pts[0,0], pts[0,1], round(matched_pt_pair[2], 2), color='white')\n\n detected_parts_1 = all_joint_detections_map[part_pair[0]]\n detected_parts_2 = all_joint_detections_map[part_pair[1]]\n ax.plot(detected_parts_1[:,0], detected_parts_1[:,1], 'w+')\n ax.plot(detected_parts_2[:,0], detected_parts_2[:,1], 'w+')\n else: \n ax.figsize=(0,0)\n\n plt.tight_layout()\n \ndef plot_heatmaps(img, masks, idx_to_keypoint_type=idx_to_keypoint_type, figsize=(16,12)):\n fig, axes = plt.subplots(5, 4, figsize=figsize)\n sz = img.size[0]\n \n for i,ax in enumerate(axes.flat):\n ax.axis('off')\n if(i<17):\n ax.imshow(img)\n joint_type = idx_to_keypoint_type[i]\n peaks = model_utils.get_peaks(masks[i], nms_window=int(sz*30/368))\n ax.text(10,10, joint_type, va='top', color=\"white\", fontsize=12)\n ax.imshow(masks[i], 'jet', interpolation='none', alpha=0.5)\n ax.plot(peaks[:,0], peaks[:,1], 'w+')\n if(i==17):\n joint_type = \"background\"\n ax.text(10,10, joint_type, va='top', color=\"white\", fontsize=12)\n 
ax.imshow(img)\n ax.imshow(masks[i], 'jet', interpolation='none', alpha=0.5)\n\n plt.tight_layout()\n\ndef plot_pafs(img, pafs, joint_pairs=part_pairs, figsize=(16,12)):\n fig, axes = plt.subplots(5, 5, figsize=figsize)\n \n for i,ax in enumerate(axes.flat):\n ax.axis('off')\n if(i'+joint_pairs[i][1], va='top', color=\"white\", fontsize=12)\n ax.imshow(img)\n mask = np.logical_or(pafs[2*i], pafs[(2*i) + 1]).astype(int)\n ax.imshow(mask, 'jet', interpolation='none', alpha=0.7)\n plt.tight_layout()\n\n'''\ndef plot_paf_maps_from_annotations(img, keypoints, joint_pairs=part_pairs, keypoint_type_to_idx=keypoint_type_to_idx, n_items=19, figsize=(16,12), limb_width=5):\n fig, axes = plt.subplots(5, 4, figsize=figsize)\n \n for i,ax in enumerate(axes.flat):\n ax.axis('off')\n if(i<19):\n ax.text(10,10, joint_pairs[i][0]+'->'+joint_pairs[i][1], va='top', color=\"white\", fontsize=12)\n joint_pair_paf,_ = model_utils.calculate_paf_mask(img, joint_pairs[i], keypoints, keypoint_type_to_idx, limb_width)\n ax.imshow(img)\n ax.imshow(joint_pair_paf.transpose(), 'jet', interpolation='none', alpha=0.5)\n plt.tight_layout()\n\ndef plot_heat_maps_from_annotations(img, anns, n_items=17, figsize=(16,12), sigma=7):\n fig, axes = plt.subplots(5, 4, figsize=figsize)\n img = np.array(img)\n fliped_img = img.transpose((1,0,2))\n kps = model_utils.get_keypoints_from_annotations(anns)\n \n for i,ax in enumerate(axes.flat):\n ax.axis('off')\n if(i<17):\n joint_type = idx_to_keypoint_type[i]\n ax.text(10,10, joint_type, va='top', color=\"white\", fontsize=12)\n mask,_ = model_utils.calculate_heatmap(img, i, kps, sigma)\n ax.imshow(img)\n ax.imshow(mask.transpose(), 'jet', interpolation='none', alpha=0.5)\n plt.tight_layout()\n'''","repo_name":"thisistahir/PyTorch-OpenPose-Realtime-Multi-Person-2D-Pose-Estimation-using-Part-Affinity-Fields","sub_path":"plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"22115173803","text":"\"\"\"\nAs of Fall 2022, this algorithm is deprecated. We are currently using the Optimistic Predictive Cost algorithm described\nin optimal_min_cost.py.\n\nImplementation of the Rapidly-Exploring Random Tree algorithm using precision arm configurations as nodes.\n\nGiven a start configuration and end configuration received from the ECE subteam, and a set of obstacles received from\nthe object detection algorithm, the algorithm attempts to find a collision-free path from the start configuration to the\nend configuration. 
This is done by growing a tree pseudo-randomly, and returning a set of configurations which is then\npassed to the ECE subteam.\n\nWritten by Simon Kapen '24 and Alison Duan '23, Spring 2021.\nDijkstra algorithm adapted from Fanjin Zeng on github, 2019 (gist.github.com/Fnjn/58e5eaa27a3dc004c3526ea82a92de80).\n\"\"\"\n\nimport math\nimport numpy as np\nfrom random import random\nfrom collections import deque\nimport time\nimport random\nfrom .collision_detection import arm_is_colliding_prisms, arm_is_colliding\nfrom .optimizers import path_optimizer_two, path_optimizer_four, checkPath\nfrom .util import line\nfrom .arm_node import Node\nfrom .arm_graph import Graph\nfrom matplotlib.widgets import Button\nfrom .arm_plot import plot_3d\nfrom .obstacle_generation import random_start_environment\nfrom .util.angles import true_angle_distances_arm\nimport sys\n\n\ndef nearest(g: Graph, target_end_effector_pos):\n \"\"\" Finds the nearest node to the input node in Cartesian space, \n by end effector position.\n\n Returns:\n An instance of the nearest node to the input node, as well as \n its index in the hashtable of the input graph.\n \"\"\"\n # print(g.end_effectors)\n nearest_node_index = np.argmin(np.sum(np.square(g.end_effectors - target_end_effector_pos), axis=1))\n\n return g.nodes[nearest_node_index], nearest_node_index\n\n\ndef steer(rand_angles, near_angles, step_size):\n \"\"\"Generates a new node a certain distance along the path between the nearest node to the random node.\n\n Args:\n rand_angles: The angles of the randomly generated node.\n near_angles: The angles of the node closest to the randomly generated node.\n step_size: The distance from the nearest node for the new node to be generated.\n\n Returns:\n An instance of Node representing the new node of the tree.\n \"\"\"\n\n dirn = true_angle_distances_arm(np.array(near_angles), np.array(rand_angles))\n length = np.linalg.norm(dirn)\n dirn = (dirn / length) * min(step_size, length)\n\n new_angles = (near_angles[0] + dirn[0], near_angles[1] + dirn[1], near_angles[2] + dirn[2],\n near_angles[3] + dirn[3], near_angles[4] + dirn[4])\n return Node(new_angles)\n\n\ndef extend_heuristic(g: Graph, rand_node: Node, step_size: float, threshold: int, obstacles):\n \"\"\"Extends RRT T from the node closest to the end node, in the direction of rand_node. 
If the node\n being extended from has failed too many times (generates an colliding configuration or does not get closer\n to the end node), it is removed from the ranking.\n\n Arguments:\n g: A rrtgraph.Graph instance.\n rand_node: An Node instance representing the randomly generated node.\n step_size: The distance from the nearest node for the new node to be generated.\n threshold: The maximum amount of extension failures per node.\n obstacles: An array of float arrays representing obstacles.\n \"\"\"\n near_node = g.ranking[0]\n new_node = steer(rand_node.angles, near_node.angles, step_size)\n\n if g.dist_to_end(new_node) < g.dist_to_end(near_node) and not arm_is_colliding_prisms(new_node, obstacles) \\\n and new_node.valid_configuration():\n nearest_to_new, nearest_to_new_idx = nearest(g, new_node.end_effector_pos)\n\n if arm_is_colliding_prisms(new_node, obstacles):\n raise Exception(\"Adding a colliding node\")\n g.add_vex(new_node, nearest_to_new)\n # dist = line.distance(new_node.end_effector_pos, nearest_to_new.end_effector_pos)\n # g.add_edge(newidx, nearest_to_new_idx, dist)\n return new_node, g.node_to_index[new_node]\n\n near_node.inc_fail_count()\n near_idx = g.node_to_index[near_node]\n if near_node.fail_count > threshold:\n g.ranking.remove(near_node)\n\n parent = g.get_parent(near_idx)\n parent.inc_fail_count()\n\n return near_node, near_idx\n\n\n# Possibly archive/delete. This is unused right now\ndef valid_configuration(angles):\n \"\"\" Returns true if the given angle configuration is a valid one. \"\"\"\n link_lengths = [.222, .3]\n # for a given a1, an a2 is always valid. However, the a3 is not necessarily valid:\n # use spherical coordinates for validity\n if link_lengths[0] * math.cos(angles[1]) < 0:\n return False\n\n if link_lengths[1] * math.cos(angles[2]) + link_lengths[0] * math.cos(angles[1]) < 0:\n return False\n\n return True, [(angles[0] + math.pi) % math.pi, (angles[1] + math.pi) % math.pi, \\\n (angles[2] + math.pi) % math.pi, (angles[3] + math.pi) % math.pi, \\\n (angles[4] + math.pi) % math.pi]\n\n\n\n\ndef rrt(start_angles, end_angles, obstacles, n_iter=300, radius=0.02, angle_threshold=1, stepSize=.05,\n heuristic_threshold=10):\n \"\"\"Uses the RRT algorithm to determine a collision-free path from start_angles to end_angles.\n\n Args:\n start_angles: An array of length 5 representing the initial angle configuration of the arm.\n end_angles: An array of length 5 representing the desired angle configuration.\n obstacles: An array of float arrays representing cube obstacles.\n n_iter: Maximum number of iterations to find a path.\n radius: Maximum distance between the final end effector position and the second-to-last position in a path.\n angle_threshold: Maximum distance between the final angles and the second-to-last angles in a path.\n stepSize: Distance between nodes in the graph.\n heuristic_threshold: Maximum number of times a new node can fail to expand from any given node in the graph.\n\n Returns:\n An instance of rrtgraph.Graph containing a list of instances of Node, and a path of Node instances between\n the start and end nodes, if successful.\n A boolean indicator representing whether a path was found.\n \"\"\"\n G = Graph(start_angles, end_angles)\n\n for i in range(n_iter):\n rand_node = Node(None)\n if arm_is_colliding_prisms(rand_node, obstacles):\n continue\n\n if i % 2 == 0 or not G.ranking:\n nearest_node, nearest_node_index = nearest(G, rand_node.end_effector_pos)\n if nearest_node is None:\n continue\n\n new_node = 
steer(rand_node.angles, nearest_node.angles, stepSize)\n\n if arm_is_colliding_prisms(new_node, obstacles) or not new_node.valid_configuration():\n continue\n\n nearest_to_new, _ = nearest(G, new_node.end_effector_pos)\n\n G.add_vex(new_node, nearest_to_new)\n # dist = line.distance(new_node.end_effector_pos, nearest_to_new.end_effector_pos)\n # G.add_edge(newidx, nearest_to_new_idx, dist)\n\n else:\n new_node, newidx = extend_heuristic(G, rand_node, stepSize, heuristic_threshold, obstacles)\n if arm_is_colliding_prisms(new_node, obstacles):\n continue\n\n end_eff_dist_to_goal = line.distance(new_node.end_effector_pos, G.end_node.end_effector_pos)\n angle_dist_to_goal = np.linalg.norm(true_angle_distances_arm(new_node.angles, G.end_node.angles))\n\n if end_eff_dist_to_goal < radius and not G.success:\n if arm_is_colliding_prisms(G.end_node, obstacles):\n raise Exception(\"Adding a colliding node\")\n desired_position = G.end_node.end_effector_pos\n if angle_dist_to_goal > angle_threshold:\n # tries to get a closer arm configuration to the second-to-last arm configration with inverse kinematics\n G.end_node = Node.from_point(desired_position, start_config=new_node.angles)\n\n endidx = G.add_vex(G.end_node, new_node)\n # G.add_edge(newidx, endidx, end_eff_dist_to_goal)\n G.success = True\n print(\"Iterations:\", i)\n break\n return G\n\n\ndef dijkstra(G, target_node=None):\n \"\"\"\n Dijkstra algorithm for finding shortest path from start position to end.\n \"\"\"\n srcIdx = G.node_to_index[G.start_node]\n dstIdx = G.node_to_index[G.end_node]\n\n if target_node is not None:\n dstIdx = G.node_to_index[target_node]\n\n # build dijkstra\n nodes = list(G.neighbors.keys())\n dist = {node: float('inf') for node in nodes}\n prev = {node: None for node in nodes}\n dist[srcIdx] = 0\n\n while nodes:\n curNode = min(nodes, key=lambda node: dist[node])\n nodes.remove(curNode)\n if dist[curNode] == float('inf'):\n break\n\n for neighbor, cost in G.neighbors[curNode]:\n newCost = dist[curNode] + cost\n if newCost < dist[neighbor]:\n dist[neighbor] = newCost\n prev[neighbor] = curNode\n\n # retrieve path\n path = deque()\n curNode = dstIdx\n while prev[curNode] is not None:\n path.appendleft(G.nodes[curNode])\n curNode = prev[curNode]\n path.appendleft(G.nodes[curNode])\n\n return list(path)\n\n\ndef random_start_environment(num_obstacles, bounds, obstacle_size=.2):\n \"\"\"Generates a start environment for a run of RRT.\n\n Returns:\n An Node representing a valid start configuration.\n An Node representing a valid end configuration.\n A set of [num_obstacles] obstacles that do not collide with the start or end configurations.\n \"\"\"\n\n random_start_node = Node(configuration=None)\n random_end_node = Node.from_point([random.uniform(bounds[0][0], bounds[0][1]),\n random.uniform(bounds[1][0], bounds[1][1]),\n random.uniform(bounds[2][0], bounds[2][1])],\n random_start_node.angles)\n\n max_tries = 10\n tries = 1\n while not random_end_node.valid_configuration():\n random_end_node = Node.from_point([random.uniform(bounds[0][0], bounds[0][1]),\n random.uniform(bounds[1][0], bounds[1][1]),\n random.uniform(bounds[2][0], bounds[2][1])], random_start_node.angles)\n tries += 1\n if tries > max_tries:\n return None, None, None\n\n current_obstacles = obstacle_generation.generate_random_obstacles(num_obstacles, bounds,\n max_side_length=obstacle_size)\n while arm_is_colliding_prisms(random_end_node, current_obstacles):\n current_obstacles = obstacle_generation.generate_random_obstacles(num_obstacles, bounds,\n 
max_side_length=obstacle_size)\n\n while arm_is_colliding_prisms(random_start_node, current_obstacles) or not random_start_node.valid_configuration():\n random_start_node = Node(None)\n\n #print(\"start angles:\", random_start_node.angles)\n #print(\"end angles:\", random_end_node.angles)\n #print(\"obstacles:\", current_obstacles)\n\n return random_start_node, random_end_node, current_obstacles\n\ndef rrt_graph_list(num_trials, n_iter, radius, step_size, threshold, bounds, num_obstacles=1):\n \"\"\" Generates a list of RRT graphs. \"\"\"\n print(\"RUNNING {t} TRIALS OF RRT WITH {o} OBSTACLES\\n\".format(t=num_trials, o=num_obstacles))\n graphs = []\n generated_obstacles = []\n for i in range(0, num_trials):\n trial_start_time = time.time()\n\n print(\"Trial: \", i + 1)\n current_start_node, current_end_node, random_obstacles = random_start_environment(num_obstacles, bounds)\n if current_start_node is None:\n continue\n if not current_start_node.valid_configuration():\n raise Exception(\"Approved an invalid start node\")\n\n if arm_is_colliding_prisms(current_end_node, random_obstacles):\n raise Exception(\"Approved a colliding node\")\n\n G = rrt(current_start_node.angles, current_end_node.angles, random_obstacles, n_iter=n_iter, radius=radius,\n stepSize=step_size,\n heuristic_threshold=threshold)\n\n if G.success:\n print(\"SUCCESS\")\n else:\n print(\"FAIL\")\n\n print(\"Trial time:\", time.time() - trial_start_time)\n print(\"\")\n graphs.append(G)\n generated_obstacles.append(random_obstacles)\n\n return graphs, generated_obstacles\ndef test():\n total_time_two = 0\n total_time_four = 0\n start = time.time()\n runs = 1000\n for i in range(1000):\n print('run')\n n_iter = 2000\n radius = .01\n stepSize = .35\n threshold = 2\n num_obstacles = 1\n bounds = [[-.05, .05], [-.05, .05], [-.05, .05]]\n # start_node = RRTNode([7.4883959080999105, -0.9802836168249124, 2.7119532197892307, 2.690692578970348, 1.4327288698060625])\n # end_node = RRTNode([0.80873032, 0.58529255 , 1.57082885 , 2.15507481 ,-0.80873048])\n start_node, end_node, obstacles, _ = random_start_environment(num_obstacles, bounds)\n location = random.uniform(.1, .1)\n prism = [location, location, location, .2, .2, .2]\n obstacles = [prism]\n start_time = time.time()\n print(\"RRT started\")\n\n try:\n G = rrt(start_node.angles,\n end_node.angles,\n obstacles,\n n_iter, radius, stepSize=stepSize)\n size1 = 0\n if G.success:\n path = dijkstra(G)\n size1 = len(path)\n runTime = time.time() - start_time\n\n optimize_start_time2 = time.time()\n path2 = path_optimizer_two(path, prism)\n size2 = len(path2)\n if checkPath(path2, prism):\n optimizeTime2 = time.time() - optimize_start_time2\n total_time_two += optimizeTime2\n\n optimize_start_time4 = time.time()\n path4 = path_optimizer_four(path, prism)\n size4 = len(path4)\n if checkPath(path4, prism):\n optimizeTime4 = time.time() - optimize_start_time4\n total_time_four += optimizeTime4\n except Exception:\n print(\"Exception thrown\")\n runs -= 1\n\n full_runtime = time.time() - start\n print(\"total time for 2s\", total_time_two)\n print(\"total time for 4s\", total_time_four)\n print(\"total runs\", runs)\n print(\"time per run for 2s\", total_time_two/runs)\n print(\"time per run for 4s\", total_time_four/runs)\n print(\"full run time:\", full_runtime)\n\n\ndef multiple_runs():\n n_iter = 1000\n radius = .07\n stepSize = .35\n threshold = 2\n num_obstacles = 1\n bounds = [[-.4, .4], [0, .4], [-.4, .4]]\n # start_node = RRTNode([7.4883959080999105, -0.9802836168249124, 
2.7119532197892307, 2.690692578970348, 1.4327288698060625])\n # end_node = RRTNode([0.80873032, 0.58529255 , 1.57082885 , 2.15507481 ,-0.80873048])\n start_node, end_node, obstacles = random_start_environment(num_obstacles, bounds)\n print(\"Start:\", start_node.angles)\n print(\"End:\", end_node.angles)\n print(\"Obstacles:\", obstacles)\n location = random.uniform(.1, .1)\n prism = [location, location, location, .2, .2, .2]\n obstacles = [prism]\n G = rrt(start_node.angles,\n end_node.angles,\n obstacles,\n n_iter, radius, stepSize=stepSize)\n size1 = 0\n if G.success:\n path = dijkstra(G)\n size1 = len(path)\n print(\"Original Path Size:\", size1)\n plot_3d(G,path,obstacles,None)\n bestPath = optimize(path, obstacles)\n print(\"Optimal Path Size:\", len(bestPath))\n plot_3d(G,bestPath,obstacles,None)\n else:\n print(\"Path not found. :(\")\n plot_3d(G, [start_node, end_node], obstacles, None)\n\nif __name__ == '__main__':\n #test()\n multiple_runs()\n # n_iter = 1000\n # radius = .07\n # stepSize = .35\n # threshold = 2\n # num_obstacles = 1\n # bounds = [[-.4, .4], [0, .4], [-.4, .4]]\n # # start_node = RRTNode([7.4883959080999105, -0.9802836168249124, 2.7119532197892307, 2.690692578970348, 1.4327288698060625])\n # # end_node = RRTNode([0.80873032, 0.58529255 , 1.57082885 , 2.15507481 ,-0.80873048])\n # start_node, end_node, obstacles = random_start_environment(num_obstacles, bounds)\n # location = random.uniform(.1, .1)\n # prism = [location, location, location, .2, .2, .2]\n # obstacles = [prism]\n # start_time = time.time()\n # print(\"RRT started\")\n #\n # G = rrt(start_node.angles,\n # end_node.angles,\n # obstacles,\n # n_iter, radius, stepSize=stepSize)\n # size1 = 0\n # if G.success:\n # path = dijkstra(G)\n # size1 = len(path)\n # runTime = time.time() - start_time\n # optimize_start_time2 = time.time()\n # path2 = path_optimizer_two(path, prism)\n # size2 = len(path2)\n # if (path2 == path):\n # print('No optimizations could be made')\n # if checkPath(path2, prism):\n # optimizeTime2 = time.time() - optimize_start_time2\n # print('Optimization Time for 2 step', optimizeTime2)\n # print('Generation Runtime', runTime)\n # print(\"New Path is valid, Size\", size2)\n # print('Original Path size:', size1)\n # plot_3d(G, path, obstacles, path2)\n # plot_3d(G, None, obstacles, path2)\n # else:\n # print(\"Optimized path encounters collisions, and was discarded.\")\n # plot_3d(G, path, obstacles, path2)\n #\n # optimize_start_time4 = time.time()\n # path4 = path_optimizer_four(path, prism)\n # size4 = len(path4)\n # if path4 == path:\n # print(\"bruh they are the same\")\n # if checkPath(path4, prism):\n # optimizeTime4 = time.time() - optimize_start_time4\n # print('Optimization Time for 4 step', optimizeTime4)\n # print('Generation Runtime', runTime)\n # print(\"New Path is valid, Size\", size4)\n # print('Original Path size:', size1)\n # plot_3d(G, path, obstacles, path4)\n # plot_3d(G, None, obstacles, path4)\n # else:\n # print(\"Optimized path encounters collisions, and was discarded.\")\n # plot_3d(G, path, obstacles, path4)\n #\n # else:\n # print(\"Path not found. 
:(\")\n # plot_3d(G, [start_node, end_node], obstacles, None)\n","repo_name":"cornell-cup/r2-object_detection","sub_path":"c1c0_object_detection/kinematics/pure_rrt.py","file_name":"pure_rrt.py","file_ext":"py","file_size_in_byte":18834,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"698986288","text":"from .screen import Screen\nfrom pygame.image import load\nfrom pygame.font import Font\nfrom utils import Network\n\n\nclass MainMenu(Screen):\n def __init__(self, pg_screen, screen_size):\n Screen.__init__(self, pg_screen, screen_size)\n self.pg_screen = pg_screen\n self.screen_size = screen_size\n self.network = Network()\n self.font = Font(None, 30)\n self.sound_on = True\n self.music_on = True\n\n # load the assets\n self.logo = load(\"assets/logo.png\").convert_alpha()\n self.main_menu = load(\"assets/mainmenu.png\").convert_alpha()\n self.register_login = load(\"assets/register_login.png\").convert_alpha()\n self.buttons = load(\"assets/buttons.png\").convert_alpha()\n\n def draw(self):\n Screen.draw(self)\n\n # draw the main menu\n self.pg_screen.blit(self.logo, (5, 60))\n self.pg_screen.blit(self.register_login, (150, 5))\n self.pg_screen.blit(self.main_menu, (60, 250))\n s = 64 - self.sound_on * 64\n self.pg_screen.blit(self.buttons, (0, 415), (s, 0, 64, 64))\n m = 64 - self.music_on * 64\n self.pg_screen.blit(self.buttons, (64, 415), (m, 192, 64, 64))\n\n # if user is logged in, display their name\n username = self.network._cache[\"username\"]\n if username:\n self._draw_left_align_text(username, (10, 20))\n\n def _draw_left_align_text(self, text, top_left, color=(0, 0, 0)):\n text = self.font.render(text, True, color)\n text_rect = text.get_rect()\n text_rect.topleft = top_left\n self.pg_screen.blit(text, text_rect)\n\n def mouse_down(self, pos):\n if self.pos_between(pos, (69, 258), (240, 287)):\n return {\"screen\": \"game_screen\", \"play_sound\": \"click\"}\n elif self.pos_between(pos, (71, 301), (243, 330)):\n return {\"screen\": \"highscores_screen\", \"play_sound\": \"click\"}\n elif self.pos_between(pos, (72, 345), (242, 379)):\n return {\"screen\": \"help_screen\", \"play_sound\": \"click\"}\n elif self.pos_between(pos, (153, 58), (303, 93)):\n return {\"screen\": \"login_screen\", \"play_sound\": \"click\"}\n elif self.pos_between(pos, (154, 7), (308, 47)):\n return {\"screen\": \"register_screen\", \"play_sound\": \"click\"}\n elif self.pos_between(pos, (0, 415), (64, 479)):\n self.sound_on = not self.sound_on\n return {\"sound\": self.sound_on, \"play_sound\": \"click\"}\n elif self.pos_between(pos, (64, 415), (128, 479)):\n self.music_on = not self.music_on\n return {\"music\": self.music_on, \"play_sound\": \"click\"}\n","repo_name":"HetorusNL/mr_nom_python","sub_path":"screens/main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"21093129823","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# Language Version: 2.7+\n# Last Modified: 2022-01-09 03:29:09\nfrom __future__ import unicode_literals, division, absolute_import, print_function\n\n\"\"\"\n\n\"\"\"\n\n__all__ = []\n__author__ = \"北京秘银科技 赵文平(email:wenping_zhao@126.com tel:13511028685)\"\n__version__ = \"0.0.1\"\n\nimport re\n\ncipin = dict()\nwith open('../../p1.txt') as fd:\n for line in fd:\n line = line.strip().split()\n cipin[line[0]] = int(line[2])\n\nids_dict = dict()\nwith open('ids.txt') as fd:\n 
for line in fd:\n line = line.strip().split()\n ids_dict[line[2]] = line[1]\n\nids_pattern = sorted(ids_dict.keys(), key=len, reverse=True)\n#print('⿰言羊' in ids_pattern)\n\nIDS = dict()\nwith open('../../ids/IDS.TXT') as fd:\n for line in fd:\n if not line.startswith('U+'):\n continue\n line = line.strip().split()\n IDS[line[1]] = line\n\ndef rm_ids(ctx):\n # 替换unicode ids形式\n ids = False\n for ch in ctx:\n if 0x2FF0 <= ord(ch) <= 0x2FFB:\n ids = True\n break\n if not ids:\n return ctx\n\n for ids in ids_pattern:\n if ids in ctx:\n ctx = ctx.replace(ids, ids_dict.get(ids, ' '))\n\n return ctx\n\n# print(rm_ids('言辭⿰言羊j'))\n# print(rm_ids('⿰山叵⿰山我'))\n\nidsv = ids_dict.values()\nfor zi in cipin:\n if zi not in idsv and zi in IDS:\n print(IDS[zi][0], zi, IDS[zi][2], cipin[zi])\n\ndef main():\n ''''''\n\ndef test():\n ''''''\n\nif __name__ == \"__main__\":\n # main()\n test()\n\n","repo_name":"zhaowenping/cbeta","sub_path":"idx/ids2.py","file_name":"ids2.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"28"} +{"seq_id":"24135490399","text":"import network\r\nimport ubinascii\r\n\r\nwlan=network.WLAN(network.STA_IF)\r\nwlan.active(True)\r\na=wlan.scan()\r\nprint(\"List of all Access points around......\")\r\nfor e in a:\r\n b=e[1]\r\n c=ubinascii.hexlify(b)\r\n d=c.decode('utf-8')\r\n print(e[0]+\"Their mac Address is:\"+str(d))\r\n","repo_name":"turbotrail/Micropy-ESP32-AP-MAC-sniffer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"19221624198","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 03 20:57:12 2017\n\n@author: monarang\n\"\"\"\n\nfrom sklearn.ensemble import GradientBoostingClassifier,GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.svm import LinearSVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import preprocessing\nimport pandas as pd\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.grid_search import GridSearchCV\nimport matplotlib.pyplot as plt\n \nroot_path = \"C:\\\\Users\\\\monarang\\\\Documents\\\\Projects\\\\Kaggle\\\\deloitte\\\\data_hackthon_2017\\\\\"\ntrain_data = pd.read_csv(root_path+\"HackathonRound1.csv\")\nupdate_train_data = pd.read_csv(root_path+\"DataUpdate_Hackathon.csv\")\n\ntrain_data = pd.concat([train_data,update_train_data])\ntrain_data.to_csv(root_path+'concat_data.csv', index=False,header=True)\n\n#split data in 2 seperate file one for open and one for close\n\nopen_price = pd.DataFrame({ 'Date': train_data['Date'], 'Share': train_data['Share Names'],\n 'Open': train_data['Open Price'] })\nopen_price.to_csv(root_path+\"open.csv\", index=False)\n\nclose_price = pd.DataFrame({ 'Date': train_data['Date'], 'Share': train_data['Share Names'],\n 'close': train_data['Close Price'] })\nclose_price.to_csv(root_path+\"close.csv\", index=False)\n\ndateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d')\ndata_open = pd.read_csv(root_path+\"open.csv\", index_col='Date',parse_dates=[0])\ndata_close = pd.read_csv(root_path+\"close.csv\", index_col='Date',parse_dates=[0])\n\n#print data.head()\n#data\n#model = ARIMA(data['Open'], order=(5,1,0))\n#model_fit = model.fit(disp=0)\n#print(model_fit.summary())\n\n#data.drop(['Share'],axis=1,inplace=True)\n#data=data.values\n\n# convert an array of values into a dataset matrix\ndef create_dataset(dataset, 
look_back=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)\n\ndef create_seperate_file(data,column):\n for i in range(1,51):\n df=data[data['Share']=='Share'+str(i)][column]\n df.to_csv(root_path+column+'_Share'+str(i), index=False,header=False)\n \n \ncreate_seperate_file(data_open,'Open')\ncreate_seperate_file(data_close,'close')\n\ndef shift_save(file_name,window):\n shift_file = pd.read_csv(root_path+file_name, index_col='Date',parse_dates=[0])\n \n","repo_name":"emohit/kaggle","sub_path":"deloitte/data_hackthon_2017/Code/intial_code.py","file_name":"intial_code.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"72984801354","text":"import copy\n\nio_folder_path = './wrn_14_101_bsz_1/'\n\ngraph_file_name = io_folder_path + 'graph.dot'\nweights_file_name = io_folder_path + 'weights.txt'\ncosts_file_name = io_folder_path + 'costs.txt'\nmemory_file_name = io_folder_path + 'memory.txt'\nvar_nodes_file_name = io_folder_path + 'var_nodes.txt'\nref_nodes_file_name = io_folder_path + 'ref_nodes.txt'\nno_ops_file_name = io_folder_path + 'no_ops.txt'\nmetis_input_file_name = io_folder_path + 'metis_graph.txt'\nmetis_parts_file_name = io_folder_path + 'metis_graph.txt.part.4'\nvanilla_file_name = io_folder_path + 'vanilla_cleaned.place'\noutput_file_name = '/home/nahmad/placement.place'\noutput2_file_name = io_folder_path + 'wrn.csv'\n\ncomm_latency = 25\ncomm_transfer_rate_rec = 1.0 / (130000)\n\n\ndef clean_line(node_string):\n return (node_string.replace('\\n', '')).replace('\\r', '').replace(\n '\"', '').replace('\\t', '').replace(' ', '')\n\n\ndef clean_line_keep_spaces(node_string):\n return node_string.replace('\\n',\n '').replace('\\r',\n '').replace('\"',\n '').replace('\\t', '')\n\n\nnumber_of_edgse = 0\ngraph = {}\ndag = {}\nrev_dag = {}\nnodes_ranks_map = {}\nrev_nodes_ranks_map = {}\nnumber_of_nodes = 0\nnodes_weights = {}\nnodes_memories = {}\nedges_costs = {}\ntensors_sizes = {}\n\nwith open(graph_file_name, 'r') as f:\n for line in f:\n line = clean_line(line)\n splits = line.split('->')\n if len(splits) > 1:\n number_of_edgse += 1\n if splits[0] not in dag:\n dag[splits[0]] = []\n if splits[0] not in graph:\n graph[splits[0]] = []\n nodes_ranks_map[splits[0]] = number_of_nodes + 1\n rev_nodes_ranks_map[number_of_nodes + 1] = splits[0]\n number_of_nodes += 1\n if splits[1] not in rev_dag:\n rev_dag[splits[1]] = []\n if splits[1] not in graph:\n graph[splits[1]] = []\n nodes_ranks_map[splits[1]] = number_of_nodes + 1\n rev_nodes_ranks_map[number_of_nodes + 1] = splits[1]\n number_of_nodes += 1\n graph[splits[0]].append(splits[1])\n dag[splits[0]].append(splits[1])\n graph[splits[1]].append(splits[0])\n rev_dag[splits[1]].append(splits[0])\n\nwith open(weights_file_name, 'r') as f:\n for line in f:\n line = clean_line(line)\n splits = line.split('::')\n if (len(splits) > 1):\n nodes_weights[splits[0]] = splits[1]\n\nwith open(memory_file_name, 'r') as f:\n for line in f:\n line = clean_line(line)\n splits = line.split('::')\n if (len(splits) > 1):\n nodes_memories[splits[0]] = int(splits[1]) / 1000\n\nref_nodes = {}\nwith open(var_nodes_file_name, 'r') as f:\n for line in f:\n ref_nodes[clean_line(line)] = 1\n\nvar_nodes = {}\nwith open(ref_nodes_file_name, 'r') as f:\n for line in f:\n 
var_nodes[clean_line(line)] = 1\n\nno_op_nodes = {}\nwith open(no_ops_file_name, 'r') as f:\n for line in f:\n no_op_nodes[clean_line(line)] = 1\n\ncntt = 0\nwith open(costs_file_name, 'r') as f:\n for line in f:\n line = clean_line(line)\n splits = line.split('::')\n if (len(splits) > 1):\n node_name = splits[0]\n cntt += 1\n if node_name in graph:\n if node_name not in edges_costs:\n edges_costs[node_name] = {}\n tensors_sizes[node_name] = int(splits[1]) / 1000\n for adj_node in graph[node_name]:\n if adj_node not in edges_costs:\n edges_costs[adj_node] = {}\n if adj_node in no_op_nodes:\n edges_costs[node_name][adj_node] = comm_latency\n edges_costs[adj_node][node_name] = comm_latency\n else:\n edges_costs[node_name][adj_node] = int(\n float(splits[1]) * comm_transfer_rate_rec +\n comm_latency)\n edges_costs[adj_node][node_name] = comm_latency\n\nwith open(metis_input_file_name, 'w') as f:\n f.write(str(number_of_nodes) + ' ' + str(number_of_edgse) + ' 11\\n')\n for node in graph.keys():\n line_to_write = str(\n nodes_weights[node]) if node in nodes_weights else '1'\n for adj_node in graph[node]:\n line_to_write += ' ' + str(nodes_ranks_map[adj_node])\n line_to_write += ' ' + str(edges_costs[node][adj_node]) if node in edges_costs and adj_node \\\n in edges_costs[node] else str(comm_latency)\n\n line_to_write += '\\n'\n f.write(line_to_write)\n\nnodes_parts = {}\n\"\"\"i = 0\nwith open(metis_parts_file_name, 'r') as f:\n for line in f:\n nodes_parts[rev_nodes_ranks_map[i + 1]] = int(clean_line(line))\n i += 1 \"\"\"\n\ncollocation_groups = {}\nchanged = {}\nfor node_name in graph.keys():\n if node_name not in var_nodes or node_name in changed:\n continue\n\n to_visit = []\n node_part = 0 #nodes_parts[node_name]\n to_visit.append(node_name)\n collocation_group = [node_name]\n while len(to_visit) > 0:\n current_node_name = to_visit.pop(0)\n changed[current_node_name] = True\n for adj_node in graph[current_node_name]:\n if adj_node in ref_nodes:\n nodes_parts[adj_node] = node_part\n collocation_group.append(adj_node)\n for rev_adj_node in graph[adj_node]:\n if (rev_adj_node not in changed\n and rev_adj_node in var_nodes):\n nodes_parts[rev_adj_node] = node_part\n to_visit.append(rev_adj_node)\n changed[rev_adj_node] = True\n collocation_group.append(rev_adj_node)\n\n for node in collocation_group:\n collocation_groups[node] = copy.deepcopy(collocation_group)\n\nvanilla_placement = {}\nwith open(vanilla_file_name, 'r') as f:\n for line in f:\n line = clean_line_keep_spaces(line).lower()\n splits = line.split(' ')\n vanilla_placement[splits[0]] = int(splits[1])\n\"\"\" with open(output_file_name, 'w') as f:\n for node, part in nodes_parts.items():\n if node in vanilla_placement and vanilla_placement[node] == -1:\n f.write(node + ' ' + str(-1) + '\\n')\n else:\n f.write(node + ' ' + str(part) + '\\n') \"\"\"\n\nwith open(output2_file_name, 'w') as f:\n f.write(\n \"Id,(Outgoing) node,(Incoming) node,Colocation nodes,#tensorSize,#operations,RAM storage,Device constraint,name\\n\"\n )\n for node in dag.keys():\n line_to_write = str(nodes_ranks_map[node]) + ','\n if node in dag:\n for adj_node in dag[node]:\n line_to_write += str(nodes_ranks_map[adj_node]) + ';'\n line_to_write += ','\n if node in rev_dag:\n for rev_adj_node in rev_dag[node]:\n line_to_write += str(nodes_ranks_map[rev_adj_node]) + ';'\n line_to_write += ','\n if node in collocation_groups:\n for collocated_node in collocation_groups[node]:\n line_to_write += str(nodes_ranks_map[collocated_node]) + ';'\n line_to_write += ','\n if 
node in tensors_sizes:\n line_to_write += str(tensors_sizes[node])\n else:\n line_to_write += '1'\n line_to_write += ','\n if node in nodes_weights:\n line_to_write += str(nodes_weights[node])\n else:\n line_to_write += '1'\n line_to_write += ','\n if node in nodes_memories:\n line_to_write += str(nodes_memories[node])\n else:\n line_to_write += '1'\n line_to_write += ','\n if node in vanilla_placement and vanilla_placement[node] == -1:\n line_to_write += 'CPU,'\n else:\n line_to_write += 'NO,'\n line_to_write += str(node) + '\\n'\n f.write(line_to_write)\n","repo_name":"ParCoreLab/pardnn","sub_path":"src/graph_to_cp.py","file_name":"graph_to_cp.py","file_ext":"py","file_size_in_byte":8035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"18560572837","text":"import re\nimport urllib.request\nimport urllib.error\n\ndef crawl(url,page):\n req=urllib.request.Request(url)\n req.add_header(\"User-Agent\",\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063\")\n try:\n # the timeout is an argument of urlopen, not an attribute of the Request object\n content=str(urllib.request.urlopen(req,timeout=5).read().decode('utf-8'))\n except urllib.error.URLError as e:\n if hasattr(e,\"code\"):\n print(str(page)+\" page, \"+str(e.code))\n return\n\n match1='<div class=\"content\">.+?</div>
'\n content1=re.findall(match1,content,re.S)\n contentList=[]\n for i in content1:\n contentList.extend(re.findall('.+?',i,re.S))\n\n fhandle=open('Z:\\\\Downloads\\\\practise\\\\urllib\\\\qiushi.txt','a',encoding='utf-8')\n for i in contentList:\n fhandle.write(i.replace('','**')+'\\n\\n')\n fhandle.close()\n\n\ndef lastPage(url):\n\treq=urllib.request.Request(url)\n\treq.add_header(\"User-Agent\",\"Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/52.0.2743.116Safari/537.36Edge/15.15063\")\n\thtml=str(urllib.request.urlopen(req).read())\n\tmatch_lastPage='.+?'\n\tpage=re.findall(match_lastPage,html)\n\ti=len(page)\n\tlastPage=re.findall('\\d+',page[i-1])\n\treturn lastPage[0]\n\n\npageNum=lastPage('https://www.qiushibaike.com/1hr/page/1')\n\nfor page in range(1,int(pageNum)):\n\turl=\"https://www.qiushibaike.com/1hr/page/\"+str(page)\n\tcrawl(url,page)\nprint(\"done\")\n","repo_name":"cb0207/QiuShi-Baike","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"7105458003","text":"# import plotting tools\nfrom pathlib import Path\nimport numpy as np\nimport re\nfrom scipy.stats import mode\nfrom matplotlib import rcParams, cm, gridspec\nfrom matplotlib.lines import Line2D\nfrom cycler import cycler\nimport matplotlib.pyplot as plt\nfrom RPM_CRN_Plotting_Functions import *\n\nclass RPM_CRN_Figure:\n\n def __init__(self, FigSizeFormat=\"EPSL\", FigWidth_Inches=0., AspectRatio=16./9.):\n\n \"\"\"\n This function creates a default matplotlib figure object\n\n Args:\n FigSizeFormat: the figure size format according to journal for which the figure is intended\n values are geomorphology,ESURF, ESPL, EPSL, JGR, big\n default is ESURF\n \n AspectRatio: The shape of the figure determined by the aspect ratio, default is 16./9.\n\n Returns:\n matplotlib figure object\n\n Author: MDH\n\n \"\"\"\n self.Figure = None\n self.Axes = None\n \n self.CreateFigure(FigSizeFormat, FigWidth_Inches, AspectRatio)\n \n def CreateFigure(self, FigSizeFormat=\"EPSL\", FigWidth_Inches=0., AspectRatio=16./9.):\n \n \"\"\"\n This function creates a default matplotlib figure object\n\n Args:\n FigSizeFormat: the figure size format according to journal for which the figure is intended\n values are geomorphology,ESURF, ESPL, EPSL, JGR, big\n default is ESURF\n \n AspectRatio: The shape of the figure determined by the aspect ratio, default is 16./9.\n\n Returns:\n matplotlib figure object\n\n Author: MDH\n\n \"\"\"\n\n # set figure sizes (in inches) based on format\n if FigWidth_Inches > 0:\n FigWidth_Inches = FigWidth_Inches\n elif FigSizeFormat == \"geomorphology\":\n FigWidth_Inches = 6.25\n elif FigSizeFormat == \"big\":\n FigWidth_Inches = 16\n elif FigSizeFormat == \"small\":\n FigWidth_Inches = 3.3\n elif FigSizeFormat == \"ESURF\":\n FigWidth_Inches = 4.92\n elif FigSizeFormat == \"ESPL\":\n FigWidth_Inches = 7.08\n elif FigSizeFormat == \"EPSL\":\n FigWidth_Inches = 7.48\n elif FigSizeFormat == \"EPSL_small\":\n FigWidth_Inches = 3.74\n elif FigSizeFormat == \"JGR\":\n FigWidth_Inches = 6.6\n else:\n FigWidth_Inches = 4.92126\n \n # Set up fonts for plots\n rcParams['font.family'] = 'sans-serif'\n rcParams['font.sans-serif'] = ['arial']\n rcParams['font.size'] = 10\n rcParams['text.usetex'] = True\n \n # customise the colorcycle for plotting\n rcParams['axes.prop_cycle'] = cycler(color=cm.Dark2.colors)\n \n self.Figure = 
plt.figure(figsize=(FigWidth_Inches,FigWidth_Inches/AspectRatio),facecolor=None)\n\n def PlotProfileAndConcentrationFigure(self, ProfileFile, ConcentrationsFile, Colour=None, Symbol=\"-\", Legend=False, Label=None):\n\n # if no figure make the default\n if not self.Figure:\n print(self.Figure)\n self.CreateFigure()\n\n # if axes not created yet add axes as list for subplots and organise labels\n if not self.Axes:\n \n # set up the gridspec\n GridSpec = gridspec.GridSpec(ncols=2, nrows=2, width_ratios=[2, 1], height_ratios=[1,1])\n\n # ax0 for profiles, no x axis, y axis on the left\n ax0 = self.Figure.add_subplot(GridSpec[0,0])\n ax0.set_ylabel(\"Elevation (m)\")\n #ax0.xaxis.set_visible(False)\n ax0.spines['right'].set_visible(False)\n ax0.spines['top'].set_visible(False)\n #ax0.spines['bottom'].set_visible(False)\n ax0.text(0.05, 0.9, \"(a)\", transform=ax0.transAxes)\n\n # ax1 for concentrations, y axis on the right\n ax1 = self.Figure.add_subplot(GridSpec[1,0])\n #ax1.set_yscale(\"log\")\n ax1.set_xlabel(\"Distance (m)\")\n ax1.set_ylabel(\"Concentration (at g$^{-1}$)\")\n ax1.spines['right'].set_visible(False)\n ax1.spines['top'].set_visible(False)\n ax1.text(0.05, 0.9, \"(b)\", transform=ax1.transAxes)\n \n # ax2 axis for time series of retreat rates\n ax2 = self.Figure.add_subplot((GridSpec[0,1]))\n ax2.set_yscale(\"log\")\n ax2.xaxis.set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax2.yaxis.set_ticks_position('right')\n ax2.yaxis.set_label_position('right')\n ax2.set_ylabel(\"Cliff Retreat Rate (m yr$^{-1}$)\")\n ax2.spines['bottom'].set_visible(False)\n ax2.invert_xaxis()\n ax2.text(0.1, 0.9, \"(c)\", transform=ax2.transAxes)\n \n # ax3 for time series of maximum concentrations\n ax3 = self.Figure.add_subplot((GridSpec[1,1]))\n #ax3.set_yscale(\"log\")\n ax3.set_xlabel(\"Time (k yrs BP)\")\n ax3.set_ylabel(\"Max Intertidal Concentration (at g$^{-1}$)\")\n ax3.spines['left'].set_visible(False)\n ax3.spines['top'].set_visible(False)\n ax3.yaxis.set_ticks_position('right')\n ax3.yaxis.set_label_position('right')\n ax3.invert_xaxis()\n ax3.text(0.1, 0.9, \"(d)\", transform=ax3.transAxes)\n \n\n self.Axes = [ax0, ax1, ax2, ax3]\n\n # read the profile file\n Times, SeaLevels, Z, X = ReadShoreProfile(ProfileFile)\n LastX = X[-1]\n \n # find cliff and normalise\n CliffPositions = np.array([mode(EachX[EachX > 1])[0] for EachX in X])\n CliffPositions = np.array([Element for Each in CliffPositions for Element in Each])\n CliffIndices = [np.argmin(np.abs(ThisX-CliffPosition)) for ThisX, CliffPosition in zip(X,CliffPositions)]\n \n # check for condition where initial condition doesnt have a cliff\n if len(CliffPositions) == len(Times)-1:\n Times = Times[1:]\n \n LastX -= CliffPositions[-1]\n #self.Axes[0].set_xlim(0, CliffPosition)\n\n # plot final result on ax0\n Line, = self.Axes[0].plot(LastX, Z, ls=Symbol, color=Colour, label=Label)\n\n # copy the colour for other plots\n Colour = Line.get_color()\n LineStyles = ['-', '--', ':','-.']\n \n # read the concentrations\n Times2, dX, Concentrations = ReadConcentrationData(ConcentrationsFile)\n \n # populate lines for legend\n LegendLines = []\n LegendLabels = []\n \n for i, key in enumerate(Concentrations.keys()):\n \n N = Concentrations[key][-1]\n XConc = np.arange(0,len(N))*dX\n CliffIndex = np.argmin(np.abs(XConc-CliffPositions[-1]))\n XConc -= XConc[CliffIndex]\n self.Axes[1].plot(XConc[0:CliffIndex], N[0:CliffIndex], color=Colour, ls=LineStyles[i])\n LegendLines.append(Line2D([0], [0], 
color=\"grey\", ls=LineStyles[i]))\n result = [split for split in re.split('([0-9]+)', key) if split != \"\"] #lstrip('0123456789')\n Mass = result[0]\n Element = result[1]\n LegendLabels.append(\"$^{\"+Mass+\"}$\"+Element)\n \n # calculate max concentrations\n MaxN = []\n \n for Time, CliffPosition, N in zip(Times, CliffPositions, Concentrations[key]):\n #print(Time)\n XConc = np.arange(0,len(N))*dX\n CliffIndex = np.argmin(np.abs(XConc-CliffPosition))\n MaxN.append(np.max(N[0:CliffIndex]))\n \n self.Axes[3].plot(Times/1000., MaxN, ls=LineStyles[i], color=Colour)\n \n # calculate cliff retreat rates\n RetreatRates = -np.diff(CliffPositions)/np.diff(Times)\n self.Axes[2].plot(Times[1:]/1000,RetreatRates,'-', color=Colour)\n \n # make sure axes line up\n xmin, xmax = self.Axes[0].get_xlim()\n self.Axes[1].set_xlim(xmin, xmax)\n \n # make sure axes line up\n xmin, xmax = self.Axes[2].get_xlim()\n self.Axes[3].set_xlim(xmin, xmax)\n \n # make sure axes line up\n ymin, ymax = self.Axes[1].get_ylim()\n self.Axes[3].set_ylim(ymin, ymax)\n \n # create or update legends\n if Legend:\n #self.Axes[0].legend(loc=4)\n self.Axes[1].legend(LegendLines,LegendLabels)\n\n def PlotProfileEvolutionFigure(self, ProfileFile, Symbol=\"-\", TimeInterval=1000.):\n\n \"\"\"\n \"\"\"\n # if no figure make the default\n if not self.Figure:\n print(self.Figure)\n self.CreateFigure()\n\n # if axes not created yet add axes as list for subplots and organise labels\n if not self.Axes:\n \n # ax0 for profiles, no x axis, y axis on the left\n ax0 = self.Figure.add_subplot(111) #GridSpec[0,0])\n ax0.set_ylabel(\"Elevation (m)\")\n #ax0.xaxis.set_visible(False)\n ax0.spines['right'].set_visible(False)\n ax0.spines['top'].set_visible(False)\n #ax0.spines['bottom'].set_visible(False)\n self.Axes = [ax0]\n\n # read the profile file\n Times, SeaLevels, Z, X = ReadShoreProfile(ProfileFile)\n StartTime = Times[0]\n EndTime = Times[-1]\n Time = StartTime\n OldIndex = -9999\n \n print(StartTime, EndTime)\n\n if StartTime > EndTime:\n TimeInterval *= -1\n \n # set colour map\n ColourMap = cm.bone\n\n while Time >= EndTime:\n \n print(Time)\n \n # Find time\n Index = np.argmin(np.abs(Time-Times))\n \n if Index == OldIndex:\n break\n \n OldIndex = Index\n\n # plot final result on ax0\n Label = str(int(Time)) + \" years\"\n Colour = ColourMap(Time/np.max([StartTime,EndTime]))\n self.Axes[0].plot(X[Index], Z, ls=Symbol, color=Colour, label=Label)\n Time += TimeInterval\n \n # create or update legends\n self.Axes[0].legend() \n\n def SaveFig(self, Outputfilename):\n self.Figure.savefig(Outputfilename)\n\nif __name__ == \"__main__\":\n \n # set the workspace\n Folder = Path(\"../\")\n \n # set the project name\n Project = \"TestProject\"\n \n # set the location and name of the output figures\n FigureFile = Folder / \"Evolution.png\"\n FigureFile2 = Folder / \"ProfileConcentrations.png\"\n \n # define model output files\n ProfileFile = Folder / (Project+\"_ShoreProfile.xz\")\n ConcentrationsFile = Folder / (Project+\"_Concentrations.xn\")\n \n # create and populate the figures then save\n EvolutionFigure = RPM_CRN_Figure()\n EvolutionFigure.PlotProfileEvolutionFigure(ProfileFile)\n EvolutionFigure.SaveFig(FigureFile)\n \n MyFigure = RPM_CRN_Figure(FigWidth_Inches=11.)\n MyFigure.PlotProfileAndConcentrationFigure(ProfileFile, ConcentrationsFile, Label=\"test\", Legend=True)\n 
MyFigure.SaveFig(FigureFile2)","repo_name":"mdhurst1/Rocky-Profile-Model","sub_path":"plotting_functions/RPM_CRN_Figure.py","file_name":"RPM_CRN_Figure.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"3094574021","text":"import numpy as np\nimport torch\nimport trimesh\nimport trimesh.proximity\nfrom scipy.spatial.transform import Rotation as R\nimport pickle\nfrom tqdm import tqdm\n\nfrom core.utils.amano import ManoLayer as AManoLayer\n\nmano_layer = AManoLayer(cuda = True)\n\ndef get_stage_fitting_target(\n ref_flag_1:torch.Tensor,\n ref_flag_2:torch.Tensor,\n ref_1:torch.Tensor,\n ref_2:torch.Tensor,\n seq_1:torch.Tensor,\n seq_2:torch.Tensor,\n n_flag:torch.Tensor,\n stage_length,\n objs,\n obj_traj:torch.Tensor\n):\n # Compute coefs regarding to the two ends of the stage\n loss_coef_1 = torch.zeros_like(n_flag)\n loss_coef_2 = torch.zeros_like(n_flag)\n len_frame = n_flag.shape[1]\n part_cnt = n_flag.shape[0]\n for frame_i in range(stage_length):\n for part_i in range(n_flag.shape[0]):\n for finger_i in range(n_flag.shape[2]):\n if ref_flag_1[part_i, finger_i] < 0.5 and ref_flag_2[part_i, finger_i] < 0.5:\n continue\n elif ref_flag_2[part_i, finger_i] < 0.5:\n loss_coef_1[part_i, frame_i, finger_i] = 1\n elif ref_flag_1[part_i, finger_i] < 0.5:\n loss_coef_2[part_i, frame_i, finger_i] = 1\n else:\n loss_coef_1[part_i, frame_i, finger_i] = 1-frame_i/stage_length\n loss_coef_2[part_i, frame_i, finger_i] = frame_i/stage_length\n\n n_flag = torch.where(n_flag < 0.5, 0, 1)\n loss_coef_1 *= n_flag\n loss_coef_2 *= n_flag\n\n # Compute optimize target\n def get_target(seq, ref):\n tip_target = torch.zeros((part_cnt, len_frame, 5, 3), dtype=torch.float32).cuda()\n joint_target = torch.zeros((part_cnt, len_frame, 5, 4, 3), dtype=torch.float32).cuda()\n for part_i in range(part_cnt):\n for finger_i in range(5):\n tip_target_pf = seq[part_i, :, finger_i, 12:15] + ref[part_i, finger_i, 0:3]\n joint_target_pf = seq[part_i, :, finger_i, 0:12].reshape(-1, 4, 3)\n tip_target[part_i, :, finger_i] = torch.from_numpy(R.from_rotvec(obj_traj[part_i, :, 3:6].cpu()).apply(tip_target_pf.cpu())).cuda() + obj_traj[part_i, :, 0:3]\n for s in range(4):\n joint_target[part_i, :, finger_i, s] = torch.from_numpy(R.from_rotvec(obj_traj[part_i, :, 3:6].cpu()).apply(joint_target_pf[:, s].cpu())).cuda()\n return tip_target, joint_target\n\n tip_target_1, joint_target_1 = get_target(seq_1, ref_1)\n tip_target_2, joint_target_2 = get_target(seq_2, ref_2)\n\n return tip_target_1, joint_target_1, loss_coef_1, tip_target_2, joint_target_2, loss_coef_2\n\ndef fit_joint(\n tip_target:torch.Tensor,\n joint_target:torch.Tensor,\n loss_coef:torch.Tensor,\n init_trans,\n init_pose\n):\n len_frame = tip_target.shape[0]\n part_cnt = tip_target.shape[1]\n trans_t = torch.zeros((len_frame-1, 3), device='cuda')\n pose_t = torch.zeros((len_frame-1, 48), device='cuda')\n trans_t[:] = init_trans\n pose_t[:] = init_pose\n trans_t.requires_grad_(True)\n pose_t.requires_grad_(True)\n opt = torch.optim.Adam([trans_t, pose_t], lr=0.05)\n\n # Optimization\n for i in tqdm(range(2000)):\n opt.zero_grad()\n trans = torch.cat([init_trans.unsqueeze(0), trans_t], dim=0)\n pose = torch.cat([init_pose.unsqueeze(0), pose_t], dim=0)\n mano_output = mano_layer(pose[:, :3], pose[:, 3:])\n joints = mano_output.joints - mano_output.joints[:, :1] + trans.unsqueeze(1)\n loss_t, loss_j = 0, 0\n for part_i in range(part_cnt):\n for finger_i in 
range(5):\n finger_p = torch.cat([joints[:, :1], joints[:, finger_i*4+1:finger_i*4+5]], dim=1)\n tip_p = finger_p[:, 4]\n joint_p = finger_p[:, :4] - finger_p[:, 4:]\n joint_p = joint_p / torch.norm(joint_p, dim=2, keepdim=True)\n loss_t += torch.sum(torch.sum(torch.square(tip_p-tip_target[:, part_i, finger_i]), dim=1) * loss_coef[:, part_i, finger_i])\n loss_j += torch.sum(torch.sum(torch.square(joint_p-joint_target[:, part_i, finger_i]), dim=(1,2)) * loss_coef[:, part_i, finger_i])\n # finger_p = torch.cat([joints[:, :1], joints[:, finger_i*4+1:finger_i*4+5]], dim=1)\n # tip_p = finger_p[:, 4]\n # joint_p = finger_p[:, :4]\n # loss_t += torch.sum(torch.sum(torch.square(tip_p-tip_target[:, part_i, finger_i]), dim=1) * loss_coef[:, part_i, finger_i])\n # loss_j += torch.sum(torch.sum(torch.square(joint_p-joint_target[:, part_i, finger_i]), dim=(1,2)) * loss_coef[:, part_i, finger_i])\n pose_smooth = torch.sum(torch.square(pose[:-1] - pose[1:]))\n wrist_smooth = torch.sum(torch.square(trans[:-1] - trans[1:]))\n loss = 50*loss_t + loss_j + pose_smooth*0.05 + wrist_smooth*1000\n loss.backward()\n opt.step()\n\n trans = torch.cat([init_trans.unsqueeze(0), trans_t], dim=0)\n pose = torch.cat([init_pose.unsqueeze(0), pose_t], dim=0)\n return trans.detach(), pose.detach()\n\n\ndef FitSequence(\n ref_flag:torch.Tensor,\n ref:torch.Tensor,\n seq_1:torch.Tensor,\n seq_2:torch.Tensor,\n n_flag:torch.Tensor,\n stage_length:torch.Tensor,\n init_trans,\n init_pose,\n objs,\n obj_traj:torch.Tensor\n):\n '''\n ref_flag: stage, part, finger\n ref: stage, part, finger, position + normal\n seq_1: stage, part, frame, finger, joints position\n seq_2: stage, part, frame, finger, joints position\n n_flag: stage, part, frame, finger\n objs: part, trimesh_obj\n obj_traj: stage, part, frame, 6D traj\n '''\n\n stage_cnt = seq_1.shape[0]\n part_cnt = seq_1.shape[1]\n len_frame = seq_1.shape[2]\n tip_target_1 = torch.zeros((stage_cnt, part_cnt, len_frame, 5, 3), dtype=torch.float32).cuda()\n joint_target_1 = torch.zeros((stage_cnt, part_cnt, len_frame, 5, 4, 3), dtype=torch.float32).cuda()\n loss_coef_1 = torch.zeros((stage_cnt, part_cnt, len_frame, 5), dtype=torch.float32).cuda()\n tip_target_2 = torch.zeros((stage_cnt, part_cnt, len_frame, 5, 3), dtype=torch.float32).cuda()\n joint_target_2 = torch.zeros((stage_cnt, part_cnt, len_frame, 5, 4, 3), dtype=torch.float32).cuda()\n loss_coef_2 = torch.zeros((stage_cnt, part_cnt, len_frame, 5), dtype=torch.float32).cuda()\n\n for stage_i in range(seq_1.shape[0]):\n (\n tip_target_1[stage_i],\n joint_target_1[stage_i],\n loss_coef_1[stage_i],\n tip_target_2[stage_i],\n joint_target_2[stage_i],\n loss_coef_2[stage_i]\n ) = get_stage_fitting_target(\n ref_flag[stage_i-1] if stage_i != 0 else torch.zeros_like(ref_flag[stage_i]),\n ref_flag[stage_i],\n ref[stage_i-1] if stage_i != 0 else torch.zeros_like(ref[stage_i]),\n ref[stage_i],\n seq_1[stage_i],\n seq_2[stage_i],\n n_flag[stage_i],\n stage_length[stage_i],\n objs,\n obj_traj[stage_i]\n )\n\n # Concatenate stages, combine results from different ends\n tip_target_1_s = []\n tip_target_2_s = []\n joint_target_1_s = []\n joint_target_2_s = []\n loss_coef_1_s = []\n loss_coef_2_s = []\n for stage_i in range(seq_1.shape[0]):\n tip_target_1_s.append(tip_target_1[stage_i, :, :stage_length[stage_i]].transpose(0, 1))\n tip_target_2_s.append(tip_target_2[stage_i, :, :stage_length[stage_i]].transpose(0, 1))\n joint_target_1_s.append(joint_target_1[stage_i, :, :stage_length[stage_i]].transpose(0, 1))\n 
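# trim each stage to its real frame count and make frames the leading axis before concatenation\n 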
joint_target_2_s.append(joint_target_2[stage_i, :, :stage_length[stage_i]].transpose(0, 1))\n loss_coef_1_s.append(loss_coef_1[stage_i, :, :stage_length[stage_i]].transpose(0, 1))\n loss_coef_2_s.append(loss_coef_2[stage_i, :, :stage_length[stage_i]].transpose(0, 1))\n tip_target_1 = torch.cat(tip_target_1_s, dim=0)\n tip_target_2 = torch.cat(tip_target_2_s, dim=0)\n joint_target_1 = torch.cat(joint_target_1_s, dim=0)\n joint_target_2 = torch.cat(joint_target_2_s, dim=0)\n loss_coef_1 = torch.cat(loss_coef_1_s, dim=0)\n loss_coef_2 = torch.cat(loss_coef_2_s, dim=0)\n tip_target = tip_target_1 * loss_coef_1.unsqueeze(-1) + tip_target_2 * loss_coef_2.unsqueeze(-1)\n joint_target = joint_target_1 * loss_coef_1.unsqueeze(-1).unsqueeze(-1) + joint_target_2 * loss_coef_2.unsqueeze(-1).unsqueeze(-1)\n joint_target = torch.nn.functional.normalize(joint_target, dim=4)\n loss_coef = loss_coef_1 + loss_coef_2\n\n # Optimization\n trans, pose = fit_joint(\n tip_target,\n joint_target,\n loss_coef,\n init_trans,\n init_pose,\n )\n\n return trans, pose\n","repo_name":"cams-hoi/CAMS","sub_path":"synthesizer/FitSequence.py","file_name":"FitSequence.py","file_ext":"py","file_size_in_byte":8688,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"28"} +{"seq_id":"74385380234","text":"from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Any\n\nBASE_DIR = Path(__file__).parent\n\nDEBUG = True\n\nSECRET_KEY = \"django-insecure-6-@0$ah01nfjv0+6f-9%eq2oqngwm#qtxy-ntpj+7hpz-*+q^(\"\n\n# Dangerous: disable host header validation\nALLOWED_HOSTS = [\"*\"]\n\nINSTALLED_APPS = [\n \"example\",\n \"reporting_endpoints\",\n \"django.contrib.staticfiles\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.admin\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"reporting_endpoints.middleware.ReportingEndpointsMiddleware\",\n]\n\nROOT_URLCONF = \"example.urls\"\n\nDATABASES: dict[str, dict[str, Any]] = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"example.sqlite3\",\n }\n}\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [BASE_DIR / \"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n },\n }\n]\n\nUSE_TZ = True\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [BASE_DIR / \"static\"]\n\nCSP_DEFAULT_SRC = \"'self'\"\nCSP_REPORT_URI = \"/_/csp-reports/\"\nCSP_REPORT_TO = \"csp-violations\"\nCSP_REPORT_ONLY = False\n\nREPORTING_ENDPOINTS = {\"csp-violations\": \"/_/csp-reports/\"}\nREPORT_TO_ENDPOINTS = [\n {\n \"group\": \"csp-violations\",\n \"max_age\": 10886400,\n \"endpoints\": [{\"url\": \"/_/csp-reports/\"}],\n }\n]\n","repo_name":"seporaitis/django-reporting-endpoints","sub_path":"example/example/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19029889059","text":"# -*- coding: utf-8 
-*-\n'''\nmultiprocessing crawler for jandan.net/ooxx\n\n\"pip install -r requirements.txt\" and run it.\n\nThe line with notes is position you can control this script.\n\nHave fun.\n\n@author: B1u3Buf4\n'''\nimport os\nimport re\nimport time\nfrom multiprocessing import Process, Queue, Pool\nimport requests\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass checkload(object):\n def __init__(self,driver):\n self.driver = driver\n\n\n def __call__(self,driver):\n return driver.find_element_by_id('comments').get_attribute(\"innerHTML\").find('sinaimg.cn') > -1\n\n\ndef crawlurl():\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--disable-gpu')\n driver = webdriver.Chrome(chrome_options = chrome_options)\n driver.get(url = 'http://jandan.net/ooxx')\n time.sleep(1)\n pics = []\n urls = []\n order = 1\n try:\n page = driver.page_source\n counts = re.findall('current-comment-page\">.* float:\n if acc_id is None:\n acc_id = cif_path.stem\n open_func = open\n if cif_path.name.endswith(\".gz\"):\n open_func = gzip.open\n with open_func(str(cif_path), mode=\"rt\") as cif_fh:\n mmcif_dict = MMCIF2Dict.MMCIF2Dict(cif_fh)\n chain_plddt = mmcif_dict[\"_ma_qa_metric_global.metric_value\"][0]\n plddt_strings = mmcif_dict[\"_ma_qa_metric_local.metric_value\"]\n chopping_plddt = []\n if chopping:\n for segment in chopping.segments:\n segment_plddt = [\n float(plddt)\n for plddt in plddt_strings[int(segment.start) - 1 : int(segment.end)]\n ]\n chopping_plddt += segment_plddt\n domain_length = len(chopping_plddt)\n average_plddt = round((sum(chopping_plddt) / domain_length), 2)\n\n else:\n average_plddt = chain_plddt\n return average_plddt\n\n\ndef get_LUR_residues_percentage(cif_path: Path, *, chopping=None, acc_id=None):\n if acc_id is None:\n acc_id = cif_path.stem\n open_func = open\n if cif_path.name.endswith(\".gz\"):\n open_func = gzip.open\n with open_func(str(cif_path), mode=\"rt\") as cif_fh:\n mmcif_dict = MMCIF2Dict.MMCIF2Dict(cif_fh)\n plddt_strings = mmcif_dict[\"_ma_qa_metric_local.metric_value\"]\n chopping_plddt = []\n if chopping:\n for segment in chopping.segments:\n segment_plddt = [\n float(plddt)\n for plddt in plddt_strings[int(segment.start) - 1 : int(segment.end)]\n ]\n chopping_plddt += segment_plddt\n else:\n chopping_plddt = plddt_strings\n # Calculate LUR\n LUR_perc = 0\n LUR_total = 0\n LUR_res = 0\n LUR_stretch = False\n min_res_lur = MIN_LENGTH_LUR\n for residue in segment_plddt:\n plddt_res = float(residue)\n if plddt_res < 70:\n LUR_res += 1\n if LUR_stretch:\n LUR_total += 1\n\n if LUR_res == min_res_lur and not LUR_stretch:\n LUR_stretch = True\n LUR_total += min_res_lur\n\n else:\n LUR_stretch = False\n LUR_res = 0\n LUR_perc = round(LUR_total / len(chopping_plddt) * 100, 2)\n\n return LURSummary(\n LUR_perc=LUR_perc, LUR_total=LUR_total, residues_total=len(chopping_plddt)\n )\n","repo_name":"UCLOrengoGroup/cath-alphaflow","sub_path":"cath_alphaflow/commands/extract_plddt_and_lur.py","file_name":"extract_plddt_and_lur.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"28"} +{"seq_id":"8525982444","text":"import requests\nimport json\nurl: str = \"http://localhost:5000/graphql\"\nbody: str = \"\"\"\nmutation {\n updatePost(id: 1, title: \"python test\", description: \"from python\"){\n post {\n id\n title\n description\n 
created_at\n }\n success\n }\n}\n\"\"\"\n\nr = requests.post(url=url, json={\"query\": body})\n\nif r.status_code == 200:\n json_object = json.loads(r.content.decode())\n res = json.dumps(json_object, indent=2)\n print(res)\n\n\n\n","repo_name":"ebotun/GraphQl","sub_path":"requests/updatePost.py","file_name":"updatePost.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"20827422462","text":"import numpy as np\n\nimport matplotlib\n# the backend must be selected before pyplot is imported\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as pyl\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib.patches import Rectangle\n\nimport tkinter as Tk\nfrom tkinter import ttk\nfrom tkinter import scrolledtext as ScrolledText\nfrom tkinter import messagebox as MessageBox\nfrom tkinter import filedialog as FileDialog\nfrom tkinter import font as tkFont\n\nimport os\nimport json\nimport inspect\nfrom functools import partial\n\nimport tarsier_scrapper\nimport sepia_scrapper\nimport chameleon_scrapper\nimport framework_abstractor\n\nPROJECTS_DIR = 'Projects/'\n\nTARSIER_UNSUPPORTED_MODULES = []\nTARSIER_UNSUPPORTED_MODULES += ['hash'] # handle_event is in destructor, as it forces compiler to run the code\n\nSEPIA_UNSUPPORTED_MODULES = []\n\nCHAMELEON_UNSUPPORTED_MODULES = []\n\nUNSUPPORTED_MODULES = TARSIER_UNSUPPORTED_MODULES + SEPIA_UNSUPPORTED_MODULES + CHAMELEON_UNSUPPORTED_MODULES\n\ndef about_command():\n label = MessageBox.showinfo(\"About\", \"Tarsier code generator\\nWork In Progress, be kind\\nPlease visit https://github.com/neuromorphic-paris/\")\n\n \nclass GUI:\n def __init__(self):\n self.Framework = framework_abstractor.FrameworkAbstraction(LogFunction = self.Log)\n self.FrameworkFileName = ''\n\n self.Options = {'display_module_ev_outputs':True}\n\n TarsierModules = tarsier_scrapper.ScrapTarsierFolder()\n SepiaModules, SepiaTypes, SepiaUtilities = sepia_scrapper.ScrapSepiaFile()\n ChameleonModules = chameleon_scrapper.ScrapChameleonFolder()\n\n self.AvailableModules = {}\n for ModuleName, Module in list(TarsierModules.items()):\n self.AvailableModules[ModuleName] = Module\n for ModuleName, Module in list(SepiaModules.items()):\n self.AvailableModules[ModuleName] = Module\n for ModuleName, Module in list(ChameleonModules.items()):\n self.AvailableModules[ModuleName] = Module\n\n self.BaseTypes = {framework_abstractor.NoneEventType['name']: framework_abstractor.NoneEventType}\n for TypeName, Type in list(SepiaTypes.items()):\n self.BaseTypes[TypeName] = Type\n self.AvailableTypes = dict(self.BaseTypes)\n\n self.AvailableUtilities = {}\n for UtilityName, Utility in SepiaUtilities.items():\n self.AvailableUtilities[UtilityName] = Utility\n\n self.UserDefinedVariableTypes = ['Event type']\n self.MenuParams = {'event_stream_type': (self.AvailableTypes, self._OnEventTypeTemplateChange, self._OnNewEventTypeTemplate), 'Event': (self.AvailableTypes, self._OnEventTypeTemplateChange, self._OnNewEventTypeTemplate)}\n\n self.MainWindow = Tk.Tk()\n self.MainWindow.bind(\"\", lambda event:self.MainWindow.destroy())\n self.MainWindow.title('Beaver - {0}'.format(framework_abstractor.DEFAULT_NAME))\n\n MainMenu = Tk.Menu(self.MainWindow)\n self.MainWindow.config(menu=MainMenu)\n filemenu = Tk.Menu(MainMenu)\n MainMenu.add_cascade(label=\"File\", menu = filemenu)\n filemenu.add_command(label=\"New\", command=self.GenerateEmptyFramework)\n 
filemenu.add_command(label=\"Open...\", command=self.open_command)\n filemenu.add_command(label=\"Save\", command=self.save_command)\n filemenu.add_command(label=\"Save as...\", command=self.saveas_command)\n filemenu.add_separator()\n filemenu.add_command(label=\"Exit\", command=self._on_closing) \n\n insertmenu = Tk.Menu(MainMenu)\n MainMenu.add_cascade(label=\"Insert\", menu = insertmenu)\n newmenu = Tk.Menu(insertmenu)\n insertmenu.add_cascade(label = \"New\", menu = newmenu)\n for Type in self.UserDefinedVariableTypes:\n newmenu.add_command(label=Type, command=partial(self.AddType, Type))\n insertmenu.add_separator()\n\n def FirstUp(string):\n return string[0].upper()+string[1:]\n\n Menus = {'sepia':Tk.Menu(insertmenu), 'tarsier':Tk.Menu(insertmenu), 'chameleon':Tk.Menu(insertmenu)}\n HasSeparator = {Key: False for Key in Menus.keys()}\n for origin, Menu in Menus.items():\n insertmenu.add_cascade(label = FirstUp(origin), menu = Menu)\n for ModuleName in sorted(self.AvailableModules.keys()):\n if ModuleName not in UNSUPPORTED_MODULES:\n Module = self.AvailableModules[ModuleName]\n Menus[Module['origin']].add_command(label=ModuleName, command=partial(self.AddModule, str(ModuleName)))\n for UtilityName in sorted(self.AvailableUtilities.keys()):\n Utility = self.AvailableUtilities[UtilityName]\n if not HasSeparator[Utility['origin']]:\n Menus[Utility['origin']].add_separator()\n HasSeparator[Utility['origin']] = True\n Menus[Utility['origin']].add_command(label=UtilityName, command=partial(self.AddUtility, str(UtilityName)))\n \n\n helpmenu = Tk.Menu(MainMenu)\n MainMenu.add_cascade(label=\"Help\", menu=helpmenu)\n helpmenu.add_command(label=\"About...\", command=about_command)\n\n self.MainWindow.grid_columnconfigure(0, weight=1)\n self.MainWindow.grid_rowconfigure(0, weight=1)\n\n self.Display = Figure(figsize=(5,5), dpi=150)\n self.DisplayAx = self.Display.add_subplot(111)\n self.DisplayAx.tick_params('both', bottom = 'off', left = 'off', labelbottom = 'off', labelleft = 'off')\n self.Display.tight_layout()\n \n self.DisplayCanvas = FigureCanvasTkAgg(self.Display, self.MainWindow)\n cid = self.DisplayCanvas.mpl_connect('button_press_event', self._onDisplayClick)\n self.DisplayCanvas.show()\n self.DisplayCanvas.get_tk_widget().grid(row = 0, column = 0)\n\n self.AutoAddBGR = False\n self.AvailablesModulesPositions = []\n self.SelectedAvailableModulePosition = 0\n self.SelectedAvailableChameleonModulePosition = 0\n self.ModulesDiameter = 2.\n self.HModulesTilingDistance = 6.\n self.VModulesTilingDistance = 4.\n self.DisplayedModulesPositions = {}\n\n self.DisplayedLinks = {}\n self.ActiveItem = None\n \n self.DisplayCodeLinkFrame = Tk.Frame(self.MainWindow)\n self.DisplayCodeLinkFrame.grid(row = 0, column = 1)\n self.DisplayWorkFrame = Tk.Frame(self.DisplayCodeLinkFrame, bd = 4, relief='groove')\n self.DisplayWorkFrame.grid(row = 0, column = 0)\n ErasePicture = Tk.PhotoImage(file = 'Icons/erase.png')\n self.RemoveModuleButton = Tk.Button(self.DisplayWorkFrame, image=ErasePicture, command = self.RemoveModule)\n self.RemoveModuleButton.image = ErasePicture\n self.RemoveModuleButton.grid(row = 0, column = 0)\n RoutePicture = Tk.PhotoImage(file = 'Icons/route.png')\n self.RouteModuleButton = Tk.Button(self.DisplayWorkFrame, image=RoutePicture, command = self.RouteModule)\n self.RouteModuleButton.image = RoutePicture\n self.RouteModuleButton.grid(row = 1, column = 0)\n self.WaitingForRoute = None\n\n self.CodeWorkFrame = Tk.Frame(self.DisplayCodeLinkFrame, bd = 4, relief='groove')\n 
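# frame hosting the '?' button that loads the selected module's source into the code pad\n 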
self.CodeWorkFrame.grid(row = 1, column = 0)\n self.ModuleCodeDisplayButton = Tk.Button(self.CodeWorkFrame, text = '?', command = self.DisplayModuleCode, font = tkFont.Font(size = 20))\n self.ModuleCodeDisplayButton.grid(row = 0, column = 0)\n\n self.TempFiles = {}\n\n self.DefaultFile = framework_abstractor.GENERAL_FILENAME\n self.CodeFrame = Tk.Frame(self.MainWindow)\n self.CodeFrame.grid(row = 0, column = 2)\n self.CurrentCodeFile = list(self.Framework.Files(Bare = True).keys())[0]\n self.CodeFileVar = Tk.StringVar(self.MainWindow)\n self.CodeFileVar.set(self.CurrentCodeFile)\n self.CodeFileMenu = Tk.OptionMenu(self.CodeFrame, self.CodeFileVar, *self.Framework.Data['files'])\n self.CodeFileMenu.grid(row = 0, column = 0)\n self.CodePad = ScrolledText.ScrolledText(self.CodeFrame, width=120, height=40, bg = 'white')\n self.CodePad.bind(\"<<Modified>>\", lambda event: self._onCodeModification())\n def _tab(arg):\n self.CodePad.insert(Tk.INSERT, framework_abstractor.CPP_TAB)\n return 'break'\n self.CodePad.bind(\"<Tab>\", _tab)\n self.CodePad.grid(row = 1, column = 0)\n self.SortedFiles = []\n \n self.ParamsFrame = Tk.Frame(self.MainWindow, width = 100, bd = 4, relief='groove')\n self.ParamsFrame.grid(row = 2, column = 0, rowspan = 1, columnspan = 1, sticky=Tk.N+Tk.S+Tk.E+Tk.W)\n self.ParamsTitleFrame = Tk.Frame(self.ParamsFrame)\n self.ParamsTitleFrame.grid(row = 0, column = 0, columnspan = 2, sticky=Tk.N+Tk.S+Tk.W)\n\n self.ParamsValuesFrame = Tk.Frame(self.ParamsFrame, bd = 2, relief='groove')\n self.ParamsValuesFrame.grid(row = 1, column = 0, sticky = Tk.N+Tk.S+Tk.E+Tk.W)\n self.ParamsButtonsFrame = Tk.Frame(self.ParamsFrame)\n self.ParamsButtonsFrame.grid(row = 1, column = 1, sticky = Tk.N+Tk.S+Tk.E)\n self.ParamsUpperButton = Tk.Button(self.ParamsButtonsFrame, text = '^', height = 10, command = partial(self.UpdateDisplayedParams, -1))\n self.ParamsLowerButton = Tk.Button(self.ParamsButtonsFrame, text = 'v', height = 10, command = partial(self.UpdateDisplayedParams, +1))\n self.ParamsUpperButton.grid(row = 0, column = 0)\n self.ParamsLowerButton.grid(row = 1, column = 0)\n self.DisplayedParams = []\n self.NFieldsDisplayed = 13\n self.CurrentMinParamDisplayed = 0\n\n\n self.CompilationFrame = Tk.Frame(self.MainWindow)\n self.CompilationFrame.grid(row = 1, column = 2, sticky=Tk.N+Tk.S)\n self.CodeGenerationButton = Tk.Button(self.CompilationFrame, text = 'C++', command = self.GenerateCode, font = tkFont.Font(size = 15))\n self.CodeGenerationButton.grid(row = 0, column = 0)\n self.Premake4Button = Tk.Button(self.CompilationFrame, text = 'Premake4', command = self.GenerateBuild, font = tkFont.Font(size = 15))\n self.Premake4Button.grid(row = 0, column = 1)\n self.CompileButton = Tk.Button(self.CompilationFrame, text = 'Compile', command = self.GenerateBinary, font = tkFont.Font(size = 15))\n self.CompileButton.grid(row = 0, column = 2)\n\n self.ConsolePad = ScrolledText.ScrolledText(self.MainWindow, width=120, height=10, bg = 'black', fg = 'white')\n self.ConsolePad.grid(row = 2, column = 2, sticky=Tk.N+Tk.S)\n self.MAX_LOG_LINES = 50\n \n self.GUIUpdate('__init__')\n\n self.Log(\"Ready !\")\n self.MainWindow.mainloop()\n\n def _on_closing(self):\n if MessageBox.askokcancel(\"Quit\", \"Do you really want to quit?\"):\n self.MainWindow.quit()\n self.MainWindow.destroy()\n\n def UpdateAvailableTypes(self):\n self.AvailableTypes = dict(self.BaseTypes)\n for TypeName in self.Framework.UserDefinedTypes.keys():\n self.AvailableTypes[TypeName] = self.Framework.UserDefinedTypes[TypeName]\n\n def _onDisplayClick(self, event):\n 
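# hit-test the click: placed modules first, then link labels, then free placement slots\n 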
if not self.Framework.Modules:\n return None\n Click = np.array([event.xdata, event.ydata])\n for Module in self.Framework.Modules:\n if (abs(self.DisplayedModulesPositions[Module['id']] - Click) < self.ModulesDiameter/2.).all():\n if type(Module) == framework_abstractor.LambdaModuleClass and Module['default']:\n self.SelectedAvailableModulePosition = -Module['id'] - 1\n if not self.WaitingForRoute is None:\n self.RouteModule(Module)\n self.GUIUpdate('_onDisplayClick', ActiveItemValue = Module)\n return None\n self.WaitingForRoute = None\n for LinkStr, LinkText in list(self.DisplayedLinks.items()):\n Contains, AddDict = LinkText.contains(event)\n if Contains:\n self.GUIUpdate('_onDisplayClick', ActiveItemValue = LinkStr)\n return None\n\n for nPosition, PositionAndParent in enumerate(self.AvailablesModulesPositions):\n if (abs(PositionAndParent[0] - Click) < self.ModulesDiameter/2.).all():\n if PositionAndParent[1:].count(None) == 0: # Only case with both entries set; for chameleon slots these are tile coordinates, not a parent and a child.\n self.SelectedAvailableChameleonModulePosition = nPosition\n else:\n self.SelectedAvailableModulePosition = nPosition\n self.GUIUpdate('_onDisplayClick', SetActiveItem = False)\n return None\n self.GUIUpdate('_onDisplayClick', ActiveItemValue = None)\n\n def GenerateEmptyFramework(self):\n if self.Framework.Modules:\n if not MessageBox.askokcancel(\"New\", \"Unsaved framework. Erase anyway ?\"):\n return None\n # asksaveasfile returns None when the dialog is cancelled, which a with-statement cannot handle\n file = FileDialog.asksaveasfile(mode='w', initialdir = PROJECTS_DIR, defaultextension='.json', title = \"New project\", filetypes=[(\"JSON\",\"*.json\")])\n if file is None:\n return None\n self.Framework = framework_abstractor.FrameworkAbstraction(LogFunction = self.Log)\n self.Framework.Data['name'] = file.name.split('/')[-1].split('.json')[0]\n file.close()\n\n self.FrameworkFileName = file.name\n self.MainWindow.title('Beaver - {0}'.format(self.Framework.Data['name']))\n\n self.TempFiles = {}\n self.DisplayedParams = []\n self.DisplayedModulesPositions = {}\n\n self.GUIUpdate('GenerateEmptyFramework')\n\n def save_command(self):\n self.RegisterCurrentCodePad('save_command')\n if self.FrameworkFileName:\n with open(self.FrameworkFileName, \"w\") as f:\n if not f is None:\n json.dump(self.Framework.ToDict(), f)\n self.Log(\"Saved.\")\n else:\n self.Error(\"Something went wrong while saving project.\")\n else:\n self.saveas_command()\n\n def saveas_command(self):\n self.RegisterCurrentCodePad('saveas_command')\n # asksaveasfile returns None when the dialog is cancelled\n f = FileDialog.asksaveasfile(mode='w', initialdir = PROJECTS_DIR, initialfile = self.Framework.Data['name'], defaultextension='.json', title = \"Save as...\", filetypes=[(\"JSON\",\"*.json\")])\n if not f is None:\n NewName = f.name.split('/')[-1].split('.json')[0]\n if not self.Framework.Data['name'] or (NewName != self.Framework.Data['name'] and MessageBox.askyesno(\"Name changed\", \"Do you want to change project name from \\n{0} \\nto {1} ?\".format(self.Framework.Data['name'], NewName))):\n self.Framework.Data['name'] = NewName\n self.MainWindow.title('Beaver - {0}'.format(self.Framework.Data['name']))\n\n json.dump(self.Framework.ToDict(), f)\n f.close()\n self.FrameworkFileName = f.name\n self.Log(\"Saved.\")\n \n def open_command(self):\n with FileDialog.askopenfile(parent=self.MainWindow,mode='r', initialdir = PROJECTS_DIR, title='Open...', defaultextension='.json', filetypes=[(\"JSON\",\"*.json\")]) as file:\n if file != None:\n Data = json.load(file)\n self.Framework = framework_abstractor.FrameworkAbstraction(Data, self.Log)\n self.FrameworkFileName = file.name\n 
self.MainWindow.title('Beaver - {0}'.format(self.Framework.Data['name']))\n\n self.TempFiles = {}\n self.DisplayedParams = []\n self.DisplayedModulesPositions = {}\n \n self.GUIUpdate('open_command')\n\n def RegisterCurrentCodePad(self, OriginFunction):\n if self.CurrentCodeFile in list(self.TempFiles.keys()):\n return None\n CurrentText = self.CodePad.get('1.0', Tk.END+'-1c')\n self.Framework.Files()[self.CurrentCodeFile]['data'] = CurrentText\n\n def AddModule(self, ModuleName):\n if self.AvailableModules[ModuleName]['origin'] == 'chameleon':\n self.AddChameleonModule(ModuleName)\n self.GUIUpdate('AddModule', ActiveItemValue = self.Framework.Modules[-1])\n return None\n\n ModuleNames = [Module['name'] for Module in self.Framework.Modules]\n AskedModuleName = ModuleName\n N = 0\n while AskedModuleName in ModuleNames:\n N += 1\n AskedModuleName = ModuleName + '_{0}'.format(N)\n\n self.Log(\"Adding \" + AskedModuleName)\n self.Framework.AddModule(self.AvailableModules[ModuleName], AskedModuleName)\n NewModule = self.Framework.ReferenceModulesDictionnary['name'][AskedModuleName]\n\n if self.SelectedAvailableModulePosition < 0: # We replace here lambda function\n DefaultLambdaFunctionID = -self.SelectedAvailableModulePosition - 1\n ParentID = self.Framework.ReferenceModulesDictionnary['id'][DefaultLambdaFunctionID]['parent_ids'][0]\n self.AddLink(ParentID, NewModule['id'])\n else:\n ChildrenID = self.AvailablesModulesPositions[self.SelectedAvailableModulePosition][2]\n if not ChildrenID is None:\n self.AddLink(NewModule['id'], ChildrenID) # Even if ChildrenID is None, we add the link. In this case, it will be a link to a default lambda function\n\n self.GUIUpdate('AddModule', ActiveItemValue = NewModule)\n\n def AddChameleonModule(self, ModuleName, AddBGC = True): # As Chameleon modules are quite uniques, we add them apart\n ModuleNames = [Module['name'] for Module in self.Framework.Modules]\n AskedModuleName = ModuleName\n N = 0\n while AskedModuleName in ModuleNames:\n N += 1\n AskedModuleName = ModuleName + '_{0}'.format(N)\n\n self.Log(\"Adding \" + AskedModuleName)\n Tile = self.AvailablesModulesPositions[self.SelectedAvailableChameleonModulePosition][1:]\n if Tile not in list(self.Framework.ChameleonTiles.keys()):\n self.Framework.ChameleonTiles[Tile] = []\n if self.AutoAddBGR and AddBGC and ModuleName != 'background_cleaner':\n FoundBGC = False\n for ModuleID in self.Framework.ChameleonTiles[Tile]:\n if self.Framework.ReferenceModulesDictionnary['id'][ModuleID]['module']['name'] == 'background_cleaner':\n FoundBGC = True\n break\n if not FoundBGC:\n self.AddChameleonModule('background_cleaner', AddBGC = False)\n NewModule = self.Framework.ReferenceModulesDictionnary['name'][AskedModuleName]\n self.Framework.ChameleonTiles[Tile] += [NewModule['id']]\n\n self.AddChameleonModuleDisplay(NewModule)\n\n def AddUtility(self, UtilityName):\n None\n\n def AddChameleonModuleDisplay(self, Module):\n print(\"Adding chameleon module display for \", Module)\n self.DisplayedModulesPositions[Module['id']] = self.AvailablesModulesPositions[self.SelectedAvailableChameleonModulePosition][0]\n\n def GetChameleonModulePosition(self, Tile, nModule):\n if not list(self.Framework.ChameleonTiles.values()):\n TilesSizes = 0\n else:\n TilesSizes = max([len(IDs) for IDs in list(self.Framework.ChameleonTiles.values())])\n return self.ChameleonInitialTilePosition + np.array([Tile[0] * (TilesSizes*self.ModulesDiameter + self.HModulesTilingDistance), - Tile[1] * self.VModulesTilingDistance/1.5]) + nModule * 
np.array([self.ModulesDiameter, 0])\n\n def RemoveModule(self, Item = None):\n if Item is None:\n Item = self.ActiveItem\n if Item is None:\n return None\n if type(Item) == framework_abstractor.ModuleClass:\n self.Log(\"Removing {0}\".format(Item['name']))\n self.Framework.RemoveModule(Item)\n\n elif type(Item) == tuple:\n LinkStr = Item\n ParentID, ChildID = self.Framework.GetParentAndChildFromLinkStr(LinkStr)\n self.Log(\"Removed link from {0} to {1}\".format(self.Framework.ReferenceModulesDictionnary['id'][ParentID]['name'], self.Framework.ReferenceModulesDictionnary['id'][ChildID]['name']))\n self.Framework.RemoveLink(ParentID, ChildID)\n\n self.GUIUpdate('RemoveModule')\n\n def RouteModule(self, SecondRoutedModule):\n if type(SecondRoutedModule) != framework_abstractor.ModuleClass:\n return None\n \n if self.WaitingForRoute is None:\n if SecondRoutedModule['origin'] == 'chameleon':\n self.Error('Chameleon modules cannot output events.')\n return None\n HandlersParamsIndexes = SecondRoutedModule.FindModuleHandlers()\n FreeSlot = False\n for HandlerIndex in HandlersParamsIndexes:\n if SecondRoutedModule['parameters'][HandlerIndex] == '@' + framework_abstractor.LAMBDA_FUNCTION_FROM.format(SecondRoutedModule['name'], SecondRoutedModule['module']['parameters'][HandlerIndex]['name']):\n FreeSlot = True\n break\n if not FreeSlot: # This works since even a chameleon module needs a free slot, which is a lambda function by default\n self.Error(\"Selected module cannot have any more outputs\")\n return None\n self.WaitingForRoute = SecondRoutedModule['id'] # Will be the parent\n self.Log('Selected a child module to link to...')\n return None\n\n if not SecondRoutedModule.HasOperator():\n self.WaitingForRoute = None\n self.Error('Selected module cannot receive any input')\n return None\n \n if SecondRoutedModule['id'] == self.WaitingForRoute:\n self.WaitingForRoute = None\n self.Error('Cannot link a module to itself')\n return None\n\n NewParentsIDs = self.Framework.ReferenceModulesDictionnary['id'][self.WaitingForRoute]['parent_ids']\n while NewParentsIDs:\n OlderGen = []\n for ParentID in NewParentsIDs:\n for OlderParentID in self.Framework.ReferenceModulesDictionnary['id'][ParentID]['parent_ids']:\n if OlderParentID not in OlderGen:\n OlderGen += [OlderParentID]\n if OlderParentID == SecondRoutedModule['id']:\n self.Error('Cannot create circular dependencies')\n self.WaitingForRoute = None\n return None\n NewParentsIDs = list(OlderGen)\n\n self.Log(\"Linking {0} to {1}\".format(self.Framework.ReferenceModulesDictionnary['id'][self.WaitingForRoute]['name'], SecondRoutedModule['name']))\n self.AddLink(self.WaitingForRoute, SecondRoutedModule['id'])\n\n self.WaitingForRoute = None\n self.GUIUpdate('RouteModule')\n\n def GetDescendance(self, ElderID):\n AllDescendance = [ElderID]\n NewChilds = self.Framework.GetChildrenIDs(ElderID)\n while NewChilds:\n NextGenChilds = []\n for ID in NewChilds:\n for NewChildID in self.Framework.GetChildrenIDs(ID):\n if NewChildID not in NextGenChilds:\n NextGenChilds += [NewChildID]\n AllDescendance += NextGenChilds\n NewChilds = list(NextGenChilds)\n return AllDescendance\n\n def AddLink(self, ParentID, ChildrenID):\n self.Framework.AddLink(ParentID, ChildrenID)\n\n def AddType(self, Type):\n if Type == 'Event type':\n self.Framework.AddNewType()\n self.ActiveItem = framework_abstractor.TYPES_DEF_FILE\n self.Update(Mod = 0)\n\n def _OnRemoveType(self, TypeName):\n self.Framework.RemoveType(TypeName)\n self.Framework.WriteTypesFile()\n\n 
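# the focused file may have belonged to the removed type; refresh focus, menus and drawing\n 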
self.GUIUpdate('_OnRemoveType')\n\n def GenerateCode(self):\n if not self.Framework.Data['name'] or not self.FrameworkFileName:\n self.saveas_command()\n self.Framework.GenerateCode()\n\n def GenerateBuild(self):\n if not self.FrameworkFileName:\n self.saveas_command()\n if not self.FrameworkFileName:\n return None\n LuaFilename = self.Framework.GenerateBuild()\n\n def GenerateBinary(self):\n None\n\n def DisplayModuleCode(self):\n if not self.ActiveItem is None and type(self.ActiveItem) == framework_abstractor.ModuleClass:\n Module = self.ActiveItem\n if Module['origin'] == 'tarsier':\n self.TempFiles[Module['module']['name'] + '.hpp'] = '\\n'.join(tarsier_scrapper.GetTarsierCode(Module['module']['name'] + '.hpp', Full = True))\n elif Module['origin'] == 'sepia':\n SepiaCode = sepia_scrapper.GetSepiaCode(Full = True)\n ModuleStartLine = sepia_scrapper.FindTemplateFunctions(SepiaCode, Module['module']['name'])\n while sepia_scrapper.TEMPLATE_LINE_INDICATOR not in SepiaCode[ModuleStartLine] and ModuleStartLine > 0:\n ModuleStartLine -= 1\n if ModuleStartLine > 0 and sepia_scrapper.COMMENT_INDICATOR in SepiaCode[ModuleStartLine-1]:\n ModuleStartLine -= 1\n Lines = [\"Starting at line {0}\".format(ModuleStartLine + 1), \"\"]\n nOpen = 0\n nClose = 0\n while nOpen == 0 or nOpen > nClose:\n CurrentLine = SepiaCode[ModuleStartLine]\n Lines += [CurrentLine]\n CurrentLine = CurrentLine.split(sepia_scrapper.COMMENT_INDICATOR)[-1]\n nOpen += CurrentLine.count('{')\n nClose += CurrentLine.count('}')\n ModuleStartLine += 1\n self.TempFiles[Module['module']['name'] + '.hpp'] = '\\n'.join(Lines)\n else:\n return None\n self.GUIUpdate('DisplayModuleCode', TmpFile = Module['module']['name'] + '.hpp')\n\n def UpdateCodeMenu(self, OriginFunction):\n Menu = self.CodeFileMenu['menu']\n Menu.delete(0, \"end\") \n for FileName in self.Framework.Files().keys():\n Menu.add_command(label = FileName, command = partial(self._OnCodefileSelection, FileName))\n\n def _OnCodefileSelection(self, Filename):\n self.GUIUpdate('_OnCodefileSelection', ActiveItemValue = Filename)\n\n def UpdateDisplayedFile(self, OriginFunction, TmpFile = None):\n self.CodePad.delete('1.0', Tk.END)\n if not TmpFile is None:\n self.CodePad.insert(Tk.END, self.TempFiles[TmpFile])\n else:\n self.CodePad.insert(Tk.END, self.Framework.Files()[self.CurrentCodeFile]['data'])\n\n def _onCodeModification(self):\n None\n\n def Log(self, string):\n self.ConsolePad.config(state=Tk.NORMAL)\n if string[-1] != '\\n':\n string = string+'\\n'\n self.ConsolePad.insert(Tk.END, string)\n CurrentText = self.ConsolePad.get('1.0', Tk.END+'-1c')\n if CurrentText.count('\\n') > self.MAX_LOG_LINES:\n CurrentText = '\\n'.join(CurrentText.split('\\n')[-self.MAX_LOG_LINES:])\n self.ConsolePad.delete('1.0', Tk.END)\n self.ConsolePad.insert(Tk.END, CurrentText)\n self.ConsolePad.see('end')\n self.ConsolePad.config(state=Tk.DISABLED)\n\n def Warning(self, string):\n self.Log(\"WARNING : \"+string)\n def Error(self, string):\n self.Log(\"Error : \"+string)\n\n def RegenerateChameleonAvailableSlots(self):\n self.SelectedAvailableChameleonModulePosition = len(self.AvailablesModulesPositions)\n AddedTiles = []\n if not list(self.Framework.ChameleonTiles.keys()):\n AddedTiles += [(0,0)]\n for Tile, IDs in list(self.Framework.ChameleonTiles.items()):\n for nModule, ModuleID in enumerate(IDs):\n self.DisplayedModulesPositions[ModuleID] = self.GetChameleonModulePosition(Tile, nModule)\n self.AvailablesModulesPositions 
+= [(self.GetChameleonModulePosition(Tile, nModule+1), Tile[0], Tile[1])]\n\n NextTiles = [(Tile[0], Tile[1] + 1), (Tile[0] + 1, Tile[1]), (Tile[0] + 1, Tile[1] + 1)]\n for NextTile in NextTiles:\n if NextTile not in list(self.Framework.ChameleonTiles.keys()) and NextTile not in AddedTiles:\n AddedTiles += [NextTile]\n\n for Tile in AddedTiles:\n self.AvailablesModulesPositions += [(self.GetChameleonModulePosition(Tile, 0), Tile[0], Tile[1])]\n \n def ExtractArboresence(self):\n self.Arboresence = {}\n self.CitedModules = []\n self.OriginModules = []\n\n for Module in self.Framework.Modules:\n if Module['origin'] == 'chameleon':\n continue\n if not Module.HasOperator():\n self.OriginModules += [Module['id']]\n if Module['id'] not in self.Arboresence.keys():\n self.Arboresence[Module['id']] = {'children':[], 'parents': []}\n if Module.HasOperator():\n self.Arboresence[Module['id']]['parents'] += [-1]\n HandlersParamsIndexes = Module.FindModuleHandlers()\n for HandlerIndex in HandlersParamsIndexes:\n HandlerName = Module['module']['parameters'][HandlerIndex]['name']\n HandlerParamFuncName = framework_abstractor.LAMBDA_FUNCTION_FROM.format(Module['name'], HandlerName)\n ChildrenName = Module['parameters'][HandlerIndex].split('@')[-1]\n ChildrenModule = self.Framework.ReferenceModulesDictionnary['name'][ChildrenName]\n self.Arboresence[Module['id']]['children'] += [ChildrenModule['id']]\n if ChildrenModule['id'] not in self.Arboresence.keys():\n self.Arboresence[ChildrenModule['id']] = {'children':[], 'parents': []}\n if ChildrenModule.HasOperator():\n self.Arboresence[ChildrenModule['id']]['parents'] += [-1]\n self.Arboresence[ChildrenModule['id']]['parents'] += [Module['id']]\n if ChildrenModule['id'] not in self.CitedModules:\n self.CitedModules += [ChildrenModule['id']]\n print(\"Arbo : \", self.Arboresence)\n\n def UpdateDisplayPositions(self, OriginFunction):\n self.ExtractArboresence()\n \n self.DisplayedModulesPositions = {}\n self.AvailablesModulesPositions = []\n\n if not [Module['id'] for Module in self.Framework.Modules if Module['origin'] != 'chameleon']:\n self.AvailablesModulesPositions += [(np.array([0., 0.]), None, None)]\n self.ChameleonInitialTilePosition = np.array([0., -self.VModulesTilingDistance])\n self.SelectedAvailableModulePosition = 0\n self.SelectedAvailableChameleonModulePosition = len(self.AvailablesModulesPositions)\n self.RegenerateChameleonAvailableSlots()\n return None\n\n AskedHeights = {}\n for ModuleID in self.OriginModules:\n AskedHeights[ModuleID] = 0\n if not self.OriginModules:\n for ModuleID in self.Arboresence.keys():\n if ModuleID in self.CitedModules:\n continue\n AskedHeights[ModuleID] = 0\n \n nLoops = {key:0 for key in list(self.Arboresence.keys())}\n while not len(AskedHeights.keys()) == len(self.Arboresence.keys()):\n for ModuleID in self.Arboresence.keys():\n if ModuleID in AskedHeights.keys():\n continue\n nLoops[ModuleID] += 1\n ParentMissing = False\n for ParentID in self.Arboresence[ModuleID]['parents']:\n if ParentID == -1:\n continue\n LowestHeight = 0.\n if ParentID in AskedHeights.keys():\n LowestHeight = AskedHeights[ParentID] - self.VModulesTilingDistance\n else:\n ParentMissing = True\n break\n if (self.Arboresence[ModuleID]['parents'].count(-1) == len(self.Arboresence[ModuleID]['parents']) or ParentMissing):\n if nLoops[ModuleID] < 100:\n continue\n else:\n if not self.Arboresence[ModuleID]['children']:\n LowestHeight = max(list(AskedHeights.values()))\n else:\n LowestHeight = min(list(AskedHeights.values()))\n for ChildrenID in 
self.Arboresence[ModuleID]['children']:\n if ChildrenID in AskedHeights.keys():\n LowestHeight = max(LowestHeight, AskedHeights[ChildrenID] + self.VModulesTilingDistance)\n AskedHeights[ModuleID] = LowestHeight\n\n Heights = list(AskedHeights.values())\n Heights += [min(Heights) - self.VModulesTilingDistance, max(Heights) + self.VModulesTilingDistance]\n SortedIndexes = np.argsort(Heights)\n print(SortedIndexes)\n\n self.ChameleonInitialTilePosition = np.array([0., min(Heights) - self.VModulesTilingDistance])\n \n for LocalIndex in reversed(SortedIndexes):\n HeightConsidered = Heights[LocalIndex]\n print(LocalIndex, \" at \", HeightConsidered)\n Items = []\n for ModuleID in self.Arboresence.keys():\n if abs(AskedHeights[ModuleID] - HeightConsidered) < self.VModulesTilingDistance/2:\n Items += [ModuleID]\n if -1 in self.Arboresence[ModuleID]['parents'] and abs(AskedHeights[ModuleID] + self.VModulesTilingDistance - HeightConsidered) < self.VModulesTilingDistance/2:\n Items += [(np.array([0., AskedHeights[ModuleID] + self.VModulesTilingDistance]), None, ModuleID)]\n #if -1 in self.Arboresence[ModuleID]['children'] and abs(AskedHeights[ModuleID] - self.VModulesTilingDistance - HeightConsidered) < self.VModulesTilingDistance/2:\n # for i in range(self.Arboresence[ModuleID]['children'].count(-1)):\n # Items += [(np.array([0., AskedHeights[ModuleID] - self.VModulesTilingDistance]), ModuleID, None)]\n\n print(\"Items : \", Items)\n for nItem, Item in enumerate(Items):\n X = self.HModulesTilingDistance * (-len(Items) / 2. + nItem + 0.5)\n if type(Item) == int:\n if Item in self.DisplayedModulesPositions.keys():\n continue\n self.DisplayedModulesPositions[Item] = np.array([X, AskedHeights[Item]])\n else:\n Add = True\n for AddedAvlbPos in self.AvailablesModulesPositions:\n if Item[1:] == AddedAvlbPos[1:]:\n Add = False\n break\n if Add:\n Item[0][0] = X\n self.AvailablesModulesPositions += [Item]\n\n self.ChameleonInitialTilePosition[0] = min(self.ChameleonInitialTilePosition[0], X)\n\n for Module in self.Framework.Modules:\n if type(Module) == framework_abstractor.LambdaModuleClass and Module['default']:\n self.SelectedAvailableModulePosition = -Module['id']-1\n break\n else:\n self.SelectedAvailableModulePosition = len(self.AvailablesModulesPositions)-1\n self.RegenerateChameleonAvailableSlots()\n\n# def UpdateOld(self, Regenerate = False, Mod = None):\n# if type(self.ActiveItem) == framework_abstractor.ModuleClass:\n# self.UpdateDisplayedCodefile(self.ActiveItem.FileName(), SaveCurrentFile = False)\n# elif type(self.ActiveItem) == framework_abstractor.LambdaModuleClass:\n# pass\n# elif self.ActiveItem == framework_abstractor.TYPES_DEF_FILE:\n# self.UpdateDisplayedCodefile(framework_abstractor.TYPES_DEF_FILE, SaveCurrentFile = False)\n# elif self.ActiveItem is None:\n# self.UpdateDisplayedCodefile(self.DefaultFile, SaveCurrentFile = False)\n# if Regenerate:\n# self.UpdateDisplayPositions()\n# self.UpdateDrawing()\n# self.ChangeDisplayedParams(Mod)\n\n def GUIUpdate(self, OriginFunction, **kwargs):\n UpdateOriginFunctions = ['__init__', '_onDisplayClick', '_OnCodefileSelection', 'GenerateEmptyFramework', 'open_command', 'AddModule', 'AddUtility', 'RemoveModule', 'RouteModule', '_OnRemoveType', 'GenerateCode', 'GenerateBuild', 'GenerateBinary', 'DisplayModuleCode', '_OnAddedParameterChange', '_OnFrameworkNameChange', '_OnEventTypeTemplateChange', '_OnNewEventTypeTemplate', '_OnAddGlobalVariable', '_OnRemoveGlobalVariable', '_OnGlobalVariableChange', '_OnDefinedEventTypeFieldChange', 
'_OnDefinedEventTypeFieldAddRemove']\n if not callable(getattr(self, OriginFunction)):\n print(\"Ill defined call to GUI Update. Function not specified\")\n return None\n if OriginFunction not in UpdateOriginFunctions:\n print(\"Not implemented GUI update from function {0}\".format(OriginFunction))\n return None\n\n FunctionsOrder = [self.RegisterCurrentCodePad, self.UpdateFocus, self.UpdateDisplayPositions, self.UpdateDrawing, self.UpdateCodeMenu, self.UpdateDisplayedFile, self.UpdateDisplayedParams]\n\n FunctionsLinks = {}\n FunctionsLinks[self.UpdateFocus] = ['__init__', '_onDisplayClick', '_OnCodefileSelection', 'GenerateEmptyFramework', 'open_command', 'AddModule', 'RemoveModule', 'AddType', '_OnRemoveType', 'GenerateCode', 'GenerateBuild', 'GenerateBinary', '_OnNewEventTypeTemplate']\n\n FunctionsLinks[self.UpdateDisplayPositions] = ['__init__', 'GenerateEmptyFramework', 'open_command', 'AddModule', 'RemoveModule', 'RouteModule']\n FunctionsLinks[self.UpdateDrawing] = FunctionsLinks[self.UpdateDisplayPositions] + ['_onDisplayClick', '_OnCodefileSelection', '_OnRemoveType', '_OnAddedParameterChange', '_OnEventTypeTemplateChange', '_OnNewEventTypeTemplate', '_OnDefinedEventTypeFieldChange']\n\n FunctionsLinks[self.RegisterCurrentCodePad] = [] #TODO\n FunctionsLinks[self.UpdateCodeMenu] = ['__init__', '_onDisplayClick', '_OnCodefileSelection', 'GenerateEmptyFramework', 'open_command', 'AddModule', 'RemoveModule', 'AddType', '_OnRemoveType', 'GenerateCode', 'GenerateBuild', 'GenerateBinary', 'DisplayModuleCode', '_OnNewEventTypeTemplate']\n FunctionsLinks[self.UpdateDisplayedFile] = FunctionsLinks[self.UpdateCodeMenu] + ['_OnAddedParameterChange', '_OnFrameworkNameChange', '_OnEventTypeTemplateChange', '_OnAddGlobalVariable', '_OnRemoveGlobalVariable', '_OnGlobalVariableChange', '_OnDefinedEventTypeFieldChange', '_OnDefinedEventTypeFieldAddRemove']\n\n FunctionsLinks[self.UpdateDisplayedParams] = ['__init__', '_onDisplayClick', '_OnCodefileSelection', 'GenerateEmptyFramework', 'open_command', 'AddModule', 'RemoveModule', '_OnRemoveType', '_OnNewEventTypeTemplate', '_OnAddGlobalVariable', '_OnRemoveGlobalVariable', '_OnDefinedEventTypeFieldAddRemove']\n\n for Function in FunctionsOrder:\n if OriginFunction in FunctionsLinks[Function]:\n args, _, _ = inspect.getargs(Function.__code__)\n CallDict = {key: value for key, value in kwargs.items() if key in args}\n Function(OriginFunction, **CallDict)\n\n def UpdateFocus(self, OriginFunction, SetActiveItem = True, ActiveItemValue = None): # Master switch to change focus of the GUI.\n if not SetActiveItem:\n return None\n if OriginFunction in ['__init__', 'GenerateEmptyFramework', 'open_command', 'RemoveModule']:\n self.ActiveItem = None\n self.CurrentCodeFile = framework_abstractor.GENERAL_FILENAME\n elif OriginFunction in ['_onDisplayClick', 'AddModule']:\n self.ActiveItem = ActiveItemValue\n if self.ActiveItem is None:\n self.CurrentCodeFile = framework_abstractor.GENERAL_FILENAME\n else:\n self.CurrentCodeFile = self.ActiveItem.FileName()\n elif OriginFunction == '_OnCodefileSelection':\n self.CurrentCodeFile = ActiveItemValue \n if self.CurrentCodeFile == framework_abstractor.TYPES_DEF_FILE or self.CurrentCodeFile == framework_abstractor.GENERAL_FILENAME:\n self.ActiveItem = None\n else:\n self.ActiveItem= self.Framework.ReferenceModulesDictionnary['name'][self.CurrentCodeFile.split(framework_abstractor.HANDLERS_FILE_NAME_SUFFIX)[0]]\n elif OriginFunction in ['AddType', '_OnNewEventTypeTemplate']:\n self.ActiveItem = None\n 
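# Creating or templating a type shifts the editor focus onto the shared type-definitions file\n            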
self.CurrentCodeFile = framework_abstractor.TYPES_DEF_FILE\n        elif OriginFunction == '_OnRemoveType':\n            if not self.Framework.UserDefinedTypes:\n                self.ActiveItem = None\n                self.CurrentCodeFile = framework_abstractor.GENERAL_FILENAME\n        else:\n            print(\"Not implemented UpdateFocus caller {0}\".format(OriginFunction))\n        self.CodeFileVar.set(self.CurrentCodeFile)\n\n    def UpdateDrawing(self, OriginFunction):\n        print(self.ActiveItem)\n        minValues = np.array([0., 0.])\n        maxValues = np.array([0., 0.])\n\n        self.DisplayAx.clear()\n        self.DisplayedLinks = {} \n        for Module in self.Framework.Modules:\n            alpha = 1.\n            if not self.WaitingForRoute is None and self.WaitingForRoute == Module['id']:\n                Color = 'k'\n                Style = '-'\n            else:\n                if type(Module) == framework_abstractor.LambdaModuleClass and Module['default']:\n                    Color = 'grey'\n                    Style = '--'\n                    if Module['id'] == -self.SelectedAvailableModulePosition - 1:\n                        alpha = 1.\n                    else:\n                        alpha = 0.4\n                else:\n                    if self.Framework.WellDefinedModule(Module):\n                        Color = 'g'\n                    else:\n                        Color = 'r'\n                    if not self.ActiveItem is None and (type(self.ActiveItem) == framework_abstractor.ModuleClass or type(self.ActiveItem) == framework_abstractor.LambdaModuleClass) and Module['id'] == self.ActiveItem['id']:\n                        Style = '-'\n                        alpha = 1.\n                    else:\n                        Style = '--'\n            self.DrawModule(Module, Style, Color, alpha)\n            minValues = np.minimum(minValues, self.DisplayedModulesPositions[Module['id']] - 1.5*self.ModulesDiameter)\n            maxValues = np.maximum(maxValues, self.DisplayedModulesPositions[Module['id']] + 1.5*self.ModulesDiameter)\n\n            self.DrawLinksToChildrens(Module, Color)\n\n        for nSlot, AvailableSlotAndParent in enumerate(self.AvailablesModulesPositions):\n            Color = 'grey'\n            Style = '--'\n            if nSlot == self.SelectedAvailableModulePosition or nSlot == self.SelectedAvailableChameleonModulePosition:\n                alpha = 1.\n            else:\n                alpha = 0.4\n            self.DrawModule({'nSlot':nSlot, 'id': None}, Style, Color, alpha)\n            minValues = np.minimum(minValues, AvailableSlotAndParent[0] - self.ModulesDiameter)\n            maxValues = np.maximum(maxValues, AvailableSlotAndParent[0] + self.ModulesDiameter)\n        self.DrawAvailableParentsLinks()\n\n        Center = (minValues + maxValues)/2.\n        MaxAxis = (maxValues - minValues).max()\n        minValues = Center - MaxAxis/2.\n        maxValues = Center + MaxAxis/2.\n        self.DisplayAx.set_xlim(minValues[0], maxValues[0])\n        self.DisplayAx.set_ylim(minValues[1], maxValues[1])\n        self.Display.canvas.show()\n        #self.Log(\"Done.\")\n    \n    def DrawModule(self, Module, Style, Color, alpha = 1.):\n        if 'nSlot' in list(Module.keys()):\n            ModulePosition = self.AvailablesModulesPositions[Module['nSlot']][0]\n            ModuleName = ''\n            ModuleEvFields = []\n            ModuleOutputFields = []\n        elif type(Module) == framework_abstractor.ModuleClass:\n            ModulePosition = self.DisplayedModulesPositions[Module['id']]\n            ModuleName = Module['name']\n            ModuleEvFields = Module['module']['ev_fields']\n            ModuleOutputFields = Module['ev_outputs']\n        elif type(Module) == framework_abstractor.LambdaModuleClass:\n            ModulePosition = self.DisplayedModulesPositions[Module['id']]\n            ModuleName = Module['name']\n            ModuleEvFields = []\n            ModuleOutputFields = []\n\n        DXs = (self.ModulesDiameter/2 * np.array([np.array([-1, -1]), np.array([-1, 1]), np.array([1, 1]), np.array([1, -1])])).tolist()\n        for nDX in range(len(DXs)):\n            self.DisplayAx.plot([(ModulePosition + DXs[nDX])[0], (ModulePosition + DXs[(nDX+1)%4])[0]], [(ModulePosition + DXs[nDX])[1], (ModulePosition + DXs[(nDX+1)%4])[1]], ls = Style, color = Color, alpha = alpha)\n        NameTextPosition = ModulePosition + self.ModulesDiameter/2 * 0.8 * 
np.array([-1, -1])\n self.DisplayAx.text(NameTextPosition[0], NameTextPosition[1], s = ModuleName, color = Color, alpha = alpha, fontsize = 8)\n if not self.ActiveItem is None and type(self.ActiveItem) == framework_abstractor.ModuleClass and Module['id'] == self.ActiveItem['id'] and len(ModuleEvFields) > 1:\n if ModulePosition[0] < 0:\n HAlign = 'right'\n FieldsTextPosition = ModulePosition + self.ModulesDiameter/2 * 1.2 * np.array([-1., 0])\n else:\n HAlign = 'left'\n FieldsTextPosition = ModulePosition + self.ModulesDiameter/2 * 1.2 * np.array([1., 0])\n ModuleFieldsString = 'Required fields for {0}:\\n'.format(ModuleEvFields[0]) + ', '.join(ModuleEvFields[1:])\n if self.Options['display_module_ev_outputs']:\n if list(ModuleOutputFields.keys()):\n ModuleFieldsString = ModuleFieldsString + '\\nOutputs :'\n for handle, Fields in list(ModuleOutputFields.items()):\n ModuleFieldsString = ModuleFieldsString + '\\n* ' + handle \n for Field in Fields:\n ModuleFieldsString += '\\n -> {0} {1}'.format(Field['type'], Field['name'])\n self.DisplayAx.text(FieldsTextPosition[0], FieldsTextPosition[1], s = ModuleFieldsString, bbox={'facecolor': Color, 'alpha': 1, 'pad': 2}, horizontalalignment=HAlign, verticalalignment='center', zorder=10, fontsize = 8)\n \n def DrawAvailableParentsLinks(self):\n Color = 'grey'\n Style = ':'\n for nAvailablePos, AvailablePos in enumerate(self.AvailablesModulesPositions):\n if AvailablePos[2] is None:\n continue\n if not AvailablePos[1] is None:\n continue\n if nAvailablePos == self.SelectedAvailableModulePosition:\n alpha = 1.\n else:\n alpha = 0.4\n ChildrenModule = self.Framework.ReferenceModulesDictionnary['id'][AvailablePos[2]]\n Start = AvailablePos[0] + np.array([0., -1.]) * self.ModulesDiameter/2\n End = self.DisplayedModulesPositions[AvailablePos[2]] + np.array([-1., 1.]) * self.ModulesDiameter/2 + np.array([1., 0.]) * self.ModulesDiameter * (len(ChildrenModule['parent_ids'])+1.)/(len(ChildrenModule['parent_ids'])+2.)\n YStep = (Start + End)/2\n self.DisplayAx.plot([Start[0], Start[0]], [Start[1], YStep[1]], ls = Style, color = Color, alpha = alpha)\n self.DisplayAx.plot([Start[0], End[0]], [YStep[1], YStep[1]], ls = Style, color = Color, alpha = alpha)\n self.DisplayAx.plot([End[0], End[0]], [YStep[1], End[1]], ls = Style, color = Color, alpha = alpha)\n\n def DrawLinksToChildrens(self, Module, ModuleColor):\n if type(Module) == framework_abstractor.LambdaModuleClass:\n return None\n HandlersParamsIndexes = Module.FindModuleHandlers()\n Links = []\n nUnused = 0\n for HandlerIndex in HandlersParamsIndexes:\n ChildrenName = Module['parameters'][HandlerIndex].split('@')[1]\n ChildrenModule = self.Framework.ReferenceModulesDictionnary['name'][ChildrenName]\n Links += [(Module['id'], ChildrenModule['id'], 1.-(ChildrenModule['parent_ids'].index(Module['id'])+1.+ChildrenModule.HasOperator())/(len(ChildrenModule['parent_ids'])+1.+ChildrenModule.HasOperator()), Module.returned_event_type[Module.module['parameters'][HandlerIndex]['name']]['name'])]\n\n for nLink, Link in enumerate(Links):\n ChildrenModule = self.Framework.ReferenceModulesDictionnary['id'][Link[1]]\n if ChildrenModule['origin'] == framework_abstractor.USER_DEFINED and ChildrenModule['default']:\n Color = 'grey'\n else:\n Color = ModuleColor\n LinkStr = framework_abstractor.GetLinkStr(Link[0], Link[1])\n if not self.ActiveItem is None and type(self.ActiveItem) == str and self.ActiveItem == LinkStr:\n Style = '-'\n else:\n Style = ':'\n if (self.SelectedAvailableModulePosition < 0 and 
-self.SelectedAvailableModulePosition - 1 == Link[1]):\n alpha = 1.\n else:\n alpha = 0.4\n Start = self.DisplayedModulesPositions[Link[0]] + np.array([-1., -1.]) * self.ModulesDiameter/2 +np.array([1., 0.]) * self.ModulesDiameter * (1. + nLink) / (1. + len(Links))\n End = self.DisplayedModulesPositions[Link[1]] + np.array([-1., 1.]) * self.ModulesDiameter/2 + np.array([1., 0.]) * self.ModulesDiameter * Link[2]\n YStep = (Start + End)/2\n self.DisplayAx.plot([Start[0], Start[0]], [Start[1], YStep[1]], ls = Style, color = Color, alpha = alpha)\n self.DisplayAx.plot([Start[0], End[0]], [YStep[1], YStep[1]], ls = Style, color = Color, alpha = alpha)\n self.DisplayAx.plot([End[0], End[0]], [YStep[1], End[1]], ls = Style, color = Color, alpha = alpha)\n\n LinkStr = framework_abstractor.GetLinkStr(Link[0], Link[1])\n self.DisplayedLinks[LinkStr] = self.DisplayAx.text(YStep[0], YStep[1], s = Link[3], zorder = 5, bbox={'facecolor': 'white', 'alpha': 1, 'pad': 2, 'ls': Style}, horizontalalignment='center', verticalalignment='center')\n\n def _OnParameterChange(self, StringVar, ParamIndex, DisplayIndex):\n print(\"Parameter : {0}, {1}\".format(ParamIndex, DisplayIndex))\n self.ActiveItem['parameters'][ParamIndex] = StringVar.get()\n self.DisplayedParams[DisplayIndex][0]['foreground'] = self.GetParamDisplayColor(ParamIndex)\n\n def _OnAddedParameterChange(self, StringVar, ParamIndex, DisplayIndex):\n if type(self.ActiveItem) == framework_abstractor.ModuleClass:\n AddedParamName = self._GetModuleAddedParams()[ParamIndex]['name']\n elif type(self.ActiveItem) == str:\n AddedParamName = self._GetLinkAddedParams()[ParamIndex]['name']\n # First check if name is ok and available\n if AddedParamName == 'Name':\n AskedName = StringVar.get()\n CursorIndex = self.DisplayedParams[DisplayIndex][-1].index(Tk.INSERT)\n if not self._AddedParamValidity(AddedParamName, AskedName):\n self.DisplayedParams[DisplayIndex][0]['foreground'] = 'red'\n if not self.ActiveItem is None:\n return None\n else:\n self.DisplayedParams[DisplayIndex][0]['foreground'] = 'black'\n\n if type(self.ActiveItem) == framework_abstractor.ModuleClass:\n self.Framework.ChangeModuleName(self.ActiveItem, AskedName)\n\n self.GUIUpdate('_OnAddedParameterChange')\n #self.DisplayedParams[DisplayIndex][-1].focus_set()\n #self.DisplayedParams[DisplayIndex][-1].icursor(CursorIndex)\n\n def _OnFrameworkNameChange(self, StrVar):\n AskedName = StrVar.get()\n self.Framework.Data['name'] = AskedName\n if not AskedName:\n AskedName = framework_abstractor.DEFAULT_NAME\n self.MainWindow.title('Beaver - {0}'.format(AskedName))\n self.GUIUpdate('_OnFrameworkNameChange')\n\n def _OnTemplateChange(self, StringVar, TemplateIndex, DisplayIndex):\n print(\"Template : {0}, {1}\".format(TemplateIndex, DisplayIndex))\n self.ActiveItem['templates'][TemplateIndex] = StringVar.get()\n self.DisplayedParams[DisplayIndex][0]['foreground'] = self.GetTemplateDisplayColor(TemplateIndex)\n\n def _OnEventTypeTemplateChange(self, StrVar, TemplateIndex, DisplayIndex, TypeName):\n print(\"Inserting type\" + TypeName)\n StrVar.set(TypeName)\n if TypeName == framework_abstractor.NoneEventType['name']:\n self.DisplayedParams[DisplayIndex][0]['foreground'] = 'red'\n else:\n self.DisplayedParams[DisplayIndex][0]['foreground'] = 'green'\n\n self.ActiveItem.SetEventTemplate(self.AvailableTypes[TypeName])\n self.GUIUpdate('_OnEventTypeTemplateChange')\n\n def _OnNewEventTypeTemplate(self, StringVar, TemplateIndex, DisplayIndex):\n self.Framework.AddNewType()\n 
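# A GUI-wide refresh makes the new type template visible in the menus, drawing and parameter pane\n        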
self.GUIUpdate('_OnNewEventTypeTemplate')\n\n def _OnAddGlobalVariable(self):\n self.Framework.AddGlobalVariable()\n self.Framework.UpdateVariablesFile()\n self.GUIUpdate('_OnAddGlobalVariable')\n\n def _OnRemoveGlobalVariable(self, VarName):\n for nVariable, Variable in enumerate(self.Framework.GlobalVariables):\n if Variable['name'] == VarName:\n break\n self.Framework.GlobalVariables.pop(nVariable)\n self.Framework.UpdateVariablesFile()\n self.GUIUpdate('_OnRemoveGlobalVariable')\n\n def _OnGlobalVariableChange(self, StrVar, nVar, ModValue, DisplayNumber):\n StrValue = StrVar.get()\n print(StrValue)\n self.Framework.GlobalVariables[nVar][ModValue] = StrValue\n self.Framework.UpdateVariablesFile()\n self.GUIUpdate('_OnGlobalVariableChange')\n\n def _AddedParamValidity(self, AddedParamName, AddedParamValue):\n if AddedParamName == 'Name':\n if AddedParamValue == '':\n return False\n if type(self.ActiveItem) == framework_abstractor.ModuleClass:\n return self.Framework.ModuleNameValidity(self.ActiveItem, AddedParamValue)\n\n def _GetModuleAddedParams(self):\n return [{'name': 'Name', 'type': 'str', 'default': self.ActiveItem['name']}]\n\n def _GetLinkAddedParams(self):\n return []\n\n def UpdateDisplayedParams(self, OriginFunction, Mod = None):\n for Line in self.DisplayedParams:\n for Field in Line:\n Field.destroy()\n if self.ActiveItem is None:\n return self.DisplayGlobalVariables(Mod)\n \n elif type(self.ActiveItem) == framework_abstractor.ModuleClass:\n AddedParams = self._GetModuleAddedParams()\n ModuleParameters = self.ActiveItem['module']['parameters']\n ModuleTemplates = self.ActiveItem['module']['templates']\n elif type(self.ActiveItem) == tuple:\n AddedParams = self._GetLinkAddedParams()\n ModuleParameters = []\n ModuleTemplates = []\n\n elif self.ActiveItem.__class__ == framework_abstractor.LambdaModuleClass:\n AddedParams = self._GetModuleAddedParams()\n ModuleParameters = []\n ModuleTemplates = []\n\n elif type(self.ActiveItem) == str:\n if self.ActiveItem == framework_abstractor.TYPES_DEF_FILE:\n return self.DisplayTypesParameters(Mod)\n else:\n return self.DisplayGlobalVariables(Mod)\n\n else:\n print(\"Not implemented ActiveItem type\")\n return None\n\n ItemsFields = []\n if AddedParams or ModuleParameters:\n ItemsFields += [{'name':'Parameter', 'type': 'Type', 'value': 'Value'}]\n ItemsFields += AddedParams\n ItemsFields += ModuleParameters\n if ModuleTemplates:\n ItemsFields += [{'name':'Template', 'type': 'Type', 'value': 'Value'}]\n ItemsFields += ModuleTemplates\n\n if Mod == 0:\n self.CurrentMinParamDisplayed = 0\n elif not Mod is None:\n self.CurrentMinParamDisplayed = max(0, min(len(ItemsFields) - self.NFieldsDisplayed, self.CurrentMinParamDisplayed + Mod))\n self.DisplayedParams = []\n self.CurrentParams = {}\n\n if self.CurrentMinParamDisplayed != 0:\n FirstLine = '...'\n else:\n FirstLine = ''\n self.DisplayedParams += [[Tk.Label(self.ParamsValuesFrame, text = FirstLine, width = 20, anchor = Tk.W)]]\n self.DisplayedParams[-1][0].grid(row=len(self.DisplayedParams)-1, column = 0)\n\n for NField in range(self.CurrentMinParamDisplayed, min(len(ItemsFields), self.CurrentMinParamDisplayed + self.NFieldsDisplayed)):\n EntryEnabled = True\n Field = ItemsFields[NField]\n self.DisplayedParams += [[]]\n if Field in AddedParams:\n nField = AddedParams.index(Field)\n Color = self.GetAddedParamDisplayColor(Field['name'], Field['default'])\n StrVar = Tk.StringVar(self.MainWindow)\n CBFunction = self._OnAddedParameterChange\n if 'default' in Field.keys():\n 
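# Pre-fill the entry with the field's declared default value\n                    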
StrVar.set(Field['default'])\n if type(self.ActiveItem) == framework_abstractor.LambdaModuleClass:\n EntryEnabled = False\n\n elif Field in ModuleParameters:\n nField = ModuleParameters.index(Field)\n Color = self.GetParamDisplayColor(ModuleParameters.index(Field))\n StrVar = Tk.StringVar(self.MainWindow)\n CBFunction = self._OnParameterChange\n if self.ActiveItem['parameters'][Field['param_number']]:\n StrVar.set(self.ActiveItem['parameters'][Field['param_number']])\n else:\n if 'default' in list(Field.keys()):\n StrVar.set(Field['default'])\n\n elif Field in ModuleTemplates:\n nField = ModuleTemplates.index(Field)\n Color = self.GetTemplateDisplayColor(ModuleTemplates.index(Field))\n StrVar = Tk.StringVar(self.MainWindow)\n if Field['name'] in self.MenuParams.keys(): # Case for specific parameter\n ParamDict = self.MenuParams[Field['name']][0]\n CBFunction = self.MenuParams[Field['name']][1]\n AddNewFunction = self.MenuParams[Field['name']][2]\n if self.ActiveItem['templates'][Field['template_number']]:\n StrVar.set(self.ActiveItem['templates'][Field['template_number']])\n else:\n StrVar.set(sorted(ParamDict.keys())[0])\n else:\n CBFunction = self._OnTemplateChange\n if self.ActiveItem['templates'][Field['template_number']]:\n StrVar.set(self.ActiveItem['templates'][Field['template_number']])\n else:\n if 'default' in list(Field.keys()):\n StrVar.set(Field['default'])\n if Field['default'] and Field['default'][0] == '#':\n EntryEnabled = False\n\n else:\n Color = 'black'\n StrVar = None\n CBFunction = None\n nField = None\n EntryEnabled = False\n\n self.DisplayedParams[-1] += [Tk.Label(self.ParamsValuesFrame, text = Field['name'], width = 20, anchor = Tk.W, foreground = Color)]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=0, sticky = Tk.N)\n\n self.DisplayedParams[-1] += [Tk.Label(self.ParamsValuesFrame, text = Field['type'], width = 20, anchor = Tk.W)]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=1, sticky = Tk.N)\n\n if not CBFunction is None:\n if Field['name'] in self.MenuParams.keys():\n self.DisplayedParams[-1] += [Tk.OptionMenu(self.ParamsValuesFrame, StrVar, *self.AvailableTypes)]\n self.DisplayedParams[-1][-1]['menu'].delete(0, \"end\")\n for Param in sorted(ParamDict.keys()):\n self.DisplayedParams[-1][-1]['menu'].add_command(label=Param, command=lambda param = Param, func = CBFunction, sv=StrVar, LocalNumber = nField, DisplayNumber = len(self.DisplayedParams)-1: func(sv, LocalNumber, DisplayNumber, param))\n if not AddNewFunction is None:\n self.DisplayedParams[-1][-1]['menu'].add_separator()\n self.DisplayedParams[-1][-1]['menu'].add_command(label='New...', command=lambda func = AddNewFunction, sv=StrVar, LocalNumber = nField, DisplayNumber = len(self.DisplayedParams)-1: func(sv, LocalNumber, DisplayNumber))\n else:\n StrVar.trace(\"w\", lambda name, index, mode, sv=StrVar, func = CBFunction, LocalNumber = nField, DisplayNumber = len(self.DisplayedParams)-1: func(sv, LocalNumber, DisplayNumber))\n self.DisplayedParams[-1] += [Tk.Entry(self.ParamsValuesFrame, textvariable = StrVar, width = 45, bg = 'white')]\n\n if not EntryEnabled:\n self.DisplayedParams[-1][-1].config(state = 'disabled')\n else:\n self.DisplayedParams[-1] += [Tk.Label(self.ParamsValuesFrame, text = Field['value'], width = 45, anchor = Tk.W)]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=2, sticky = Tk.N+Tk.E+Tk.W)\n if self.CurrentMinParamDisplayed + self.NFieldsDisplayed < len(ItemsFields):\n self.DisplayedParams += 
[[Tk.Label(self.ParamsValuesFrame, text = '...', width = 20, anchor = Tk.W)]]\n            self.DisplayedParams[-1][0].grid(row=len(self.DisplayedParams)-1, column = 0)\n\n    def DisplayGlobalVariables(self, Mod):\n        VariablesParams = [{'name': 'Framework name', 'type': 'str', 'value': self.Framework.Data['name']}] + self.Framework.GlobalVariables\n        NItemsFields = len(VariablesParams) + 1 # for + button\n\n        if Mod == 0:\n            self.CurrentMinParamDisplayed = 0\n        elif not Mod is None:\n            self.CurrentMinParamDisplayed = max(0, min(NItemsFields - self.NFieldsDisplayed, self.CurrentMinParamDisplayed + Mod))\n        self.DisplayedParams = []\n        self.CurrentParams = {}\n\n        if self.CurrentMinParamDisplayed != 0:\n            FirstLine = '...'\n        else:\n            FirstLine = ''\n        self.DisplayedParams += [[Tk.Label(self.ParamsValuesFrame, text = FirstLine, width = 20, anchor = Tk.W)]]\n        self.DisplayedParams[-1][0].grid(row=len(self.DisplayedParams)-1, column = 0)\n\n        DisplayedFieldsIndexes = list(range(self.CurrentMinParamDisplayed, min(NItemsFields, self.CurrentMinParamDisplayed + self.NFieldsDisplayed)))\n        nFieldPossible = 0\n        for nVariable, Variable in enumerate(VariablesParams):\n            if nFieldPossible in DisplayedFieldsIndexes:\n                self.DisplayedParams += [[]]\n                if nVariable == 0:\n                    self.DisplayedParams[-1] += [Tk.Label(self.ParamsValuesFrame, text = Variable['name'], width = 20, anchor = Tk.W, foreground = 'black')]\n                    self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=0, sticky = Tk.N)\n                    self.DisplayedParams[-1] += [Tk.Label(self.ParamsValuesFrame, text = Variable['type'], width = 20, anchor = Tk.W, foreground = 'black')]\n                    self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=1, sticky = Tk.N)\n\n                    StrVar = Tk.StringVar(self.MainWindow)\n                    StrVar.set(Variable['value'])\n                    StrVar.trace(\"w\", lambda name, index, mode, sv=StrVar: self._OnFrameworkNameChange(sv))\n                    self.DisplayedParams[-1] += [Tk.Entry(self.ParamsValuesFrame, textvariable = StrVar, width = 45, foreground = 'black')]\n                    self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=2, columnspan=2, sticky = Tk.N)\n                    continue\n\n                for nField, FieldName in enumerate(['name', 'type', 'value']):\n                    Width = [20, 20, 40][nField]\n\n                    StrVar = Tk.StringVar(self.MainWindow)\n                    StrVar.set(Variable[FieldName])\n                    StrVar.trace(\"w\", lambda name, index, mode, sv=StrVar, nVar = nVariable-1, DisplayNumber = len(self.DisplayedParams)-1, ModValue = FieldName: self._OnGlobalVariableChange(sv, nVar, ModValue, DisplayNumber))\n                    self.DisplayedParams[-1] += [Tk.Entry(self.ParamsValuesFrame, textvariable = StrVar, width = Width, foreground = 'black')]\n                    self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=nField, sticky = Tk.N+Tk.W)\n\n                self.DisplayedParams[-1] += [Tk.Button(self.ParamsValuesFrame, text = '-', command = lambda VarName = Variable['name']: self._OnRemoveGlobalVariable(VarName))]\n                self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=3, sticky = Tk.N+Tk.E+Tk.W)\n            nFieldPossible += 1\n        if nFieldPossible in DisplayedFieldsIndexes:\n            self.DisplayedParams += [[Tk.Button(self.ParamsValuesFrame, text = '+', command = lambda :self._OnAddGlobalVariable())]]\n            self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=0, columnspan = 4, sticky = Tk.N+Tk.E+Tk.W)\n            nFieldPossible += 1\n        if self.CurrentMinParamDisplayed + self.NFieldsDisplayed < NItemsFields:\n            self.DisplayedParams += [[Tk.Label(self.ParamsValuesFrame, text = '...', width = 20, anchor = Tk.W)]]\n            
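# A trailing '...' row hints that more fields exist below the visible window\n            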
self.DisplayedParams[-1][0].grid(row=len(self.DisplayedParams)-1, column = 0)\n\n def DisplayTypesParameters(self, Mod):\n NItemsFields = 0\n for TypeName in self.Framework.UserDefinedTypes.keys():\n NItemsFields += 2 # One for the name, one for the '+' button\n NItemsFields += len(self.Framework.UserDefinedTypes[TypeName]['fields'])\n\n if Mod == 0:\n self.CurrentMinParamDisplayed = 0\n elif not Mod is None:\n self.CurrentMinParamDisplayed = max(0, min(NItemsFields - self.NFieldsDisplayed, self.CurrentMinParamDisplayed + Mod))\n self.DisplayedParams = []\n self.CurrentParams = {}\n\n if self.CurrentMinParamDisplayed != 0:\n FirstLine = '...'\n else:\n FirstLine = ''\n self.DisplayedParams += [[Tk.Label(self.ParamsValuesFrame, text = FirstLine, width = 20, anchor = Tk.W)]]\n self.DisplayedParams[-1][0].grid(row=len(self.DisplayedParams)-1, column = 0)\n\n DisplayedFieldsIndexes = list(range(self.CurrentMinParamDisplayed, min(NItemsFields, self.CurrentMinParamDisplayed + self.NFieldsDisplayed)))\n nFieldPossible = 0\n for TypeName in self.Framework.UserDefinedTypes.keys():\n if nFieldPossible in DisplayedFieldsIndexes:\n self.DisplayedParams += [[]]\n self.DisplayedParams[-1] += [Tk.Label(self.ParamsValuesFrame, text = 'Event type', width = 20, anchor = Tk.W, foreground = 'black')]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=0, sticky = Tk.N)\n StrVar = Tk.StringVar(self.MainWindow)\n StrVar.set(TypeName)\n StrVar.trace(\"w\", lambda name, index, mode, sv=StrVar, TypeName = TypeName, nField = None, DisplayNumber = len(self.DisplayedParams)-1, ModValue = 'name': self._OnDefinedEventTypeFieldChange(sv, TypeName, nField, ModValue, DisplayNumber))\n self.DisplayedParams[-1] += [Tk.Entry(self.ParamsValuesFrame, textvariable = StrVar, width = 60, bg = 'white')]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=1, sticky = Tk.N)\n\n self.DisplayedParams[-1] += [Tk.Button(self.ParamsValuesFrame, text = '-', command = lambda TypeName = TypeName: self._OnRemoveType(TypeName))]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=2, sticky = Tk.N+Tk.E+Tk.W)\n nFieldPossible += 1\n for nFieldInType, Field in enumerate(self.Framework.UserDefinedTypes[TypeName]['fields']):\n if nFieldPossible in DisplayedFieldsIndexes:\n self.DisplayedParams += [[]]\n StrVar = Tk.StringVar(self.MainWindow)\n StrVar.set(Field['type'])\n StrVar.trace(\"w\", lambda name, index, mode, sv=StrVar, TypeName = TypeName, nField = nFieldInType, DisplayNumber = len(self.DisplayedParams)-1, ModValue = 'type': self._OnDefinedEventTypeFieldChange(sv, TypeName, nField, ModValue, DisplayNumber))\n self.DisplayedParams[-1] += [Tk.Entry(self.ParamsValuesFrame, textvariable = StrVar, width = 20, bg = 'white')]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=0, sticky = Tk.N)\n\n StrVar = Tk.StringVar(self.MainWindow)\n StrVar.set(Field['name'])\n StrVar.trace(\"w\", lambda name, index, mode, sv=StrVar, TypeName = TypeName, nField = nFieldInType, DisplayNumber = len(self.DisplayedParams)-1, ModValue = 'name': self._OnDefinedEventTypeFieldChange(sv, TypeName, nField, ModValue, DisplayNumber))\n self.DisplayedParams[-1] += [Tk.Entry(self.ParamsValuesFrame, textvariable = StrVar, width = 60, bg = 'white')]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=1, sticky = Tk.N)\n\n self.DisplayedParams[-1] += [Tk.Button(self.ParamsValuesFrame, text = '-', command = lambda TypeName = TypeName, nField = nFieldInType: 
self._OnDefinedEventTypeFieldAddRemove(TypeName, nField))]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=2, sticky = Tk.N+Tk.E+Tk.W)\n\n nFieldPossible += 1\n if nFieldPossible in DisplayedFieldsIndexes:\n self.DisplayedParams += [[Tk.Button(self.ParamsValuesFrame, text = '+', command = lambda TypeName = TypeName, nField = None: self._OnDefinedEventTypeFieldAddRemove(TypeName, nField))]]\n self.DisplayedParams[-1][-1].grid(row=len(self.DisplayedParams)-1, column=0, columnspan = 3, sticky = Tk.N+Tk.E+Tk.W)\n nFieldPossible += 1\n if self.CurrentMinParamDisplayed + self.NFieldsDisplayed < NItemsFields:\n self.DisplayedParams += [[Tk.Label(self.ParamsValuesFrame, text = '...', width = 20, anchor = Tk.W)]]\n self.DisplayedParams[-1][0].grid(row=len(self.DisplayedParams)-1, column = 0)\n\n def _OnDefinedEventTypeFieldChange(self, StrVar, TypeName, nField, ModValue, DisplayIndex):\n if ModValue == 'type':\n ColumnIndex = 0\n elif ModValue == 'name':\n ColumnIndex = 1\n CursorIndex = self.DisplayedParams[DisplayIndex][ColumnIndex].index(Tk.INSERT)\n if nField is None:\n PreviousName = TypeName\n NewName = StrVar.get()\n self.Framework.UserDefinedTypes[NewName] = self.Framework.UserDefinedTypes[PreviousName]\n self.Framework.UserDefinedTypes[NewName]['name'] = NewName\n del self.Framework.UserDefinedTypes[PreviousName]\n#TODO : change references to this event type\n self.Framework.WriteTypesFile()\n\n else:\n self.Framework.UserDefinedTypes[TypeName]['fields'][nField][ModValue] = StrVar.get()\n self.Framework.WriteTypesFile()\n\n self.GUIUpdate('_OnDefinedEventTypeFieldChange')\n self.DisplayedParams[DisplayIndex][ColumnIndex].focus_set()\n self.DisplayedParams[DisplayIndex][ColumnIndex].icursor(CursorIndex)\n\n def _OnDefinedEventTypeFieldAddRemove(self, TypeName, nField):\n if nField is None:\n self.Framework.UserDefinedTypes[TypeName]['fields'] += [{'type':'', 'name': ''}]\n else:\n self.Framework.UserDefinedTypes[TypeName]['fields'].pop(nField)\n self.Framework.WriteTypesFile()\n self.GUIUpdate('_OnDefinedEventTypeFieldAddRemove')\n\n def GetAddedParamDisplayColor(self, ParamName, ParamValue):\n if not self._AddedParamValidity(ParamName, ParamValue):\n return 'red'\n else:\n return 'black'\n\n def GetParamDisplayColor(self, NParam):\n ModuleParameters = self.ActiveItem['module']['parameters']\n TypeCanBeenChecked, ValueWasChecked = framework_abstractor.CheckParameterValidity(ModuleParameters[NParam]['type'], self.ActiveItem['parameters'][NParam])\n if not TypeCanBeenChecked:\n Color = 'black'\n else:\n if ValueWasChecked:\n Color = 'green'\n else:\n Color = 'red'\n return Color\n\n def GetTemplateDisplayColor(self, NTemplate):\n if self.ActiveItem['module']['templates'][NTemplate]['type'] == 'sepia::type':\n if not self.ActiveItem['templates'][NTemplate] in self.AvailableTypes.keys() or self.AvailableTypes[self.ActiveItem['templates'][NTemplate]]['origin'] != 'sepia':\n return 'red'\n return 'green'\n if not self.ActiveItem['templates'][NTemplate]:\n return 'red'\n return 'black'\n\nG = GUI()\n","repo_name":"neuromorphic-paris/Beaver","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":73908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"37336708436","text":"# Have the function QuestionsMarks(str) take the str string parameter, which will contain single digit numbers, letters, and question marks, and check if there are exactly 3 question marks between every pair of two numbers 
that add up to 10. If so, then your program should return the string true, otherwise it should return the string false. If there aren't any two numbers that add up to 10 in the string, then your program should return false as well.\n\n# For example: if str is \"arrb6???4xxbl5???eee5\" then your program should return true because there are exactly 3 question marks between 6 and 4, and 3 question marks between 5 and 5 at the end of the string.\n# Examples\n# Input: \"aa6?9\"\n# Output: false\n# Input: \"acc?7??sss?3rr1??????5\"\n# Output: true\n\n# import string\n\n# def QuestionsMarks(str):\n# for s in str:\n# if s in string.digits:\n# print(s)\n# return False\n\n\nimport re\n\ndef QuestionsMarks(str):\n n = re.split(\"\\d\", \"a\"+str+\"a\") # make sure it does not start/end with ?\n q = [ques for ques in n if re.match(\"^\\?+$\", ques)]\n result = f\"{sum(len(qq) for qq in q) > 2}\"\n return result.lower()\n\n# keep this function call here\nprint(QuestionsMarks(\"acc?7??sss?3rr1??????5\"))\n# acc?7??sss?3rr1??????5\n","repo_name":"NumanIbnMazid/PracticeAndLearn","sub_path":"Coderbyte/Questions Marks.py","file_name":"Questions Marks.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16585628713","text":"# coding: utf-8\n# 与 task6 代码基本一致,只是由于需要登录后访问,所以 headers 加了登录后获得的 cookie\n\nimport csv, re, time\nimport requests\n\nfrom bs4 import BeautifulSoup\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36',\n 'Host': '127.0.0.1:8000',\n 'Cookie': 'csrftoken=cGIJZeAcwBL75n7KzskxG9seKynFh9JxAvEJwmpIxiCst5SQwjCTLWTRAEJHtEF1; sessionid=bes62kv3zs80w4sxt2y6tz11ouhuu82h',\n}\n\nhost = 'http://127.0.0.1:8000'\npath_list = '/tasks/tutorial/list/'\n\n# 与 task6 代码基本一致,只是由于需要登录后访问,所以 headers 加了登录后获得的 cookie\nreq = requests.get(host + path_list, headers=headers)\n\nsoup = BeautifulSoup(req.text, 'lxml')\n\nli_list = soup.find('div', 'panel panel-default').ul.find_all('li')\n\nfor li in li_list[11:21]: # 这回抓取 10 -20 课的 python 教程\n href = li.a['href']\n title = li.span.get_text(strip=True)\n\n time.sleep(2.1)\n req_item = requests.get(host+href, headers=headers)\n soup_item = BeautifulSoup(req_item.text, 'lxml')\n\n content = str(soup_item.find('div', 'ppx-main-block'))\n print(content)\n\n with open(title + '.html', 'w', encoding='utf-8') as f:\n f.write(content)\n\n print(title, '保存成功')\n","repo_name":"QI-Xiao/solver","sub_path":"task7/task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"8796316685","text":"import io\n\nfrom django.test.testcases import TestCase\nfrom 台湾言语服务.models import 训练过渡格式\n\n\nclass 加资料试验(TestCase):\n 公家内容 = {'来源': 'Dr. Pigu', '种类': '字词', '年代': '2017', }\n\n def test_错误的袂加入去(self):\n with io.StringIO() as 输出:\n 训练过渡格式.加一堆资料([\n 训练过渡格式(文本='媠|sui2', **self.公家内容),\n 训练过渡格式(文本='媠|sui1', **self.公家内容),\n 训练过渡格式(文本='媠|sui2-sui2', **self.公家内容),\n ], 错误输出=输出)\n self.assertEqual(训练过渡格式.objects.count(), 2)\n","repo_name":"i3thuan5/tai5-uan5_gian5-gi2_hok8-bu7","sub_path":"試驗/訓練過渡格式/test加資料單元試驗.py","file_name":"test加資料單元試驗.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"zh","doc_type":"code","stars":41,"dataset":"github-code","pt":"28"} +{"seq_id":"37245100403","text":"import random\nimport deff\nx = int(input(\"Сколько оценок? 
\"))\narr=[]\nflag=True\nfor i in range(x):\n arr.append(random.randint(2,5))\n if arr[i]==2:\n flag=False\ndeff.print_arr(arr)\nif flag==False:\n print(\"Ученик двоечник\")\nfor i in range(x):\n if arr[i]!=5:\n flag=False\nif flag:\n print(\"Ученик отличник\")","repo_name":"buba329/qwerty","sub_path":"flag.py","file_name":"flag.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"14755957881","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef main() :\n x = np.fromfile(\"11--110--resnet_model-res2a_relu-0--float16.fmout.bin\", dtype=np.float16).reshape(1,64,56,56)\n y_dumped = np.fromfile(\"11--128--resnet_model-res2a_branch1-Conv2D-0--float16.fmout.bin\", dtype=np.float16).reshape(1,256,56,56)\n w = np.fromfile(\"11--124--resnet_model-res2a_branch1-kernel_cast-0--float16.var_cast.bin\", dtype=np.float16).reshape(1,1,64,256)\n\n strides = [1, 1, 1, 1]\n padding = 'SAME'\n\n y_true = tf.nn.conv2d(x, w, strides, padding, data_format='NCHW')\n\n result_out = None\n with tf.Session():\n result_out = y_true.eval()\n\n result_out.astype(np.float16).tofile(\"test_result_out_fp16.bin\")\n\n z = y_dumped - result_out\n n = tf.count_nonzero(z).eval()\n print(\"========\")\n if n == 0:\n print(\"PASS: data just generated is the same as reference data\")\n else:\n print(\"FAIL: data just generated is different than reference data, different count is [\" + n + \"]\")\n print(\"========\")\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"houqing/ai","sub_path":"tf/op-example-fw-fp16.py","file_name":"op-example-fw-fp16.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"43453074335","text":"import matplotlib\nimport numpy as np\nimport pandas as pd\nfrom collections import namedtuple\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nEpisodeStats = namedtuple(\"Stats\",[\"episode_lengths\", \"episode_rewards\"])\n\ndef plot_episode_stats(stats, smoothing_window=10, noshow=False):\n # Plot the episode length over time\n fig1 = plt.figure(figsize=(10,5))\n plt.plot(stats.episode_lengths)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Episode Length\")\n plt.title(\"Episode Length over Time\")\n\n # Plot the episode reward over time\n fig2 = plt.figure(figsize=(10,5))\n rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()\n plt.plot(rewards_smoothed)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Episode Reward (Smoothed)\")\n plt.title(\"Episode Reward over Time (Smoothed over window size {})\".format(smoothing_window))\n\n # Plot time steps and episode number\n fig3 = plt.figure(figsize=(10,5))\n plt.plot(np.cumsum(stats.episode_lengths), np.arange(len(stats.episode_lengths)))\n plt.xlabel(\"Time Steps\")\n plt.ylabel(\"Episode\")\n plt.title(\"Episode per time step\")\n plt.show()\n\n return fig1, fig2, fig3\n","repo_name":"ymlasu/para-atm-collection","sub_path":"safety-risk-uq/UAS_ObstacleAvoidance_SafetyBound_RL/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"28"} +{"seq_id":"19451191563","text":"\"\"\"Celery tasks.\"\"\"\n\n# Django\nfrom django.conf import 
settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\n# Django REST Framework\nfrom rest_framework.reverse import reverse\n\n# Models\nfrom ..users.models import User\n\n# Celery\nfrom celery import task\n\n# Utils\nfrom django.utils import timezone\nfrom datetime import timedelta\n\n# JWT\nimport jwt\n\n\ndef gen_verification_token(user):\n    \"\"\"Generate the JWT the user needs to verify their account.\"\"\"\n    exp_date = timezone.now() + timedelta(days=3)\n    payload = {\n        'user': user.username,\n        'exp': exp_date,\n        'type': 'email_confirmation'\n    }\n    token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n    return token.decode()\n\n\n@task(name='send_confirmation_email', max_retries=3)\ndef send_confirmation_email(user_pk, host):\n    user = User.objects.get(pk=user_pk)\n    token = gen_verification_token(user)\n    subject = 'Verify your email at Personal CRM (PRM)'\n    from_email = 'Personal CRM '\n    html_content = render_to_string('emails/users/register_confirmation.html', {\n        'user': user,\n        'token': token,\n        'type': 'email_confirmation',\n        'verification_link': f\"{host}{reverse('users:users-verify')}\"\n    })\n    text_content = strip_tags(html_content)\n\n    message = EmailMultiAlternatives(\n        subject, text_content, from_email, [user.email])\n    message.attach_alternative(html_content, 'text/html')\n    message.send()\n","repo_name":"rem1niscence/personal_crm","sub_path":"prm/taskapp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}\n+{"seq_id":"74174201034","text":"from .views import HouseViewSet, HouseBuildingViewSet,\\\n    HouseEntancesViewSet, FloorViewSet,\\\n    RiserViewSet\n\nfrom rest_framework.routers import DefaultRouter\n\nfrom django.urls import path, include\n\n\n\n\nrouter = DefaultRouter()\nrouter.register(r'houses', HouseViewSet, basename='house')\nrouter.register(r'houses_building', HouseBuildingViewSet, basename='houses_building')\nrouter.register(r'house_entrance', HouseEntancesViewSet, basename='house_entrance')\nrouter.register(r'house_floor', FloorViewSet, basename='house_floor')\nrouter.register(r'house_riser', RiserViewSet, basename='house_riser')\n\nurlpatterns = [\n    path('', include(router.urls)),\n]\n","repo_name":"thelonggoodbuy/swipe_api","sub_path":"src/houses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}\n+{"seq_id":"70157038474","text":"from __future__ import annotations\n\nimport math\nfrom abc import ABC, abstractmethod\nfrom typing import List, Dict, Optional, Tuple\n\nimport numpy as np\n\nimport src.lib_pu as pu\n\nfrom prettytable import PrettyTable\nfrom functools import partial\n\nimport src.lib_pu.games as pu_games\n\n\nclass Game(ABC):\n    outcomes: List[pu.Outcome]\n    messages: List[pu.Message]\n\n    outcome_dist: Dict[pu.Outcome, float]\n    message_dist: Dict[pu.Message, float]\n\n    strategy_util: pu.StrategyUtil\n    action: Dict[pu.Agent, pu.Action]\n    host_reverse: pu.ContAction\n    \n    loss = {}\n    loss_names = {}\n    entropy_cont = None\n    matrix = {}\n\n    def __init__(self, loss_names: Dict[pu.Agent, str], matrix: Optional[Dict[pu.Agent, np.ndarray]] = None, random_outcome_dist: bool = False):\n        outcomes, messages = self.create_structure(len(self.default_outcome_dist()), self.message_structure())\n\n        if 
random_outcome_dist:\n            outcome_dist = dict(zip(outcomes, self.sample_categorical_distribution(len(outcomes))))\n        else:\n            outcome_dist = dict(zip(outcomes, self.default_outcome_dist()))\n\n        self.strategy_util = pu.StrategyUtil(self)\n\n        self.outcomes = outcomes\n        self.messages = messages\n        self.outcome_dist = outcome_dist\n        self.loss_names = loss_names\n        \n        if loss_names[pu.CONT].startswith(pu.MATRIX):\n            self.matrix = matrix\n            self.loss = {agent: partial(pu.LOSS_FNS[pu.MATRIX], self.matrix[agent]) for agent in pu.AGENTS}\n            self.entropy_cont = partial(pu.ENTROPY_FNS[pu.MATRIX], self.matrix[pu.CONT])\n        else:\n            self.loss = {agent: pu.LOSS_FNS[loss_names[agent]] for agent in pu.AGENTS}\n            self.entropy_cont = pu.ENTROPY_FNS[loss_names[pu.CONT]]\n        \n        self.action = {agent: None for agent in pu.AGENTS}\n\n    def set_action(self, agent: pu.Agent, value: np.ndarray):\n        try:\n            self.action[agent] = pu.Action.get_class(agent).from_array(value, self.outcomes, self.messages)\n        except IndexError:\n            raise pu.InvalidStrategyError(value, self.get_action_shape(agent))\n\n        if agent == pu.HOST:\n            self.message_dist = self.strategy_util.get_message_dist()\n            self.host_reverse = self.strategy_util.get_host_reverse()\n\n    def step(self, actions: Dict[str, np.ndarray]) -> Dict[pu.Agent, float]:\n        for agent in pu.AGENTS:\n            self.set_action(agent, actions[agent])\n\n        return {agent: self.get_expected_loss(agent) for agent in pu.AGENTS}\n\n    def get_expected_loss(self, agent: pu.Agent) -> float:\n        loss: float = 0\n        for x in self.outcomes:\n            for y in self.messages:\n                l = self.get_loss(agent, x, y)\n                P_y_x = self.action[pu.HOST][x, y]\n                p_x = self.outcome_dist[x]\n                _l = p_x * P_y_x * l\n                if not math.isnan(_l):\n                    loss += _l\n\n        return np.sign(loss) * pu.CLIPPED_INFINITY_LOSS if math.isinf(loss) else loss\n\n    def get_loss(self, agent: pu.Agent, x: pu.Outcome, y: pu.Message):\n        return self.loss[agent](self.action[pu.CONT], self.outcomes, x, y)\n\n    \"\"\"Returns entropy for the contestant. 
In theory we can also calculate this for the host, but it would have no real meaning\"\"\"\n def get_expected_entropy(self) -> Optional[float]:\n ent: float = 0\n for y in self.messages:\n e = self.message_dist[y] * self.get_entropy(y)\n if not math.isnan(e):\n ent += e\n \n return ent\n\n def get_entropy(self, y: pu.Message):\n return self.entropy_cont(self.loss[pu.CONT], self.host_reverse, self.outcomes, y)\n\n def get_action_shape(self, agent: pu.Agent) -> List[int]:\n shape = []\n if agent == pu.CONT:\n for y in self.messages:\n if len(y.outcomes) > 1:\n shape.append(len(y.outcomes))\n elif agent == pu.HOST:\n for x in self.outcomes:\n if len(x.messages) > 1:\n shape.append(len(x.messages))\n return shape\n\n def is_graph_game(self) -> bool:\n return all(len(y.outcomes) <= 2 for y in self.messages)\n\n def is_matroid_game(self) -> bool:\n # each outcome x must occur in some message y\n if not all(len(x.messages) > 0 for x in self.outcomes):\n return False\n\n # basis exchange property\n for y1 in self.messages:\n for y2 in self.messages:\n if y1 == y2:\n continue\n for x1 in y1.outcomes:\n if x1 in y2.outcomes:\n continue\n true_for_some = False\n for x2 in y2.outcomes:\n if x2 in y1.outcomes:\n continue\n outcome_list = y1.outcomes.copy()\n outcome_list.remove(x1)\n outcome_list.append(x2)\n for y in self.messages:\n if set(outcome_list) == set(y.outcomes):\n true_for_some = True\n if not true_for_some:\n return False\n\n return True\n \n \"\"\"Determines the lambda_x vector, given the loss and strategy for the contestant\"\"\"\n def get_lambda_x_vector(self) -> Dict[pu.Outcome, float]:\n vec = {}\n for x in self.outcomes:\n vec[x] = max(self.get_loss(pu.CONT, x, y) for y in x.messages)\n return vec\n \n \"\"\"Checks whether the lambda_x vector determined from contestant's loss and strategy is a KT (Kuhn Tucker) vector\"\"\"\n def is_lambda_x_vector_kt(self, vec: Dict[pu.Outcome, float]) -> bool:\n ent_dict = {y: self.get_entropy(y) for y in self.messages}\n for y in self.messages:\n _sum = sum(self.host_reverse[x, y] * vec[x] for x in y.outcomes)\n ent = self.get_entropy(y)\n if math.isclose(self.message_dist[y], 0, rel_tol=1e-2) and (ent < _sum or math.isclose(ent, _sum, rel_tol=1e-2)):\n continue\n if self.message_dist[y] > 0 and math.isclose(ent, _sum, rel_tol=1e-2):\n continue\n else:\n return False\n return True\n \n \"\"\"Checks whether the contestant (Q) plays an equalizer strategy, i.e. the expected loss of Q does not depend on P\"\"\"\n def cont_is_equalizer_strategy(self):\n for x in self.outcomes:\n losses = [self.get_loss(pu.CONT, x, y) for y in x.messages]\n \n for loss in losses:\n if not math.isclose(loss, losses[0], rel_tol=1e-2):\n return False\n return True\n \n \"\"\"Checks, for both agents, whether they play a worst-case optimal strategy, according to the lambda_x vector. 
If the vector is a KT-vector, they play worst-case optimally.\"\"\"\n def is_worst_case_optimal(self) -> bool:\n return self.is_lambda_x_vector_kt(self.get_lambda_x_vector())\n \n def get_outcome_diffs(self) -> Dict[Tuple[pu.Message, pu.Message], pu.Outcome]:\n diffs = {}\n for y1 in range(len(self.messages)):\n for y2 in range(y1 + 1, len(self.messages)):\n # Determine the difference between message y1 and y2\n diff1 = list(set(self.messages[y1].outcomes) - set(self.messages[y2].outcomes))\n diff2 = list(set(self.messages[y2].outcomes) - set(self.messages[y1].outcomes))\n \n # If y1 and y2 differ by the exchange of one outcome:\n if len(diff1) == 1 and len(diff2) == 1 and diff1[0] != diff2[0]:\n diffs[(self.messages[y1], self.messages[y2])] = diff1[0]\n diffs[(self.messages[y2], self.messages[y1])] = diff2[0]\n return diffs\n \n \"\"\"Check whether a matrix is symmetric w.r.t. exchanges in message set Y. The loss function does not need to be fully symmetric in order for the theorems of (van Ommen et. al. 2015) to hold.\"\"\"\n def is_matrix_symmetric_with_respect_to_exchanges(self, agent) -> bool:\n m = self.matrix[agent]\n for y1 in range(len(self.messages)):\n for y2 in range(y1 + 1, len(self.messages)):\n # Determine the difference between message y1 and y2\n diff1 = list(set(self.messages[y1].outcomes) - set(self.messages[y2].outcomes))\n diff2 = list(set(self.messages[y2].outcomes) - set(self.messages[y1].outcomes))\n \n # If y1 and y2 differ by the exchange of one outcome:\n if len(diff1) == 1 and len(diff2) == 1 and diff1[0] != diff2[0]:\n x1 = diff1[0].id\n x2 = diff2[0].id\n if m[x1, x1] != m[x2, x2]:\n return False\n if m[x1, x2] != m[x2, x1]:\n return False\n for x_prime in range(len(self.outcomes)):\n if x_prime == x1 or x_prime == x2:\n continue\n if m[x_prime, x1] != m[x_prime, x2]:\n return False\n if m[x1, x_prime] != m[x2, x_prime]:\n return False\n return True\n \n def is_matrix_fully_symmetric(self, agent) -> bool:\n m = self.matrix[agent]\n for x1 in range(len(self.outcomes)):\n for x2 in range(x1 + 1, len(self.outcomes)):\n if m[x1, x1] != m[x2, x2]:\n return False\n if m[x1, x2] != m[x2, x1]:\n return False\n for x_prime in range(len(self.outcomes)):\n if x_prime == x1 or x_prime == x2:\n continue\n if m[x_prime, x1] != m[x_prime, x2]:\n return False\n if m[x1, x_prime] != m[x2, x_prime]:\n return False\n return True\n\n def get_filtered_action(self, agent: pu.Agent):\n action = {}\n if agent == pu.CONT:\n for y in self.messages:\n if len(y.outcomes) < 2:\n continue\n if y not in action:\n action[y] = {}\n for x in y.outcomes:\n action[y][x] = self.action[agent][x, y]\n elif agent == pu.HOST:\n for x in self.outcomes:\n if len(x.messages) < 2:\n continue\n if x not in action:\n action[x] = {}\n for y in x.messages:\n action[x][y] = self.action[agent][x, y]\n return action\n\n @staticmethod\n def create_structure(outcome_count: int, messages: List[List[int]]) -> (List[pu.Outcome], List[pu.Message]):\n # create messages\n new_messages: List[pu.Message] = []\n for y in range(len(messages)):\n new_messages.append(pu.Message(y, []))\n\n # create outcomes\n new_outcomes: List[pu.Outcome] = []\n for x in range(outcome_count):\n new_outcomes.append(pu.Outcome(x, []))\n\n # fill messages\n for y in range(len(messages)):\n for x in messages[y]:\n new_messages[y].outcomes.append(new_outcomes[x])\n\n # infer outcome structure from outcome count and message structure\n outcomes: List[List[int]] = []\n for x in range(outcome_count):\n ys: List[int] = []\n for y in 
range(len(messages)):\n if x in messages[y]:\n ys.append(y)\n outcomes.append(ys)\n\n # fill outcomes\n for x in range(len(outcomes)):\n for y in outcomes[x]:\n new_outcomes[x].messages.append(new_messages[y])\n\n return new_outcomes, new_messages\n\n @staticmethod\n def sample_categorical_distribution(outcome_count: int) -> List[float]:\n \"\"\"Samples a categorical/discrete distribution, uniform randomly.\"\"\"\n return np.random.dirichlet([1] * outcome_count).tolist()\n\n @staticmethod\n @abstractmethod\n def name() -> str:\n pass\n \n @classmethod\n def pretty_name(cls) -> str:\n return pu_games.GAME_PRETTY_NAMES[cls.name()]\n\n @staticmethod\n @abstractmethod\n def default_outcome_dist() -> List[float]:\n pass\n\n @staticmethod\n @abstractmethod\n def message_structure() -> List[List[int]]:\n pass\n \n @classmethod\n def get_outcome_count(cls) -> int:\n return len(cls.default_outcome_dist())\n\n @classmethod\n def get_message_count(cls) -> int:\n return len(cls.message_structure())\n\n @staticmethod\n @abstractmethod\n def cont_default() -> np.ndarray:\n pass\n\n @staticmethod\n @abstractmethod\n def host_default() -> np.ndarray:\n pass\n\n @staticmethod\n def matprint(mat, fmt=\"g\"):\n pass\n \n def __str__(self):\n table = PrettyTable(['Game', self.name()], align=\"l\")\n\n table.add_row(['Graph game?', self.is_graph_game()])\n table.add_row(['Matroid game?', self.is_matroid_game()])\n table.add_row(['', ''])\n\n for x in self.outcomes:\n table.add_row([x, \"p=\" + '{:.3f}'.format(self.outcome_dist[x]) + \" | \" + str([str(y) for y in x.messages]).translate({39: None}).strip('[]')])\n\n table.add_row(['', ''])\n for y in self.messages:\n table.add_row([y, \"p=\" + '{:.3f}'.format(self.message_dist[y]) + \" | \" + str([str(x) for x in y.outcomes]).translate({39: None}).strip('[]')])\n \n table.add_row(['', ''])\n table.add_row(['Message differences', ''])\n diffs = self.get_outcome_diffs()\n for y1, y2 in diffs:\n table.add_row([str((y1, y2)), diffs[(y1, y2)]])\n\n table.add_row(['', ''])\n table.add_row(['Cont loss', self.loss_names[pu.CONT]])\n if self.loss_names[pu.CONT].startswith(pu.MATRIX):\n col_maxes = max([max([len(\"{:g}\".format(x)) for x in col]) for col in self.matrix[pu.CONT].T], [max([len(\"{:g}\".format(x)) for x in col]) for col in self.matrix[pu.HOST].T])\n for x in self.matrix[pu.CONT]:\n _row = ''\n for i, y in enumerate(x):\n _row += (\"{:\"+str(col_maxes[i])+\"g} \").format(y)\n table.add_row(['', _row])\n table.add_row(['Symmetric w.r.t. exchanges?', self.is_matrix_symmetric_with_respect_to_exchanges(pu.CONT)])\n \n table.add_row(['Host loss', self.loss_names[pu.HOST]])\n if self.loss_names[pu.HOST].startswith(pu.MATRIX):\n col_maxes = max([max([len(\"{:g}\".format(x)) for x in col]) for col in self.matrix[pu.CONT].T], [max([len(\"{:g}\".format(x)) for x in col]) for col in self.matrix[pu.HOST].T])\n for x in self.matrix[pu.HOST]:\n _row = ''\n for i, y in enumerate(x):\n _row += (\"{:\"+str(col_maxes[i])+\"g} \").format(y)\n table.add_row(['', _row])\n table.add_row(['Symmetric w.r.t. 
exchanges?', self.is_matrix_symmetric_with_respect_to_exchanges(pu.HOST)])\n\n        table.add_row(['', ''])\n        table.add_row(['Cont action', ''])\n        table.add_row(['Action space', self.get_action_shape(pu.CONT)])\n        try:\n            for y in self.messages:\n                for x in self.outcomes:\n                    if x == self.outcomes[0]:\n                        table.add_row([y, f\"{x}: {self.action[pu.CONT][x, y]}\"])\n                    else:\n                        table.add_row(['', f\"{x}: {self.action[pu.CONT][x, y]}\"])\n\n            table.add_row(['Cont expected loss', self.get_expected_loss(pu.CONT)])\n            table.add_row(['Expected entropy', self.get_expected_entropy()])\n        except Exception:\n            table.add_row(['Cont action', 'ERROR'])\n        \n        table.add_row(['', ''])\n        table.add_row(['Host reverse action', ''])\n\n        for y in self.messages:\n            for x in self.outcomes:\n                if x == self.outcomes[0]:\n                    table.add_row([y, f\"{x}: {self.host_reverse[x, y]}\"])\n                else:\n                    table.add_row(['', f\"{x}: {self.host_reverse[x, y]}\"])\n\n        table.add_row(['', ''])\n        table.add_row(['Host action', ''])\n        table.add_row(['Action space', self.get_action_shape(pu.HOST)])\n        \n        for x in self.outcomes:\n            for y in self.messages:\n                if y == self.messages[0]:\n                    table.add_row([x, f\"{y}: {self.action[pu.HOST][x, y]}\"])\n                else:\n                    table.add_row(['', f\"{y}: {self.action[pu.HOST][x, y]}\"])\n\n        table.add_row(['Host expected loss', self.get_expected_loss(pu.HOST)])\n        \n        table.add_row(['', ''])\n        table.add_row(['RCAR dist', \"{:.3f}\".format(self.strategy_util.rcar_dist())])\n        table.add_row(['Is P RCAR?', self.strategy_util.is_rcar()])\n        \n        table.add_row(['', ''])\n        table.add_row(['Lambda_x vector', self.get_lambda_x_vector()])\n        table.add_row(['Cont equalizer?', self.cont_is_equalizer_strategy()])\n        table.add_row(['Worst-case optimal?', self.is_worst_case_optimal()])\n        \n        return str(table)\n","repo_name":"caldaibis/probability-updating-games-marl","sub_path":"src/lib_pu/games/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":17367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}\n+{"seq_id":"74847221514","text":"from apscheduler.schedulers.blocking import BlockingScheduler\nfrom common import output, raw_data_gen, raw_data_download\nfrom policy import happy, pre_enter_point, max_min\nfrom pytz import utc\nfrom tzlocal import get_localzone\nimport os\nimport datetime\n\nscheduler = BlockingScheduler(timezone=\"Asia/Taipei\")\n\nprint(datetime.datetime.now())\n\n\ntz = get_localzone()\nprint(tz)\n\n\ndef job():\n    print(datetime.datetime.now())\n    output.remove()\n    raw_data_download.download()\n    raw_data_gen.go()\n    max_min.go()\n    happy.go()\n    # update to git\n    log = \"update \" + datetime.datetime.now().strftime(\"%Y%m%d\")\n    os.system('git pull')\n    os.system('git status')\n    os.system('git add .')\n    os.system('git commit -m \"' + log + '\"')\n    os.system('git push')\n\n\n# scheduler.add_job(job, 'interval', seconds=10)\n\n# scheduler.add_job(job, 'cron', day_of_week='mon-fri', hour=17, minute=30)\nscheduler.add_job(job, 'cron', day_of_week='mon-fri', hour=21, minute=42)\n\nprint('****** start scheduler ******')\n\n\nwhile True:\n    scheduler.start()\n","repo_name":"ngxial03/Future","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}\n+{"seq_id":"26250615712","text":"print('welcome to madlibs by shadowninja826')\r\nname1 = input(\"Enter a name: \")\r\nname2 = input(\"Enter another name: \")\r\n#noun1 = input(\"Enter a noun: \")\r\nplace1 = 
input(\"Enter a place: \")\r\nadjective1 = input(\"Enter a adjective: \")\r\nadjective2 = input(\"Enter another adjective: \")\r\nanimal1 = input(\"Enter an animal: \")\r\nfood1 = input(\"Enter a food: \")\r\nverb1 = input(\"Enter a verb: \")\r\nnumber1 = input(\"Enter a number: \")\r\nnumber2 = input(\"Enter another number: \")\r\n\r\n\r\nprint(\"Once upon a time there was a man named \"+name1+\" that lived on a \"+place1+\".\")\r\nprint(\"He raises a lot of animals but his favorite one was his \"+adjective1+ chr(32)+ adjective2 +chr(32)+ animal1 +\" named \" +name2+\".\")\r\nprint(\"Every morning he would feed his \"+adjective1 +chr(32)+animal1 +chr(32)+food1+\" that was left over from the previous night.\")\r\nprint(\"Sometimes when \"+name1+\" comes out of the house \"+name1+\" would see \"+adjective1 +chr(32)+adjective2+chr(32)+ name2+\" walking around the barn and pecking for \"+food1+\".\")\r\nprint(number1+\" years later \"+name2+\" laid eggs and had \"+number2+\" babies. They all lived at the \"+place1+\" together and ate \"+food1+\".\")\r\n","repo_name":"shadowninja826/madLib","sub_path":"animalLib_1.py","file_name":"animalLib_1.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"41037026707","text":"from qunetsim.backends.rw_lock import RWLock\nfrom qunetsim.utils.constants import Constants\nimport queue\n\n\nclass ClassicalStorage(object):\n \"\"\"\n A classical storage for messages.\n \"\"\"\n\n GET_NEXT = 1\n GET_ALL = 2\n GET_WITH_SEQ_NUM = 3\n GET_ALL_MSGS_ANY_HOST = 4\n GET_WITH_SEQ_NUM_ANY_HOST = 5\n\n def __init__(self):\n self._host_to_msg_dict = {}\n self._host_to_read_index = {}\n self.last_msg_added_to_host = None\n\n # read write lock, for threaded access\n self._lock = RWLock()\n\n # for tracking pending requests\n # dictionary tracks the request made by a pending request.\n self._pending_request_dict = {}\n # Determines a unique ID for a pending request.\n self._request_id = 0\n # Amount of pending requests\n self._amount_pending_requests = 0\n\n def _check_all_requests(self):\n \"\"\"\n Checks if any of the pending requests is now fulfilled.\n\n Returns:\n If a request is fulfilled, the request is handled and the function\n returns the message of this request.\n \"\"\"\n for req_id, args in self._pending_request_dict.items():\n ret = None\n if args[2] == ClassicalStorage.GET_NEXT:\n ret = self._get_next_from_sender(args[1])\n elif args[2] == ClassicalStorage.GET_ALL:\n ret = self._get_all_from_sender(args[1])\n elif args[2] == ClassicalStorage.GET_WITH_SEQ_NUM:\n ret = self._get_with_seq_num_from_sender(args[1], args[3])\n elif args[2] == ClassicalStorage.GET_ALL_MSGS_ANY_HOST:\n ret = self._get_all_from_sender(self.last_msg_added_to_host) \\\n if self.last_msg_added_to_host is not None else None\n elif args[2] == ClassicalStorage.GET_WITH_SEQ_NUM_ANY_HOST:\n ret = self._get_with_seq_num_from_sender(self.last_msg_added_to_host, args[3]) \\\n if self.last_msg_added_to_host is not None else None\n else:\n raise ValueError(\"Internal Error, this request does not exist!\")\n\n if ret is not None:\n args[0].put(ret)\n self._remove_request(req_id)\n return ret\n\n def _add_request(self, args):\n \"\"\"\n Adds a new request to the classical storage. 
{"seq_id":"41037026707","text":"from qunetsim.backends.rw_lock import RWLock\nfrom qunetsim.utils.constants import Constants\nimport queue\n\n\nclass ClassicalStorage(object):\n    \"\"\"\n    A classical storage for messages.\n    \"\"\"\n\n    GET_NEXT = 1\n    GET_ALL = 2\n    GET_WITH_SEQ_NUM = 3\n    GET_ALL_MSGS_ANY_HOST = 4\n    GET_WITH_SEQ_NUM_ANY_HOST = 5\n\n    def __init__(self):\n        self._host_to_msg_dict = {}\n        self._host_to_read_index = {}\n        self.last_msg_added_to_host = None\n\n        # read write lock, for threaded access\n        self._lock = RWLock()\n\n        # for tracking pending requests\n        # dictionary tracks the request made by a pending request.\n        self._pending_request_dict = {}\n        # Determines a unique ID for a pending request.\n        self._request_id = 0\n        # Amount of pending requests\n        self._amount_pending_requests = 0\n\n    def _check_all_requests(self):\n        \"\"\"\n        Checks if any of the pending requests is now fulfilled.\n\n        Returns:\n            If a request is fulfilled, the request is handled and the function\n            returns the message of this request.\n        \"\"\"\n        for req_id, args in self._pending_request_dict.items():\n            ret = None\n            if args[2] == ClassicalStorage.GET_NEXT:\n                ret = self._get_next_from_sender(args[1])\n            elif args[2] == ClassicalStorage.GET_ALL:\n                ret = self._get_all_from_sender(args[1])\n            elif args[2] == ClassicalStorage.GET_WITH_SEQ_NUM:\n                ret = self._get_with_seq_num_from_sender(args[1], args[3])\n            elif args[2] == ClassicalStorage.GET_ALL_MSGS_ANY_HOST:\n                ret = self._get_all_from_sender(self.last_msg_added_to_host) \\\n                    if self.last_msg_added_to_host is not None else None\n            elif args[2] == ClassicalStorage.GET_WITH_SEQ_NUM_ANY_HOST:\n                ret = self._get_with_seq_num_from_sender(self.last_msg_added_to_host, args[3]) \\\n                    if self.last_msg_added_to_host is not None else None\n            else:\n                raise ValueError(\"Internal Error, this request does not exist!\")\n\n            if ret is not None:\n                args[0].put(ret)\n                self._remove_request(req_id)\n                return ret\n\n    def _add_request(self, args):\n        \"\"\"\n        Adds a new request to the classical storage. When a new message arrives, it\n        is checked whether the pending request can now be satisfied.\n\n        Args:\n            args (list): [Queue, from_host_id, type, ...]\n        Returns:\n            (int): ID of the request\n        \"\"\"\n        self._pending_request_dict[self._request_id] = args\n        self._request_id += 1\n        self._amount_pending_requests += 1\n        # return the ID that was actually used as the dict key,\n        # not the already-incremented counter\n        return self._request_id - 1\n\n    def _remove_request(self, req_id):\n        \"\"\"\n        Removes a pending request from the request dict.\n\n        Args:\n            req_id (int): The id of the request to remove.\n        \"\"\"\n        if req_id in self._pending_request_dict:\n            del self._pending_request_dict[req_id]\n            self._amount_pending_requests -= 1\n\n    def empty(self):\n        \"\"\"\n        Empty the classical storage.\n        \"\"\"\n        self._lock.acquire_write()\n        self._host_to_msg_dict = {}\n        self._host_to_read_index = {}\n        self.last_msg_added_to_host = None\n        self._lock.release_write()\n\n    def _add_new_host_id(self, host_id):\n        \"\"\"\n        Add a new host to the storage.\n\n        Args:\n            host_id (str): The host ID to store.\n        \"\"\"\n        self._host_to_msg_dict[host_id] = []\n        self._host_to_read_index[host_id] = 0\n\n    def remove_all_ack(self, from_sender=None):\n        \"\"\"\n        Removes all ACK messages stored. If from_sender is given, only ACKs from\n        this sender are removed.\n\n        Args:\n            from_sender (str): Host id of the sender, whose ACKs should be deleted.\n        \"\"\"\n\n        self._lock.acquire_write()\n\n        def delete_all_ack_for_sender(sender_id):\n            # rebuild the list instead of deleting while enumerating,\n            # which would skip the element right after every deletion\n            self._host_to_msg_dict[sender_id] = [\n                msg for msg in self._host_to_msg_dict[sender_id]\n                if msg.content != Constants.ACK\n            ]\n\n        if from_sender is None:\n            for sender in list(self._host_to_msg_dict):\n                delete_all_ack_for_sender(sender)\n        elif from_sender in self._host_to_msg_dict:\n            delete_all_ack_for_sender(from_sender)\n        self._lock.release_write()\n\n    # TODO: refactor to \"add_msg\"\n    def add_msg_to_storage(self, message):\n        \"\"\"\n        Adds a message to the storage.\n        \"\"\"\n        sender_id = message.sender\n        self._lock.acquire_write()\n        if sender_id not in list(self._host_to_msg_dict):\n            self._add_new_host_id(sender_id)\n        self._host_to_msg_dict[sender_id].append(message)\n        self.last_msg_added_to_host = sender_id\n        self._check_all_requests()\n        self._lock.release_write()\n\n    def get_all_from_sender(self, sender_id, wait=0):\n        \"\"\"\n        Get all stored messages from a sender.\n\n        Args:\n            sender_id (str): The host id of the host.\n            wait (int): Default is 0. The maximum blocking time. -1 to block forever.\n\n        Returns:\n            List of messages of the sender. If there are none, an empty list is\n            returned.\n        \"\"\"\n        # Block forever if wait is -1\n        if wait == -1:\n            wait = None\n\n        self._lock.acquire_write()\n        msg = self._get_all_from_sender(sender_id)\n        if msg is not None or wait == 0:\n            self._lock.release_write()\n            return msg if msg is not None else []\n\n        q = queue.Queue()\n        request = [q, sender_id, ClassicalStorage.GET_ALL]\n        req_id = self._add_request(request)\n        self._lock.release_write()\n\n        try:\n            msg = q.get(timeout=wait)\n        except queue.Empty:\n            pass\n\n        if msg is None:\n            self._lock.acquire_write()\n            self._remove_request(req_id)\n            self._lock.release_write()\n            return []\n        return msg\n\n    def _get_all_from_sender(self, sender_id):\n        if sender_id in list(self._host_to_msg_dict):\n            return self._host_to_msg_dict[sender_id]\n        return None\n\n    def get_next_from_sender(self, sender_id, wait=0):\n        \"\"\"\n        Gets the next, unread, message from the sender. If there is no message\n        yet, wait up to the given waiting time for a message to arrive. 
If\n        there is still no message, then None is returned.\n\n        Args:\n            sender_id (str): The sender id of the message to get.\n            wait (int): Default is 0. The maximum blocking time. -1 to block forever.\n        Returns:\n            Message object, if such a message exists, or none.\n        \"\"\"\n        # Block forever if wait is -1\n        if wait == -1:\n            wait = None\n\n        self._lock.acquire_write()\n        next_msg = self._get_next_from_sender(sender_id)\n        if next_msg is not None or wait == 0:\n            self._lock.release_write()\n            return next_msg\n\n        q = queue.Queue()\n        request = [q, sender_id, ClassicalStorage.GET_NEXT]\n        req_id = self._add_request(request)\n        self._lock.release_write()\n\n        try:\n            next_msg = q.get(timeout=wait)\n        except queue.Empty:\n            pass\n\n        if next_msg is None:\n            self._lock.acquire_write()\n            self._remove_request(req_id)\n            self._lock.release_write()\n        return next_msg\n\n    def _get_next_from_sender(self, sender_id):\n        if sender_id not in list(self._host_to_msg_dict):\n            return None\n        if len(self._host_to_msg_dict[sender_id]) <= self._host_to_read_index[sender_id]:\n            return None\n        msg = self._host_to_msg_dict[sender_id][self._host_to_read_index[sender_id]]\n        self._host_to_read_index[sender_id] += 1\n        return msg\n\n    def get_with_seq_num_from_sender(self, sender_id, seq_num, wait=0):\n        \"\"\"\n        Gets the message with sequence number seq_num from the sender. If it has\n        not arrived yet, wait up to the given waiting time for it to arrive. If\n        there is still no message, then None is returned.\n\n        Args:\n            sender_id (str): The sender id of the message to get.\n            seq_num (int): The sequence number of the message to get.\n            wait (int): Default is 0. The maximum blocking time. -1 to block forever.\n        Returns:\n            Message object, if such a message exists, or none.\n        \"\"\"\n        # Block forever if wait is -1\n        if wait == -1:\n            wait = None\n\n        self._lock.acquire_write()\n        next_msg = self._get_with_seq_num_from_sender(sender_id, seq_num)\n        if next_msg is not None or wait == 0:\n            self._lock.release_write()\n            return next_msg\n\n        q = queue.Queue()\n        # the request type must be GET_WITH_SEQ_NUM, not GET_NEXT, otherwise\n        # _check_all_requests would ignore the sequence number\n        request = [q, sender_id, ClassicalStorage.GET_WITH_SEQ_NUM, seq_num]\n        req_id = self._add_request(request)\n        self._lock.release_write()\n\n        try:\n            next_msg = q.get(timeout=wait)\n        except queue.Empty:\n            pass\n\n        if next_msg is None:\n            self._lock.acquire_write()\n            self._remove_request(req_id)\n            self._lock.release_write()\n        return next_msg\n\n    def _get_with_seq_num_from_sender(self, sender_id, seq_num):\n        if sender_id not in list(self._host_to_msg_dict):\n            return None\n        if len(self._host_to_msg_dict[sender_id]) <= seq_num:\n            return None\n        msg = self._host_to_msg_dict[sender_id][seq_num]\n        return msg\n\n    def get_all_from_any_sender(self, wait=0):\n        \"\"\"\n        Get all stored messages from the sender of the most recently added message.\n\n        Args:\n            wait (int): Default is 0. The maximum blocking time. -1 to block forever.\n\n        Returns:\n            List of messages of the sender. 
If there are none, an empty list is\n            returned.\n        \"\"\"\n\n        # Block forever if wait is -1\n        if wait == -1:\n            wait = None\n\n        self._lock.acquire_write()\n        msg = None\n        if self.last_msg_added_to_host is not None:\n            # use the internal variant: the write lock is already held here\n            msg = self._get_all_from_sender(self.last_msg_added_to_host)\n\n        if msg is not None or wait == 0:\n            self._lock.release_write()\n            return msg if msg is not None else []\n\n        q = queue.Queue()\n        request = [q, None, ClassicalStorage.GET_ALL_MSGS_ANY_HOST]\n        req_id = self._add_request(request)\n        self._lock.release_write()\n\n        try:\n            msg = q.get(timeout=wait)\n        except queue.Empty:\n            pass\n\n        if msg is None:\n            self._lock.acquire_write()\n            self._remove_request(req_id)\n            self._lock.release_write()\n            return []\n        return msg\n\n    def get_with_seq_num_from_any_sender(self, seq_num, wait=0):\n        '''\n        Gets the message with sequence number seq_num from the sender of the\n        most recently added message.\n\n        Returns:\n            Message object, if such a message exists, or none.\n        '''\n        # Block forever if wait is -1\n        if wait == -1:\n            wait = None\n\n        self._lock.acquire_write()\n        next_msg = None\n        if self.last_msg_added_to_host is not None:\n            # use the internal variant: the write lock is already held here\n            next_msg = self._get_with_seq_num_from_sender(self.last_msg_added_to_host, seq_num)\n\n        if next_msg is not None or wait == 0:\n            self._lock.release_write()\n            return next_msg\n\n        q = queue.Queue()\n        request = [q, None, ClassicalStorage.GET_WITH_SEQ_NUM_ANY_HOST, seq_num]\n        req_id = self._add_request(request)\n        self._lock.release_write()\n\n        try:\n            next_msg = q.get(timeout=wait)\n        except queue.Empty:\n            pass\n\n        if next_msg is None:\n            self._lock.acquire_write()\n            self._remove_request(req_id)\n            self._lock.release_write()\n        return next_msg\n\n    def get_all(self):\n        \"\"\"\n        Get all Messages as a list.\n\n        Returns:\n            (list) messages: All Messages as a list.\n        \"\"\"\n        self._lock.acquire_write()\n        ret = []\n        for host_id in list(self._host_to_msg_dict):\n            ret += self._host_to_msg_dict[host_id]\n        self._lock.release_write()\n        return ret\n","repo_name":"tqsd/QuNetSim","sub_path":"qunetsim/objects/storage/classical_storage.py","file_name":"classical_storage.py","file_ext":"py","file_size_in_byte":12341,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"28"}
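The get-or-wait machinery in the record above — a reader that finds nothing parks a queue.Queue in a pending-request dict, and the writer fulfils it when a matching message arrives — is a reusable pattern. A minimal self-contained sketch of the same idea; the class and method names are illustrative and not part of QuNetSim:

import queue
import threading


class BlockingStore:
    """Toy message store: readers may block until a sender's message arrives."""

    def __init__(self):
        self._messages = {}   # sender_id -> list of messages
        self._pending = []    # (sender_id, queue.Queue) for blocked readers
        self._lock = threading.Lock()

    def add(self, sender_id, message):
        with self._lock:
            self._messages.setdefault(sender_id, []).append(message)
            still_waiting = []
            for sid, q in self._pending:
                # fulfil every reader waiting on this sender
                if sid == sender_id:
                    q.put(message)
                else:
                    still_waiting.append((sid, q))
            self._pending = still_waiting

    def get_last(self, sender_id, wait=0):
        """Return the sender's latest message, blocking up to `wait` seconds (-1: forever)."""
        with self._lock:
            msgs = self._messages.get(sender_id)
            if msgs:
                return msgs[-1]
            if wait == 0:
                return None
            q = queue.Queue()
            self._pending.append((sender_id, q))
        try:
            # the lock is released while blocking, exactly as in the record above
            return q.get(timeout=None if wait == -1 else wait)
        except queue.Empty:
            return None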
{"seq_id":"22440981220","text":"import sys \n\nN, K = map(int,sys.stdin.readline().split())\n\n\nli = [0]*N\nn = N\ncount = K\ncnt = K\n\nprint('<', end='')\nwhile n: # 7\n    if li[count-1] == 0 and cnt == K:\n        li[count-1] = 1\n        n -= 1\n        if n == 0:\n            if count == 0 :\n                print(N,end='>')\n            else :\n                print(count,end='>')\n        else:\n            if count == 0 :\n                print(N,end=', ')\n            else :\n                print(count, end=', ')\n        cnt = 0\n    else:\n        count += 1\n        count %= N\n        if li[count-1] == 0:\n            cnt += 1\n\n\nimport sys\nN, K = map(int, input().split()) # 7 3\npeople = list(range(1, N+1))\nresult = []\ni = K-1 # i = 2\nwhile True:\n    result.append(people.pop(i))\n    if not people:\n        break\n    i = (i+K-1) % len(people) # 2 + 3 - 1 = 4, and 4 % 6 = 4\n    # print(i)        # debug output; would corrupt the expected answer\n    # print(people)   # debug output; would corrupt the expected answer\nprint('<'+', '.join(map(str, result))+'>')\n","repo_name":"wogus3602/PracticeCode","sub_path":"Python,Cpp/백준/1158.py","file_name":"1158.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
{"seq_id":"17986440832","text":"#!/usr/bin/python3\n\"\"\"\nModule to supplement a unittest for class ``State``\nuse with the following commands:\n    python3 -m unittest discover tests\n    python3 -m unittest tests/test_models/test_state.py\n\"\"\"\n\nimport unittest\nfrom models.state import State as state\nfrom models.base_model import BaseModel\nimport inspect\nimport pep8\n\n\n# Test class inherits from unittest\nclass TestStateDoc(unittest.TestCase):\n    \"\"\"documentation tests for ``State`` class\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"method to prepare test fixture\"\"\"\n        cls.state = inspect.getmembers(state, inspect.isfunction)\n\n    def test_StateModule_doc(self):\n        \"\"\"method to test if module is properly documented\"\"\"\n        self.assertIsNot(state.__doc__, None, 'module [state.py] should have\\\n                proper documentation')\n        self.assertTrue(len(state.__doc__) >= 1, 'module [state.py] should have\\\n                proper documentation')\n\n    def test_StateClass_doc(self):\n        \"\"\"method to test if class is properly documented\"\"\"\n        self.assertIsNot(state.__doc__, None, 'definition of [State] class should\\\n                have proper documentation')\n        self.assertTrue(len(state.__doc__) >= 1, 'definition of [State] class\\\n                should have proper documentation')\n\n    def test_StateFunctions__doc(self):\n        \"\"\"method to test if functions are properly documented\"\"\"\n        for function in self.state:\n            self.assertIsNot(function[1].__doc__, None, '{:s} method should be\\\n                    properly documented'.format(function[0]))\n            self.assertTrue(len(function[1].__doc__) >= 1, '{:s} method should\\\n                    be properly documented'.format(function[0]))\n\n\nclass TestStateFunctionality(unittest.TestCase):\n    \"\"\"functionality of the ``State`` class\"\"\"\n\n    def test_StateName(self):\n        \"\"\"method to test if ``State`` has a name attribute set to the\n        empty string\"\"\"\n        self.assertTrue(hasattr(state, 'name'))\n        self.assertEqual(state.name, '')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"rkbrian/AirBnB_clone","sub_path":"tests/test_models/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
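The next record renames AutoCAD blocks according to "cellno formats" such as 2000, where the trailing zeros decide how many consecutive numbers the format can host (2000..2999 for three zeros). A quick standalone check of that capacity rule, assuming the 10**zeros reading used below:

def max_cellnos(cellno_format: str) -> int:
    # '2000' -> 3 trailing zeros -> 10**3 = 1000 numbers (2000..2999)
    trailing_zeros = len(cellno_format) - len(cellno_format.rstrip('0'))
    return 10 ** trailing_zeros

assert max_cellnos('2000') == 1000
assert max_cellnos('410') == 10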
{"seq_id":"1281054032","text":"import re\r\nimport pandas as pd\r\nimport comtypes.client\r\nfrom comtypes import COMError\r\nfrom comtypes.client import CreateObject, GetActiveObject\r\nfrom os.path import dirname, basename, splitext, join\r\nfrom time import sleep \r\nimport threading #TODO: is required?\r\nimport raa_logger\r\nimport logging\r\nfrom prettytable import PrettyTable\r\nfrom pythoncom import CoInitializeEx\r\nfrom pythoncom import CoUninitialize\r\n\r\nclass acad_block:\r\n    def __init__(self, orig_name, x_cord, y_cord):\r\n        self.orig_name = orig_name\r\n        self.x_cord = x_cord\r\n        self.y_cord = y_cord\r\n        self.uniq_name = ''\r\n        self.new_name = ''\r\n\r\n#TODO: all over the file - do we need to replace variables with self.?\r\nclass block_shuffle:\r\n    def __init__(self, event):\r\n        self.logger = logging.getLogger(\"raa_logger\")\r\n        self.event = event\r\n\r\n    def check_legal_mapping(self, land_use_codes, cellno_formats): \r\n        error = True\r\n        if len(land_use_codes) > len(set(land_use_codes)):\r\n            self.logger.exception(\"\\nError: \\\"land use code\\\" values must be unique\")\r\n        elif len(cellno_formats) > len(set(cellno_formats)):\r\n            self.logger.exception(\"\\nError: \\\"cellno format\\\" values must be unique\")\r\n        elif len(land_use_codes) != len(cellno_formats):\r\n            self.logger.exception(\"\\nError: number of \\\"land use code\\\" values should be equal to those of \\\"cellno format\\\"\")\r\n        else:\r\n            error = False\r\n\r\n        if error: \r\n            self.logger.info(\"Exiting...\")\r\n            CoUninitialize()\r\n            exit()\r\n\r\n    def trailing(self, s):\r\n        return len(s) - len(s.rstrip('0'))\r\n    \r\n    # according to the number of '0' digits in the \"cellno format\" - decide what is the max number of cellnos \r\n    def get_max_cellnos(self, cellno_format):\r\n        trailing_zeros = self.trailing(cellno_format)\r\n        #print(f\"{cellno_format} has {trailing_zeros} '0's\")\r\n        # 10**zeros consecutive numbers fit the format (e.g. 2000..2999 for 3 zeros)\r\n        return 10**trailing_zeros\r\n\r\n\r\n    def open_acad(self, filepath):\r\n        try: #Get AutoCAD running instance\r\n            self.logger.info(\"\\nChecking for an active AutoCAD app...\\n\")\r\n            acad = GetActiveObject(\"AutoCAD.Application\")\r\n            state = True\r\n        except(OSError,COMError): #If autocad isn't running, open it\r\n            self.logger.info(\"No active app - opening AutoCAD...\\n\")\r\n            acad = CreateObject(\"AutoCAD.Application\",dynamic=True)\r\n            state = False\r\n        acad.Visible = False #TODO: 1. how to get invisible AutoCAD right at opening? 2. should make invisible if already opened?\r\n\r\n        if state: #If you have only 1 opened drawing\r\n            self.logger.info(\"Found an active app\\n\")\r\n            self.doc = acad.Documents.Item(0)\r\n        else:\r\n            self.doc = acad.Documents.Open(filepath)\r\n#        return doc\r\n\r\n    def acad_command(self, command_str):\r\n        for i in range(50):\r\n            try:\r\n                self.logger.debug(f'Sending command:{command_str}')\r\n                self.doc.SendCommand(command_str) \r\n            except:\r\n                self.failed = True\r\n                self.logger.debug(f\"\\nDid not succeed in sending AutoCAD command {i+1} times\")\r\n                sleep(0.1)\r\n            else:\r\n                self.failed = False\r\n                break\r\n        if self.failed:\r\n            self.logger.exception(f\"\\nError: did not succeed in sending AutoCAD command {i+1} times\")\r\n\r\n    # Extract used cellno and codes from Autocad file \r\n    def acad_ext_cellno_codes(self, template_filepath, ext_filepath):\r\n        self.logger.info(\"\\nExtracting CELLNO data from Autocad...\\n\")\r\n        # 1. Select all blocks\r\n        self.acad_command('._select all ') #Notice that the last SPACE is equivalent to hitting ENTER\r\n                                           #You should separate the command's arguments also with SPACE\r\n        \r\n        # Suppress dialog box for following file read/write\r\n        self.acad_command('._filedia 0 ')\r\n        \r\n        # 2. 
Attribute extraction (ATTEXT)\r\n self.acad_command('._-attext c ' + template_filepath + '\\r' + ext_filepath + '\\ry\\r' )\r\n \r\n # Return file read/write dialog box\r\n self.acad_command('._filedia 1 ')\r\n\r\n def acad_replace_cellno(self, old_cellno, new_cellno):\r\n self.acad_command('._-attedit n\\rn\\rCellno\\rCELLNO\\r' + old_cellno + '\\r' + old_cellno + '\\r' + new_cellno + '\\r')\r\n\r\n def gen_template_file(self, acad_filepath):\r\n template_filepath = dirname(acad_filepath) + '/' + 'attr_extract_template.txt'\r\n self.logger.info(f\"Extracting block names from .dwg file...\\n\")\r\n with open(template_filepath, \"w\") as f: \r\n wstr = \"BL:NAME C008000\\nCELLNO C004000\\nCODE N004000\\nBL:X N012004\\nBL:Y N012004\\n\"\r\n f.write(wstr)\r\n return template_filepath\r\n\r\n def get_new_name(self, formats_d, code, cnt):\r\n format_max_cellnos = self.get_max_cellnos(formats_d[code]) # get max number of cellnos (according to the format)\r\n if cnt > format_max_cellnos:\r\n self.logger.exception(f\"\\nError: Exceeded maximum number of possible cellno values allowed by format\\nThere can be {format_max_cellnos} cellnos\\nExiting...\") \r\n CoUninitialize()\r\n exit()\r\n new_name = str(int(formats_d[code])+cnt)\r\n return new_name\r\n\r\n def get_nearest_block_idx(self, block, blocks, skip_blocks, code):\r\n self.logger.debug(f\"For code {code} there are {len(blocks) - len(skip_blocks)} blocks left\")\r\n self.logger.debug(f\"Getting block nearest to {block.orig_name}\")\r\n candid_blocks = list(b for b in blocks if b not in skip_blocks and b is not block)\r\n min_dist = 0\r\n if len(candid_blocks) > 1:\r\n for candid_block in candid_blocks:\r\n dist = (float(block.x_cord) - float(candid_block.x_cord))**2 + (float(block.y_cord) - float(candid_block.y_cord))**2\r\n self.logger.debug(f\"Distance between {block.orig_name} and {candid_block.orig_name} is {dist}\")\r\n if dist < min_dist or min_dist == 0:\r\n min_dist = dist\r\n nearest_block_idx = blocks.index(candid_block)\r\n elif len(candid_blocks) == 1:\r\n nearest_block_idx = blocks.index(candid_blocks[0])\r\n else:\r\n nearest_block_idx = blocks.index(block)\r\n return nearest_block_idx\r\n\r\n\r\n def shuffle(self, acad_filepath, mapping_excel_filepath):\r\n res = CoInitializeEx(0) \r\n # Open Autocad and dwg file\r\n self.open_acad(acad_filepath)\r\n \r\n \r\n # Extract cellno and code data\r\n template_filepath = self.gen_template_file(acad_filepath)\r\n ext_filepath = dirname(acad_filepath) + '/' + splitext(basename(acad_filepath))[0] + '.txt'\r\n self.acad_ext_cellno_codes(template_filepath, ext_filepath)\r\n \r\n # Read mapping excel and create a formats dict\r\n mapping_sheet = 'mapping' #TODO: take mapping sheet name from user\r\n df = pd.read_excel(mapping_excel_filepath, mapping_sheet) \r\n self.logger.debug(f\"Reading excel file: {mapping_excel_filepath}\\tsheet name: {mapping_sheet}\\n{df}\")\r\n land_use_codes = list(map(lambda x: str(int(x)), df['land use code'].dropna().tolist()))\r\n cellno_formats = list(map(lambda x: str(int(x)), df['cellno format'].dropna().tolist()))\r\n self.logger.debug(f\"len(land_use_codes):{land_use_codes}\")\r\n self.logger.debug(f\"len(cellno_formats):{cellno_formats}\")\r\n self.check_legal_mapping(land_use_codes, cellno_formats)\r\n \r\n formats_d = {}\r\n for i, code in enumerate(land_use_codes):\r\n formats_d.update({code : cellno_formats[i]})\r\n \r\n \r\n # Read original Cellno <-> Use Code pairs\r\n with open(ext_filepath) as f: \r\n file_str = f.readlines()\r\n \r\n # Create 
dictionary with:\r\n # keys: block codes\r\n # values: arrays of all acad_block classes of that code\r\n blocks_data = {}\r\n for line in file_str:\r\n self.logger.debug(f\"\\nline:{line}\") \r\n data_l = line.split(\",\")\r\n data_strip_l = [re.sub(\"[\\s\\t\\n\\']\", \"\", x) for x in data_l]\r\n line_desc = data_strip_l[0].lower()\r\n block_name = data_strip_l[1]\r\n block_code = data_strip_l[2]\r\n x_cord = data_strip_l[3]\r\n y_cord = data_strip_l[4]\r\n if \"cellno\" in line_desc:\r\n self.logger.debug(f\"block_code:{block_code}\")\r\n self.logger.debug(f\"block_name:{block_name}\")\r\n # check valid name - only consisting digits\r\n if block_name.isdigit():\r\n block = acad_block(block_name, x_cord, y_cord)\r\n if block_code not in blocks_data:\r\n blocks_data.update({block_code : []})\r\n blocks_data[block_code].append(block)\r\n self.logger.debug(f\"length of blocks_data:{len(blocks_data)}\")\r\n #data_d.setdefault(block_code, []).append(block_name)\r\n else:\r\n self.logger.warning(f\"Block name: {block_name} is an invalid name - should consist of digits only!\\nIgnoring block and moving on to the next one\")\r\n\r\n # Add unique temp name and a new name to each block\r\n # Naming should be done according to geografical distance between each group of block_code blocks,\r\n # such that the result should be that a group of same type blocks that are close to each other should have similar names, e.g. 200, 201, 202\r\n # To do this, we go through following steps:\r\n # 1. go over each code in the blocks_data dict\r\n uniq_char = 'A'\r\n for code, blocks in blocks_data.items():\r\n # 2. rename blocks in an ascending names (both for uniq and new names), going through the blocks according to their proximity as follows:\r\n # pick a random block and find its geografically nearest block (while ignoring blocks already picked),\r\n # then add picked block to skip list, rename the nearest with new and uniq names.\r\n # then move on to the nearest and look for the (now) closest to it.\r\n skip_blocks = []\r\n idx = 0\r\n cnt = 0\r\n if code not in formats_d.keys():\r\n self.logger.exception(\"\\nError: Autodesk CODE {code} was not found in excel list of codes\\nExiting...\")\r\n CoUninitialize()\r\n exit() \r\n while len(skip_blocks) < len(blocks):\r\n self.logger.debug(f\"\\nlen of skip_blocks:{len(skip_blocks)} len of blocks:{len(blocks)}\")\r\n # rename with uniq and new names\r\n block = blocks[idx]\r\n block.uniq_name = uniq_char + str(cnt)\r\n block.new_name = self.get_new_name(formats_d, code, cnt)\r\n self.logger.debug(f\"Block {block.orig_name} - added unique temp name: {block.uniq_name}\")\r\n self.logger.info(f\"Block {block.orig_name} - added new name: {block.new_name}\")\r\n idx = self.get_nearest_block_idx(block, blocks, skip_blocks, code)\r\n self.logger.debug(f\"Nearest to block {block.orig_name} is {blocks[idx].orig_name}\")\r\n cnt += 1\r\n skip_blocks.append(block)\r\n \r\n uniq_char = chr(ord(uniq_char) + 1) # advance uniq char for the next code\r\n\r\n # 3. 
now actually replace block names: first by unique temp name, then by new name\r\n        table = PrettyTable(['Original Block Name', 'New Block Name'])\r\n        for i in range(2):\r\n            for blocks in blocks_data.values():\r\n                for block in blocks:\r\n                    old_name = block.orig_name if i == 0 else block.uniq_name\r\n                    new_name = block.uniq_name if i == 0 else block.new_name\r\n                    if i == 0:\r\n                        self.logger.info(f\"Replacing block {block.orig_name} by a temporary unique name {block.uniq_name}\") \r\n                    elif i == 1:\r\n                        self.logger.info(f\"Replacing temporary unique name {block.uniq_name} by {block.new_name} (for block originally named {block.orig_name})\")\r\n                        table.add_row([block.orig_name, block.new_name])\r\n                    self.acad_replace_cellno(old_cellno=old_name, new_cellno=new_name)\r\n                    sleep(0.1)\r\n        self.logger.info(\"\\nBlocks Replacement Conclusion:\")\r\n        self.logger.info(\"-------------------------------\")\r\n        self.logger.info(f\"{table}\")\r\n        sleep(0.1)\r\n\r\n        #TODO: at the end - save file\r\n        self.event.set()\r\n        CoUninitialize()\r\n        return True\r\n\r\n","repo_name":"yniss/RAA","sub_path":"block_shuffle.py","file_name":"block_shuffle.py","file_ext":"py","file_size_in_byte":12773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
{"seq_id":"37726790722","text":"import numpy as np \nimport cv2\n\ndef crinex_edge(img, filter_size=(3,3), stride=1):\n    img_shape = img.shape\n    result_shape = tuple(np.int64((np.array(img_shape) - np.array(filter_size))/stride))\n\n    # Generate Zero Matrix\n    for i in range(2):\n        globals()[f'result{i+1}'] = np.zeros(result_shape)\n\n    global result1 \n    global result2 \n\n    # 1st Filter(h, w)\n    for h in range(0, result_shape[0], stride):\n        for w in range(0, result_shape[1], stride):\n            tmp = img[h:h+filter_size[0], w:w+filter_size[1]]\n            tmp = np.sort(tmp.ravel())\n            result1[h,w] = tmp[int(filter_size[0]*filter_size[1]/2)]\n    \n    # 2nd Filter(h, w+1)\n    for h in range(0, result_shape[0], stride):\n        for w in range(0, result_shape[1], stride):\n            tmp = img[h:h+filter_size[0], w+1:w+1+filter_size[1]]\n            tmp = np.sort(tmp.ravel())\n            result2[h,w] = tmp[int(filter_size[0]*filter_size[1]/2)]\n\n    result1 = cv2.resize(result1, (224, 224))\n    result2 = cv2.resize(result2, (224, 224))\n\n    # Otsu thresholding in OpenCV requires 8-bit single-channel images\n    result1 = result1.astype('uint8')\n    result2 = result2.astype('uint8')\n\n    result12xor = cv2.bitwise_xor(result1, result2)\n    ret_xor, th_xor = cv2.threshold(result12xor, 0, 255, cv2.THRESH_OTSU)\n\n    return th_xor","repo_name":"crinex/crinex_filter","sub_path":"crinex_filter/crinex_edge.py","file_name":"crinex_edge.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
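The crinex_edge record just above computes two k x k median filters offset by one column, XORs them, and Otsu-thresholds the result, so edges appear where the local median changes. A self-contained sketch of the same idea without the globals() indirection; it assumes a single-channel uint8 input and uses OpenCV's built-in median filter instead of the manual loops:

import numpy as np
import cv2

def median_shift_edges(img, k=3):
    """Edge map from XOR of two k x k median filters shifted by one column."""
    med = cv2.medianBlur(img, k)                              # median around (h, w)
    med_shift = cv2.medianBlur(np.roll(img, -1, axis=1), k)   # median around (h, w+1)
    diff = cv2.bitwise_xor(med, med_shift)
    _retval, edges = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return edges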
request.values.get(\"role\")\n phone = request.values.get(\"phone\")\n user = User(first_name=first_name,\n last_name=last_name, age=age,\n email=email, role=role, phone=phone)\n db.session.add(user)\n db.session.commit()\n\n return render_template('user_added.html')\n\n\n@app.route(\"/users/\")\ndef get_user(id):\n \"\"\"Страница пользователя\"\"\"\n user = User.query.get(id)\n return render_template('user.html', user=user)\n\n\n# Увы, мой метод решения идет через HTML, и использовать PUT невозможно (наверное).\n@app.route(\"/users/\", methods=[\"POST\"])\ndef put_user(id):\n \"\"\"ПУТ (ПОСТ) запрос изменения пользователя\"\"\"\n user = User.query.get(id)\n user.first_name = request.values.get(\"first_name\")\n user.last_name = request.values.get(\"last_name\")\n user.age = request.values.get(\"age\")\n user.email = request.values.get(\"email\")\n user.role = request.values.get(\"role\")\n user.phone = request.values.get(\"phone\")\n\n db.session.add(user)\n db.session.commit()\n\n return render_template('user_added.html')\n\n\n# Аналогично PUT, метод DELETE реализован через POST,\n# т.к. мне пока что не очень понятно как отправлять json запрос методом DELETE как в разборе ДЗ\n# было бы здорово, если бы хотя бы в шпаргалке, не говоря о видеоуроке,\n# было упоминание этих методов: как, зачем, почему!)\n@app.route(\"/users//delete\", methods=[\"POST\"])\ndef delete_user(id):\n \"\"\"Удаление пользователя\"\"\"\n user = User.query.get(id)\n db.session.delete(user)\n db.session.commit()\n users = User.query.all()\n return render_template('all_users.html', users=users)\n\n\n# Представления для заказов\n@app.route(\"/orders\")\ndef get_orders():\n \"\"\"Список всех заказов\"\"\"\n orders = Order.query.all()\n users = User.query.all()\n return render_template('all_orders.html', orders=orders, users=users)\n\n\n@app.route(\"/orders\", methods=[\"POST\"])\ndef load_order():\n \"\"\"Пост запрос добавления заказа\"\"\"\n name = request.values.get(\"name\")\n description = request.values.get(\"description\")\n start_date = datetime.strptime(request.values.get(\"start_date\"), '%Y-%m-%d').date()\n end_date = datetime.strptime(request.values.get(\"end_date\"), '%Y-%m-%d').date()\n address = request.values.get(\"address\")\n price = request.values.get(\"price\")\n customer_id = request.values.get(\"customer_id\")\n executor_id = request.values.get(\"executor_id\")\n order = Order(name=name,\n description=description, start_date=start_date,\n end_date=end_date, address=address, price=price, customer_id=customer_id, executor_id=executor_id)\n\n db.session.add(order)\n db.session.commit()\n\n return render_template('user_added.html')\n\n\n@app.route(\"/orders/\")\ndef get_order(id):\n \"\"\"Страница заказа\"\"\"\n order = Order.query.get(id)\n customer = User.query.get(order.customer_id)\n executor = User.query.get(order.executor_id)\n users = User.query.all()\n return render_template('order.html', order=order, users=users, customer=customer, executor=executor)\n\n\n@app.route(\"/orders/\", methods=[\"POST\"])\ndef put_order(id):\n \"\"\"ПОСТ (ПУТ) запрос изменения заказа\"\"\"\n order = Order.query.get(id)\n\n order.name = request.values.get(\"name\")\n order.description = request.values.get(\"description\")\n order.start_date = datetime.strptime(request.values.get(\"start_date\"), '%Y-%m-%d').date()\n order.end_date = datetime.strptime(request.values.get(\"end_date\"), '%Y-%m-%d').date()\n order.address = request.values.get(\"address\")\n order.price = request.values.get(\"price\")\n order.customer_id = 
request.values.get(\"customer_id\")\n order.executor_id = request.values.get(\"executor_id\")\n\n db.session.add(order)\n db.session.commit()\n\n return render_template('user_added.html')\n\n\n@app.route(\"/orders//delete\", methods=[\"POST\"])\ndef delete_order(id):\n \"\"\"Удаление заказа\"\"\"\n order = Order.query.get(id)\n db.session.delete(order)\n db.session.commit()\n return render_template('user_added.html')\n\n\n# Представления для предложений\n@app.route(\"/offers\")\ndef get_offers():\n \"\"\"Список всех предложений\"\"\"\n offers = Offer.query.all()\n users = User.query.all()\n orders = Order.query.all()\n return render_template('all_offers.html', offers=offers, orders=orders, users=users)\n\n\n@app.route(\"/offers\", methods=[\"POST\"])\ndef load_offer():\n \"\"\"Пост запрос добавления предложения\"\"\"\n order_id = request.values.get(\"order_id\")\n executor_id = request.values.get(\"executor_id\")\n offer = Offer(order_id=order_id, executor_id=executor_id)\n\n db.session.add(offer)\n db.session.commit()\n\n return render_template('user_added.html')\n\n\n@app.route(\"/offers/\")\ndef get_offer(id):\n \"\"\"Представление одного предложения\"\"\"\n users = User.query.all()\n orders = Order.query.all()\n offer = Offer.query.get(id)\n order = Order.query.get(offer.order_id)\n executor = User.query.get(offer.executor_id)\n\n return render_template('offer.html', offer=offer, orders=orders, users=users, order=order, executor=executor)\n\n\n@app.route(\"/offers/\", methods=[\"POST\"])\ndef put_offer(id):\n \"\"\"ПОСТ (ПУТ) запрос изменения предложения\"\"\"\n offer = Offer.query.get(id)\n\n offer.order_id = request.values.get(\"order_id\")\n offer.executor_id = request.values.get(\"executor_id\")\n\n db.session.add(offer)\n db.session.commit()\n\n return render_template('user_added.html')\n\n\n@app.route(\"/offers//delete\", methods=[\"POST\"])\ndef delete_offer(id):\n \"\"\"Удаление предложения\"\"\"\n offer = Offer.query.get(id)\n db.session.delete(offer)\n db.session.commit()\n return render_template('user_added.html')\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"yesha999/DZ16","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"30321771999","text":"from gefxml_reader import Cpt\nimport os\nfrom decimal import Decimal\n\nxmlFile = 'input/vergelijk/2222692_EHZ3AB-S01.xml'\ngefFile = 'input/vergelijk/2222692_EHZ3AB-S01.GEF'\n\nxmlCpt = Cpt()\nxmlCpt.load_xml(xmlFile)\n\ngefCpt = Cpt()\ngefCpt.load_gef(gefFile)\n\n# gef en xml hebben verschillende hoeveelheden decimalen\nfor column in xmlCpt.data.columns:\n # bepaal het aantal decimalen van beide\n try:\n gefDecimals = gefCpt.data[column].apply(lambda x: len(str(Decimal(f'{x}')).split('.')[1])).max()\n except:\n gefDecimals = 0\n try:\n xmlDecimals = xmlCpt.data[column].apply(lambda x: len(str(Decimal(f'{x}')).split('.')[1])).max()\n except:\n xmlDecimals = 0\n \n # beide dataframes afknippen op het minimum aantal decimalen van beide\n dec = int(min(gefDecimals, xmlDecimals))\n\n xmlCpt.data[column] = xmlCpt.data[column].apply(lambda x: str(Decimal(str(x)).quantize(Decimal((0, (1,), -dec)), rounding=\"ROUND_FLOOR\")))\n gefCpt.data[column] = gefCpt.data[column].apply(lambda x: str(Decimal(str(x)).quantize(Decimal((0, (1,), -dec)), rounding=\"ROUND_FLOOR\")))\n\n\nif all([gefCpt.easting == xmlCpt.easting, gefCpt.northing == xmlCpt.northing, gefCpt.groundlevel == 
{"seq_id":"30321771999","text":"from gefxml_reader import Cpt\nimport os\nfrom decimal import Decimal\n\nxmlFile = 'input/vergelijk/2222692_EHZ3AB-S01.xml'\ngefFile = 'input/vergelijk/2222692_EHZ3AB-S01.GEF'\n\nxmlCpt = Cpt()\nxmlCpt.load_xml(xmlFile)\n\ngefCpt = Cpt()\ngefCpt.load_gef(gefFile)\n\n# gef and xml have different numbers of decimals\nfor column in xmlCpt.data.columns:\n    # determine the number of decimals of both\n    try:\n        gefDecimals = gefCpt.data[column].apply(lambda x: len(str(Decimal(f'{x}')).split('.')[1])).max()\n    except:\n        gefDecimals = 0\n    try:\n        xmlDecimals = xmlCpt.data[column].apply(lambda x: len(str(Decimal(f'{x}')).split('.')[1])).max()\n    except:\n        xmlDecimals = 0\n    \n    # truncate both dataframes to the minimum number of decimals of the two\n    dec = int(min(gefDecimals, xmlDecimals))\n\n    xmlCpt.data[column] = xmlCpt.data[column].apply(lambda x: str(Decimal(str(x)).quantize(Decimal((0, (1,), -dec)), rounding=\"ROUND_FLOOR\")))\n    gefCpt.data[column] = gefCpt.data[column].apply(lambda x: str(Decimal(str(x)).quantize(Decimal((0, (1,), -dec)), rounding=\"ROUND_FLOOR\")))\n\n\nif all([gefCpt.easting == xmlCpt.easting, gefCpt.northing == xmlCpt.northing, gefCpt.groundlevel == xmlCpt.groundlevel]):\n    print('XYZ are equal')\n    print(f'GEF: x: {gefCpt.easting}, y: {gefCpt.northing}, z: {gefCpt.groundlevel}')\n    print(f'XML: x: {xmlCpt.easting}, y: {xmlCpt.northing}, z: {xmlCpt.groundlevel}')\nelse:\n    print('There are differences in XYZ')\n    print(f'GEF: x: {gefCpt.easting}, y: {gefCpt.northing}, z: {gefCpt.groundlevel}')\n    print(f'XML: x: {xmlCpt.easting}, y: {xmlCpt.northing}, z: {xmlCpt.groundlevel}')\n\n\nif (gefCpt.data == xmlCpt.data).all().all():\n    print('All measurement data (truncated to the same number of decimals) is equal')\nelse:\n    print('There are differences in the measurement data (truncated to the same number of decimals)')\n    print(f'GEF: {gefCpt.data[gefCpt.data != xmlCpt.data].dropna(axis=\"rows\", how=\"all\")}')\n    print(f'XML: {xmlCpt.data[gefCpt.data != xmlCpt.data].dropna(axis=\"rows\", how=\"all\")}')","repo_name":"Amsterdam/gefxml_viewer","sub_path":"vergelijk_gefxml.py","file_name":"vergelijk_gefxml.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
{"seq_id":"568553563","text":"import math\nimport numpy as np\nfrom collections import deque\n\nimport mouette as M\nimport mouette.geometry as geom\nfrom mouette.geometry import rotate_2d\nfrom mouette.processing import SingularityCutter, SurfaceSubdivision\n\nfrom .common import *\nfrom .instance import Instance\nfrom .worker import *\n\n########## Utility functions ##########\n\ndef split_singular_triangles(I : Instance):\n    \"\"\"Combinatorial operations on the mesh to place additional points inside singular triangles\"\"\"\n    I.triplets_of_triangles = dict() # singular triangles are cut in 3 parts\n\n    I.singu_faces_to_4pts = dict()\n    I.triangles_before_split = dict()\n    I.singular_vertices = I.mesh.vertices.create_attribute(\"singularities\", int)\n    if len(I.singular_faces)==0 : return I\n\n    nF = len(I.mesh.faces)\n    with SurfaceSubdivision(I.work_mesh) as subdiv1:\n        with SurfaceSubdivision(I.mesh) as subdiv2:\n            for f in range(nF):\n                if I.singular_faces[f] == 0 : continue\n                # split triangle in 3\n                A,B,C = I.mesh.faces[f]\n                nV,nF = len(I.mesh.vertices), len(I.mesh.faces)\n                I.singular_vertices[nV] = I.singular_faces[f]\n                # split function adds a new vertex (the face barycenter) at the end \n                subdiv1.split_face_as_fan(f)\n                subdiv2.split_face_as_fan(f)\n                I.singu_faces_to_4pts[f] = [nV,A,B,C]\n                I.triplets_of_triangles[f] = (f, nF, nF+1)\n                I.triplets_of_triangles[nF] = (nF, nF+1, f)\n                I.triplets_of_triangles[nF+1] = (nF+1, f, nF)\n                I.triangles_before_split[f] = (f,(A,B,C))\n                I.triangles_before_split[nF] = (f,(A,B,C))\n                I.triangles_before_split[nF+1] = (f,(A,B,C))\n    # Resets invalid connectivity\n    # This shouldn't be necessary but better be explicit\n    I.mesh.connectivity.clear()\n    I.mesh.half_edges.clear()\n    I.work_mesh.connectivity.clear()\n    I.work_mesh.half_edges.clear()\n    return I\n\ndef replace_singularities_barycenter(I : Instance):\n    \"\"\"Replaces singularities by minimizing the ARAP energy, i.e. the three Jacobians of the triangles adjacent to a singularity should be isometries\n\n    Returns:\n        int: number of singularities for which replacement has failed\n    \"\"\"\n\n    n_singus_fail = 0\n    for singuTri in I.singular_faces:\n        try:\n            # Get coordinates in x,y space\n            S,A,B,C = I.singu_faces_to_4pts[singuTri]\n\n            pA,pB,pC,pS = (I.mesh.vertices[_u] for _u in (A,B,C,S))\n            X,Y,Z = geom.face_basis(pA,pB,pC)\n            pA, pB, pC, pS = (M.Vec( X.dot(_p), Y.dot(_p) ) for _p in (pA,pB,pC,pS)) # project in basis of the triangle\n\n            # Get coordinates in u,v 
space\n T1, iA1, iS1 = I.mesh.half_edges.adj(A,S) # T1 = ACS\n iC1 = 3 - iA1 - iS1\n cnr = I.mesh.connectivity.face_to_first_corner(T1)\n uA1, uC1, uS1 = ( M.Vec(I.UVs[cnr + _i].x, I.UVs[cnr + _i].y) for _i in (iA1,iC1,iS1))\n area1 = geom.triangle_area_2D(uA1,uC1,uS1)\n\n T2, iB2, iS2 = I.mesh.half_edges.adj(B,S) # T2 = ABS\n iA2 = 3 - iB2 - iS2\n cnr = I.mesh.connectivity.face_to_first_corner(T2)\n uA2, uB2, uS2 = ( M.Vec(I.UVs[cnr + _i].x, I.UVs[cnr + _i].y) for _i in (iA2,iB2,iS2))\n area2 = geom.triangle_area_2D(uA2,uB2,uS2)\n\n T3, iC3, iS3 = I.mesh.half_edges.adj(C,S) # T3 = BCS\n iB3 = 3 - iC3 - iS3\n cnr = I.mesh.connectivity.face_to_first_corner(T3)\n uB3, uC3, uS3 = ( M.Vec(I.UVs[cnr + _i].x, I.UVs[cnr + _i].y) for _i in (iB3,iC3,iS3))\n area3 = geom.triangle_area_2D(uB3,uC3,uS3)\n \n tot_area = area1 + area2 + area3\n pS = (area3*pA + area2*pC + area1*pB) / tot_area\n\n basisT = np.array((X,Y,Z))\n basisT = np.linalg.inv(basisT)\n bary = sum(I.mesh.vertices[_u] for _u in (A,B,C))/3\n I.mesh.vertices[S] = basisT.dot( M.Vec(pS.x, pS.y, Z.dot(bary)))\n\n except Exception as e:\n # print(e)\n n_singus_fail += 1\n return n_singus_fail\n\ndef create_optimal_seams(I : Instance, features : bool, verbose) -> SingularityCutter:\n \"\"\"Performs minimal set of cuts between singularities\"\"\"\n singu_set = {x for x in I.singular_vertices}\n if not features or (len(I.feat.feature_edges) == len(I.mesh.boundary_edges)):\n # no features detected\n cutter = SingularityCutter(I.mesh, singu_set, verbose=verbose)() \n else:\n featdetect = I.feat.original # extract detection from cutter\n cutter = SingularityCutter(I.mesh, singu_set, features=featdetect, verbose=verbose)()\n I.seams = cutter.cut_edges\n return cutter\n\ndef delimit_feature_regions(I : Instance, cutter ) -> M.utils.UnionFind:\n \"\"\"\n When dealing with feature edges, the tree traversal for reconstruction needs to be modified. 
\n Features delimit regions inside the mesh, that should only be connected by one edge.\n If such a region is reconstructed from two or more edges, new seams appear due to period jumps along existing seams.\n Taking this into account is a pain, so we instead flag everything for the traversal to avoid such cases.\n\n Returns:\n An Union-Find structure that tells us in which component each triangle is.\n \"\"\"\n triangle_region = M.utils.UnionFind(I.mesh.id_faces)\n forbidden_edges_set = cutter.cut_edges | I.feat.features_edges_no_cuts\n forbidden_edges = M.Attribute(bool) #self.input_mesh.edges.create_attribute(\"forbidden_edges\", bool)\n for e in forbidden_edges_set:\n forbidden_edges[e] = True\n\n I.tree = M.processing.trees.FaceSpanningForest(I.mesh, forbidden_edges_set)()\n for vertex,father in I.tree.traverse():\n if father is not None:\n triangle_region.union(vertex,father)\n return triangle_region\n\ndef rescale_uvs(I : Instance):\n \"\"\"Scales I.UVs in bounding box [0;1]^2\"\"\"\n xmin,xmax,ymin,ymax = float(\"inf\"), -float(\"inf\"), float(\"inf\"), -float(\"inf\")\n for c in I.mesh.id_corners:\n uv = I.UVs[c]\n xmin = min(xmin, uv.x)\n xmax = max(xmax, uv.x)\n ymin = min(ymin, uv.y)\n ymax = max(ymax, uv.y)\n scale_x = xmax-xmin\n scale_y = ymax-ymin\n scale = min(scale_x, scale_y)\n\n # rotate I.UVs so that feature edges are axis aligned\n if I.feat.feature_edges:\n e = list(I.feat.feature_edges)[0]\n A,B = I.work_mesh.edges[e]\n T, iA,iB = I.work_mesh.half_edges.adj(A,B)\n if T is None:\n T,iB,iA = I.work_mesh.half_edges.adj(B,A)\n cnr = I.work_mesh.connectivity.face_to_first_corner(T)\n vec = I.UVs[cnr+ iB] - I.UVs[cnr + iA]\n angle = -atan2(vec.y, vec.x)\n else:\n ref_frame = complex(I.var[I.var_sep_ff], I.var[I.var_sep_ff+1])\n angle = cmath.phase(ref_frame)/I.order\n\n # apply transformation\n for c in I.mesh.id_corners:\n I.UVs[c] = rotate_2d(I.UVs[c] / scale, angle)\n # I.UVs[c] = I.UVs[c] / scale\n return I.UVs\n\ndef write_output_obj(I : Instance, file_path : str):\n \"\"\"Final export of the mesh as an obj file with custom fields for singularity cones, seams and feature edges\"\"\"\n M.mesh.save(I.mesh, file_path)\n # now export cones, seams and features as special fields in .obj\n with open(file_path, \"a\") as fr:\n for s in I.singular_vertices:\n idx = I.singular_vertices[s]\n if idx==-1:\n fr.write(f\"c {s+1} -1\\n\")\n elif idx==1:\n fr.write(f\"c {s+1} 1\\n\")\n else:\n fr.write(f\"c {s+1} 0\\n\")\n \n for e in I.seams:\n a,b = I.mesh.edges[e]\n fr.write(f\"sm {a+1} {b+1}\\n\")\n\n for e in I.feat.features_edges_no_cuts:\n a,b = I.mesh.edges[e]\n if not I.mesh.is_edge_on_border(a,b):\n fr.write(f\"ft {a+1} {b+1}\\n\")\n\nclass ParamConstructor(Worker):\n \"\"\"Worker responsible for putting the parametrization back together after optimization. 
Also exports various debug outputs\"\"\"\n\n def __init__(self, instance: Instance, options = Options(), verbose_options = VerboseOptions()):\n super().__init__(\"ParamReconstruction\", instance, options, verbose_options)\n self.cutter : SingularityCutter = None\n self.reconstructed : bool = False\n\n def __call__(self):\n self.construct_param()\n return self\n\n def export_frame_field(self) -> M.mesh.PolyLine:\n \"\"\"\n Exports the frame field as a mesh for visualization.\n\n Returns:\n PolyLine: the frame field as a mesh object\n \"\"\"\n return self.instance.export_frame_field()\n\n def export_feature_graph(self) -> M.mesh.PolyLine:\n return self.instance.feat.feature_graph\n\n def export_seams(self) -> M.mesh.PolyLine:\n if not self.reconstructed : return None\n return self.instance.cut_graph\n\n def export_singularity_point_cloud(self) -> M.mesh.PointCloud:\n I = self.instance\n I.singu_ptcld = M.mesh.new_point_cloud()\n index = I.singu_ptcld.vertices.create_attribute(\"index\", float)\n i = 0\n for iF in I.work_mesh.id_faces:\n if abs(I.singular_faces[iF])>1e-8:\n P = I.singu_faces_to_4pts[iF][0]\n I.singu_ptcld.vertices.append(I.mesh.vertices[P])\n index[i] = I.singular_faces[iF]\n i += 1\n for v in I.feat.corners:\n if I.feat.corners[v] != I.order//2 :\n I.singu_ptcld.vertices.append(I.work_mesh.vertices[v])\n index[i] = I.feat.corners_no_cuts[v]\n i += 1\n return I.singu_ptcld\n\n def export_flat_mesh(self) -> M.mesh.SurfaceMesh:\n \"\"\"Builds the parametrization as if (x,y) = (u,v)\"\"\"\n if not self.reconstructed : return None \n I = self.instance\n I.param_mesh = M.mesh.new_surface()\n for T in I.mesh.id_faces:\n cnr = I.mesh.connectivity.face_to_first_corner(T)\n for i in range(3):\n I.param_mesh.vertices.append(M.Vec(I.UVs[cnr + i].x, I.UVs[cnr + i].y, 0.))\n I.param_mesh.faces.append((3*T,3*T+1,3*T+2))\n I.param_mesh.face_corners += [3*T,3*T+1,3*T+2]\n singular_tri = I.param_mesh.faces.create_attribute(\"singular\", bool)\n for f in I.triplets_of_triangles:\n singular_tri[f] = True\n return I.param_mesh\n\n def export_disk_mesh(self):\n \"\"\"Input mesh but with a disk topology, where seams are real cuts\"\"\"\n I = self.instance\n I.disk_mesh = M.mesh.copy(self.cutter.output_mesh)\n UVcut = I.disk_mesh.face_corners.create_attribute(\"uv_coords\",float,2)\n for c in I.mesh.id_corners:\n UVcut[c] = I.UVs[c]\n return I.disk_mesh\n\n def construct_param(self):\n I = self.instance\n\n I = split_singular_triangles(I) # Also resets connectivity if needed\n self.cutter = create_optimal_seams(I, self.options.features, self.verbose_options.logger_verbose) \n self.log(\"Starting UV reconstruction.\")\n # We reconstruct along a spanning tree whose root is not singular\n root = 0\n while root in I.triplets_of_triangles: # root is singular\n root += 1\n\n visited = M.ArrayAttribute(bool, len(I.mesh.faces)) # I.mesh.faces.create_attribute(\"visited\", bool, dense=True)\n I.UVs = I.mesh.face_corners.create_attribute(\"uv_coords\", float, 2)\n queue = deque()\n\n I.barycenters = M.attributes.face_barycenter(I.mesh, persistent=False) # recompute barycenters since we have split some triangles\n\n triangle_region = delimit_feature_regions(I, self.cutter) # Compute regions delimited by features edges\n\n def build_edge(A,B):\n ie = I.work_mesh.connectivity.edge_id(A,B)\n direct = (A m_AB -> B\n imA, imB = 4*ie, 4*ie + 2\n if not direct:\n imA,imB = imB,imA\n we *= -1\n mA, mB = complex(I.var[imA], I.var[imA+1]), complex(I.var[imB], I.var[imB+1])\n wpt = I.PT_array[ie] if direct else - 
I.PT_array[ie]\n rotB = principal_angle(we - wpt + pi)\n return c3vec(mA - mB*crot(rotB))\n\n def build_edge_to_center(T, A):\n Torig,(Ao,Bo,Co) = I.triangles_before_split[T]\n iA = np.argmax([Ao==A, Bo==A, Co==A]) # index of A in original triangle\n iS = I.var_sep_pt + 6*Torig + 2*iA\n return M.Vec(I.var[iS], I.var[iS+1], 0.)\n\n def build_triangle(T):\n A,B,C = I.work_mesh.faces[T]\n pA = M.Vec.zeros(3)\n pB = build_edge(A,B)\n pC = build_edge(A,C)\n return pA,pB,pC\n\n def build_triangle_singular(T, iA, iB):\n A,B = (I.work_mesh.faces[T][_x] for _x in (iA,iB))\n pA = M.Vec.zeros(3)\n pB = build_edge(A,B)\n pS = build_edge_to_center(T,A)\n return pA,pB,pS\n\n def can_traverse(a,b, T1=None,T2=None):\n e = I.mesh.connectivity.edge_id(a,b)\n if e in self.cutter.cut_edges: \n return False\n if T1 is None or T2 is None:\n T1,T2 = I.mesh.half_edges.edge_to_triangles(a,b)\n if T1 is None or T2 is None: \n return False # edge on border -> no need to push in queues\n is_feat = (e in I.feat.features_edges_no_cuts)\n return not (is_feat and triangle_region.connected(T1,T2))\n\n def push(T, T2, A, iAT, pA, B, iBT, pB):\n if (T2 is not None) and (not visited[T2]) and can_traverse(A,B, T, T2):\n triangle_region.union(T,T2)\n queue.append((T,T2,iAT,pA,iBT,pB))\n\n # Build the root triangle\n A,B,C = I.mesh.faces[root]\n pA,pB,pC = build_triangle(root)\n cnr = I.mesh.connectivity.face_to_first_corner(root)\n I.UVs[cnr] = pA.xy\n I.UVs[cnr + 1] = pB.xy\n I.UVs[cnr + 2] = pC.xy\n visited[root] = True\n\n # append adjacent triangles\n TAB, iA, iB = I.mesh.half_edges.opposite(A, B, root)\n push(root, TAB, A, iA, pA, B, iB, pB)\n\n TBC, iB, iC = I.mesh.half_edges.opposite(B, C, root)\n push(root, TBC, B, iB, pB, C, iC, pC)\n\n TCA, iC, iA = I.mesh.half_edges.opposite(C, A, root)\n push(root, TCA, C, iC, pC, A, iA, pA)\n \n # traverse the face tree\n self.log(\"Traverse face tree\")\n\n while len(queue)>0: \n father, T, iAT, pA, iBT, pB = queue.popleft()\n if T is None: continue # edge was on border -> nothing on the other side\n if visited[T] : continue\n visited[T] = True\n\n A,B = (I.mesh.ith_vertex_of_face(T,_u) for _u in (iAT,iBT))\n\n # build third vertex C from to origins A and B following edges\n if T in I.triplets_of_triangles: # T is part of a singular triangle\n # -> we build the three triangles in the triplet.\n\n # 1) Build singularity position (not using build_edge function since construction is different)\n iST = 3 - iAT - iBT\n S = I.mesh.ith_vertex_of_face(T,iST)\n qA,qB,qS = build_triangle_singular(T,iAT,iBT)\n qA,qB,qS = align_edges(pA,pB,qA,qB,qS)\n cnr = I.mesh.connectivity.face_to_first_corner(T)\n I.UVs[cnr + iAT] = qA.xy\n I.UVs[cnr + iBT] = qB.xy\n I.UVs[cnr + iST] = qS.xy\n\n # 2) build two other triangles\n # there are 2 cases whether we are adjacent to the cut or not\n T_on_cut = not ( can_traverse(A,S) and can_traverse(B,S) )\n pA,pB,pS = qA,qB,qS # ref for later\n\n if T_on_cut : # build T1 and T2 in consistent order\n if I.mesh.half_edges.opposite(B,S,T)[0] is None or (not can_traverse(B,S)): # triangles are on the other side but we take B as a notation\n B, pB = A, pA\n T1, iBT1, iST1 = I.mesh.half_edges.opposite(B,S,T)\n if T1 is None or (not can_traverse(B,S)): continue # we cannot traverse on both side -> stop here\n\n visited[T1] = True\n iCT1 = 3 - iBT1 - iST1\n # build C from known points\n C = I.mesh.ith_vertex_of_face(T1, iCT1)\n qB,qC,qS = build_triangle_singular(T1, iBT1, iCT1)\n qB,qS,qC = align_edges(pB,pS,qB,qS,qC) # /!\\ known points to align are B and S, not B 
and C\n\n cnr = I.mesh.connectivity.face_to_first_corner(T1)\n I.UVs[cnr + iBT1] = qB.xy\n I.UVs[cnr + iCT1] = qC.xy\n I.UVs[cnr + iST1] = qS.xy\n\n T1n, iBT1, iCT1 = I.mesh.half_edges.opposite(B,C,T1)\n push(T1, T1n, B, iBT1, qB, C, iCT1, qC)\n\n # T2\n pS,pC = qS,qC\n T2, iCT2, iST2 = I.mesh.half_edges.opposite(C,S,T1)\n if T2 is None or (not can_traverse(C,S)): continue # cannot access third triangle -> stop here\n visited[T2] = True\n iDT2 = 3 - iCT2 - iST2\n D = I.mesh.ith_vertex_of_face(T2, iDT2)\n qC,qD,qS = build_triangle_singular(T2,iCT2,iDT2)\n qC,qS,qD = align_edges(pC,pS,qC,qS,qD)\n cnr = I.mesh.connectivity.face_to_first_corner(T2)\n I.UVs[cnr + iCT2] = qC.xy\n I.UVs[cnr + iDT2] = qD.xy\n I.UVs[cnr + iST2] = qS.xy\n T2n, iCT2, iDT2 = I.mesh.half_edges.opposite(C,D,T2)\n push(T2, T2n, C,iCT2, qC, D, iDT2, qD)\n\n else : # build T1 in one side and T2 in another\n T1, iAT1, iST1 = I.mesh.half_edges.opposite(A,S,T) # T1 on side of A\n if T1 is not None and can_traverse(A,S):\n visited[T1] = True\n iCT1 = 3 - iAT1 - iST1\n # build C from known points\n C = I.mesh.ith_vertex_of_face(T1, iCT1)\n S = I.mesh.ith_vertex_of_face(T1, iST1)\n qA,qC,qS = build_triangle_singular(T1, iAT1, iCT1)\n qA,qS,qC = align_edges(pA,pS,qA,qS,qC)\n cnr = I.mesh.connectivity.face_to_first_corner(T1)\n I.UVs[cnr + iAT1] = qA.xy\n I.UVs[cnr + iCT1] = qC.xy\n I.UVs[cnr + iST1] = qS.xy\n T1n, iAT1, iCT1 = I.mesh.half_edges.opposite(A,C,T1)\n push(T1,T1n, A, iAT1, qA, C, iCT1, qC)\n\n T2, iBT2, iST2 = I.mesh.half_edges.opposite(B,S,T) # T2 on side of B\n if T2 is not None and can_traverse(B,S):\n visited[T2] = True\n iDT2 = 3 - iBT2 - iST2\n D = I.mesh.ith_vertex_of_face(T2, iDT2) \n S = I.mesh.ith_vertex_of_face(T2, iST2)\n qB,qD,qS = build_triangle_singular(T2, iBT2, iDT2)\n qB,qS,qD = align_edges(pB,pS,qB,qS,qD)\n cnr = I.mesh.connectivity.face_to_first_corner(T2)\n I.UVs[cnr + iBT2] = qB.xy\n I.UVs[cnr + iDT2] = qD.xy\n I.UVs[cnr + iST2] = qS.xy\n T2n, iBT2, iDT2 = I.mesh.half_edges.opposite(B,D,T2)\n push(T2,T2n, B, iBT2, qB, D, iDT2, qD)\n \n else: # regular triangle\n iCT = 3 - iAT - iBT\n C = I.mesh.faces[T][iCT]\n qA,qB,qC = [ build_triangle(T)[_x] for _x in (iAT, iBT, iCT)]\n qA,qB,qC = align_edges(pA,pB,qA,qB,qC)\n cnr = I.mesh.connectivity.face_to_first_corner(T)\n I.UVs[cnr + iAT] = qA.xy\n I.UVs[cnr + iBT] = qB.xy\n I.UVs[cnr + iCT] = qC.xy\n\n T1, iAT, iCT = I.mesh.half_edges.opposite(A,C,T)\n push(T, T1, A, iAT, qA, C, iCT, qC)\n\n T2, iBT, iCT = I.mesh.half_edges.opposite(B,C,T)\n push(T, T2, B, iBT, qB, C, iCT, qC)\n self.log(\"Tree traversal done.\")\n\n self.log(\"Scaling and alignement with axes\")\n I.UVs = rescale_uvs(I)\n \n self.log(\"Reposition singularities \")\n n_singus_fail = replace_singularities_barycenter(I)\n # n_singus_fail = 0 \n \n if n_singus_fail>0:\n self.log(f\"/!\\ {n_singus_fail} singularities failed to be positionned inside their triangle\")\n else:\n self.log(\"All singularities have been positionned\")\n\n self.reconstructed = True\n self.instance.cut_graph = self.cutter.cut_graph\n","repo_name":"GCoiffier/moving_frames_parametrization","sub_path":"vertex_based/src/reconstruct.py","file_name":"reconstruct.py","file_ext":"py","file_size_in_byte":21025,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"28"} +{"seq_id":"21457374734","text":"\"\"\" Matrix of the double layer potential\n\n Author: Zoïs Moitier\n Karlsruhe Institute of Technology, Germany\n\"\"\"\n\nfrom math import atan\nfrom typing import Callable, 
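The UV reconstruction in the preceding record leans on align_edges (imported from its .common module, not shown here): given a triangle built in its own local frame, it is rigidly moved so that the shared edge lands on the already-placed copy. A plausible minimal sketch of what such a helper does — this is an assumption about the helper's behavior, not its actual source:

import numpy as np

def align_edges(pA, pB, qA, qB, qC):
    """Rigidly move (qA, qB, qC) so that qA lands on pA and qB lands on pB.

    Assumes |qB - qA| equals |pB - pA| up to numerical error (rigid motion
    only). Points are arrays of length >= 2; only x and y are transformed.
    """
    p = np.asarray(pB)[:2] - np.asarray(pA)[:2]
    q = np.asarray(qB)[:2] - np.asarray(qA)[:2]
    # rotation angle between the two edge directions (in the xy-plane)
    ang = np.arctan2(p[1], p[0]) - np.arctan2(q[1], q[0])
    c, s = np.cos(ang), np.sin(ang)
    R = np.array([[c, -s], [s, c]])

    out = []
    for pt in (qA, qB, qC):
        v = np.asarray(pt, dtype=float).copy()
        v[:2] = R @ (v[:2] - np.asarray(qA)[:2]) + np.asarray(pA)[:2]
        out.append(v)
    return out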
Tuple\n\nfrom numpy import (\n absolute,\n diag_indices,\n empty,\n eye,\n hypot,\n less,\n log,\n logical_not,\n pi,\n remainder,\n sin,\n where,\n)\nfrom numpy.typing import NDArray\nfrom scipy.special import hankel1, jv\n\nfrom ..obstacle import EllipseParametrization\nfrom .grid import _mesh_grid, grid\nfrom .quadrature import kress_weight\n\n\ndef _distance(\n S: NDArray, T: NDArray, gamma: Callable\n) -> Tuple[NDArray, NDArray, NDArray]:\n \"\"\"Return distance and difference.\"\"\"\n\n x_s, y_s = gamma(S)\n x_t, y_t = gamma(T)\n\n xdiff, ydiff = x_s - x_t, y_s - y_t\n dist = hypot(xdiff, ydiff)\n\n return dist, xdiff, ydiff\n\n\ndef double_layer_mpqr(\n ellipse: EllipseParametrization, k: float, N: int\n) -> Tuple[NDArray, NDArray]:\n \"\"\"Double layer\"\"\"\n # pylint: disable=too-many-locals\n\n θ, Δθ = grid(N)\n S, T = _mesh_grid(θ)\n\n dist, xdiff, ydiff = _distance(S, T, ellipse.gamma)\n\n jac = ellipse.jacobian(T)\n ν_x, ν_y = ellipse.normal_ext(T)\n\n dist_eq_0 = diag_indices(N)\n dist_not_0 = where(logical_not(eye(N, dtype=bool)))\n\n cos_term = empty((N, N))\n cos_term[dist_not_0] = (\n ν_x[dist_not_0] * xdiff[dist_not_0] + ν_y[dist_not_0] * ydiff[dist_not_0]\n ) / dist[dist_not_0]\n\n L1 = empty((N, N))\n L1[dist_not_0] = (\n (-k / (4 * pi))\n * cos_term[dist_not_0]\n * jv(1, k * dist[dist_not_0])\n * jac[dist_not_0]\n )\n L1[dist_eq_0] = 0\n\n L2 = empty((N, N), dtype=complex)\n L2[dist_not_0] = (\n (0.25 * 1j * k)\n * hankel1(1, k * dist[dist_not_0])\n * cos_term[dist_not_0]\n * jac[dist_not_0]\n )\n L2[dist_not_0] -= L1[dist_not_0] * log(\n 4 * sin(0.5 * (S[dist_not_0] - T[dist_not_0])) ** 2\n )\n L2[dist_eq_0] = (-1 / (4 * pi)) * jac[dist_eq_0] * ellipse.curvature(θ)\n L2 *= Δθ\n\n quasi_sing = where(less(absolute(remainder(S + T, 2 * pi) - pi), Δθ / 2))\n L2[quasi_sing] = -atan(Δθ / (4 * ellipse.ɛ)) / pi\n\n return (kress_weight(N) * L1 + L2, θ)\n","repo_name":"zmoitier/Scattering_BIE_QPAX","sub_path":"src/helmholtz/bie/layer_mpqr.py","file_name":"layer_mpqr.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"30152953063","text":"import os\r\nimport configparser\r\n\r\n\r\ndef does_config_exist():\r\n exist_account_conf = os.path.isfile('./conf.ini')\r\n if exist_account_conf:\r\n print(\"conf.ini: found\")\r\n else:\r\n print(\"conf.ini: not found.. 
Generating\")\r\n config = configparser.ConfigParser()\r\n config['general'] = {'csgo_path': \"N/A\",\r\n 'steam_path': \"N/A\",\r\n 'steam_api_key': \"N/A\"}\r\n config['1'] = {'username': '',\r\n 'password': '',\r\n 'lastuse': '',\r\n 'rank': '',\r\n 'prime': '',\r\n 'active': '',\r\n 'autolaunch': '',\r\n 'steamid': ''}\r\n with open('conf.ini', 'w') as configfile:\r\n config.write(configfile)\r\n configfile.close()\r\n","repo_name":"Joifage/CSGOSmuffSwitcher","sub_path":"functions/configbuild.py","file_name":"configbuild.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"70465501196","text":"# -*- coding: utf-8 -*-\n\nimport random\n\nimport concurrent.futures\nfrom concurrent.futures.thread import ThreadPoolExecutor\n\nfrom conftest import (\n admin_token,\n food_store,\n json_get,\n order_store,\n simple_make_order,\n token_gen, new_cart, json_patch, json_post)\n\n\ndef buy_to_stock(food_id, target_stock):\n remain_stock = max(food_store[food_id][\"stock\"] - target_stock, 0)\n while remain_stock > 0:\n count = min(remain_stock, 3)\n res = simple_make_order([{\"food_id\": food_id, \"count\": count}])\n\n # should succeed while the food still has remaining stock\n assert res.status_code == 200\n\n remain_stock -= count\n\n\ndef test_food_stock_consistency():\n\n food_id = random.choice(list(food_store.keys()))\n buy_to_stock(food_id, 0)\n\n # should fail when the food is out of stock\n res = simple_make_order([{\"food_id\": food_id, \"count\": 1}])\n assert res.status_code == 403\n assert res.json() == {\"code\": \"FOOD_OUT_OF_STOCK\",\n \"message\": u\"食物库存不足\"}\n\n\ndef test_admin_query_orders():\n res = json_get(\"/admin/orders\", admin_token)\n assert res.status_code == 200\n\n q_orders = res.json()\n assert len(q_orders) == len(order_store)\n\n for q_order in q_orders:\n order_id = q_order[\"id\"]\n assert order_id in order_store\n assert q_order[\"user_id\"] == order_store[order_id][\"user_id\"]\n assert q_order[\"items\"] == order_store[order_id][\"items\"]\n\n\ndef test_food_not_oversold_under_concurrent():\n\n TEST_FOOD_COUNT = 5\n TEST_FOOD_STOCK = 10\n\n # randomly choose foods with more than 10 in stock\n test_food_ids = random.sample(\n [f for f, s in food_store.items() if s[\"stock\"] >= TEST_FOOD_STOCK],\n TEST_FOOD_COUNT)\n for food_id in test_food_ids:\n buy_to_stock(food_id, TEST_FOOD_STOCK)\n assert food_store[food_id][\"stock\"] == TEST_FOOD_STOCK\n\n # enumerate all food items\n total_food_items = []\n for food_id in test_food_ids:\n remain_stock = food_store[food_id][\"stock\"]\n items = [{\"food_id\": food_id, \"count\": 1}] * remain_stock\n total_food_items.extend(items)\n assert len(total_food_items) == TEST_FOOD_COUNT * TEST_FOOD_STOCK\n\n # try to buy as much as twice the stock\n test_food_items = total_food_items * 2\n random.shuffle(test_food_items)\n\n # prepare carts & tokens, each cart contains 2 foods\n cart_ids, tokens, items_list = [], [], []\n for food_items in zip(test_food_items[::2], test_food_items[1::2]):\n _, token = next(token_gen)\n cart_id = new_cart(token)\n\n for item in food_items:\n res = json_patch(\"/carts/%s\" % cart_id, token, item)\n assert res.status_code == 204\n\n cart_ids.append(cart_id)\n tokens.append(token)\n items_list.append(food_items)\n\n def _make(cart_id, token, food_items):\n res = json_post(\"/orders\", token, {\"cart_id\": cart_id})\n if res.status_code == 200:\n for food_item in food_items:\n 
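# NOTE: this in-memory decrement races across the worker threads; the test\n tolerates that because it only asserts that stock never drops below zero\n 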
food_store[food_item[\"food_id\"]][\"stock\"] -= 1\n return res\n\n # make order with prepared carts, using 3 concurrent threads\n # selling slower is allowed (remaining stock > 0)\n # best case: everything sells and the count is correct (remaining stock == 0)\n # overselling is not allowed (remaining stock < 0)\n with ThreadPoolExecutor(max_workers=3) as executor:\n future_results = [\n executor.submit(_make, ct, tk, fs)\n for ct, tk, fs in zip(cart_ids, tokens, items_list)]\n concurrent.futures.wait(future_results, timeout=30)\n\n # test not oversold\n for food_id in test_food_ids:\n # print(\"stock %s -> %s\" % (food_id, food_store[food_id][\"stock\"]))\n assert food_store[food_id][\"stock\"] >= 0\n","repo_name":"zyearn/eleme-hackathon","sub_path":"tests/test_stock.py","file_name":"test_stock.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"28"} +{"seq_id":"40968988375","text":"from math import exp,log,e\nimport numpy as np \nfrom matplotlib import pyplot as plt\nfrom gaussxw import gaussxwab\ndef integrand(a,x):\n\treturn x**(a-1)*exp(-x)\nx_arr=np.linspace(0,5,100)\ngamma2,gamma3,gamma4=[],[],[]\n\nfor i in range(len(x_arr)):\n\tgamma2.append(integrand(2,x_arr[i]))\n\tgamma3.append(integrand(3,x_arr[i]))\n\tgamma4.append(integrand(4,x_arr[i]))\n\nplt.plot(x_arr,gamma2,label='gamma2')\nplt.plot(x_arr,gamma3,label='gamma3')\nplt.plot(x_arr,gamma4,label='gamma4')\nplt.legend()\nplt.savefig('plot')\n\n\ndef integrand2(a,z):\n\tc=a-1\n\tx=z*c/(1-z)\n\treturn (c/((1-z)**2) )*e**(c*log(x)-x)\n\ndef ingl_quad(a):\n\tintgl=0\n\tval,wts=gaussxwab(1000,0,1)\n\tfor i in range(len(val)):\n\t\tintgl+=integrand2(a,val[i])*wts[i]\n\treturn intgl\nf=open('out.txt','w')\nf.write(\"Gamma function evaluations:\\n.............................................\")\nf.write(\"\\nGamma(3/2): \" +str(round(ingl_quad(3/2),8)))\nf.write(\"\\nGamma(3): \" +str(round(ingl_quad(3),8)))\nf.write(\"\\nGamma(6): \" +str(round(ingl_quad(6),8)))\nf.write(\"\\nGamma(10): \" +str(round(ingl_quad(10),8)))\nf.close()\n","repo_name":"staradutt/computational-physics","sub_path":"Module2/P7/p7.py","file_name":"p7.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"20711529109","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\ndesc: crawl the list of urls\nauthor: lu.luo\ndate: 2018-04-02\n\"\"\"\nimport scrapy\nimport sqlite3\n\nfrom ..items import SmzdmItem\n\n\nclass MySpider(scrapy.Spider):\n name = \"smzdm\"\n url_models = [\"http://search.smzdm.com/?c=faxian&s={0}&v=a\",\n \"http://search.smzdm.com/?c=faxian&s={0}&v=a&p=2\"]\n conn = sqlite3.connect(\"./smzdm.db\")\n cursor = conn.cursor()\n cursor.execute(\"select distinct key from keywords;\")\n keys_dicts = cursor.fetchall()\n keys = [_key[0] for _key in keys_dicts]\n\n # start urls\n start_urls = []\n for key in keys:\n for url in url_models:\n add_url = url.format(key)\n start_urls.append(add_url)\n\n def parse(self, response):\n # We want to inspect one specific response.\n items = response.xpath(\"//div/ul[@id='feed-main-list']/li\")\n key = response.xpath(\"//input[@id='J_search_input']/@value\").extract_first()\n for item in items:\n info = SmzdmItem()\n info[\"good\"] = item.xpath(\".//h5[@class='feed-block-title']/a/text()\").extract_first()\n info[\"price\"] = item.xpath(\".//a/div[@class='z-highlight']/text()\").extract_first()\n info[\"url\"] = item.xpath(\".//div[@class='feed-link-btn-inner']/a/@href\").extract_first()\n 
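# extract_first() returns None when the XPath matches nothing, so downstream\n pipelines should be prepared for missing fields\n 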
info[\"store\"] = item.xpath(\".//span[@class='feed-block-extras']/span/text()\").extract_first()\n info[\"keyword\"] = key\n yield info\n\n","repo_name":"mashpolo/scrapy_cheap","sub_path":"smzdm/smzdm/spiders/crawl_smzdm.py","file_name":"crawl_smzdm.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"14536545427","text":"# David Beazley 2015 PyCon \"Concurrency from the Ground Up Live\"\n\nimport socket\n\n# select(list_1, list_2, list_3) is a function for monitoring the state of socket objects. It works with any object\n# that has a .fileno() method (a file descriptor, i.e. a file number). It watches the objects for changes.\n# list_1 - the first list: the objects to watch until they become ready for reading\n# list_2 - the second list: the objects to watch until they become ready for writing\n# list_3 - the third list: the objects on which we expect some kind of error.\n# Returns the lists once they become ready.\nfrom select import select\n\n# List of tasks\ntasks = []\n\nto_read = {}\nto_write = {}\n\n\ndef server():\n # Server - the subject\n # server_socket - the object that will accept requests\n # AF - a global variable of the socket module, address family (INET - IP v4). The standard protocol address,\n # split by dots into 4 parts, 1 byte per part.\n # socket.SOCK_STREAM - means we are dealing with the TCP protocol\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Remove the timeout on program shutdown (normally about 1.5 minutes to flush pending data)\n # SOL_SOCKET - the level at which the option is set (SOL - socket level), SOCKET = server_socket (our socket)\n # SO_REUSEADDR - allow the address to be reused (SO - socket option)\n # 1 = True (enable)\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # bind the socket to a specific address and port\n server_socket.bind(('localhost', 5000))\n\n # Listen on the address/port (waiting for packets). Watch the incoming buffer for connections\n 
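# (note: listen() also accepts an explicit backlog, e.g. server_socket.listen(16),\n # to bound the queue of not-yet-accepted connections; without an argument Python\n # chooses a reasonable default)\n 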
server_socket.listen()\n\n # Process the received packets\n while True:\n\n # Before the blocking call we hand back the socket from which next() was called; the function is paused, and its\n # execution resumes only once the socket can perform the call without blocking\n yield ('read', server_socket)\n # Receive the packet sent by the client\n client_socket, addr = server_socket.accept() # read\n\n print('Connection from', addr)\n # Add the client() generator with this client socket as its argument\n tasks.append(client(client_socket))\n\n\ndef client(client_socket):\n # Wait for a message from the user\n while True:\n\n yield ('read', client_socket)\n # Receive the packet sent by the client\n request = client_socket.recv(4096) # read\n\n # Loop exit condition\n if not request:\n break\n\n # reply to the user when there is a request, encoded as bytes\n else:\n response = 'Hello world.\\n'.encode()\n\n yield ('write', client_socket)\n # Reply to the client\n client_socket.send(response) # write\n\n # Server-side message that the client has disconnected\n print('Outside inner while loop')\n # Close the connection\n client_socket.close()\n\n\n# The event loop that drives everything\ndef event_loop():\n while any([tasks, to_read, to_write]):\n while not tasks:\n ready_to_read, ready_to_write, _ = select(to_read, to_write, [])\n\n # Refill the task list with the values for the ready keys (the generator functions)\n for sock in ready_to_read:\n tasks.append(to_read.pop(sock))\n\n for sock in ready_to_write:\n tasks.append(to_write.pop(sock))\n\n try:\n # Get the tuple\n task = tasks.pop(0)\n\n # Unpack the tuple\n reason, sock = next(task)\n\n # Store a pair in the dict: key - the socket, value - the generator object left over after the next() call\n if reason == 'read':\n to_read[sock] = task\n\n if reason == 'write':\n to_write[sock] = task\n except StopIteration:\n print('Done!')\n\n\ntasks.append(server())\n","repo_name":"armornik/async_function","sub_path":"4_async_generators.py","file_name":"4_async_generators.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"3913867236","text":"from numpy import*\nv = array(eval(input(\"Vetor: \")))\ni = 0\nb = 0\np = 0\n\nwhile(i < size(v)):\n\tif(v[i]>99):\n\t\ti = i\n\t\tp = p + 1\n\t\tprint(i)\n\telse:\n\t\tp = p + 0\n\ti = i + 1\nprint(p)\t","repo_name":"JosephLevinthal/Research-projects","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4170/codes/1764_1185.py","file_name":"1764_1185.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"10051400906","text":"from __future__ import print_function\r\n \r\nimport subprocess\r\nimport sys\r\n\r\ndef convert(filename,outputfile):\r\n import cloudconvert as c\r\n\r\n # to write output to output.txt\r\n o=open('output.txt', 'w')\r\n\r\n def get_extension(filename):\r\n import os.path\r\n return os.path.splitext(filename)[1]\r\n\r\n filen_extension=get_extension(filename)\r\n\r\n print('Uploading file to server...',file=o)\r\n\r\n api=c.Api('MJ9qM1Eu2PhM7yegfHBQiAjxrcUmGQCo3uC1yymNyPoiUGFhXIUpbtIHXkQjiBJP') #api key for cloudconvert \r\n process = api.convert({\r\n 'inputformat': filen_extension.replace('.',''),\r\n 'outputformat': 'ico',\r\n 'input': 'upload',\r\n 'file': open(filename, 'rb')\r\n })\r\n \r\n 
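# (note: api.convert() starts a remote CloudConvert job; the lines below assume the\r\n # v1-style Python client, where process.wait() blocks until the remote job finishes)\r\n 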
print('Uploaded file to server. Converting....',file=o)\r\n\r\n process.wait()\r\n\r\n download_file_name=filename.replace(filen_extension,'.icns')\r\n\r\n print('Downloading....',file=o)\r\n process.download(outputfile)\r\n\r\n print('Done!',file=o)\r\n\r\n\r\n","repo_name":"Armster15/magicicon","sub_path":"magicicon.py","file_name":"magicicon.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"25049315512","text":"# https://www.hackerrank.com/challenges/jumping-on-the-clouds/problem\n\ndef jumpingOnClouds(c: list[int]) -> int:\n res, i = 0, 0\n l = len(c)\n \n while i < l - 2:\n i = i + 1 if c[i+2] else i + 2\n res += 1\n\n return res + 1 if i < l - 1 else res","repo_name":"garrou/hackerrank","sub_path":"algorithms/easy/jumping_on_clouds.py","file_name":"jumping_on_clouds.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"39560176362","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 18 14:52:16 2023\r\n@author: laksh\r\nCode to run exploration simulations corresponding to Figure 5 of report\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom helper_funcs import *\r\nimport time\r\nfrom pypet import Environment, cartesian_product, Trajectory\r\nimport logging\r\nimport os # For path names working under Linux and Windows\r\nfrom numba.core.errors import NumbaDeprecationWarning, NumbaPendingDeprecationWarning\r\nimport warnings\r\nfrom helper_exploration import *\r\n\r\nwarnings.simplefilter('ignore', category=NumbaDeprecationWarning)\r\nwarnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)\r\n \r\ngenerations = 300000\r\n\r\ndef add_param(traj):\r\n \"\"\"Adds all parameters to `traj`\"\"\"\r\n print('Adding Parameters')\r\n\r\n # The following lines add the common parameters\r\n traj.f_add_parameter('com.sig_e', np.sqrt(0.5).item(),\r\n comment='Common phenotypic variance of both species')\r\n traj.f_add_parameter('com.sig_s', np.sqrt(300.0).item(),\r\n comment='Strength of stabilising selection')\r\n traj.f_add_parameter('com.sig_u', np.sqrt(10.0).item(),\r\n comment='Utilisation curve variance')\r\n traj.f_add_parameter('com.sig_eps', np.sqrt(2).item(),\r\n comment='Strength of environmental fluctuations')\r\n traj.f_add_parameter('com.rho', 0.5,\r\n comment='Autocorrelation between developmental'\r\n 'environment and selection environment')\r\n traj.f_add_parameter('com.tau', 0.5,\r\n comment='fraction of generation between'\r\n 'development and selection')\r\n traj.f_add_parameter('com.r', 0.1,\r\n comment='Growth rate')\r\n traj.f_add_parameter('com.seed', 0,\r\n comment='Value of seed for choosing random values')\r\n traj.f_add_parameter('com.tot', generations,\r\n comment='Number of generations to run the simulation')\r\n \r\n \r\n # The following lines add the species parameters\r\n traj.f_add_parameter('sp.A', np.array([5.0, 5.0]),\r\n comment='Optimal genetic trait value')\r\n traj.f_add_parameter('sp.B', np.array([3.0, 3.0]),\r\n comment='Optimal plasticity')\r\n traj.f_add_parameter('sp.a0', np.array([5.3, 4.7]),\r\n comment='Initial genetic trait value')\r\n traj.f_add_parameter('sp.b0', np.array([2.5, 2.51]),\r\n comment='Initial plasticity value')\r\n traj.f_add_parameter('sp.kar', np.array([60000.0, 60000.0]),\r\n comment='Carrying capacities')\r\n traj.f_add_parameter('sp.n0', traj.kar/2,\r\n comment='Initial populations, default half of carrying')\r\n 
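# note: traj.kar resolves to the value of the 'sp.kar' parameter added just above,\r\n # so the default initial population is half of each carrying capacity\r\n 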
traj.f_add_parameter('sp.Gaa', np.array([0.5, 0.5]),\r\n comment='variance of trait a')\r\n traj.f_add_parameter('sp.Gbb', np.array([0.045, 0.045]),\r\n comment='variance of trait b')\r\n # growth parameter: 0 -> static population, 1 -> growing population\r\n traj.f_add_parameter('sp.grow', np.array([1, 0]),\r\n comment='growth parameter')\r\n # plasticity parameter: -2 -> no fluctuations, -1 -> no plasticity\r\n # 0 -> constant plasticity, 1 -> evolving plasticity\r\n traj.f_add_parameter('sp.plast', np.array([1, 0]),\r\n comment='plasticity parameter')\r\n \r\n# function that defines the parameter ranges to explore\r\ndef param_explore(traj):\r\n \"\"\" This is where you define whatever exploration you want to run. \"\"\"\r\n print('Exploring across b0 and n0')\r\n \r\n explore_dict = {'sp.b0': [np.array([2.51, i]) for i in np.arange(0, 2.501, 0.025)],\r\n 'sp.n0': [np.array([20000, i]) for i in np.arange(5000.0, 60000.0, 5000.0)]}\r\n \r\n explore_dict = cartesian_product(explore_dict, ('sp.b0','sp.n0'))\r\n \r\n \r\n traj.f_explore(explore_dict)\r\n print('added exploration')\r\n\r\n# main function to add parameters, add exploration, run simulations and store results\r\ndef main(fn, fld, traje):\r\n filename = os.path.join('hdf5', fld, fn)\r\n env = Environment(trajectory=traje,\r\n comment='Setting up the pypet pipeline for our '\r\n 'temporal model of character displacement. ',\r\n add_time=False, # We don't want to add the current time to the name,\r\n log_stdout=True,\r\n log_config='DEFAULT',\r\n multiproc=True,\r\n ncores=24,\r\n wrap_mode='QUEUE',\r\n filename=filename,\r\n overwrite_file=True)\r\n traj = env.trajectory\r\n add_param(traj)\r\n \r\n # Let's explore\r\n param_explore(traj)\r\n\r\n # Run the experiment\r\n env.run(run_main1)\r\n\r\n # Finally disable logging and close all log-files\r\n env.disable_logging()\r\n \r\n\r\nif __name__ == '__main__': \r\n\r\n fld = \"dummy\"\r\n \r\n fn = f\"CDisp_Sp2CpCpop.hdf5\"\r\n traje = 'dummy'\r\n \r\n main(fn, fld, traje)\r\n post_proc(fn, fld, traje)\r\n","repo_name":"Lakshya3141/CD_plasticity","sub_path":"Exploration_fixedPlasticityconstantPop.py","file_name":"Exploration_fixedPlasticityconstantPop.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"32724856606","text":"from plone.app.testing import PloneSandboxLayer, PLONE_FIXTURE, IntegrationTesting, FunctionalTesting\nfrom plone.testing import z2\nfrom plone.app.testing import setRoles, login, TEST_USER_ID, TEST_USER_NAME\n\nclass EDRNSitePortlets(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n def setUpZope(self, app, configurationContext):\n import edrnsite.portlets\n self.loadZCML(package=edrnsite.portlets)\n z2.installProduct(app, 'edrnsite.portlets')\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'edrnsite.portlets:default')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n login(portal, TEST_USER_NAME)\n portal.invokeFactory('Folder', 'folder', title=u'Test Folder')\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'edrnsite.portlets')\n\n\nEDRNSITE_PORTLETS_FIXTURE = EDRNSitePortlets()\nEDRNSITE_PORTLETS_INTEGRATION_TESTING = IntegrationTesting(bases=(EDRNSITE_PORTLETS_FIXTURE,), name='EDRNSitePortlets:Integration')\nEDRNSITE_PORTLETS_FUNCTIONAL_TESTING = FunctionalTesting(bases=(EDRNSITE_PORTLETS_FIXTURE,), 
name='EDRNSitePortlets:Functional')\n","repo_name":"EDRN/edrnsite.portlets","sub_path":"edrnsite/portlets/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"40869598419","text":"from django.urls import path\n\nfrom accounts.views import AccountsView, GetCSRFToken, CheckDuplicateView, CheckAuthenticatedView, LoginView, LogoutView\n\nurlpatterns = [\n path('authenticated', CheckAuthenticatedView.as_view()),\n path('duplicate', CheckDuplicateView.as_view()),\n path('', AccountsView.as_view()),\n path('login', LoginView.as_view()),\n path('logout', LogoutView.as_view()),\n path('csrf_cookie', GetCSRFToken.as_view()),\n]","repo_name":"yongun2/Django-Session-Authentication-And-CSRF","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"71853852235","text":"from monitor.monitor import Monitor\nfrom controller.controller import Controller\nimport threading\nimport time\nimport os\n\n\nclass Instance:\n\n    instance_list = []\n    lock = threading.Lock()\n\n    def __init__(self, config: dict) -> None:\n        self.instance_list.append(self)\n        self.is_alive: bool = True\n        self.is_asleep = True\n        self.port: int = os.getenv('GRPC_PORT')\n        self.config: dict = config\n        self.controller: Controller = self.create_controller()\n        print(f\"Creating instance: IP={self.ip}, ID={self.id}\", flush=True)\n        self.sleep()\n    \n\n    def sleep(self):\n        print(f\"----> Starting to wait for registration for IP={self.ip}\", flush=True)\n        for _ in range(20):\n            time.sleep(10)\n            if not self.is_asleep:\n                print(f\"--------> Instance with IP={self.ip} registered, starting its monitoring\", flush=True)\n                self.monitor: Monitor = self.create_monitor()\n                self.start()\n                return\n        print(\"--------> Instance did not register within the given time, removing\", flush=True)\n        Instance.remove_instance(self.id)\n\n    @classmethod\n    def awaken(cls, instance_id):\n        for instance in cls.instance_list:\n            if instance.id == instance_id:\n                instance.is_asleep = False\n\n    @classmethod\n    def remove_instance(cls, id) -> None:\n        cls.lock.acquire()\n        new_list: list[cls] = []\n        \n        for instance in cls.instance_list:\n            if instance.id == id:\n                print(f\"Removing instance: IP={instance.ip}, ID={instance.id}\", flush=True)\n                instance.kill()\n                instance.controller.delete_instance(instance.id)\n            else:\n                new_list.append(instance)\n\n        cls.instance_list: list[cls] = new_list\n        cls.lock.release()\n\n    def create_controller(self) -> Controller:\n        controller: Controller = Controller(self.config)\n        self.id, self.ip = controller.create_instance()\n        return controller\n\n    def create_monitor(self) -> Monitor:\n        monitor = Monitor(self)\n        if monitor.application_failed_to_start():\n            print(f\"Failed to create the monitor for instance IP={self.ip}.\\nRemoving\", flush=True)\n            self.remove_instance(self.id)\n        return monitor\n\n    def kill(self) -> None:\n        self.is_alive: bool = False\n\n    def get_socket(self) -> str:\n        return f'{self.ip}:{self.port}'\n\n    def start(self) -> None:\n        threading.Thread(target=self.watch_connection).start()\n        threading.Thread(target=self.watch_metric).start()\n\n    def watch_connection(self) -> None:\n        print(f\"Starting heartbeat checks for instance {self.ip}\", flush=True)\n        while self.is_alive:\n            self.monitor.ping()\n            time.sleep(1)\n\n    def watch_metric(self) -> None:\n        
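# Polls the metric roughly every 30 s and applies the scale-down / scale-up\n        # policies through check_termination and check_creation below.\n        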
while self.is_alive:\n            print(\"======================\", flush=True)\n            print(\"======================\", flush=True)\n            print(\"Starting metric analysis for ip\", self.ip, flush=True)\n            self.monitor.update_metric()\n            metric: int = self.monitor.get_metric()\n            print(f\"----> Current metric value for instance with ip {self.ip}:\",metric, flush=True)\n            self.check_termination(metric)\n            self.check_creation(metric)\n            print(\"======================\", flush=True)\n            print(\"======================\", flush=True)\n            time.sleep(30)\n\n    def check_termination(self, metric: int) -> None:\n        print(\"----------------------\", flush=True)\n        print(f\"Starting check to terminate instance | Checking `{self.ip}`\", flush=True)\n        print(f\"----> The minimum metric is {self.config['policy_config']['delete_policy']} and the current one is {metric}\", flush=True)\n        print(f\"----> The minimum number of instances is {self.config['policy_config']['min_instances']} and the current count is {Controller.instances}\", flush=True)\n        \n        if metric >= self.config['policy_config']['delete_policy']:\n            print(f\"--------> The instance meets the metric minimum\", flush=True)\n            return\n\n\n        if Controller.instances <= self.config['policy_config']['min_instances']:\n            print(f\"--------> The instance does NOT meet the metric minimum BUT we are at the minimum number of instances\", flush=True)\n            return\n        \n        print(f\"--------> Deleting instance with ip {self.ip}\", flush=True)\n        self.remove_instance(self.id)\n\n    def check_creation(self, metric: int) -> None:\n        print(\"----------------------\", flush=True)\n        print(f\"Starting check to create a new instance | Checking `{self.ip}`\", flush=True)\n        print(f\"----> The maximum metric is {self.config['policy_config']['create_policy']} and the current one is {metric}\", flush=True)\n        print(f\"----> The maximum number of instances is {self.config['policy_config']['max_instances']} and the current count is {Controller.instances}\", flush=True)\n        \n        if metric <= self.config['policy_config']['create_policy']:\n            print(f\"--------> The instance meets the metric maximum\", flush=True)\n            return\n\n        if Controller.instances >= self.config['policy_config']['max_instances']:\n            print(f\"--------> The instance does NOT meet the metric maximum BUT we are at the maximum number of instances\", flush=True)\n            return\n        print(f\"--------> Creating a new instance because the instance with ip {self.ip} exceeds the required conditions\", flush=True)\n        threading.Thread(target=Instance, args=[self.config]).start()\n","repo_name":"Drew138/proyecto-2-telematica","sub_path":"orchestrator/src/instance/instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"5802013202","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 8 21:30:50 2022\r\n\r\n@author: Digital Zone\r\n\"\"\"\r\n#Problem 1\r\ndef SearchA(Arr,x):\r\n    indices=[]\r\n    for i in range(len(Arr)): \r\n        if(Arr[i] == x):\r\n            indices.append(i)\r\n    return indices\r\n#Problem 3\r\ndef Minimum(Arr,starting,ending):\r\n    smallest=Arr[starting] \r\n    index = starting \r\n    for i in range(starting,ending):\r\n        if(Arr[i+1]0 and Arr[i] Y, then for the corresponding two nodes x and y in the copied list, x.random --> y.\n\nReturn the head of the copied linked list.\n\nThe linked list is represented in the input/output as a list of n nodes. 
Each node is represented as a pair of [val, random_index] where:\n\nval: an integer representing Node.val\nrandom_index: the index of the node (range from 0 to n-1) that the random pointer points to, or null if it does not point to any node.\nYour code will only be given the head of the original linked list.'''\n\nclass Solution:\n    def copyRandomList(self, head: 'Optional[Node]') -> 'Optional[Node]':\n        if not head:\n            return None\n\n        node_map = dict()\n        node_map[None] = None\n\n        orig_node = head \n        while orig_node:\n            copy_node = Node(orig_node.val)\n            node_map[orig_node] = copy_node\n            orig_node = orig_node.next\n\n        orig_node = head\n        while orig_node:\n            copy_node = node_map[orig_node]\n            copy_node.next = node_map[orig_node.next]\n            copy_node.random = node_map[orig_node.random]\n            orig_node = orig_node.next\n\n        return node_map[head]","repo_name":"jrmeeker/Leetcode-Solutions","sub_path":"CopyListWithRandomPointer.py","file_name":"CopyListWithRandomPointer.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"74149432075","text":"import json\nimport sqlite3\nfrom flask import Flask \nfrom flask.json import jsonify\nfrom flask import abort\n\n\napp = Flask(__name__)\n\ndb = 'park.db'\n\n@app.route('/api/list')\ndef send_park_list():\n\n    con = sqlite3.connect(db)\n    data = con.execute('SELECT park_id, name FROM parks ORDER BY park_id')\n    parks = [ { 'park_id': row[0], 'name': row[1]} for row in data ]\n    con.close()\n    return jsonify(parks)\n\n\n@app.route('/api/<park_id>')\ndef send_park(park_id):\n    if len(park_id) < 5:\n        park_id = park_id.zfill(5)\n\n    con = sqlite3.connect(db)\n    data = con.execute('SELECT json FROM parks WHERE park_id = ?', (park_id,) ).fetchone()\n    if data:\n        park_json = data[0]\n        park_info = json.loads(park_json)\n        return park_info\n    else:\n        abort(404, f'Park with id {park_id} not found')\n\n\n@app.errorhandler(404)\ndef not_found(e):\n    return 'Not found'","repo_name":"claraj/mn_state_park_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17866039124","text":"def conFrecuencias(v):\n    r = []\n    for elemento in v:\n        if(isinstance(elemento, str)):\n            valor = int(elemento.split(\"p\")[0])\n            frecuencia = int(elemento.split(\"p\")[1])\n            for i in range (frecuencia):\n                r.append(valor) \n        else:\n            r.append(elemento)\n    return r\n\nvector=(\"0p2\", \"2p4\", \"3p2\", \"4p2\", \"7p2\")\nvector=conFrecuencias(vector) # 0p2 means: 'zero' repeated 'two' times\n\nprint(vector)","repo_name":"Archerd6/Python-utils","sub_path":"Estadística/Una variable/Scripts con funciones más simples/Vector con fecuencias a Vector normal.py","file_name":"Vector con fecuencias a Vector normal.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"14460240973","text":"from PyQt5.QtWidgets import QGraphicsScene\n\nfrom urh.ui.LabeledArrow import LabeledArrow\n\n\nclass LegendScene(QGraphicsScene):\n    def __init__(self, parent=None):\n        super().__init__(parent)\n        self.ones_arrow = None\n        self.zeros_arrow = None\n\n    def draw_one_zero_arrows(self, ymid):\n        y = self.sceneRect().y()\n        h = self.sceneRect().height()\n        if ymid < y:\n            ymid = y\n        elif ymid > y + h:\n            ymid = y + h\n\n        w_view = self.sceneRect().width()\n\n        if self.zeros_arrow is None:\n            self.zeros_arrow = LabeledArrow(w_view / 2, y 
+ h / 2 + ymid, w_view / 2, y + h, 0)\n self.addItem(self.zeros_arrow)\n else:\n self.removeItem(self.zeros_arrow)\n self.zeros_arrow = LabeledArrow(w_view / 2, y + h / 2 + ymid, w_view / 2, y + h, 0)\n self.addItem(self.zeros_arrow)\n\n if self.ones_arrow is None:\n self.ones_arrow = LabeledArrow(w_view / 2, y, w_view / 2, y + h / 2 + ymid, 1)\n self.addItem(self.ones_arrow)\n else:\n self.removeItem(self.ones_arrow)\n self.ones_arrow = LabeledArrow(w_view / 2, y, w_view / 2, y + h / 2 + ymid, 1)\n self.addItem(self.ones_arrow)\n\n def clear(self):\n self.zeros_arrow = None\n self.ones_arrow = None\n super().clear()","repo_name":"rsumner33/urh-2","sub_path":"src/urh/ui/LegendScene.py","file_name":"LegendScene.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"32280925981","text":"# Standard Library\nimport os\n\n# Local\nfrom src.utils import convert_story_csv_to_txt_file\n\ndata_dir = '/Users/pti/challenges/4761_fair_fairytale/fair-fairytale-nlp/data/FairytaleQA/split_by_origin'\n\nfairytale_csvs = []\n\nfor path, dirs, files in os.walk(data_dir):\n for csv_file in files:\n if csv_file[-9:] == 'story.csv':\n txt_file = csv_file[:-10] + '.txt'\n convert_story_csv_to_txt_file(os.path.join(path, csv_file), os.path.join(path, txt_file))\n \n","repo_name":"neu-hai/fair-fairytale","sub_path":"utils/convert_all_story_csv_to_text.py","file_name":"convert_all_story_csv_to_text.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"939714548","text":"# -*- coding: utf-8 -*-\n\nimport pika\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\nchannel = connection.channel()\n\nchannel.exchange_declare(exchange='first', exchange_type='fanout')\n\nfor i in range(10):\n channel.basic_publish(exchange='first', routing_key='', body=str(i))\n","repo_name":"yaodwwy/videosCode","sub_path":"rabbitMQ(学习完成)/博客实验(完全消化)/9_示例_一条消息多种处理, 临时队列(完全消化)/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36165891231","text":"\"\"\"empty message\n\nRevision ID: 7b8d5011742e\nRevises: df80fd943985\nCreate Date: 2021-08-17 23:22:51.328215\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7b8d5011742e'\ndown_revision = 'df80fd943985'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('detalleordentrabajo', sa.Column('id_ordentrabajo', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'detalleordentrabajo', 'ordentrabajo', ['id_ordentrabajo'], ['id'])\n op.add_column('ordentrabajo', sa.Column('id_contrato', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'ordentrabajo', 'contrato', ['id_contrato'], ['id'])\n op.add_column('statusorden', sa.Column('id_contrato', sa.Integer(), nullable=True))\n op.add_column('statusorden', sa.Column('id_userorden', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'statusorden', 'userorden', ['id_userorden'], ['id'])\n op.create_foreign_key(None, 'statusorden', 'contrato', ['id_contrato'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'statusorden', type_='foreignkey')\n op.drop_constraint(None, 'statusorden', type_='foreignkey')\n op.drop_column('statusorden', 'id_userorden')\n op.drop_column('statusorden', 'id_contrato')\n op.drop_constraint(None, 'ordentrabajo', type_='foreignkey')\n op.drop_column('ordentrabajo', 'id_contrato')\n op.drop_constraint(None, 'detalleordentrabajo', type_='foreignkey')\n op.drop_column('detalleordentrabajo', 'id_ordentrabajo')\n # ### end Alembic commands ###\n","repo_name":"wotanCode/4GA-RedSystem-Full-Stack-Final-Project-Frontend","sub_path":"migrations/versions/7b8d5011742e_.py","file_name":"7b8d5011742e_.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"72578691594","text":"from docx import Document\r\nimport docx2txt\r\nimport os\r\nfrom tkinter import *\r\n\r\n\r\ndef show_typos(misspelled_dict):\r\n def got_it():\r\n root.destroy()\r\n root = Tk()\r\n root.title(\"Typo&Correction Window\")\r\n scroll = Scrollbar(root)\r\n scroll.pack(side=RIGHT,fill=Y)\r\n\r\n T = Text(root, height=25, width=47, yscrollcommand=scroll.set)\r\n T.insert(END, \"TYPO\\t\\t CORRECT\\n\")\r\n T.pack()\r\n B = Button(root, text=\"Got it\", width=6, command=got_it)\r\n B.pack()\r\n scroll.config(command=T.yview)\r\n\r\n for typo in misspelled_dict.keys():\r\n T.insert(END, typo+\"\\t\\t \"+misspelled_dict.get(typo)+\"\\n\")\r\n\r\n root.mainloop()\r\n\r\n\r\ndef typolistread():\r\n f = open('TypoCollection.csv', 'r')\r\n content = f.readlines()\r\n\r\n typo_dict = {}\r\n\r\n for line in content:\r\n items = line.split(',')\r\n key, values = items[0], items[1]\r\n values = values[:-1]\r\n typo_dict[key] = values\r\n f.close()\r\n return typo_dict\r\n\r\n\r\ndef correcting_process(typo_dict, my_text):\r\n misspelled_dict = {}\r\n for correction in typo_dict.keys():\r\n if correction in my_text:\r\n my_text = my_text.replace(correction, typo_dict.get(correction))\r\n misspelled_dict.update({correction:typo_dict.get(correction)})\r\n\r\n return my_text, misspelled_dict\r\n\r\n\r\ndef docx_process(savefilename, typo_dict):\r\n savefilename = savefilename + \".docx\"\r\n # my_text is a string that has all text in the initial docx file\r\n # Make sure to have only one file per extension so that TypoDetector can automatically correct.\r\n for files in os.listdir(\".\"):\r\n if files.endswith(\".docx\"):\r\n my_text = docx2txt.process(files)\r\n break\r\n doc = Document()\r\n para = doc.add_paragraph()\r\n my_text, misspelled_dict = correcting_process(typo_dict, my_text)\r\n run = para.add_run(my_text)\r\n doc.save(savefilename)\r\n return misspelled_dict\r\n\r\n\r\ndef txt_process(savefilename, typo_dict):\r\n savefilename = savefilename + \".txt\"\r\n # my_text is a string that has all text in the initial txt file\r\n # Make sure to have only one file per extension so that TypoDetector can automatically correct.\r\n for files in os.listdir(\".\"):\r\n if files.endswith(\".txt\"):\r\n f = open(files,'r')\r\n my_text = f.read()\r\n break\r\n of = open(savefilename,\"w+\")\r\n my_text, misspelled_dict = correcting_process(typo_dict, my_text)\r\n of.write(my_text)\r\n of.close()\r\n return misspelled_dict\r\n","repo_name":"JeaminRhee/WordTypoCorrector-Korean-","sub_path":"main_.py","file_name":"main_.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} 
+{"seq_id":"7633047708","text":"\n\"\"\"\n138. Copy List with Random Pointer\nA linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.\nReturn a deep copy of the list.\n\"\"\"\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n    def __init__(self, val, next, random):\n        self.val = val\n        self.next = next\n        self.random = random\n\"\"\"\n\n\nclass Solution:\n\n    def copyRandomList(self, head: 'Node') -> 'Node':\n        memo = {}\n        return self.copyNode(head, memo)\n\n    def copyNode(self, cur, memo):\n        if not cur:\n            return None\n        if cur in memo:\n            return memo[cur]\n\n        copy = Node(cur.val, None, None)\n        memo[cur] = copy\n        copy.next = self.copyNode(cur.next, memo)\n        copy.random = self.copyNode(cur.random, memo)\n        return copy\n","repo_name":"iverson52000/DataStructure_Algorithm","sub_path":"LeetCode/0138. Copy List With Random Pointer.py","file_name":"0138. Copy List With Random Pointer.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"24782628998","text":"import vtk\r\nimport cv2\r\nimport gui\r\nimport itk\r\nimport numpy as np\r\nimport data_transforms\r\nimport SimpleITK as sitk\r\nimport matplotlib.pyplot as plt\r\nfrom myshow import myshow, myshow3d\r\n\r\n\"\"\"\r\nnodule0 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.269689294231892620436462818860.2.50000051251220000101.nrrd\")\r\nnodule0_mask1 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.269689294231892620436462818860.2.50000051251220000101Mask_1.nrrd\")\r\nnodule0_mask2 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.269689294231892620436462818860.2.50000051251220000101Mask_2.nrrd\")\r\nnodule0_mask3 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.269689294231892620436462818860.2.50000051251220000101Mask_3.nrrd\")\r\n\r\nnodule1 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.822128649427327893802314908658.42.50000051251220000101.nrrd\")\r\nnodule1_mask1 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.822128649427327893802314908658.42.50000051251220000101Mask_1.nrrd\")\r\nnodule1_mask2 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.822128649427327893802314908658.42.50000051251220000101Mask_2.nrrd\")\r\nnodule1_mask3 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.822128649427327893802314908658.42.50000051251220000101Mask_3.nrrd\")\r\n\r\nnodule2 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.217955041973656886482758642958.22.50000051251220000101.nrrd\")\r\nnodule2_mask1 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.217955041973656886482758642958.22.50000051251220000101Mask_1.nrrd\")\r\nnodule2_mask2 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.217955041973656886482758642958.22.50000051251220000101Mask_2.nrrd\")\r\nnodule2_mask3 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.217955041973656886482758642958.22.50000051251220000101Mask_3.nrrd\")\r\n\"\"\"\r\nnodule3 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.100953483028192176989979435275.32.50000051251220000101.nrrd\")\r\nnodule3_mask1 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.100953483028192176989979435275.32.50000051251220000101Mask_1.nrrd\")\r\nnodule3_mask2 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.100953483028192176989979435275.32.50000051251220000101Mask_2.nrrd\")\r\nnodule3_mask3 = sitk.ReadImage(\"1.3.6.1.4.1.14519.5.2.1.6279.6001.100953483028192176989979435275.32.50000051251220000101Mask_3.nrrd\")\r\n\r\n
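# The Mask_k volumes are nodule annotations (presumably binary masks); mask_image below\r\n# applies one as an element-wise multiply, zeroing every voxel outside the annotated nodule.\r\n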
imagearray = np.load(\"image.npy\")\r\nimagearray = imagearray.astype('float64')\r\n\r\nmaskarray = np.load(\"mask.npy\")\r\nmaskarray = maskarray.astype('float64')\r\n\r\n# Hand-written helper function for masking an image with a nodule mask\r\ndef mask_image(image, mask):\r\n    image_array = sitk.GetArrayFromImage(image)\r\n    mask_array = sitk.GetArrayFromImage(mask)\r\n    \r\n    masked_array = image_array * mask_array\r\n    masked_nodule = sitk.GetImageFromArray(masked_array)\r\n    return masked_nodule\r\n\r\n# Mask3 nodule \r\n#masked3_nodule0 = mask_image(nodule0, nodule0_mask3)\r\n#masked3_nodule1 = mask_image(nodule1, nodule1_mask3)\r\n#masked3_nodule2 = mask_image(nodule2, nodule2_mask3)\r\nmasked3_nodule3 = mask_image(nodule3, nodule3_mask3)\r\n\r\n# Mask2 nodule \r\n#masked2_nodule0 = mask_image(nodule0, nodule0_mask2)\r\n#masked2_nodule1 = mask_image(nodule1, nodule1_mask2)\r\n#masked2_nodule2 = mask_image(nodule2, nodule2_mask2)\r\nmasked2_nodule3 = mask_image(nodule3, nodule3_mask2)\r\n\r\n\r\n# Mask1 nodule \r\n#masked1_nodule0 = mask_image(nodule0, nodule0_mask1)\r\n#masked1_nodule1 = mask_image(nodule1, nodule1_mask1)\r\n#masked1_nodule2 = mask_image(nodule2, nodule2_mask1)\r\nmasked1_nodule3 = mask_image(nodule3, nodule3_mask1)\r\n\r\n#myshow(masked_nodule0)\r\n\r\n\r\ndef render(vtk_img):\r\n    colors = vtk.vtkNamedColors()\r\n    \r\n    colors.SetColor(\"BkgColor\", [0, 0, 0, 0])\r\n    #51, 77, 102, 255\r\n    \r\n    \r\n    # Create the renderer, the render window, and the interactor. The renderer\r\n    # draws into the render window, the interactor enables mouse- and\r\n    # keyboard-based interaction with the scene.\r\n    ren = vtk.vtkRenderer()\r\n    renWin = vtk.vtkRenderWindow()\r\n    renWin.AddRenderer(ren)\r\n    iren = vtk.vtkRenderWindowInteractor()\r\n    iren.SetRenderWindow(renWin)\r\n    \r\n    # The following reader is used to read a series of 2D slices (images)\r\n    # that compose the volume. The slice dimensions are set, and the\r\n    # pixel spacing. The data Endianness must also be specified. The reader\r\n    # uses the FilePrefix in combination with the slice number to construct\r\n    # filenames using the format FilePrefix.%d. (In this case the FilePrefix\r\n    # is the root name of the file: quarter.)\r\n    \r\n    # The volume will be displayed by ray-cast alpha compositing.\r\n    # A ray-cast mapper is needed to do the ray-casting.\r\n    volumeMapper = vtk.vtkFixedPointVolumeRayCastMapper()\r\n    volumeMapper.SetInputData(vtk_img)\r\n    \r\n    # The color transfer function maps voxel intensities to colors.\r\n    # It is modality-specific, and often anatomy-specific as well.\r\n    # The goal is to one color for flesh (between 500 and 1000)\r\n    # and another color for bone (1150 and over).\r\n    volumeColor = vtk.vtkColorTransferFunction()\r\n    volumeColor.AddRGBPoint(0, 127.0, 0.0, 0.0) # lung\r\n    volumeColor.AddRGBPoint(500, 20.0, 20.5, 20.3) # bronchi\r\n    volumeColor.AddRGBPoint(1000, 1.0, 0.5, 0.3)\r\n    volumeColor.AddRGBPoint(1150, 1.0, 0.0, 0.9)\r\n    \r\n    # The opacity transfer function is used to control the opacity\r\n    # of different tissue types.\r\n    volumeScalarOpacity = vtk.vtkPiecewiseFunction()\r\n    volumeScalarOpacity.AddPoint(0, 0.05)\r\n    volumeScalarOpacity.AddPoint(500, 1.0)\r\n    volumeScalarOpacity.AddPoint(1000, 1.0)\r\n    volumeScalarOpacity.AddPoint(1150, 1.0)\r\n    \r\n    # The gradient opacity function is used to decrease the opacity\r\n    # in the \"flat\" regions of the volume while maintaining the opacity\r\n    # at the boundaries between tissue types. 
The gradient is measured\r\n # as the amount by which the intensity changes over unit distance.\r\n # For most medical data, the unit distance is 1mm.\r\n volumeGradientOpacity = vtk.vtkPiecewiseFunction()\r\n volumeGradientOpacity.AddPoint(0, 0.0)\r\n volumeGradientOpacity.AddPoint(90, 0.5)\r\n volumeGradientOpacity.AddPoint(100, 1.0)\r\n \r\n # The VolumeProperty attaches the color and opacity functions to the\r\n # volume, and sets other volume properties. The interpolation should\r\n # be set to linear to do a high-quality rendering. The ShadeOn option\r\n # turns on directional lighting, which will usually enhance the\r\n # appearance of the volume and make it look more \"3D\". However,\r\n # the quality of the shading depends on how accurately the gradient\r\n # of the volume can be calculated, and for noisy data the gradient\r\n # estimation will be very poor. The impact of the shading can be\r\n # decreased by increasing the Ambient coefficient while decreasing\r\n # the Diffuse and Specular coefficient. To increase the impact\r\n # of shading, decrease the Ambient and increase the Diffuse and Specular.\r\n volumeProperty = vtk.vtkVolumeProperty()\r\n volumeProperty.SetColor(volumeColor)\r\n volumeProperty.SetScalarOpacity(volumeScalarOpacity)\r\n volumeProperty.SetGradientOpacity(volumeGradientOpacity)\r\n volumeProperty.SetInterpolationTypeToLinear()\r\n volumeProperty.ShadeOn()\r\n volumeProperty.SetAmbient(0.2)\r\n volumeProperty.SetDiffuse(1.0)\r\n volumeProperty.SetSpecular(1.0)\r\n \r\n # The vtkVolume is a vtkProp3D (like a vtkActor) and controls the position\r\n # and orientation of the volume in world coordinates.\r\n volume = vtk.vtkVolume()\r\n volume.SetMapper(volumeMapper)\r\n volume.SetProperty(volumeProperty)\r\n \r\n # Finally, add the volume to the renderer\r\n ren.AddViewProp(volume)\r\n \r\n # Set up an initial view of the volume. 
The focal point will be the\r\n    # center of the volume, and the camera position will be 400mm to the\r\n    # patient's left (which is our right).\r\n    camera = ren.GetActiveCamera()\r\n    c = volume.GetCenter()\r\n    camera.SetViewUp(0, 0, -1)\r\n    camera.SetPosition(c[0], c[1] - 400, c[2])\r\n    camera.SetFocalPoint(c[0], c[1], c[2])\r\n    camera.Azimuth(30.0)\r\n    camera.Elevation(30.0)\r\n    \r\n    # Set a background color for the renderer\r\n    ren.SetBackground(colors.GetColor3d(\"BkgColor\"))\r\n    \r\n    # Increase the size of the render window\r\n    renWin.SetSize(640, 480)\r\n    \r\n    # Interact with the data.\r\n    iren.Start()\r\n\r\nimage = sitk.GetImageFromArray(imagearray)\r\nmask = sitk.GetImageFromArray(maskarray)\r\n\r\nmasked_image = mask_image(image, mask)\r\n#myshow(masked_image)\r\n\r\n\r\ndeneme = masked_image\r\nsitk_image = deneme\r\nimage_dimension = 3\r\nindex = [15]*image_dimension\r\n\r\nitk_image = data_transforms.sitk_to_itk(sitk_image, image_dimension, index)\r\n\r\n# Rendering\r\nvtk_img = data_transforms.vtk_image_from_image(itk_image)\r\nrender(vtk_img)\r\n\r\n","repo_name":"anilcanozdemir/Bitirme","sub_path":"prototype.py","file_name":"prototype.py","file_ext":"py","file_size_in_byte":8766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"13939992282","text":"# When the list is long, calling the sort method repeatedly is very inefficient\n# Use the heapq data structure instead\n# Its push and pop operations keep the elements ordered automatically\n\n\n\nimport heapq\ndef solution(scovile, K):\n    answer = 0\n    heap = []\n    for num in scovile:\n        heapq.heappush(heap,num)\n    while(True):\n        if K <= heap[0]:\n            return answer\n        elif len(heap) == 1 and heap[0] < K:\n            return -1\n        heapq.heappush(heap,heapq.heappop(heap)+(heapq.heappop(heap) * 2))\n        answer += 
1\n\nprint(solution([1,2,3,9,10,12],7))\n\n\n","repo_name":"woo00oo/codingtest_study","sub_path":"프로그래머스/힙(더 맵게).py","file_name":"힙(더 맵게).py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"37722346668","text":"import logging\nfrom typing import List, Tuple\n\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom sklearn import metrics\nfrom sklearn.metrics import precision_recall_curve\n\nfrom immuneML.environment.Constants import Constants\nfrom immuneML.environment.Label import Label\nfrom immuneML.reports.ReportOutput import ReportOutput\nfrom immuneML.reports.ReportResult import ReportResult\nfrom immuneML.reports.multi_dataset_reports.MultiDatasetReport import MultiDatasetReport\nfrom immuneML.util.PathBuilder import PathBuilder\n\n\nclass PerformanceOverview(MultiDatasetReport):\n \"\"\"\n PerformanceOverview report creates an ROC plot and precision-recall plot for optimal trained models on multiple datasets. The labels on the plots\n are the names of the datasets, so it might be good to have user-friendly names when defining datasets that are still a combination of\n letters, numbers and the underscore sign.\n\n This report can be used only with MultiDatasetBenchmarkTool as it will plot ROC and PR curve for trained models across datasets. Also, it requires\n the task to be immune repertoire classification and cannot be used for receptor or sequence classification. Furthermore, it uses predictions on\n the test dataset to assess the performance and plot the curves. If the parameter refit_optimal_model is set to True, all data will be used to fit\n the optimal model, so there will not be a test dataset which can be used to assess performance and the report will not be generated.\n\n If datasets have the same number of examples, the baseline PR curve will be plotted as described in this publication:\n Saito T, Rehmsmeier M. The Precision-Recall Plot Is More Informative than the ROC Plot When Evaluating Binary Classifiers on Imbalanced Datasets.\n PLOS ONE. 2015;10(3):e0118432. doi:10.1371/journal.pone.0118432\n\n If the datasets have different number of examples, the baseline PR curve will not be plotted.\n\n YAML specification:\n\n .. indent with spaces\n .. 
code-block:: yaml\n\n reports:\n my_performance_report: PerformanceOverview\n\n \"\"\"\n\n @classmethod\n def build_object(cls, **kwargs):\n return PerformanceOverview(**kwargs)\n\n def _generate(self) -> ReportResult:\n\n self.result_path = PathBuilder.build(self.result_path / self.name)\n\n assert all(self.instruction_states[0].label_configuration.get_labels_by_name() == state.label_configuration.get_labels_by_name() and\n self.instruction_states[0].label_configuration.get_label_values(\n self.instruction_states[0].label_configuration.get_labels_by_name()[0]) ==\n state.label_configuration.get_label_values(state.label_configuration.get_labels_by_name()[0])\n for state in self.instruction_states), \\\n \"PerformanceOverview: there is a difference in labels between instructions, the plots cannot be created.\"\n assert len(self.instruction_states[0].label_configuration.get_labels_by_name()) == 1, \\\n 'PerformanceOverview: multiple labels were provided, but only one can be used in this report.'\n\n assert all(state.refit_optimal_model is False for state in self.instruction_states), \\\n f\"{PerformanceOverview.__name__}: no test datasets were available to assess the performance of optimal models as they were refitted on \" \\\n f\"the full datasets. No reports will be generated.\"\n\n label = self.instruction_states[0].label_configuration.get_label_objects()[0]\n\n optimal_hp_items = [list(state.optimal_hp_items.values())[0] for state in self.instruction_states]\n\n colors = px.colors.sequential.Viridis[::2][::-1]\n figure_auc, table_aucs = self.plot_roc(optimal_hp_items, label, colors)\n figure_pr, table_pr = self.plot_precision_recall(optimal_hp_items, label, colors)\n\n return ReportResult(name=self.name,\n info=\"A ROC plot and a precision-recall plot for optimal trained models on multiple datasets.\",\n output_figures=[figure_auc, figure_pr], output_tables=table_aucs + table_pr)\n\n def plot_roc(self, optimal_hp_items, label: Label, colors) -> Tuple[ReportOutput, List[ReportOutput]]:\n report_data_outputs = []\n figure = go.Figure()\n\n figure.add_trace(go.Scatter(x=[0, 1], y=[0, 1], mode='lines', name='baseline', line=dict(color=Constants.PLOTLY_BLACK, dash='dash'),\n hoverinfo=\"skip\"))\n\n for index, item in enumerate(optimal_hp_items):\n if item.test_predictions_path is None:\n logging.warning(f'{PerformanceOverview.__name__}: there are no test predictions for dataset '\n f'{self.instruction_states[index].dataset.name}, skipping this dataset when generating performance overview...')\n else:\n\n df = pd.read_csv(item.test_predictions_path)\n true_class = df[f\"{label.name}_true_class\"].values\n predicted_class = df[f\"{label.name}_{label.positive_class}_proba\"].values\n fpr, tpr, _ = metrics.roc_curve(y_true=true_class, y_score=predicted_class)\n auc = metrics.roc_auc_score(true_class, predicted_class)\n name = self.instruction_states[index].dataset.name + f' (AUC = {round(auc, 2)})'\n figure.add_trace(go.Scatter(x=fpr, y=tpr, mode='lines', name=name, marker=dict(color=colors[index], line=dict(width=3)), hoverinfo=\"skip\"))\n\n data_path = self.result_path / f\"roc_curve_data_{name}.csv\"\n pd.DataFrame({\"FPR\": fpr, \"TPR\": tpr}).to_csv(data_path, index=False)\n report_data_outputs.append(ReportOutput(data_path, f'ROC curve data for dataset {name} (csv)'))\n\n figure_path = self.result_path / \"roc_curve.html\"\n figure.update_layout(template='plotly_white', xaxis_title='false positive rate', yaxis_title='true positive rate')\n figure.write_html(str(figure_path))\n\n return 
ReportOutput(figure_path, 'ROC curve'), report_data_outputs\n\n    def plot_precision_recall(self, optimal_hp_items: list, label: Label, colors):\n        report_data_outputs = []\n        figure = go.Figure()\n\n        for index, item in enumerate(optimal_hp_items):\n            df = pd.read_csv(item.test_predictions_path)\n\n            true_class = df[f\"{label.name}_true_class\"].values\n            predicted_proba = df[f\"{label.name}_{label.positive_class}_proba\"].values\n            precision, recall, _ = precision_recall_curve(y_true=true_class, probas_pred=predicted_proba)\n            name = self.instruction_states[index].dataset.name\n            figure.add_trace(go.Scatter(x=recall, y=precision, mode='lines', name=name, marker=dict(color=colors[index], line=dict(width=3)),\n                                        hoverinfo=\"skip\"))\n\n            data_path = self.result_path / f\"precision_recall_data_{name}.csv\"\n            pd.DataFrame({\"precision\": precision, \"recall\": recall}).to_csv(data_path, index=False)\n            report_data_outputs.append(ReportOutput(data_path, f'precision-recall curve data for dataset {name}'))\n\n        figure_path = self.result_path / \"precision_recall_curve.html\"\n        figure.update_layout(template='plotly_white', xaxis_title=\"recall\", yaxis_title=\"precision\")\n        figure.write_html(str(figure_path))\n\n        return ReportOutput(figure_path, 'precision-recall curve'), report_data_outputs\n","repo_name":"uio-bmi/immuneML","sub_path":"immuneML/reports/multi_dataset_reports/PerformanceOverview.py","file_name":"PerformanceOverview.py","file_ext":"py","file_size_in_byte":7520,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"28"} +{"seq_id":"24995789980","text":"# After running docker-compose up, this script checks whether all services are reachable within a reasonable timeout\nimport time\n\nimport grpc\nimport psycopg2\nfrom integrationtests.utils import get_modyn_config\nfrom modyn.storage.internal.grpc.generated.storage_pb2_grpc import StorageStub  # noqa: F401\nfrom modyn.utils import grpc_connection_established\n\nTIMEOUT = 180  # seconds\n\n\ndef terminate_on_timeout(start_time: int) -> None:\n    curr_time = round(time.time())\n\n    if curr_time - start_time < TIMEOUT:\n        return\n\n    raise TimeoutError(\"Reached timeout\")\n\n\ndef storage_running() -> bool:\n    config = get_modyn_config()\n\n    storage_address = f\"{config['storage']['hostname']}:{config['storage']['port']}\"\n    storage_channel = grpc.insecure_channel(storage_address)\n\n    if not grpc_connection_established(storage_channel):\n        print(f\"Could not establish gRPC connection to storage at {storage_address}. Retrying.\")\n        return False\n\n    print(\"Successfully connected to storage!\")\n\n    return True\n\n\ndef model_storage_running() -> bool:\n    config = get_modyn_config()\n\n    model_storage_address = f\"{config['model_storage']['hostname']}:{config['model_storage']['port']}\"\n    model_storage_channel = grpc.insecure_channel(model_storage_address)\n\n    if not grpc_connection_established(model_storage_channel):\n        print(f\"Could not establish gRPC connection to model storage at {model_storage_address}. Retrying.\")\n        return False\n\n    print(\"Successfully connected to model storage!\")\n\n    return True\n\n\ndef evaluator_running() -> bool:\n    config = get_modyn_config()\n\n    evaluator_address = f\"{config['evaluator']['hostname']}:{config['evaluator']['port']}\"\n    evaluator_channel = grpc.insecure_channel(evaluator_address)\n\n    if not grpc_connection_established(evaluator_channel):\n        print(f\"Could not establish gRPC connection to evaluator at {evaluator_address}. 
Retrying.\")\n        return False\n\n    print(\"Successfully connected to evaluator!\")\n\n    return True\n\n\ndef trainer_server_running() -> bool:\n    config = get_modyn_config()\n\n    trainer_server_address = f\"{config['trainer_server']['hostname']}:{config['trainer_server']['port']}\"\n    trainer_server_channel = grpc.insecure_channel(trainer_server_address)\n\n    if not grpc_connection_established(trainer_server_channel):\n        print(f\"Could not establish gRPC connection to trainer server at {trainer_server_address}. Retrying.\")\n        return False\n\n    print(\"Successfully connected to trainer server!\")\n\n    return True\n\n\ndef storage_db_running() -> bool:\n    config = get_modyn_config()\n    try:\n        psycopg2.connect(\n            host=config[\"storage\"][\"database\"][\"host\"],\n            port=config[\"storage\"][\"database\"][\"port\"],\n            database=config[\"storage\"][\"database\"][\"database\"],\n            user=config[\"storage\"][\"database\"][\"username\"],\n            password=config[\"storage\"][\"database\"][\"password\"],\n            connect_timeout=5,\n        )\n\n        print(\"Successfully connected to storage database!\")\n\n        return True\n    except (Exception, psycopg2.DatabaseError) as error:\n        print(\"Error while connecting to the database: \" + str(error))\n        return False\n\n\ndef metadata_db_running() -> bool:\n    config = get_modyn_config()\n    try:\n        psycopg2.connect(\n            host=config[\"metadata_database\"][\"host\"],\n            port=config[\"metadata_database\"][\"port\"],\n            database=config[\"metadata_database\"][\"database\"],\n            user=config[\"metadata_database\"][\"username\"],\n            password=config[\"metadata_database\"][\"password\"],\n            connect_timeout=5,\n        )\n\n        print(\"Successfully connected to metadata database!\")\n\n        return True\n    except (Exception, psycopg2.DatabaseError) as error:\n        print(\"Error while connecting to the database: \" + str(error))\n        return False\n\n\ndef selector_running() -> bool:\n    config = get_modyn_config()\n\n    selector_address = f\"{config['selector']['hostname']}:{config['selector']['port']}\"\n    selector_channel = grpc.insecure_channel(selector_address)\n\n    if not grpc_connection_established(selector_channel):\n        print(f\"Could not establish gRPC connection to selector at {selector_address}. 
Retrying.\")\n        return False\n\n    print(\"Successfully connected to selector!\")\n\n    return True\n\n\ndef system_running() -> bool:\n    return (\n        storage_db_running()\n        and storage_running()\n        and selector_running()\n        and metadata_db_running()\n        and model_storage_running()\n        and evaluator_running()\n        and trainer_server_running()\n    )\n\n\ndef main() -> None:\n    start_time = round(time.time())\n\n    while True:\n        if system_running():\n            return\n\n        terminate_on_timeout(start_time)\n        time.sleep(1)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"eth-easl/modyn","sub_path":"integrationtests/test_docker_compose.py","file_name":"test_docker_compose.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"28"} +{"seq_id":"13729957076","text":"import os\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass DataPicker:\n    '''\n    Data collector.\n    '''\n\n    def __init__(self, url):\n        # Accepts the URL to collect from on initialization\n        self._url = url\n        self._res = None\n\n    def get_data(self):\n        # Collect the page directly with a GET request\n        try:\n            res = requests.get(self._url)\n            self._res = res.text\n            print('Data fetched successfully....')\n        except Exception as e:\n            print('Error while crawling data: %s ' % e)\n            raise e\n        return self._res\n\n\nclass DataClean:\n    '''\n    Data cleaner.\n    Parses the version information out of the HTML.\n    '''\n\n    def __init__(self, raw_data):\n        self._raw_data = raw_data\n        self._version_list = None\n\n    def Clean(self):\n        soup = BeautifulSoup(self._raw_data, 'html.parser')\n        version_tags = soup.find_all('h2')\n        version_list = [item.get_text().lower() for item in version_tags]\n        print(version_list)\n        self._version_list = version_list\n        print('Data parsed successfully....')\n        return self._version_list\n\n\nclass FileGen:\n    '''\n    File generator: writes the version list to a txt file.\n    '''\n\n    def __init__(self, version_list):\n        self._version_list = version_list\n\n    def write_data(self):\n        with open(os.path.dirname(os.path.abspath(__file__)) + os.sep + 'result.txt', mode='w', encoding='utf-8') as f:\n            for line in self._version_list:\n                f.writelines(line + '\\n')\n        print('File written successfully....')\n\n\nif __name__ == '__main__':\n    url = r'https://documentation.solarwinds.com/en/success_center/servu/Content/previous_versions.htm'\n    # Fetch the data\n    res = DataPicker(url).get_data()\n    # Clean the data to get the version list\n    version_list = DataClean(res).Clean()\n    # Write the file\n    FileGen(version_list).write_data()\n","repo_name":"15399230309/flask","sub_path":"app/Serv-u/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17467660805","text":"from PIL import ImageGrab as IG\nimport pyautogui as pa\nimport sys\nimport os\nimport time\nimport re\n\n\npa.FAILSAFE = True\nsec_between_keys = 0.25\nsec_between_term = 3\nsec_sleep = 0.5\n\n# Screenshot\ndef screenGrab():\n    box = ()\n    im = IG.grab(box)\n    im.save(os.getcwd() + '\\\\img\\\\full_snap__' + str(int(time.time())) + '.png', 'PNG')\n\n# Wait until the target window is up\nall = pa.getWindows()\nfor i in all:\n    if 'http://bms.ken.go.kr/?USERID=driver' in i:\n        print(i, 'yes')\n    else:\n        continue\npa.getWindow('카카오톡').set_foreground()\nte = pa.getWindow('카카오톡').get_position()\nprint(te)\n\n'''\nRAON K Hybrid Agent\nStudy_webCrawling from 2018 [D:\\OneDrive - 학현초등학교\\Gdrive\\★작업중\\SW_PyCharm\\studyPython from 2018] - ...\\crawling\\crawler_naver news_all_180802 .py [Study_webCrawling from 2018] - PyCharm\n...\\pyAutogui\\pyAutoGui_Neis_Gyeljae_180906.py [Study_webCrawling from 2018] - PyCharm\nhttp://bms.ken.go.kr/?USERID=driver1&APPRIDLIST=J10CB182424951849000&APPRDEPTID=J100004848&APPR - Internet 
Explorer\nhttp://bms.ken.go.kr/ - 결재대기 | 업무관리시스템 - Internet Explorer\n업무포털 - 석진일/학현초등학교 - Internet Explorer\n경기도교육청\n카카오톡\n이미지 014.png - 픽픽\nWorkFlowy\nWindows에서 파이썬 스크립트 실행용 exe 실행파일 구성방법 - Chrome\ncli.exe - Everything\n받은 쪽지 - 최종철(학현초등학교 전담)\n받은 쪽지 - 김소희(학현초등학교 4학년)\nDaum 지도 - Chrome\nTotal Commander 7.04 - University of Amsterdam\n백업 및 동기화\n'''","repo_name":"951237/python_pyautogui","sub_path":"pyautogui_training/pyAutoGui_getWindows_180828.py","file_name":"pyAutoGui_getWindows_180828.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"26181281127","text":"import socket\r\nimport random\r\nimport time\r\nimport numpy as np  # TensorFlow input/output is handled via numpy\r\n\r\n# Game class\r\nclass Game:\r\n    # Constructor\r\n    def __init__(self):\r\n        self.gameCount = 0  # stats: how many games have been played\r\n\r\n    # <Communication functions>\r\n\r\n    # Request a connection\r\n    def connect(self):\r\n        while True:\r\n            # Create a TCP/IP socket for the connection\r\n            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n            try:\r\n                self.sock.connect( ('127.0.0.1', 8791) )\r\n            except socket.timeout:\r\n                time.sleep(1.0)\r\n                continue\r\n            except:\r\n                return False\r\n            # The connection request succeeded\r\n            break\r\n        return True\r\n\r\n    # Close the connection\r\n    def close(self):\r\n        self.sock.close()\r\n\r\n\r\n    # Receive data from the server\r\n    def recv(self):\r\n        # Read the packet length.\r\n        buf = b\"\"\r\n        while len(buf) < 4:\r\n            try:\r\n                t = self.sock.recv(4-len(buf))\r\n            except:\r\n                return \"et\", \"recv length error\"\r\n            buf += t\r\n        length = int(buf.decode())\r\n        buf = b\"\"\r\n        while len(buf) < length:\r\n            try:\r\n                t = self.sock.recv(length-len(buf))\r\n            except:\r\n                return \"et\", \"recv packet error\"\r\n            buf += t\r\n        # Split the received content on whitespace\r\n        # pr 0001 --> ss[0] = 'pr', ss[1] = '0001'\r\n        ss = buf.decode().split()\r\n        if ss[0] == 'ab': return \"ab\", \"abort from server\"\r\n        return ss[0], ss[1]\r\n\r\n    # Send data to the server\r\n    def send(self, buf):\r\n        self.sock.send(buf.encode())\r\n\r\n    # <Action functions>\r\n\r\n    # Pre-run (lookahead) function\r\n    def preRun(self, p):\r\n        # Tell the server to execute a prerun\r\n        self.send(\"0008 pr %04d\"%p)\r\n\r\n        # Receive the prerun result from the server\r\n        cmd, buf = self.recv()\r\n        # Fail if the reply from the server is not a pr command\r\n        if cmd != 'pr': return False, None\r\n        ref = (0.0, 1.0, -1.0, 0.0) if self.turn==1 else (0.0, -1.0, 1.0, 0.0)\r\n        st = np.array( [ref[int(buf[i])] for i in range(64)] )\r\n        return True, st\r\n\r\n    # Place a stone\r\n    def action(self, board):\r\n        # hints: positions that can be played this turn\r\n        hints = [ p for p in range(64) if board[p] == '0' ]\r\n\r\n        ref = (0.0, 1.0, -1.0, 0.0) if self.turn==1 else (0.0, -1.0, 1.0, 0.0)\r\n        st = np.array( [ref[int(board[i])] for i in range(64)] )\r\n        \r\n        # The random player picks one of the playable positions at random.\r\n        p = random.choice(hints)\r\n\r\n        # State value after choosing p (next status): V(nst) == q(st, p)\r\n        _, nst = self.preRun(p)\r\n\r\n        return st, nst, p\r\n\r\n    # <Handler functions>\r\n\r\n    # Handle the start command\r\n    def onStart(self, buf):\r\n        # packet = LEN st turn\r\n        self.turn = int(buf)\r\n        # List for recording the episode\r\n        self.episode = []\r\n        colors = ( \"\", \"White\", \"Black\" )\r\n        print(f\"Game {self.gameCount+1} {colors[self.turn]}\")\r\n\r\n    # Handle the quit command\r\n    def onQuit(self, buf):\r\n        # packet = LEN qt <result>\r\n        self.gameCount += 1\r\n        w, b = int(buf[:2]), int(buf[2:])\r\n        result = w-b if self.turn==1 else b-w\r\n        winText = ( \"You Lose\", \"Draw\", \"You Win\" )\r\n        win = 2 if result > 0 else 0 if result < 0 else 1\r\n        print(f\"{winText[win]} W : {w}, B : {b}\")\r\n        return win, result\r\n\r\n\r\n    # Handle the board-ready command\r\n    def onBoard(self, 
buf):\r\n        # packet = LEN bd <board>\r\n        st, nst, p = self.action(buf)\r\n        if p < 0: return False\r\n        self.send(\"0008 pt %04d\"%p)\r\n        self.episode.append( (st, self.turn^3) )\r\n        self.episode.append( (nst, self.turn) )\r\n        print(f\"({p//8}, {p%8})\", end=\"\")\r\n        return True\r\n\r\n# Flag checked to see whether we should stop playing\r\nquitFlag = False\r\n# Stats: how many games were won, lost, and drawn\r\n# winlose = [ Lose, Draw, Win ]\r\nwinlose = [ 0, 0, 0 ]\r\n\r\n# Create the Game object\r\ngame = Game()\r\n\r\n# Main loop\r\nwhile not quitFlag:\r\n    if not game.connect(): break\r\n    episode = []\r\n    while True:\r\n        cmd, buf = game.recv()\r\n        if cmd == 'et':\r\n            print(f\"Network error!! : {buf}\")\r\n            break\r\n        if cmd == 'qt':\r\n            w, r = game.onQuit(buf)\r\n            winlose[w] += 1\r\n            print(f\"Wins:{winlose[2]}, Loses:{winlose[0]}, Draws:{winlose[1]}\")\r\n            print(f\"Win ratio:{winlose[2]*100/sum(winlose):.2f}%\")\r\n            break\r\n        if cmd == 'ab':\r\n            print(\"Game Abort!!\")\r\n            break\r\n        if cmd == 'st':\r\n            game.onStart(buf)\r\n        elif cmd == 'bd':\r\n            if not game.onBoard(buf): break\r\n    # End the game\r\n    game.close()\r\n    # Wait one second before reconnecting instead of exiting immediately\r\n    time.sleep(1.0)\r\n","repo_name":"crchoi1991/PythonRL","sub_path":"ReversiRandom.py","file_name":"ReversiRandom.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"1460315543","text":"import inspect\nimport types\nfrom typing import Any, Dict\n\nimport torch\nimport torch_geometric.nn.dense.linear\nfrom mqbench.fuser_method_mappings import fuse_custom_config_dict\nfrom mqbench.utils.logger import logger\nfrom mqbench.utils.registry import DEFAULT_MODEL_QUANTIZER\nfrom torch.fx import Tracer\nfrom torch.fx.graph_module import GraphModule\nfrom torch.quantization.quantize_fx import _swap_ff_with_fxff\n\n__all__ = ['prepare_by_platform', 'gnn_prepare_by_platform']\n\nfrom mqbench.prepare_by_platform import BackendType, CustomedTracer\nfrom mqbench.prepare_by_platform import get_qconfig_by_platform, duplicate_reused_nodes, prepare_constant_dict\n\n\ndef prepare_by_platform(\n        model: torch.nn.Module,\n        deploy_backend: BackendType,\n        prepare_custom_config_dict: Dict[str, Any] = {},\n        custom_tracer: Tracer = None):\n    \"\"\"\n    Args:\n        model (torch.nn.Module):\n        deploy_backend (BackendType):\n\n    >>> prepare_custom_config_dict : {\n            extra_qconfig_dict : Dict, Find explanations in get_qconfig_by_platform,\n            extra_quantizer_dict: Extra params for quantizer.\n            preserve_attr: Dict, Specify attribute of model which should be preserved\n                after prepare. Since symbolic_trace only store attributes which is\n                in forward. 
If model.func1 and model.backbone.func2 should be preserved,\n                {\"\": [\"func1\"], \"backbone\": [\"func2\"] } should work.\n            Attr below is inherited from Pytorch.\n            concrete_args: Specify input for model tracing.\n            extra_fuse_dict: Specify extra fusing patterns and functions.\n        }\n\n    \"\"\"\n    model_mode = 'Training' if model.training else 'Eval'\n    logger.info(\"Quantize model Scheme: {} Mode: {}\".format(deploy_backend, model_mode))\n\n    # Get Qconfig\n    extra_qconfig_dict = prepare_custom_config_dict.get('extra_qconfig_dict', {})\n    qconfig = get_qconfig_by_platform(deploy_backend, extra_qconfig_dict)\n\n    _swap_ff_with_fxff(model)\n    # Preserve attr.\n    preserve_attr_dict = dict()\n    if 'preserve_attr' in prepare_custom_config_dict:\n        for submodule_name in prepare_custom_config_dict['preserve_attr']:\n            cur_module = model\n            if submodule_name != \"\":\n                cur_module = getattr(model, submodule_name)\n            preserve_attr_list = prepare_custom_config_dict['preserve_attr'][submodule_name]\n            preserve_attr_dict[submodule_name] = {}\n            for attr in preserve_attr_list:\n                preserve_attr_dict[submodule_name][attr] = getattr(cur_module, attr)\n    # Symbolic trace\n    concrete_args = prepare_custom_config_dict.get('concrete_args', None)\n    customed_leaf_module = prepare_custom_config_dict.get('leaf_module', [])\n    tracer = CustomedTracer(customed_leaf_module=tuple(customed_leaf_module))\n    if custom_tracer is not None:\n        tracer = custom_tracer\n    # TODO: tracing works, but its correctness has not been checked.\n    # The thing is, the pyg conv uses pyg.nn.dense.Linear instead of torch.nn.Linear,\n    # which is why the fakequant is not attached.\n    # Now the weights should work if we replace that fake linear;\n    # in the future, we need to automate the replacement.\n    # Also, we need to find a way to insert fake quantization for activations.\n    if isinstance(model, GraphModule):\n        graph = model.graph\n    else:\n        graph = tracer.trace(model, concrete_args)\n\n    name = model.__class__.__name__ if isinstance(model, torch.nn.Module) else model.__name__\n    modules = dict(model.named_modules())\n    graph, duplicated_modules = duplicate_reused_nodes(graph, modules)\n    constant_nodes = prepare_constant_dict(graph, model)\n    modules.update(duplicated_modules)\n    modules.update(constant_nodes)\n    graph_module = GraphModule(modules, graph, name)\n    # Model fusion.\n    extra_fuse_dict = prepare_custom_config_dict.get('extra_fuse_dict', {})\n    extra_fuse_dict.update(fuse_custom_config_dict)\n    # Prepare\n    import mqbench.custom_quantizer  # noqa: F401\n    extra_quantizer_dict = prepare_custom_config_dict.get('extra_quantizer_dict', {})\n    quantizer = DEFAULT_MODEL_QUANTIZER[deploy_backend](extra_quantizer_dict, extra_fuse_dict)\n    prepared = quantizer.prepare(graph_module, qconfig)\n    # Restore attr.\n    if 'preserve_attr' in prepare_custom_config_dict:\n        for submodule_name in prepare_custom_config_dict['preserve_attr']:\n            cur_module = prepared\n            _type = type(model)\n            if submodule_name != \"\":\n                cur_module = getattr(prepared, submodule_name)\n                _type = type(getattr(model, submodule_name))\n            preserve_attr_list = prepare_custom_config_dict['preserve_attr'][submodule_name]\n            for attr_name in preserve_attr_list:\n                logger.info(\"Preserve attr: {}.{}\".format(submodule_name, attr_name))\n                _attr = preserve_attr_dict[submodule_name][attr_name]\n                if inspect.ismethod(_attr):\n                    _attr = types.MethodType(getattr(_type, attr_name), cur_module)\n                setattr(cur_module, attr_name, _attr)\n    return prepared\n\n\ndef gnn_prepare_by_platform(\n        model: torch.nn.Module,\n        deploy_backend: BackendType,\n        
prepare_custom_config_dict: Dict[str, Any] = {},\n custom_tracer: Tracer = None):\n # TODO: the version in compatible version of torch_geometric is not working, use another Transformer from newer repo\n # TODO: the newer version still can not traced inside MessagePassing, need to implement our own.\n # from torch_geometric.nn.fx import Transformer\n from fx import Transformer\n # class Transformer:\n # def __init__(self, model):\n # super(Transformer, self).__init__()\n # self.model = model\n # raise NotImplementedError(\"Not Implement\")\n #\n # def transform(self):\n # raise NotImplementedError(\"Not Implement\")\n\n model = Transformer(model).transform()\n model = replace_linear_pyg2torch(model)\n\n return prepare_by_platform(model, deploy_backend, prepare_custom_config_dict, custom_tracer)\n\n\ndef replace_linear_pyg2torch(fx_model):\n assert isinstance(fx_model, torch.fx.GraphModule)\n graph = fx_model.graph\n modules = dict(fx_model.named_modules())\n from torch.fx.experimental.optimization import replace_node_module\n\n for node in graph.nodes:\n if node.op == 'call_module':\n old_module = modules[node.target]\n if isinstance(old_module, torch_geometric.nn.dense.linear.Linear):\n weight = old_module.weight.clone().detach()\n bias = old_module.bias.clone().detach()\n new_module = torch.nn.Linear(weight.shape[1], weight.shape[0], not (bias is None), weight.device, weight.dtype)\n new_module.weight.data = weight\n new_module.bias.data = bias\n replace_node_module(node, modules, new_module)\n return torch.fx.GraphModule(fx_model, graph)\n","repo_name":"davidMc0109/nnqgnn","sub_path":"experiment/traced_all_fixbit/prepare_by_platform.py","file_name":"prepare_by_platform.py","file_ext":"py","file_size_in_byte":7029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"41024294857","text":"from numpy import pi,linspace,meshgrid,sin,cos,sqrt,sum,array,ones,zeros,hstack,vstack,sign,mod,isfinite,ceil,isclose\nfrom scipy.special import ellipk, ellipe\nfrom multiprocessing import Process, Queue, cpu_count\n#import tables\nimport warnings\n\ndef diff_12_central(x, y):\n x0 = x[:-2]\n x1 = x[1:-1]\n x2 = x[2:]\n y0 = y[:-2]\n y1 = y[1:-1]\n y2 = y[2:]\n f = (x2 - x1) / (x2 - x0)\n\n d_one = (1 - f) * (y2 - y1) / (x2 - x1) + f * (y1 - y0) / (x1 - x0) # first derivative at x1 (dy/dx)\n d_two = 2 * (y0 / ((x1 - x0) * (x2 - x0)) + y1 / ((x1 - x0) * (x1 - x2)) + y2 / (\n (x2 - x0) * (x2 - x1))) # second derivative at x1 (d^2y/dx^2)\n\n return d_one, d_two\n\ndef new_greens_test(R,Z):\n m,n = len(R),len(R)\n gpsi = zeros((m,n))\n R2 = R**2\n mu_0 = 4*pi*10**-7\n pre_factor = mu_0/(4*pi)\n for i,(r0,z0) in enumerate(zip(R,Z)):\n if isclose(r0,0,rtol=0,atol=1E-12):\n continue\n fac0 = (Z-z0)**2\n d = sqrt(fac0 + (R+r0)**2)\n d_ = sqrt(fac0 + (R-r0)**2)\n k_2 = 4*R*r0/d**2\n K = ellipk(k_2)\n E = ellipe(k_2)\n denom = d_**2*d\n fac1 = d_**2*K\n fac2 = (fac0 + R2 + r0**2)*E\n gpsi_tmp = pre_factor*R*r0/d * 4/k_2*((2-k_2)*K - 2*E)\n gpsi_tmp[~isfinite(gpsi_tmp)]=0\n gpsi[:,i] = gpsi_tmp\n return gpsi\n\ndef short_greens_test(R,Z):\n # must pass in 2D R and Z\n n,m = R.shape\n r,z = R.flatten(),Z.flatten()\n gpsi = zeros((m*n,m))\n r2 = r**2\n mu_0 = 4*pi*10**-7\n pre_factor = mu_0/(4*pi)\n for i,(r0,z0) in enumerate(zip(r[0:m],z[0:m])):\n if isclose(r0,0,rtol=0,atol=1E-12):\n continue\n fac0 = (z-z0)**2\n d = sqrt(fac0 + (r+r0)**2)\n d_ = sqrt(fac0 + (r-r0)**2)\n k_2 = 4*r*r0/d**2\n K = ellipk(k_2)\n E = ellipe(k_2)\n denom = d_**2*d\n fac1 = d_**2*K\n 
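# (denom, fac1 and fac2 are only needed for the B-field terms in get_greens below; this flux-only routine computes them but never uses them)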
fac2 = (fac0 + r2 + r0**2)*E\n        gpsi_tmp = pre_factor*r*r0/d * 4/k_2*((2-k_2)*K - 2*E)\n        gpsi_tmp[~isfinite(gpsi_tmp)]=0\n        gpsi[:,i] = gpsi_tmp\n    return gpsi\n\ndef get_gpsi(R,Z):\n    # must pass in 2D R and Z\n    n,m = R.shape\n    r,z = R.flatten(),Z.flatten()\n    gpsis = zeros((m*n,m))\n    gpsi = zeros((m*n,m*n))\n    r2 = r**2\n    mu_0 = 4*pi*10**-7\n    pre_factor = mu_0/(4*pi)\n    print(\"computing gpsi blocks...\")\n    for i,(r0,z0) in enumerate(zip(r[0:m],z[0:m])):\n        if isclose(r0,0,rtol=0,atol=1E-12):\n            continue\n        fac0 = (z-z0)**2\n        d = sqrt(fac0 + (r+r0)**2)\n        d_ = sqrt(fac0 + (r-r0)**2)\n        k_2 = 4*r*r0/d**2\n        K = ellipk(k_2)\n        E = ellipe(k_2)\n        denom = d_**2*d\n        fac1 = d_**2*K\n        fac2 = (fac0 + r2 + r0**2)*E\n        gpsi_temp = pre_factor*r*r0/d * 4/k_2*((2-k_2)*K - 2*E)\n        gpsi_temp[~isfinite(gpsi_temp)]=0\n        gpsis[:,i] = gpsi_temp\n    print(\"creating reflected block matrix\")\n    gpsis2 = zeros((m*(2*n-1),m))\n    gpsis2[(n-1)*m:,:] = gpsis\n    for k in range(n-1):\n        if k == 0:\n            gpsis2[0:m,:] = gpsis[-m:,:]\n        else:\n            gpsis2[k*m:(k+1)*m,:] = gpsis[-(k+1)*m:-k*m,:]\n    print(\"building huge matrix...\")\n    for p in range(n):\n        gpsi[:,p*m:(p+1)*m] = gpsis2[(n-(p+1))*m:(2*n-(p+1))*m,:]\n    print(\"returning...\")\n    return gpsi\n\ndef get_greens(R,Z,rzdir,out_q=None,out_idx=None):\n    warnings.simplefilter(\"ignore\",RuntimeWarning)\n    m,n = len(R),len(rzdir)\n    print(m,n)\n    gpsi = zeros((m,n))\n    gBR = zeros((m,n))\n    gBZ = zeros((m,n))\n    R2 = R**2\n    mu_0 = 4*pi*10**-7\n    pre_factor = mu_0/(4*pi)\n    for i,(r0,z0,csign) in enumerate(rzdir):\n        if isclose(r0,0,rtol=0,atol=1E-12):\n            continue\n        fac0 = (Z-z0)**2\n        d = sqrt(fac0 + (R+r0)**2)\n        d_ = sqrt(fac0 + (R-r0)**2)\n        k_2 = 4*R*r0/d**2\n        K = ellipk(k_2)\n        E = ellipe(k_2)\n        denom = d_**2*d\n        fac1 = d_**2*K\n        fac2 = (fac0 + R2 + r0**2)*E\n        gpsi_tmp = csign*pre_factor*R*r0/d * 4/k_2*((2-k_2)*K - 2*E)\n        gpsi_tmp[~isfinite(gpsi_tmp)]=0\n        gpsi[:,i] = gpsi_tmp\n        gBR_tmp = -2*csign*pre_factor*(Z-z0)*(fac1 - fac2)/(R*denom)\n        gBR_tmp[~isfinite(gBR_tmp)]=0\n        gBR[:,i] = gBR_tmp\n        gBZ_tmp = 2*csign*pre_factor*(fac1 - (fac2-2*r0**2*E))/denom\n        gBZ_tmp[~isfinite(gBZ_tmp)]=0\n        gBZ[:,i] = gBZ_tmp\n    out_tup = (gpsi,gBR,gBZ)\n    if out_q is None:\n        return out_tup\n    else:\n        if out_idx is None:\n            raise ValueError(\"I don't know where to put this output, please specify out_idx\")\n        out_q.put((out_idx,)+out_tup)\n\ndef compute_greens(R,Z,rzdir=None,nprocs=1):\n    warnings.simplefilter(\"ignore\",RuntimeWarning)\n    proc_max = cpu_count()\n    if rzdir is None:\n        rzdir = vstack((R,Z,ones(len(R)))).T\n    m,n = len(R),len(rzdir)\n    print(m, n)\n    gpsi = zeros((m,n))\n    gBR = zeros((m,n))\n    gBZ = zeros((m,n))\n    if nprocs > proc_max:\n        nprocs = proc_max\n    procs = []\n    out_q = Queue()\n    chunksize = int(ceil(rzdir.shape[0]/float(nprocs)))\n    print(chunksize)\n    for i in range(nprocs):\n        p = Process(target=get_greens,args=(R,Z,rzdir[i*chunksize:(i+1)*chunksize,:]),kwargs={\"out_q\":out_q,\"out_idx\":i})\n        procs.append(p)\n        p.start()\n\n    for j in range(nprocs):\n        print(\"getting g_tup #: {0}\".format(j))\n        g_tup = out_q.get()\n        idx = g_tup[0]\n        gpsi[:,idx*chunksize:(idx+1)*chunksize] = g_tup[1]\n        gBR[:,idx*chunksize:(idx+1)*chunksize] = g_tup[2]\n        gBZ[:,idx*chunksize:(idx+1)*chunksize] = g_tup[3]\n\n    for p in procs:\n        p.join()\n\n    return (gpsi,gBR,gBZ)\n\n#def write_large_greens(R,Z,filename,rzdir=None,chunkbytes=1024**3,nprocs=1):\n#    # default chunksize is 1GB per greens function\n#    if rzdir is None:\n#        rzdir = vstack((R,Z,ones(len(R)))).T\n#    m,n = len(R),len(rzdir)\n#    gridbytes = 8*m\n#    print(\"chunkbytes: \", chunkbytes)\n#    
print(\"gridbytes: \", gridbytes)\n# n_perchunk = int(ceil(chunkbytes/float(gridbytes)))\n# print(\"n_perchunk: \", n_perchunk)\n# n_chunks = int(ceil(n/float(n_perchunk)))\n# print(\"n_chunks: \", n_chunks)\n# fh = tables.openFile(filename,mode=\"w\")\n# filters = tables.Filters(complevel=5,complib='blosc')\n# gpsi_arr = fh.createCArray(fh.root,'gpsi',tables.Atom.from_dtype(R.dtype),\n# shape=(m,n),filters=filters)\n# gBR_arr = fh.createCArray(fh.root,'gBR',tables.Atom.from_dtype(R.dtype),\n# shape=(m,n),filters=filters)\n# gBZ_arr = fh.createCArray(fh.root,'gBZ',tables.Atom.from_dtype(R.dtype),\n# shape=(m,n),filters=filters)\n# for i in xrange(n_chunks):\n# print(\"processing chunk {0}\".format(i))\n# gpsi_chunk,gBR_chunk,gBZ_chunk = compute_greens(R,Z,rzdir=rzdir[i*n_perchunk:(i+1)*n_perchunk],nprocs=nprocs)\n# print(\"chunk shapes: {0}, {1}, {2}\".format(gpsi_chunk.shape,gBR_chunk.shape,gBZ_chunk.shape))\n# gpsi_arr[:,i*n_perchunk:(i+1)*n_perchunk] = gpsi_chunk\n# gBR_arr[:,i*n_perchunk:(i+1)*n_perchunk] = gBR_chunk\n# gBZ_arr[:,i*n_perchunk:(i+1)*n_perchunk] = gBZ_chunk\n#\n# fh.close()\n\n\n\n\n","repo_name":"eepeterson/pleiades","sub_path":"pleiades/analysis/math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"} +{"seq_id":"7963447117","text":"import hmac\nimport time\nimport requests\n\nfrom app.core.config import settings\nfrom app.helpers.logger import log\nfrom app.helpers.crypto import mintItems\nfrom app.helpers.database import db\n\n\ndef create_invoice(buyer, property_id, price, title, amount) -> str:\n currency = 'USD'\n order_date = int(time.time())\n order_reference = f'{buyer}-{property_id}-{amount}-{int(time.time())}'\n\n signature_key = f\"{settings.WFP_MERCHANT_LOGIN};{settings.WFP_DOMAIN_NAME};\" \\\n f\"{order_reference};{order_date};\" \\\n f\"{price * amount};{currency};\" \\\n f\"{title};{amount};{price}\"\n\n signature = hmac.new(\n settings.WFP_MERCHANT_KEY.encode('utf-8'),\n signature_key.encode('utf-8'),\n digestmod='MD5'\n ).hexdigest()\n\n result = requests.post(\n settings.WFP_BASE_URL,\n data={\n 'merchantAccount': settings.WFP_MERCHANT_LOGIN,\n 'merchantDomainName': settings.WFP_DOMAIN_NAME,\n\n 'orderReference': order_reference,\n 'orderDate': order_date,\n\n 'amount': price * amount,\n 'currency': currency,\n 'returnUrl': settings.WFP_PAYMENT_CONFIRMATION_ROUTE,\n\n 'productName': [title],\n 'productCount': [amount],\n 'productPrice': [price],\n\n 'merchantSignature': str(signature)\n }\n )\n\n return result.url\n\n\ndef get_last_transactions():\n finish = int(time.time())\n start = finish - 18000\n\n signature_key = f\"{settings.WFP_MERCHANT_LOGIN};{start};{finish}\"\n\n signature = hmac.new(\n settings.WFP_MERCHANT_KEY.encode('utf-8'),\n signature_key.encode('utf-8'),\n digestmod='MD5'\n ).hexdigest()\n\n result = requests.post(\n settings.WFT_API_BASE_URL,\n json={\n 'transactionType': 'TRANSACTION_LIST',\n 'merchantAccount': settings.WFP_MERCHANT_LOGIN,\n 'merchantSignature': str(signature),\n 'apiVersion': 1,\n 'dateBegin': start,\n 'dateEnd': finish\n }\n )\n\n if result.status_code == 200 and result.json() and result.json()['reason'] == 'Ok':\n return result.json()['transactionList']\n else:\n return []\n\n\ndef check_payments():\n while True:\n try:\n for transaction in get_last_transactions():\n if transaction['transactionStatus'] == 'Approved':\n if not db.get_payment(transaction['orderReference']):\n 
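# orderReference encodes buyer-propertyId-amount-timestamp (see create_invoice above), so the split('-') below recovers the buyer, property id and amount.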
db.save_payment(transaction)\n\n                    details = transaction['orderReference'].split('-')\n                    metadata = db.get_metadata(int(details[1]))\n\n                    mintItems(details[0], int(details[1]), int(details[2]), metadata.price)\n\n            time.sleep(5)\n\n        except Exception as exc:\n            log.error(exc)\n","repo_name":"vovkapultik/pre-backend","sub_path":"app/helpers/payments.py","file_name":"payments.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"26747259651","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n# Step 1: prepare the data\r\nx_data = np.linspace(-1, 1, 100)\r\ny_data = 2*x_data + np.random.randn(*x_data.shape) * 0.4\r\n# Step 2: create the model\r\ndef model(w,x,b):\r\n    return w*x+b\r\n# Step 3: define the loss function\r\ndef loss_function(w,x,y,b):\r\n    loss=y-model(w,x,b)\r\n    return tf.reduce_mean(tf.square(loss))\r\n# Step 4: define the gradient function:\r\ndef grad(w,x,y,b):\r\n    with tf.GradientTape() as tape:\r\n        loss=loss_function(w,x,y,b)\r\n    return tape.gradient(loss,[w,b])\r\nw=tf.Variable(np.random.randn(),tf.float32,name=\"W\")\r\nb=tf.Variable(1.0,tf.float32,name=\"b\")\r\ntrain_time=10\r\nlearning_rate=0.01\r\nloss_list=[]\r\noptimizer=tf.keras.optimizers.SGD(learning_rate)\r\nfor i in range(train_time):\r\n    for xs,ys in zip(x_data,y_data):\r\n        grads=grad(w,xs,ys,b)\r\n        optimizer.apply_gradients(zip(grads,[w,b]))\r\n    loss_=loss_function(w,xs,ys,b).numpy()\r\n    print(\"w value:\", w.numpy(), \"b value:\", b.numpy(), \"avg loss:\", loss_)\r\n    
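# loss_ above is the loss on the last sample of each epoch; loss_list records it, although it is never plotted below.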
loss_list.append(loss_)\r\n\r\nplt.scatter(x_data,y_data,color=\"green\")\r\nplt.plot(x_data,w.numpy()*x_data+b.numpy())\r\nplt.show()\r\n# plt.scatter(x_data, y_data)\r\n# plt.show()\r\n","repo_name":"sevenbean/Machine-Learning","sub_path":"TensorFlow2.0教程实例/单变量线性回归/tensorflow2.0.py","file_name":"tensorflow2.0.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"13335506241","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 9/6/20 11:34 AM\n@Author : Lucius\n@FileName: data.py\n@Software: PyCharm\n\"\"\"\n\nimport h5py\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader, TensorDataset\n\nDATASET_PATH = 'dataset/chaotic_rnn_inputs_g2p5_dataset_N50_S50'\n\n\ndef get_dataset(path=DATASET_PATH):\n with h5py.File(path, 'r') as data:\n train_data = torch.from_numpy(np.array(data['train_data']))\n valid_data = torch.from_numpy(np.array(data['valid_data']))\n train_dataset = TensorDataset(train_data)\n valid_dataset = TensorDataset(valid_data)\n return train_dataset, valid_dataset\n\n\ndef get_train_data_loader(batch_size=256, shuffle=True):\n train_dataset, valid_dataset = get_dataset()\n train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=2)\n return train_data_loader\n\n\ndef get_valid_data_loader(batch_size=256):\n train_dataset, valid_dataset = get_dataset()\n valid_data_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=2)\n return valid_data_loader\n\n","repo_name":"Lucius-lsr/LFADSG","sub_path":"lfadsg/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"27491200837","text":"# coding: utf-8\n\nimport numpy as np\n\n\nclass NeuralNetwork:\n def __init__(self, layer_dims: tuple, *, sigma: float):\n self.w, self.b, self.layer_dims = [], [], layer_dims\n for i in range(1, len(layer_dims)):\n self.w.append(np.random.randn(layer_dims[i], layer_dims[i - 1]) * sigma)\n self.b.append(np.zeros((layer_dims[i], 1)))\n\n def forward_propagation(self, x: np.ndarray, *, training: bool = False, dropout: dict = None) -> list:\n a = [x]\n nl = len(self.layer_dims)\n for l in range(1, nl - 1):\n al = np.dot(self.w[l - 1], a[l - 1]) + self.b[l - 1]\n np.maximum(al, 0, out=al)\n if not (dropout is None):\n if training:\n al = al * dropout[\"mask\"][l]\n else:\n al = al * (1. - dropout[\"rate\"])\n a.append(al)\n al = np.dot(self.w[nl - 2], a[nl - 2]) + self.b[nl - 2]\n # TODO: solve overflow problem in some more reasonable way (https://zhuanlan.zhihu.com/p/22260935)\n np.clip(al, -30., 30., al)\n al = 1. / (1. + np.exp(-al))\n a.append(al)\n return a\n\n def back_propagation(self, y: np.ndarray, a: list, dropout_mask: list = None) -> (list, list):\n nl = len(self.layer_dims)\n dz, dw, db = [None] * nl, [None] * (nl - 1), [None] * (nl - 1)\n dz[nl - 1] = (a[nl - 1] - y) / y.shape[1]\n for l in reversed(range(nl - 1)):\n dw[l] = np.dot(dz[l + 1], a[l].T)\n if not (dropout_mask is None):\n dw[l] = dw[l] * dropout_mask[l + 1]\n db[l] = np.sum(dz[l + 1], axis=1, keepdims=True)\n dz[l] = np.dot(self.w[l].T, dz[l + 1]) * (a[l] > 0)\n return dw, db\n\n @staticmethod\n def cost(y: np.ndarray, al: np.ndarray) -> np.ndarray:\n return -np.mean(y * np.log(al) + (1 - y) * np.log(1. 
- al))\n\n def gradient_check(self, x: np.ndarray, y: np.ndarray, eps: float = 1e-8):\n nl = len(self.layer_dims)\n dw, db = [None] * (nl - 1), [None] * (nl - 1)\n a = self.forward_propagation(x)\n tdw, tdb = self.back_propagation(y, a)\n for l in range(nl - 1):\n dw[l] = np.zeros(self.w[l].shape)\n for i in range(self.w[l].shape[0]):\n for j in range(self.w[l].shape[1]):\n self.w[l][i, j] = self.w[l][i, j] + eps\n a = self.forward_propagation(x)\n c1 = self.cost(y, a[len(a) - 1])\n self.w[l][i, j] = self.w[l][i, j] - eps * 2.\n a = self.forward_propagation(x)\n c2 = self.cost(y, a[len(a) - 1])\n self.w[l][i, j] = self.w[l][i, j] + eps\n dw[l][i, j] = (c1 - c2) / (eps * 2.)\n db[l] = np.zeros(self.b[l].shape)\n for i in range(self.b[l].shape[0]):\n for j in range(self.b[l].shape[1]):\n self.b[l][i, j] = self.b[l][i, j] + eps\n a = self.forward_propagation(x)\n c1 = self.cost(y, a[len(a) - 1])\n self.b[l][i, j] = self.b[l][i, j] - eps * 2.\n a = self.forward_propagation(x)\n c2 = self.cost(y, a[len(a) - 1])\n self.b[l][i, j] = self.b[l][i, j] + eps\n db[l][i, j] = (c1 - c2) / (eps * 2.)\n print(np.linalg.norm(tdw[l] - dw[l]), np.linalg.norm(tdb[l] - db[l]))\n print(np.linalg.norm((tdw[l] - dw[l]) / dw[l]), np.linalg.norm((tdb[l] - db[l]) / db[l]))\n\n def gradient_descent_update(self, dw: list, db: list, params=None) -> dict:\n if params is None:\n params = {\"learning_rate\": 0.7}\n for l in range(len(self.layer_dims) - 1):\n self.w[l] = self.w[l] - params[\"learning_rate\"] * dw[l]\n self.b[l] = self.b[l] - params[\"learning_rate\"] * db[l]\n return {}\n\n def gradient_descent_momentum_update(self, dw: list, db: list, cache: dict, params=None) -> dict:\n if params is None:\n params = {\"f\": 0.1, \"learning_rate\": 0.02}\n if not cache:\n cache = {\"v_w\": [], \"v_b\": []}\n for l in range(len(self.layer_dims) - 1):\n cache[\"v_w\"].append(np.zeros(self.w[l].shape))\n cache[\"v_b\"].append(np.zeros(self.b[l].shape))\n for l in range(len(self.layer_dims) - 1):\n cache[\"v_w\"][l] = (1. - params[\"f\"]) * cache[\"v_w\"][l] + dw[l]\n cache[\"v_b\"][l] = (1. - params[\"f\"]) * cache[\"v_b\"][l] + db[l]\n self.w[l] = self.w[l] - params[\"learning_rate\"] * cache[\"v_w\"][l]\n self.b[l] = self.b[l] - params[\"learning_rate\"] * cache[\"v_b\"][l]\n return cache\n\n def optimize(self, x: np.ndarray, y: np.ndarray, x_cv: np.ndarray, y_cv: np.ndarray,\n optimization_params: dict = None, iter_num: int = 1500, dropout_rate: float = None,\n l2_decay: float = 0.) 
-> (float, float):\n        best_so_far = {\"cost\": np.infty, \"w\": None, \"b\": None, \"iter_num\": 0}\n        cache = {}\n        no_update_cnt = 0\n        # forward_propagation expects dropout as a dict with \"mask\"/\"rate\" keys (see its signature above)\n        eval_dropout = None if dropout_rate is None else {\"rate\": dropout_rate}\n        for i in range(iter_num):\n            if dropout_rate is None:\n                a = self.forward_propagation(x, training=True)\n                dw, db = self.back_propagation(y, a)\n            else:\n                dropout_mask = [np.ones((x.shape[0], 1))]\n                for dim in self.layer_dims[1:-1]:\n                    dropout_mask.append(np.random.rand(dim, 1) >= dropout_rate)\n                dropout_mask.append(np.ones((self.layer_dims[-1], 1)))\n                a = self.forward_propagation(x, training=True, dropout={\"mask\": dropout_mask, \"rate\": dropout_rate})\n                dw, db = self.back_propagation(y, a, dropout_mask=dropout_mask)\n            for l in range(len(self.layer_dims) - 1):\n                dw[l] = dw[l] + self.w[l] * l2_decay\n                db[l] = db[l] + self.b[l] * l2_decay\n            cache = self.gradient_descent_momentum_update(dw, db, cache, optimization_params)\n            a = self.forward_propagation(x_cv, dropout=eval_dropout)\n            cost = self.cost(y_cv, a[-1])\n            if cost < best_so_far[\"cost\"]:\n                best_so_far[\"cost\"] = cost\n                best_so_far[\"w\"] = self.w\n                best_so_far[\"b\"] = self.b\n                best_so_far[\"iter_num\"] = i + 1\n                no_update_cnt = 0\n            else:\n                no_update_cnt = no_update_cnt + 1\n                if no_update_cnt % 10 == 0:\n                    optimization_params[\"learning_rate\"] = optimization_params[\"learning_rate\"] * 0.5\n                if no_update_cnt >= 30:\n                    break\n        self.w = best_so_far[\"w\"]\n        self.b = best_so_far[\"b\"]\n        # print(best_so_far[\"iter_num\"])\n        return self.cost(y, self.forward_propagation(x, dropout=eval_dropout)[-1]), self.cost(\n            y_cv, self.forward_propagation(x_cv, dropout=eval_dropout)[-1])\n\n    def predict(self, x: np.ndarray, dropout_rate: float = None):\n        a = self.forward_propagation(x, dropout=None if dropout_rate is None else {\"rate\": dropout_rate})\n        return a[len(self.layer_dims) - 1] >= 0.5\n","repo_name":"gonglinyuan/titanic","sub_path":"NeuralNetwork_old.py","file_name":"NeuralNetwork_old.py","file_ext":"py","file_size_in_byte":7112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"37298370472","text":"#!/usr/bin/env python3\n\nimport rospy\nimport numpy as np\n\nfrom geometry_msgs.msg import PointStamped, QuaternionStamped, PoseStamped, Twist, Vector3Stamped\nfrom sensor_msgs.msg import Joy\n\n\n# Parameters\nX_BOUNDS = [-3.0, 3.0]\nY_BOUNDS = [-2.0, 2.0]\nZ_BOUNDS = [0.5, 1.5]\n\nRATE = 100  # hz \nDT = 1/RATE  # seconds\n\n\nclass JoyController:\n    def __init__(self):\n        rospy.init_node('joy_controller_node', disable_signals=True)\n        self.rate = rospy.Rate(RATE)  # GCS rate is 20 Hz\n        self.dt = DT  # seconds\n\n        self.update_dt = 1.0  # seconds\n\n        print(\"Initializing joy controller\")\n\n        self.curr_pos = np.array([0.0, 0.0, 1.0])\n        self.curr_att = np.zeros(4)\n\n        self.vel_cmd = np.zeros(3)\n\n        # Publishers\n        self.pos_pub = rospy.Publisher(\"drone2/setpoint/position\", PointStamped, queue_size=1)\n        self.att_pub = rospy.Publisher(\"drone2/setpoint/attitude\", QuaternionStamped, queue_size=1)\n        self.vel_pub = rospy.Publisher(\"drone2/setpoint/velocity\", Vector3Stamped, queue_size=1)\n\n        # Subscribers\n        mocap_sub = rospy.Subscriber(\"drone2/mavros/local_position/pose\", PoseStamped, self.mocap_cb)\n        joy_sub = rospy.Subscriber(\"joy\", Joy, self.joy_cb)\n    \n    def mocap_cb(self, data):\n        # Unpack Data\n        self.curr_pos[0] = data.pose.position.x\n        self.curr_pos[1] = data.pose.position.y\n        self.curr_pos[2] = data.pose.position.z\n\n        self.curr_att[0] = data.pose.orientation.w\n        self.curr_att[1] = data.pose.orientation.x\n        self.curr_att[2] = data.pose.orientation.y\n        self.curr_att[3] = 
data.pose.orientation.z\n\n\n def in_bounds(self):\n return (self.curr_pos[0] >= X_BOUNDS[0] and self.curr_pos[0] <= X_BOUNDS[1] and\n self.curr_pos[1] >= Y_BOUNDS[0] and self.curr_pos[1] <= Y_BOUNDS[1] and\n self.curr_pos[2] >= Z_BOUNDS[0] and self.curr_pos[2] <= Z_BOUNDS[1])\n\n\n def joy_cb(self, data):\n # Unpack Data\n self.joy_buttons = data.buttons\n self.joy_axes = data.axes\n\n # Buttons (0 if not pressed, 1 if pressed)\n # 0: A\n # 1: B\n # 3: X\n # 4: Y\n # 6: LB\n # 7: RB\n # 13: button stick left\n # 14: button stick right\n\n # Axes (-1 to 1)\n # 0: left stick right/left\n # 1: left stick down/up\n # 2: right stick right/left\n # 3: right stick down/up\n # 4: RT (1 if not pressed, -1 if pressed)\n # 5: LT (1 if not pressed, -1 if pressed)\n # 6: cross key right/left\n # 7: cross key down/up\n print(data.axes)\n\n # Right stick - XY velocity command\n self.vel_cmd[0] = -0.5 * self.joy_axes[2]\n self.vel_cmd[1] = 0.5 * self.joy_axes[3]\n\n\n def run(self, event=None):\n\n # Get velocity command\n print(f\"vel_cmd: {self.vel_cmd}\")\n vx = self.vel_cmd[0]\n vy = self.vel_cmd[1]\n vz = self.vel_cmd[2]\n\n t_now = rospy.Time.now()\n\n # Variables to publish\n vel_msg = Vector3Stamped()\n vel_msg.header.stamp = t_now\n if self.in_bounds():\n vel_msg.vector.x = vx\n vel_msg.vector.y = vy\n vel_msg.vector.z = vz\n else:\n vel_msg.vector.x = 0.0\n vel_msg.vector.y = 0.0\n vel_msg.vector.z = 0.0\n\n # Publish\n self.vel_pub.publish(vel_msg)\n\n\nif __name__ == '__main__':\n \n jc = JoyController()\n\n rospy.Timer(rospy.Duration(jc.dt), jc.run)\n rospy.spin()\n\n","repo_name":"adamdai/brain2drone","sub_path":"src/joy_controller.py","file_name":"joy_controller.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"32637765243","text":"# %%\n\nimport numpy as np\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.layers import Input, LSTM, Embedding, Dense\n\n# %%\n\n# %%\nlstm_units = 500\nencoder_input_size = 4096\ntime_steps_enc = 40\nvocab_size = 5000\nemd_size = 200\nmax_seq_len = 50\n\n# %% Encoder\n\nenc_input = Input((None, encoder_input_size), name='enc_input')\nenc_lstm = LSTM(lstm_units, return_state=True, name='enc_lstm')\nX = enc_input\nX = enc_lstm(X)\nenc_out, enc_h, enc_c = X\nenc_states = [enc_h, enc_c]\n\n# %%Decoder\n\ndec_input = Input((None,), name='dec_input')\ndec_embedding = Embedding(vocab_size+1, emd_size, input_length=max_seq_len,\n trainable=False, name='dec_embd', mask_zero=True)\ndec_lstm = LSTM(lstm_units, return_sequences=True,\n return_state=True, name='dec_lstm')\ndec_dense = Dense(vocab_size, activation='softmax')\nY = dec_input\nY = dec_embedding(Y)\nY = dec_lstm(Y, initial_state=enc_states)\ndec_out, _, _ = Y\nY = dec_dense(dec_out)\n\n# %%Model\n\nmodel = Model(inputs=[enc_input, dec_input], outputs=Y)\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy')\n# %%\nmodel.summary()\nplot_model(model, show_shapes=True)\n# %%\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy', metrics=['acc'])\n# %%\nbatch_size = 128\nvalidation_split = 0.2\nepochs = 20\nvalidation_batch_size = 128\n\nmodel.fit(x=[eng_padded], y=target_padded, batch_size=batch_size, epochs=epochs,\n validation_split=validation_split, validation_batch_size=validation_batch_size)\n\n# %%\n\n# %%inference model\n\nencoder_model_inf = Model(enc_input, enc_states)\n\ndec_inf_input_h = 
Input((lstm_units,))\ndec_inf_input_c = Input((lstm_units,))\ndec_inf_input_states = [dec_inf_input_h, dec_inf_input_c]\n\ndec_inf_out, dec_inf_h, dec_inf_c = dec_lstm(dec_embedding(dec_input),\n                                             initial_state=dec_inf_input_states)\ndec_inf_states = [dec_inf_h, dec_inf_c]\ndec_inf_output = dec_dense(dec_inf_out)\n\ndecoder_model_inf = Model(inputs=[dec_input] + dec_inf_input_states,\n                          outputs=[dec_inf_output] + dec_inf_states)\n\n# %%\nencoder_model_inf.summary()\nplot_model(encoder_model_inf, show_shapes=True)\n# %%\ndecoder_model_inf.summary()\nplot_model(decoder_model_inf, show_shapes=True)\n# %%\n\n\ndef generate(input_seq):\n\n    states_val = encoder_model_inf.predict(input_seq)\n    target_seq = [word_index['\\t']]\n\n    predicted_sent = []\n    stop_condition = False\n\n    while not stop_condition:\n\n        decoder_out, decoder_h, decoder_c = decoder_model_inf.predict(\n            x=[np.array(target_seq)] + states_val)\n        max_val_index = np.argmax(decoder_out[0, 0])\n\n        if reverse_word_index[max_val_index] == '\\n' or max_val_index == 0 or len(predicted_sent) > max_seq_len:\n            stop_condition = True\n\n        target_seq = [max_val_index]\n        predicted_sent.append(reverse_word_index[max_val_index])\n        states_val = [decoder_h, decoder_c]\n\n    return \" \".join(predicted_sent)\n\n# %%\n\n\nword_index = {\"a\": 1, \"\\t\": 2}\nreverse_word_index = {1: \"a\", 2: \"\\t\", 4756: \"HHH\"}\ngenerate(np.random.randn(1, 20, 4096))\n\n# %%\n","repo_name":"ArihantRawat/Video_Captioning","sub_path":"utils/seq2seq-v1.py","file_name":"seq2seq-v1.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"23733316406","text":"# Extract specific regions from a PNG image sequence and store them as a VTI file\nimport os\nimport vmtk.vmtkimagereader as imagereader\nimport vmtk.vmtkimagewriter as imagewriter\n\n# Read the PNG images, using Otsu's thresholding algorithm\ndef pngToVti():\n    input_datadir = 'C:/Users/chenjiaxing/Desktop/CT-Data/png/Spine_Bone_Ext/'\n    outputFileName = 'vessel.vti'\n    reader = imagereader.vmtkImageReader()\n    \n    # Image sequence\n    filelist = os.listdir(input_datadir)\n    #print(len(filelist))\n    reader.InputFilePrefix = input_datadir\n    reader.InputFilePattern = \"%s%d.png\"\n    reader.DataExtent = [0 ,512 ,0, 512, 1, len(filelist)]\n    reader.Format = \"png\"\n    reader.UseITKIO = 0\n    reader.Execute()\n\n    writer = imagewriter.vmtkImageWriter()\n    writer.Image = reader.Image\n    writer.Format = \"vtkxml\"\n    writer.OutputFileName = outputFileName\n    writer.WindowLevel = [150, 1200]  # window width and window level\n    writer.Execute()\n\n\n\nif __name__=='__main__':\n    pngToVti()","repo_name":"Chen41284/Python","sub_path":"VMTK/pngTovti.py","file_name":"pngTovti.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"72413664715","text":"from typing import List\n\nfrom binary_tree_node import BinaryTreeNode\nfrom test_framework import generic_test\nfrom collections import deque\n\ndef binary_tree_depth_order(tree: BinaryTreeNode) -> List[List[int]]:\n    if not tree:\n        return []\n    res = []\n    q = deque([tree])\n    while q:\n        curLevelRes = []\n        nextQ = deque()\n        while q:\n            node = q.popleft()\n            curLevelRes.append(node.data)\n            if node.left: \n                nextQ.append(node.left)\n            if node.right:\n                nextQ.append(node.right)\n        res.append(curLevelRes)\n        q = nextQ\n    return res\n\n\n\n\nif __name__ == '__main__':\n    exit(\n        generic_test.generic_test_main('tree_level_order.py',\n                                       'tree_level_order.tsv',\n                                       
binary_tree_depth_order))\n","repo_name":"darrenzhang2000/EPI-Judge","sub_path":"epi_judge_python/tree_level_order.py","file_name":"tree_level_order.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"7382702934","text":"#! /usr/bin/env python3\n\nimport json\nimport sys\nimport os\n\ncc = None\noutput = None\n\n# Only the ninja backend produces compile_commands.json\nif sys.argv[1] == 'ninja':\n with open('compile_commands.json') as f:\n cc = json.load(f)\n output = {x['output'] for x in cc}\n\nfor obj in sys.argv[2:]:\n if not os.path.exists(obj):\n sys.exit(f'File {obj} not found.')\n if sys.argv[1] == 'ninja' and obj not in output:\n sys.exit(1)\n print('Verified', obj)\n","repo_name":"mesonbuild/meson","sub_path":"test cases/common/22 object extraction/check-obj.py","file_name":"check-obj.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":4986,"dataset":"github-code","pt":"28"} +{"seq_id":"5344145698","text":"# 분해합\nn = input()\nlength = len(n)\nanswer = 0\n\nfor i in range(1, int(n)):\n tmp = i\n sum = i\n for j in range(length):\n sum += (tmp % 10)\n tmp = int(tmp/10)\n if sum == int(n):\n answer = i\n break\nprint(answer)\n","repo_name":"ygeenee616/AlgorithmStudy","sub_path":"BruteForce/problem2231/yeonjin.py","file_name":"yeonjin.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"9756176099","text":"import argparse\nimport asyncio\nimport functools\nimport json\nimport os\nfrom io import BytesIO\n\nimport uvicorn\nfrom fastapi import FastAPI, BackgroundTasks, File, Body, UploadFile, Request\nfrom fastapi.responses import StreamingResponse\nfrom faster_whisper import WhisperModel\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.templating import Jinja2Templates\nfrom zhconv import convert\n\nfrom utils.data_utils import remove_punctuation\nfrom utils.utils import add_arguments, print_arguments\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n\nadd_arg(\"host\", type=str, default=\"0.0.0.0\", help=\"\")\nadd_arg(\"port\", type=int, default=5000, help=\"\")\nadd_arg(\"model_path\", type=str, default=\"models/sam2ai/whisper-odia-small-finetune-int8-ct2\", help=\"\")\nadd_arg(\"use_gpu\", type=bool, default=False, help=\"\")\nadd_arg(\"use_int8\", type=bool, default=True, help=\"\")\nadd_arg(\"beam_size\", type=int, default=10, help=\"\")\nadd_arg(\"num_workers\", type=int, default=2, help=\"\")\nadd_arg(\"vad_filter\", type=bool, default=True, help=\"\")\nadd_arg(\"local_files_only\", type=bool, default=True, help=\"\")\nargs = parser.parse_args()\nprint_arguments(args)\n\n# \nassert os.path.exists(args.model_path), f\"{args.model_path}\"\n# \nif args.use_gpu:\n if not args.use_int8:\n model = WhisperModel(args.model_path, device=\"cuda\", compute_type=\"float16\",\n num_workers=args.num_workers, local_files_only=args.local_files_only)\n else:\n model = WhisperModel(args.model_path, device=\"cuda\",\n compute_type=\"int8_float16\", num_workers=args.num_workers,\n local_files_only=args.local_files_only)\nelse:\n model = WhisperModel(args.model_path, device=\"cpu\",\n compute_type=\"int8\", num_workers=args.num_workers,\n local_files_only=args.local_files_only)\n\n# \n# _, _ = 
model.transcribe(\"dataset/test.wav\", beam_size=5)\n\napp = FastAPI(title=\"OdiaGenAI Whisper ASR\")\napp.mount('/static', StaticFiles(directory='static'), name='static')\ntemplates = Jinja2Templates(directory=\"templates\")\nmodel_semaphore = None\n\n\ndef release_model_semaphore():\n model_semaphore.release()\n\n\ndef recognition(file: File, to_simple: int,\n remove_pun: int, language: str = \"bn\",\n task: str = \"transcribe\"\n ):\n\n segments, info = model.transcribe(file, beam_size=10, task=task, language=language, vad_filter=args.vad_filter)\n for segment in segments:\n text = segment.text\n if to_simple == 1:\n # text = convert(text, '')\n pass\n if remove_pun == 1:\n # text = remove_punctuation(text)\n pass\n ret = {\"result\": text, \"start\": round(segment.start, 2), \"end\": round(segment.end, 2)}\n # \n yield json.dumps(ret).encode() + b\"\\0\"\n\n\n@app.post(\"/recognition_stream\")\nasync def api_recognition_stream(\n to_simple: int = Body(1, description=\"\", embed=True),\n remove_pun: int = Body(0, description=\"\", embed=True),\n language: str = Body(\"bn\", description=\"\", embed=True),\n task: str = Body(\"transcribe\", description=\"\", embed=True),\n audio: UploadFile = File(..., description=\"\")\n ):\n\n global model_semaphore\n if language == \"None\": language = None\n if model_semaphore is None:\n model_semaphore = asyncio.Semaphore(5)\n await model_semaphore.acquire()\n contents = await audio.read()\n data = BytesIO(contents)\n generator = recognition(\n file=data, to_simple=to_simple,\n remove_pun=remove_pun, language=language,\n task=task\n )\n background_tasks = BackgroundTasks()\n background_tasks.add_task(release_model_semaphore)\n return StreamingResponse(generator, background=background_tasks)\n\n\n@app.post(\"/recognition\")\nasync def api_recognition(\n to_simple: int = Body(1, description=\"\", embed=True),\n remove_pun: int = Body(0, description=\"\", embed=True),\n language: str = Body(\"bn\", description=\"\", embed=True),\n task: str = Body(\"transcribe\", description=\"\", embed=True),\n audio: UploadFile = File(..., description=\"\")\n ):\n\n if language == \"None\":language=None\n contents = await audio.read()\n data = BytesIO(contents)\n generator = recognition(\n file=data, to_simple=to_simple,\n remove_pun=remove_pun, language=language,\n task=task\n )\n results = []\n for output in generator:\n output = json.loads(output[:-1].decode(\"utf-8\"))\n results.append(output)\n ret = {\"results\": results, \"code\": 0}\n return ret\n\n\n@app.get(\"/\")\nasync def index(request: Request):\n return templates.TemplateResponse(\n \"index.html\", {\"request\": request, \"id\": id}\n )\n\n\nif __name__ == '__main__':\n uvicorn.run(app, host=args.host, port=args.port)\n","repo_name":"OdiaGenAI/Olive_Odia_ASR","sub_path":"infer_server.py","file_name":"infer_server.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"21447152734","text":"import requests\nimport sys\nfrom bs4 import BeautifulSoup\n\nargs = sys.argv # translate from = args[1], translate to = args[2], word = args[3]\nerror = False\nlanguages = {'1': 'Arabic', '2': 'German', '3': 'English', '4': 'Spanish', '5': 'French', '6': 'Hebrew', '7': 'Japanese', '8': 'Dutch', '9': 'Polish', '10': 'Portuguese', '11': 'Romanian', '12': 'Russian', '13': 'Turkish'}\nlanguages_list = ['Arabic', 'German', 'English', 'Spanish', 'French', 'Hebrew', 'Japanese', 'Dutch', 'Polish', 'Portuguese', 'Romanian', 
'Russian', 'Turkish']\n\n\ndef translate(lang_from, lang_to, word):\n global error\n url = f'https://context.reverso.net/translation/{lang_from.lower()}-{lang_to.lower()}/{word.lower()}'\n\n try:\n r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})\n except requests.exceptions.ConnectionError:\n print('Something wrong with your internet connection')\n error = True\n return False\n if r.status_code == 404:\n print(f'Sorry, unable to find {word}')\n error = True\n return False\n\n soup = BeautifulSoup(r.content, 'html.parser')\n\n words = soup.find('div', id=\"translations-content\")\n words_final = words.text.split()\n sentences = soup.find('section', {'id': 'examples-content'})\n both = sentences.find_all('div', {'class': 'example'})\n from_helper = sentences.find_all('div', {'class': 'src ltr'})\n from_l = from_helper[0].text.strip()\n print(\"\")\n print(f'{lang_to.capitalize()} translation:')\n print(words_final[0])\n\n print(f'\\n{lang_to.capitalize()} example:')\n print(from_l)\n to_l = both[0].text.strip().replace('\\n', '').replace(from_l, '').strip()\n print(to_l, '\\n')\n\n with open(f'{word}.txt', 'a', encoding='utf-8') as file:\n file.write(f'{lang_to} Translation:\\n')\n file.write(f'{words_final[0]}\\n')\n file.write(f'\\n{lang_to} Example:\\n')\n file.write(f'{from_l}\\n')\n file.write(f'{to_l}\\n\\n\\n')\n\n\nif len(args) != 4:\n print(\"The script should be called with 3 arguments, 1.translate from, 2.translate to, 3.word to translate\")\n print(\"e.g. python translator.py english german hello\")\nelse:\n if args[1].capitalize() not in languages_list:\n print(f\"Sorry, the program doesn't support {args[1]}\")\n elif args[2].capitalize() not in languages_list and args[2] != 'all':\n print(f\"Sorry, the program doesn't support {args[2]}\")\n elif args[2] == 'all':\n for n in languages:\n if args[1].capitalize() == languages[n]:\n pass\n else:\n if error is True:\n break\n else:\n translate(args[1], languages[n], args[3])\n else:\n translate(args[1], args[2], args[3])\n\n\n\n\n\n\n","repo_name":"Zmolik/multilingual_online_translator","sub_path":"translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"40086837922","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 20 17:20:33 2019\n\n@author: Administrator\n\"\"\"\n\nclass Solution:\n def countDigitOne(self, n: int) -> int:\n# count = 0\n# for k in range(1, n+1):\n# for p in list(str(k)):\n# if p == '1':\n# count += 1\n# return count\n\n \n# count = 0\n# while n > 9:\n# m = len(str(n)) - 1\n# tmp = 10**m - 1\n## print(tmp)\n## print(self.fun(tmp))\n# count += self.fun(tmp)\n# n -= tmp\n# return count + 1\n \n# tmp = n\n# n = 10 ** (len(str(n)) - 1) - 1\n# tmp -= n\n# count = 0\n# while n:\n# count += self.fun(n)\n# n //= 10\n# return count\n# \n# def fun(self, n):\n# tmp = 0\n# count = 1\n# m = len(str(n))\n# for k in range(1, m):\n# tmp += count\n# count = tmp * 9 + 10 ** k\n# return count\n \n# count = 0\n# \n# if n == 10**len(str(n))-1:\n# while n:\n# count += self.fun(n)\n# n //= 10\n# return count\n# \n# count += self.countDigitOne( 10 ** (len(str(n))-1)-1 )\n# \n# tmp = n - 10 ** ( len(str(n))-1 )\n# count += self.countDigitOne( tmp )\n# \n# count += tmp + 1\n# \n## tmp = n\n## n = 10 ** (len(str(n)) - 1) - 1\n## tmp -= n\n## count = 0\n## while n:\n## count += self.fun(n)\n## n //= 10\n## return count\n# \n# def fun(self, n):\n# tmp = 0\n# count = 1\n# m = 
len(str(n))\n#        for k in range(1, m):\n#            tmp += count\n#            count = tmp * 9 + 10 ** k\n#        return count\n        \n        \n        if n <= 0: # special case\n            return 0\n        \n        if n <= 9:\n            return 1\n        \n        count = 0\n        \n        if n == 10**len(str(n))-1: # n consists only of 9s\n            while n:\n                count += self.fun(n)\n                n //= 10\n            return count\n        \n#        while :\n#            count += self.countDigitOne(10**(len(str(n))-1)-1)\n#            n = n - 10**(len(str(n))-1)\n#        if n <= 10**(len(str(n))-1):\n#            count += n+1#min(10**(len(str(n))-1), n+1)#\n#            count += self.countDigitOne(n)\n#        else:\n##            count += 10**(len(str(n))-1)#min(10**(len(str(n))-1), n+1)#\n#            tmp = int(''.join(list(str(n))[1:]))\n#            count += self.countDigitOne(tmp)\n#        \n#        while :\n#            if n > 10**(len(str(n)-1)-1):\n        \n        count = 0\n        m = int(list(str(n))[0]) # look at the leading digit directly  # three parts may contribute 1s\n        for k in range(m):\n            count += self.countDigitOne( 10**(len(str(n))-1)-1 ) #=1=\n#            print(k, self.countDigitOne( 10**(len(str(n))-1)-1 ))\n        if m == 1: #=2=\n            tmp = ''.join(list(str(n))[1:])\n            print(tmp, type(tmp))\n            count += int(tmp) + 1\n        else:\n            count += 10 ** (len(str(n))-1)\n#            print(10 ** (len(str(n))-1))\n            tmp = ''.join(list(str(n))[1:])\n#            print(tmp, type(str(tmp)))\n            count += self.countDigitOne( int(tmp) ) #=3=\n#        print(count)\n        return count\n    \n    def fun(self, n): # count by pattern\n        tmp = 0\n        count = 1\n        m = len(str(n))\n        for k in range(1, m):\n            tmp += count\n            count = tmp * 9 + 10 ** k\n        return count\n    \n    \nsolu = Solution()\n#n = 13\n#n = 3184191\nn = 9999\n#n = 1000\n#n = 9\nn = 1024\nn = 999\nn = 1999\nn = 1789\n#n = 2789\nn = 2\nn = 20\n#n = 21\nn = -1\nprint(solu.countDigitOne(n))\n#print(solu.fun(7))\n\n\nclass Solution(object):\n    def countDigitOne(self, n):\n        \"\"\"\n        Recursive solution; could be turned into memoized search to speed it up\n        \"\"\"\n        if n<=0: return 0\n        if n<10: return 1\n        last = int(str(n)[1:])\n        power = 10**(len(str(n))-1) \n        high = int(str(n)[0])\n        if high == 1:\n            return self.countDigitOne(last) + self.countDigitOne(power-1) + last+1\n        else:\n            return self.countDigitOne(last) + high*self.countDigitOne(power-1) + power","repo_name":"1050669722/LeetCode-Answers","sub_path":"Python/problem0233.py","file_name":"problem0233.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"18225414973","text":"\"\"\"\nCustom dataset to load videos (image sequences).\nVideoDataset is a modified version of the AlignedDataset in Pix2Pix.\nIn contrast to AlignedDataset, VideoDataset also allows loading of image blocks (n subsequent image pairs at a time).\nData augmentation is also adjusted accordingly.\nFurthermore, we made various minor code design changes.\nMost notably, we don't use opt (and a long line of keyword args instead) and don't inherit from BaseDataset.\n\"\"\"\n\nimport os\nimport random\n\nimport numpy as np\nfrom PIL import Image\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\n\n\ndef read_image(image_path):\n    \"\"\"\n    Reads an image from file as PIL.Image in RGB format\n    :param image_path: path of the image file\n    :return: PIL RGB image\n    \"\"\"\n    return Image.open(image_path).convert('RGB')\n\n\ndef image_to_numpy(image):\n    \"\"\"\n    Transforms a PIL.Image to a numpy array\n    :param image: PIL image\n    :return: numpy array\n    \"\"\"\n    return np.asarray(image, dtype=\"float32\") / 255\n\n\ndef get_lowest_level_dirs(root_dir):\n    \"\"\"\n    For a given root directory, get a list of all subdirectories (including root_dir) which contain files\n    :param root_dir: root directory\n    :return: list of subdirectories containing files\n    \"\"\"\n    file_paths = []\n    for root, _, 
file_names in os.walk(root_dir):\n for file_name in file_names:\n file_paths.append(os.path.join(root, file_name))\n return sorted(list({os.path.dirname(file_path) for file_path in file_paths}))\n\n\nclass VideoDataset(data.Dataset):\n \"\"\"Custom dataset class to load images block-wise\"\"\"\n\n def __init__(\n self, root_dirs, root_names, block_size=1,\n overlap=False, augment_data=True,\n width=256, height=256, load_width=286, load_height=286,\n ):\n \"\"\"\n Initialize the dataset\n :param root_dirs: List of directories containing the images\n :param root_names: Name of each dir in root_dirs\n :param block_size: Block size (how many images to process at once)\n :param overlap: Whether subsequent blocks should overlap or not\n :param augment_data: Whether to perform data augmentation (random crop + flip) or not\n :param width: Width of the loaded images (= random crop width if augment_data=True)\n :param height: Height of the loaded images (= random crop height if augment_data=True)\n :param load_width: Width to which each image is resized before the random crop (if augment_data=True)\n :param load_height: Height to which each image is resized before the random crop (if augment_data=True)\n \"\"\"\n\n assert len(root_names) == len(root_dirs) # make sure each root dir has a name\n\n self.root_names = root_names\n self.block_size = block_size\n self.overlap = overlap\n self.augment_data = augment_data\n self.width = width\n self.height = height\n self.load_width = load_width\n self.load_height = load_height\n\n # define data transformations applied similarly to all blocks: NHWC to NCHW > normalize\n transform_list_numpy = [\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]\n self.data_transform_numpy = transforms.Compose(transform_list_numpy)\n\n # define list of all sequence dirs for both source and target\n self.image_dirs = [get_lowest_level_dirs(root_dir) for root_dir in root_dirs]\n\n # define list of list of all image names for both source and target\n self.image_names = [[[f for f in sorted(os.listdir(image_dir)) if os.path.isfile(os.path.join(image_dir, f))] for image_dir in i] for i in self.image_dirs]\n\n # make sure all dirs contain same number of videos and frames\n for i in range(1, len(self.image_dirs)):\n assert len(self.image_dirs[0]) == len(self.image_dirs[i])\n for j in range(len(self.image_dirs[0])):\n assert len(self.image_names[0][j]) == len(self.image_names[i][j])\n\n # define lengths of each sequence (how many blocks can be loaded from each sequence)\n def get_sequence_len(sequence):\n \"\"\"Return the length of a given sequence (i.e. the number of image blocks that can be loaded)\"\"\"\n if self.overlap:\n return max(0, len(sequence) - self.block_size + 1)\n return len(sequence) // self.block_size\n self.sequence_lens = [get_sequence_len(sequence) for sequence in self.image_names[0]]\n\n # get number of channels from first image for each directory\n self.image_channels = [\n image_to_numpy(read_image(os.path.join(self.image_dirs[i][0], self.image_names[i][0][0]))).shape[2]\n for i in range(len(self.image_dirs))\n ]\n\n # print dataset information\n self.print_info()\n\n def __len__(self):\n \"\"\"\n Return the length of the dataset\n This is the sum of image blocks that can be loaded per sequence; all remaining frames will be discarded\n \"\"\"\n return max(0, np.sum(self.sequence_lens)) # max(0,...) 
should not be needed, but pylint complains, so whatever\n\n def __getitem__(self, index):\n \"\"\"\n Get an item at a specific index in the dataset\n Each item is a dict {source: x, target: y} where x is a source image block and y a target image block\n :param index: index of the item in the dataset\n :return: item at the specific index\n \"\"\"\n\n # define block-specific data transformations: resize > random crop > random flip\n # This MUST be called here (and can not be done once in __init__()), because it is block-specific!\n data_transform = self.get_transforms()\n\n # initialize source and target image blocks, and list of source and target images names\n image_blocks = {\n root_name: np.zeros((self.block_size*channels, self.height, self.width), dtype=\"float32\")\n for channels, root_name in zip(self.image_channels, self.root_names)\n }\n image_paths = {root_name+\"_path\": [] for root_name in self.root_names}\n\n # find sequence index (which sequence to load from) and block_index (which block to load in the sequence)\n sequence_index = 0\n sequence_len_sum = 0\n while sequence_len_sum + self.sequence_lens[sequence_index] < index + 1:\n sequence_len_sum += self.sequence_lens[sequence_index]\n sequence_index += 1\n block_index = index - sequence_len_sum\n\n # add images into source and target blocks\n for i in range(self.block_size):\n # find frame index (which frame to load in the sequence)\n image_index = block_index + i if self.overlap else block_index * self.block_size + i\n\n # load image and add it to the image block; add image path to image_paths\n for j, root_name in enumerate(self.root_names):\n image_name = self.image_names[j][sequence_index][image_index]\n image_path = os.path.join(self.image_dirs[j][sequence_index], image_name)\n image_paths[root_name+\"_path\"].append(image_path)\n image = self.read_and_transform(image_path, data_transform)\n image_blocks[root_name][i*self.image_channels[j]:(i+1)*self.image_channels[j], :, :] = image\n\n return {**image_blocks, **image_paths}\n\n def read_and_transform(self, image_path, data_transform):\n \"\"\"\n Reads an image and performs several transformations\n :param image_path: path of the image file\n :param data_transform: block-specific data transformations\n :return: transformed image (np.array)\n \"\"\"\n image = read_image(image_path) # read image from image_path as Pillow.Image\n image = data_transform(image) # perform block-specific data transformations\n image = image_to_numpy(image) # convert to [0,1]-normalized numpy array\n image = self.data_transform_numpy(image) # perform further data transformations\n return image\n\n def get_transforms(self):\n \"\"\"\n Define block-specific data transformations: resize > random crop > random flip\n :return: data transformations (torchvision.transforms.Compose)\n \"\"\"\n transform_list = []\n crop_size = (self.width, self.height)\n load_size = (self.load_width, self.load_height)\n if self.augment_data:\n crop_location, do_flip = self.sample_transformation_params(load_size, crop_size)\n transform_list.append(transforms.Resize(load_size, Image.BICUBIC))\n transform_list.append(\n transforms.Lambda(lambda img: self.parametrized_random_crop(img, crop_location, crop_size))\n )\n transform_list.append(transforms.Lambda(lambda img: self.parametrized_random_flip(img, do_flip)))\n else:\n transform_list.append(transforms.Resize(crop_size, Image.BICUBIC))\n return transforms.Compose(transform_list)\n\n def print_info(self):\n \"\"\"Print dataset size, source dimensions and target 
dimensions\"\"\"\n print(\"---------- Dataset Information ----------\")\n print(\"dataset size:\", len(self))\n for i, root_name in enumerate(self.root_names):\n print(root_name, \"dimensions:\", self.image_channels[i], \"x\", self.width, \"x\", self.height)\n print(\"-----------------------------------------\")\n\n @staticmethod\n def parametrized_random_crop(img, crop_position, crop_size):\n \"\"\"\n Parametrized Random Crop\n :param img: image to perform random cropping on\n :param crop_position: start position of the random crop (top left corner) - (x, y)\n :param crop_size: random crop size (width, height)\n :return: random crop\n \"\"\"\n image_width, image_height = img.size\n crop_x, crop_y = crop_position\n crop_width, crop_height = crop_size\n if image_width > crop_width or image_height > crop_height:\n return img.crop((crop_x, crop_y, crop_x + crop_width, crop_y + crop_height))\n return img\n\n @staticmethod\n def parametrized_random_flip(img, do_flip):\n \"\"\"\n Parametrized Horizontal Random Flip\n :param img: image to perform random flip on\n :param do_flip: whether to flip the image or not\n :return: flipped image if do_flip else original input image\n \"\"\"\n if do_flip:\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n return img\n\n @staticmethod\n def sample_transformation_params(load_size, crop_size):\n \"\"\"\n Sample parameters for random crop (crop_postion) and for random flip (do_flip)\n :param load_size: size (width, height) of the image before the random crop\n :param crop_size: random crop size (width, height)\n :return: crop position, do_flip\n \"\"\"\n load_width, load_height = load_size\n crop_width, crop_height = crop_size\n crop_x = random.randint(0, np.maximum(0, load_width - crop_width))\n crop_y = random.randint(0, np.maximum(0, load_height - crop_height))\n crop_position = (crop_x, crop_y)\n do_flip = random.random() > 0.5\n return crop_position, do_flip\n","repo_name":"fa9r/TimeCycleGAN","sub_path":"timecyclegan/dataset/video_dataset.py","file_name":"video_dataset.py","file_ext":"py","file_size_in_byte":11282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"23028450488","text":"# coding=utf8\n\n'''\nGiven a binary tree, return the inorder traversal of its nodes' values.\n\nFor example:\nGiven binary tree [1,null,2,3],\n 1\n \\\n 2\n /\n 3\nreturn [1,3,2].\n\nNote: Recursive solution is trivial, could you do it iteratively?\n'''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def inorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n stack = []\n curr = root\n res = []\n while curr or stack:\n while curr:\n stack.append(curr)\n curr = curr.left\n curr = stack.pop()\n res.append(curr.val)\n curr = curr.right\n return res\n","repo_name":"onestarshang/leetcode_onestar","sub_path":"binary-tree-inorder-traversal.py","file_name":"binary-tree-inorder-traversal.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"73377624074","text":"from datetime import datetime\nfrom dateutil.parser import parse\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models, transaction\nfrom django.forms.models import model_to_dict\nfrom django.urls import 
reverse\n\nfrom tom_common.hooks import run_hook\n\nINSTRUMENT_FIELDS = ['code', 'instrument', 'instrument_type', 'state']\n\nINSTRUMENT_TYPES = (\n ('2M0-SCICAM-SPECTRAL', '2m0-SciCam-Spectral'),\n ('2M0-SCICAM-MUSCAT', '2m0-SciCam-MuSCAT'),\n ('2M0-FLOYDS-SCICAM', '2m0-FLOYDS-SciCam'),\n ('1M0-SCICAM-SINISTRO', '1m0-SciCam-Sinistro'),\n ('1M0-NRES-SCICAM', '1m0-NRES-SciCam'),\n ('0M4-SCICAM-SBIG', '0m4-SciCam-SBIG')\n)\n\nINSTRUMENT_STATES = (\n ('DISABLED', 'Disabled'),\n ('MANUAL', 'Manual'),\n ('COMMISSIONING', 'Commissioning'),\n ('STANDBY', 'Standby'),\n ('SCHEDULABLE', 'Schedulable')\n)\n\nclass Instrument(models.Model):\n \"\"\"\n Class representing an instrument in the Calibration-TOM\n\n :param code: The code identifying the instrument, e.g. kb95\n :type code: str\n\n :param instrument: The full site.enc.tel.inst designation of the instrument, e.g. coj.clma.0m4a.kb24\n :type code: str\n\n :param instrument_type: The type of instrument, e.g. 0m4-SciCam-SBIG.\n :type instrument_type: str\n\n :param state: The instrument state, e.g. SCHEDULABLE.\n :type state: str\n\n :param created: The time at which the instrument was created in the Calibration-TOM.\n :type created: datetime\n\n :param modified: The time at which the instrument was changed in the Calibration-TOM.\n :type modified: datetime\n\n :param lat: The latitude of the telescope that has the instrument installed, in degrees.\n :type lat: float\n\n :param lon: The longitude of the telescope that has the instrument installed, in degrees.\n :type lon: float\n\n \"\"\"\n\n\n code = models.CharField(\n max_length=10, default='', verbose_name='Code', help_text='The code identifying the instrument e.g. kb95',\n unique=True\n )\n instrument = models.CharField(\n max_length=100, default='', verbose_name='Instrument', help_text='The full site.enclosure.telescope.code designation of the instrument e.g. coj.clma.0m4a.kb24',\n unique=True\n )\n instrument_type = models.CharField(\n max_length=100, choices=INSTRUMENT_TYPES, verbose_name='Instrument Type', help_text='The type of instrument, e.g. 1m0-SciCam-Sinistro.'\n )\n state = models.CharField(\n max_length=100, choices=INSTRUMENT_STATES, verbose_name='Instrument State', help_text='The instrument state, e.g. SCHEDULABLE.'\n )\n created = models.DateTimeField(\n auto_now_add=True, verbose_name='Time Created', help_text='The time at which the instrument was created in the Calibration-TOM.'\n )\n modified = models.DateTimeField(\n auto_now=True, verbose_name='Last Modified', help_text='The time at which the instrument was last updated in the Calibration-TOM.'\n )\n #telescope = models.CharField(\n # max_length=100, default='', verbose_name='Telescope', help_text='The telescope on which the instrument is installed, e.g. coj.doma.1m0a.'\n #)\n #lat = models.FloatField(\n # null=True, blank=True, verbose_name='Latitude', help_text='Latitude, in degrees.'\n #)\n #lon = models.FloatField(\n # null=True, blank=True, verbose_name='Longitude', help_text='Longitude, in degrees.'\n #)\n\n @transaction.atomic\n def save(self, *args, **kwargs):\n \"\"\"\n Saves Instrument model data to the database. After saving to the database, also runs the\n hook ``instrument_post_save``. 
The hook run is the one specified in ``settings.py``.\n\n        \"\"\"\n\n        created = self.id is None\n        super().save(*args, **kwargs)\n\n        run_hook('instrument_post_save', instrument=self, created=created)\n\n    def __str__(self):\n        return str(self.code)\n\n    def get_absolute_url(self):\n        return reverse('instruments:detail', kwargs={'pk': self.id})\n\n    @property\n    def future_observations(self):\n        \"\"\"\n        Gets all observations scheduled for this ``Instrument``\n\n        :returns: List of ``ObservationRecord`` objects without a terminal status\n        :rtype: list\n        \"\"\"\n        return [\n            obs for obs in self.observationrecord_set.exclude(status='').order_by('scheduled_start') if not obs.terminal\n        ]\n\n    def as_dict(self):\n        \"\"\"\n        Returns dictionary representation of attributes, excluding all attributes not associated with the ``type`` of\n        this ``Instrument``.\n\n        :returns: Dictionary of key/value pairs representing instrument attributes\n        :rtype: dict\n        \"\"\"\n        fields_for_type = INSTRUMENT_FIELDS\n\n        return model_to_dict(self, fields=fields_for_type)\n\n\nclass InstrumentList(models.Model):\n    \"\"\"\n    Class representing a list of instruments in the Calibration-TOM.\n\n    :param name: The name of the instrument list\n    :type name: str\n\n    :param instruments: Set of ``Instrument`` objects associated with this ``InstrumentList``\n\n    :param created: The time at which this instrument list was created.\n    :type created: datetime\n\n    :param modified: The time at which this instrument list was modified in the Calibration-TOM.\n    :type modified: datetime\n    \"\"\"\n    name = models.CharField(max_length=200, help_text='The name of the instrument list.')\n    instruments = models.ManyToManyField(Instrument)\n    created = models.DateTimeField(\n        auto_now_add=True, help_text='The time at which this instrument list was created in the Calibration-TOM.'\n    )\n    modified = models.DateTimeField(\n        auto_now=True, verbose_name='Last Modified',\n        help_text='The time at which this instrument list was changed in the Calibration-TOM.'\n    )\n\n    class Meta:\n        ordering = ('-created', 'name',)\n\n    def __str__(self):\n        return self.name\n","repo_name":"LCOGT/calibration-tom","sub_path":"network/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"9908424637","text":"#!/usr/bin/env python3\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute, Aer, IBMQ\nfrom qiskit.compiler import transpile, assemble\nfrom qiskit.tools.jupyter import *\nfrom qiskit.visualization import *\nfrom qiskit.providers.jobstatus import JOB_FINAL_STATES, JobStatus\nimport time, math, argparse\n\n#\n# qc_helpers.py\n# \n# author: burt rosenberg\n# date: 17 june 2020\n# last update:\n#\n\nargs_g = 0 \n\ndef load_or_save_IBMQ_account(api_token=None):\n\tglobal args_g\n\tif api_token:\n\t\t# only needs to be done once\n\t\tIBMQ.save_account(api_token)\n\tprovider = IBMQ.load_account()\n\treturn provider\n\ndef wait_for_job(backend, job, wait_interval=5):\n\tglobal args_g\n\tretrieved_job = backend.retrieve_job(job.job_id())\n\tstart_time = time.time()\n\tjob_status = job.status()\n\twhile job_status not in JOB_FINAL_STATES:\n\t\tif not args_g.unverbose:\n\t\t\tprint(f'Status @ {time.time() - start_time:0.0f} s: {job_status.name},'\n\t\t\t      f' est. 
queue position: {job.queue_position()}')\n\t\ttime.sleep(wait_interval)\n\t\tjob_status = job.status()\n\ndef list_backends(backends):\n\tglobal args_g\n\tprint('backends available:')\n\tfor be in backends:\n\t\tst = be.status()\n\t\tif st.operational:\n\t\t\tprint(f'\\t{be.name()}, pending jobs:{st.pending_jobs}')\n\ndef match_backend_name(be, be_s, min_len=3):\n\tglobal args_g\n\tif len(be) < min_len:\n\t\treturn \"\"\n\tfor be_t in be_s:\n\t\tif be in be_t:\n\t\t\treturn be_t\n\treturn \"\"\n\ndef parse_args():\n\tglobal args_g\n\tparser = argparse.ArgumentParser(description=\"Quantum circuit to demonstrate superposition\")\n\tparser.add_argument('backend', nargs='?', help='ibmq backend name or partial name')\n\tparser.add_argument(\"-L\", \"--list_backends\", action=\"store_true\", help=\"list the available backends\")\n\tparser.add_argument('-v', '--verbose', action='store_true', help='verbose output')\n\tparser.add_argument('-V', '--unverbose', action='store_true', help='silent output')\n\targs_g = parser.parse_args()\n\treturn args_g\n\n\n","repo_name":"burtr/Workbook","sub_path":"qc-mini/qc_helpers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"16958258272","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom django.core.mail import send_mail\n\nfrom catalog.models import AuthorAndQuote\n\nfrom celery import shared_task\n\n\n@shared_task()\ndef send_email_task(email_address, message):\n    send_mail(\n        'Your email',\n        message,\n        'from@example.com',\n        [email_address],\n        fail_silently=False\n    )\n\n\n@shared_task()\ndef quotes_add():\n    new_quotes = list()\n    for page in range(1, 11):\n        r = requests.get(f'https://quotes.toscrape.com/page/{page}/')\n        soup = BeautifulSoup(r.content, features=\"html.parser\")\n        quotes = soup.find_all(\"div\", {\"class\": \"quote\"})\n        for quote in quotes:\n            if len(new_quotes) >= 5:\n                break\n            if not AuthorAndQuote.objects.filter(quote=quote.find('span', {'class': 'text'}).text).exists():\n                quote_text = quote.find('span', {'class': 'text'}).text\n                author_name = quote.find('small', {'class': 'author'}).text\n                author_link = quote.find('a')['href']\n                link = f\"https://quotes.toscrape.com{author_link}\"\n                rp = requests.get(link)\n                if rp.status_code == 200:\n                    soup_2 = BeautifulSoup(rp.content, features=\"html.parser\")\n                    birth_date = soup_2.find('span', {'class': 'author-born-date'}).text\n                    author_details = ' '.join(soup_2.find('div', {'class': 'author-description'}).text.split())\n                    new_quotes.append([quote_text, author_name, author_details, birth_date])\n        if len(new_quotes) >= 5:\n            break\n    if new_quotes:\n        for el in new_quotes:\n            AuthorAndQuote.objects.create(name=el[1], details=el[2], birth_date=el[3], quote=el[0])\n    else:\n        send_mail(\n            'Your email',\n            'There are no more new quotes',\n            'from@example.com',\n            ['myemail@example.com'],\n            fail_silently=False\n        )\n","repo_name":"l0g1van/django_project","sub_path":"catalog/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"32670532134","text":"import sys\r\nimport os\r\nimport random\r\nfrom PySide2.QtWidgets import (QApplication, QLabel, QPushButton, QVBoxLayout, QWidget, QGridLayout, QLineEdit, QSpacerItem, QRadioButton, QGroupBox, QProgressBar)\r\nfrom PySide2 import QtCore\r\n#from PySide2.QtCore import Slot, Qt, QThread, QObject\r\n#import psutil\r\n#from psutil._common import 
bytes2human\r\nimport wmi\r\nimport subprocess\r\nimport time\r\nfrom threading import Timer, Thread\r\nimport pythoncom\r\n\r\n# Possible fix to the threading issue: https://stackoverflow.com/questions/36434706/pyqt-proper-use-of-emit-and-pyqtsignal\r\n# Also, checkout fbs installer\r\n# Also, https://pythonprogramming.net/menubar-pyqt-tutorial/?completed=/button-functions-pyqt-tutorial/\r\n# Also, possible fix two: https://www.riverbankcomputing.com/static/Docs/PyQt5/signals_slots.html\r\n# TODO: Create a proper build script\r\n\r\nclass mainScreen(QWidget):\r\n def __init__(self):\r\n QWidget.__init__(self)\r\n\r\n self.driveLabel = QLabel('Serial Number:')\r\n self.sizeLabel = QLabel('Size:')\r\n self.statusLabel = QLabel('Status:')\r\n self.partitionLabel = QLabel('Partitions:')\r\n self.indexLabel = QLabel('Index:')\r\n self.checkLabel = QLabel('Master:')\r\n self.progress = QProgressBar()\r\n self.wipeButton = QPushButton('Wipe')\r\n self.cancelButton = QPushButton('Cancel')\r\n self.bottomStatus = QLabel('Ready to Wipe')\r\n self.refactor = QPushButton('Refactor')\r\n\r\n # TODO: Add a group box to make this look better\r\n # EDIT TODO: Use GUI application to build a really good looking app!\r\n # TODO: Add selection of wipe method, verification, and certificate output\r\n\r\n self.layout = QGridLayout()\r\n\r\n self.layout.addWidget(self.driveLabel, 0, 0, 1, 2)\r\n self.layout.addWidget(self.sizeLabel, 0, 3)\r\n self.layout.addWidget(self.statusLabel, 0, 5, 1, 2)\r\n self.layout.addWidget(self.partitionLabel, 0, 7)\r\n self.layout.addWidget(self.indexLabel, 0, 8)\r\n self.layout.addWidget(self.checkLabel, 0, 9)\r\n self.layout.addWidget(self.progress, 26, 0, 1, 10)\r\n self.layout.addWidget(self.wipeButton, 27, 2, 1, 2)\r\n self.layout.addWidget(self.cancelButton, 27, 4, 1, 2)\r\n self.layout.addWidget(self.bottomStatus, 28, 0, 1, 10)\r\n self.layout.addWidget(self.refactor, 27, 0, 1, 2)\r\n self.drivesSet = 0\r\n self.driveNames = []\r\n self.driveStatus = []\r\n self.driveSize = []\r\n self.drivePartitions = []\r\n self.driveIndex = []\r\n self.masterRadio = []\r\n self.master = self.getMaster()\r\n self.progressInt = 10\r\n for i in range(25):\r\n toAdd = QLabel('')\r\n self.driveNames.append(toAdd)\r\n toAdd2 = QLabel('')\r\n self.driveSize.append(toAdd2)\r\n toAdd3 = QLabel('')\r\n self.driveStatus.append(toAdd3)\r\n toAdd4 = QLabel('')\r\n self.drivePartitions.append(toAdd4)\r\n toAdd5 = QLabel('')\r\n self.driveIndex.append(toAdd5)\r\n\r\n self.setWindowTitle('Auto Kill Disk')\r\n #icon = \r\n #self.setWindowIcon()\r\n self.wipeButton.clicked.connect(self.startButtonClicked)\r\n self.refactor.clicked.connect(self.refactorDrives)\r\n\r\n self.worker = refactorThread()\r\n self.worker.refSig.connect(self.refactorDrives)\r\n self.worker.start()\r\n\r\n def addDrive(self, name, status, size, partitions, index):\r\n self.driveNames[self.drivesSet].setText(name)\r\n self.driveStatus[self.drivesSet].setText(status)\r\n self.driveSize[self.drivesSet].setText(size + ' GB')\r\n self.drivePartitions[self.drivesSet].setText(str(partitions))\r\n self.driveIndex[self.drivesSet].setText(str(index))\r\n toAdd = QRadioButton()\r\n self.masterRadio.append(toAdd)\r\n self.layout.addWidget(self.masterRadio[self.drivesSet], self.drivesSet + 1, 9)\r\n if int(index) == int(self.master):\r\n self.masterRadio[self.drivesSet].setChecked(True)\r\n self.drivesSet += 1 \r\n\r\n def addPayloadNames(self):\r\n for i in range(25):\r\n self.layout.addWidget(self.driveNames[i], i + 1, 0, 1, 2)\r\n 
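            # Grid columns mirror the header row built in __init__: 0-1 serial
            # number, 3 size, 5-6 status, 7 partitions, 8 index (column 9 holds
            # the master radio buttons added per drive in addDrive).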
self.layout.addWidget(self.driveStatus[i], i + 1, 5, 1, 2)\r\n self.layout.addWidget(self.driveSize[i], i + 1, 3)\r\n self.layout.addWidget(self.drivePartitions[i], i + 1, 7)\r\n self.layout.addWidget(self.driveIndex[i], i + 1, 8)\r\n\r\n def resetSpacing(self):\r\n self.layout.setContentsMargins(10, 10, 0, 10)\r\n\r\n def startButtonClicked(self):\r\n self.bottomStatus.setText('Are you sure you want to wipe?')\r\n self.confirmButton = QPushButton('Confirm')\r\n self.layout.addWidget(self.confirmButton, 27, 6, 1, 2)\r\n self.setLayout(self.layout)\r\n\r\n self.confirmButton.clicked.connect(self.getIndex)\r\n\r\n def setText(self):\r\n self.bottomStatus.setText('Ready to Wipe')\r\n\r\n def getIndex(self):\r\n self.confirmButton.deleteLater()\r\n self.bottomStatus.setText('Starting Wipe')\r\n self.indexToWipe = []\r\n self.serialToWipe = []\r\n for i in range(len(self.masterRadio)):\r\n if not self.masterRadio[i].isChecked():\r\n self.indexToWipe.append(self.index[i])\r\n self.serialToWipe.append(self.serial[i])\r\n if len(self.indexToWipe) == len(self.index):\r\n self.bottomStatus.setText('Error: No master drive selected!')\r\n self.refactorDrives()\r\n t = Timer(3, self.setText)\r\n t.start()\r\n return None\r\n elif len(self.indexToWipe) == 0:\r\n self.refactorDrives()\r\n self.bottomStatus.setText('No drives available to wipe!')\r\n t = Timer(3, self.setText)\r\n t.start()\r\n else:\r\n serialString = self.serialToWipe[0]\r\n for i in range(1, len(self.serialToWipe)):\r\n serialString += ', ' + self.serialToWipe[i]\r\n self.bottomStatus.setText('Wiping Drives:' + serialString)\r\n self.progress.setValue(self.progressInt)\r\n for i in range(len(self.masterRadio)):\r\n # TODO: Figure out which index is the master so we don't wipe it\r\n if self.masterRadio[i].isChecked():\r\n self.driveStatus[i].setText('MASTER')\r\n else:\r\n self.driveStatus[i].setText('WIPING')\r\n wipeDrives(self, self.indexToWipe)\r\n\r\n def refactorDrives(self):\r\n pythoncom.CoInitialize()\r\n for i in range(len(self.driveNames)):\r\n self.driveNames[i].setText('')\r\n self.driveStatus[i].setText('')\r\n self.driveSize[i].setText('')\r\n self.drivePartitions[i].setText('')\r\n self.driveIndex[i].setText('')\r\n for i in range(len(self.masterRadio)):\r\n self.masterRadio[i].deleteLater()\r\n self.setLayout(self.layout)\r\n del self.masterRadio\r\n self.masterRadio = []\r\n self.drivesSet = 0\r\n for i in range(25):\r\n toAdd = QLabel('')\r\n self.driveNames.append(toAdd)\r\n toAdd2 = QLabel('')\r\n self.driveSize.append(toAdd2)\r\n toAdd3 = QLabel('')\r\n self.driveStatus.append(toAdd3)\r\n toAdd4 = QLabel('')\r\n self.drivePartitions.append(toAdd4)\r\n toAdd5 = QLabel('')\r\n self.driveIndex.append(toAdd5)\r\n self.addPayloadNames()\r\n self.setLayout(self.layout)\r\n getDisks(self)\r\n self.addPayloadNames()\r\n self.setLayout(self.layout)\r\n\r\n def getMaster(self):\r\n exists = os.path.isfile('config.txt')\r\n if not exists:\r\n self.bottomStatus.setText('Error: Config file not found. 
Defaulting to master drive with index 0.')\r\n            t = Timer(5, self.setText)\r\n            t.start()\r\n            return 0\r\n        with open('config.txt', 'r') as fh:\r\n            for line in fh:\r\n                toSet = line.strip()\r\n        self.bottomStatus.setText('Loading complete')\r\n        t = Timer(5, self.setText)\r\n        t.start()\r\n        return toSet\r\n\r\nclass refactorThread(QtCore.QThread):\r\n    refSig = QtCore.Signal()\r\n\r\n    def __init__(self):\r\n        QtCore.QThread.__init__(self)\r\n\r\n    def run(self):\r\n        while True:\r\n            self.refSig.emit()\r\n            # Make sure the time doesn't go much lower than 1 or the application will stop responding\r\n            time.sleep(1)\r\n\r\n#Comes from https://github.com/giampaolo/psutil/blob/master/scripts/disk_usage.py\r\ndef getDisks(window):\r\n    counter = 0\r\n    window.index = []\r\n    window.serial = []\r\n    c = wmi.WMI()\r\n    for pm in c.Win32_DiskDrive():\r\n        #print(pm.SerialNumber, pm.Name, pm.Size, pm.Status, pm.SystemName, pm.Signature, pm.Partitions)\r\n        window.addDrive(pm.SerialNumber.strip(' '), pm.Status, str(round( (int(pm.Size) / 1000000000), 2)), pm.Partitions, pm.Index)\r\n        window.index.append(pm.Index)\r\n        window.serial.append(pm.SerialNumber.strip(' '))\r\n\r\ndef wipeDrives(parent, indexList):\r\n    print('Running')\r\n    initialFiles = fileCount()\r\n    increment = 90 / len(indexList)\r\n    for index in indexList:\r\n        #string = 'KillDisk -erasemethod=[3] -passes=[3] -verification=[10] -retryattempts=[5] -wipehdd=[' + index.strip(' ') + '] -noconfirmation'\r\n        string = 'C:/\"Program Files\"/\"LSoft Technologies\"/\"Active@ KillDisk Ultimate 11\"/KillDisk.exe -erasemethod=2 -passes=3 -verification=25 -retryattempts=5 -erasehdd=' + str(index) + ' -cp=C:/Users/%USERNAME%/Desktop/\"Certificate Output\"/ -nc -bm\\n'\r\n        p = subprocess.Popen(string, stdout=subprocess.PIPE, shell=True)\r\n        (output, err) = p.communicate()\r\n        p_status = p.wait()\r\n        files = fileCount()\r\n        if files != initialFiles:\r\n            parent.progressInt += increment\r\n            parent.progress.setValue(parent.progressInt)\r\n            initialFiles += 1\r\n        else:\r\n            # Poll until KillDisk has written a new certificate file\r\n            while True:\r\n                time.sleep(3)\r\n                files = fileCount()\r\n                if files != initialFiles:\r\n                    parent.progressInt += increment\r\n                    parent.progress.setValue(parent.progressInt)\r\n                    initialFiles += 1\r\n                    break\r\n\r\ndef fileCount():\r\n    fileCount = 0\r\n    for _, dirs, files in os.walk('C:/Users/%USERNAME%/Desktop/\"Certificate Output\"/'):\r\n        fileCount += len(files)\r\n    return fileCount\r\n\r\nif __name__ == '__main__':\r\n    main = QApplication(sys.argv)\r\n    window = mainScreen()\r\n\r\n    getDisks(window)\r\n\r\n    window.resetSpacing()\r\n    window.addPayloadNames()\r\n    window.setLayout(window.layout)\r\n    window.setFixedSize(500, 600)\r\n\r\n    window.show()\r\n\r\n    sys.exit(main.exec_())\r\n","repo_name":"DMoore12/AutoKillDisk","sub_path":"autoKillDisk.py","file_name":"autoKillDisk.py","file_ext":"py","file_size_in_byte":10935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"34705595148","text":"# Definition for a point\n# class Point:\n#     def __init__(self, a=0, b=0):\n#         self.x = a\n#         self.y = b\n\nclass Solution:\n    # @param points, a list of Points\n    # @return an integer\n    def maxPoints(self, points):\n        max = 0\n        for pi in points:\n            max_temp = 0\n            same_element = 0\n            slopes = dict()\n            for pj in points:\n                slope = 0\n                if pi.y == pj.y and pi.x == pj.x:\n                    same_element = same_element + 1\n                    continue\n                if pi.y == pj.y :\n                    slope = 0\n                elif pi.x == pj.x:\n                    slope = float(\"inf\")\n                else:\n                    slope = 
(pi.y-pj.y)/(pi.x-pj.x+0.0)\n count = slopes.get(slope, 0) + 1 \n slopes.update({slope:count})\n if count > max_temp:\n max_temp = count\n max_temp = max_temp + same_element\n if max < max_temp:\n max = max_temp\n \n return max\n","repo_name":"wuliaoaaa/leetcode","sub_path":"Max Points on a Line.py","file_name":"Max Points on a Line.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"2839467782","text":"# -*- coding: utf-8 -*-\n\nfrom sqpdfo.runtime import *\nfrom numpy import inf, arange\nfrom copy import copy\n\nfrom sqpdfo.sqpdfo_compute_multiplier import sqpdfo_compute_multiplier_\nfrom sqpdfo.sqpdfo_optimality import sqpdfo_optimality_\nfrom sqpdfo.sqpdfo_iter_printout import sqpdfo_iter_printout_\nfrom sqpdfo.sqpdfo_solve_TR_bc import sqpdfo_solve_TR_bc_\nfrom sqpdfo.sqpdfo_augmX_evalf import sqpdfo_augmX_evalf_\nfrom sqpdfo.bcdfo_augment_Y import bcdfo_augment_Y_\nfrom sqpdfo.sqpdfo_swap_in_Y import sqpdfo_swap_in_Y_\nfrom sqpdfo.bcdfo_computeP import bcdfo_computeP_\nfrom sqpdfo.bcdfo_gradP import bcdfo_gradP_\nfrom sqpdfo.bcdfo_projgrad import bcdfo_projgrad_\nfrom sqpdfo.sqpdfo_computeHessian import sqpdfo_computeHessian_\nfrom sqpdfo.bcdfo_poisedness_Y import bcdfo_poisedness_Y_\nfrom sqpdfo.bcdfo_repair_Y import bcdfo_repair_Y_\nfrom sqpdfo.bcdfo_find_new_yj import bcdfo_find_new_yj_\nfrom sqpdfo.bcdfo_replace_in_Y import bcdfo_replace_in_Y_\nfrom sqpdfo.sqpdfo_find_smallf import sqpdfo_find_smallf_\nfrom sqpdfo.bcdfo_include_in_Y import bcdfo_include_in_Y_\nfrom numpy import array, zeros, concatenate, zeros_like\nimport sqpdfo.sqpdfo_global_variables as glob\n\n\ndef sqpdfo_main_(func_=None,n_=None,nb_=None,mi_=None,me_=None,lm_=None,nitold_=None,nit_=None,\\\n i_xbest_=None,lb_=None,ub_=None,m_=None,X_=None,fX_=None,ciX_=None,ceX_=None,\\\n ind_Y_=None,QZ_=None,RZ_=None,delta_=None,cur_degree_=None,neval_=None,\\\n maxeval_=None,maxit_=None,fcmodel_=None,gx_=None,normgx_=None,show_errg_=None,\\\n pquad_=None,pdiag_=None,plin_=None,stallfact_=None,eps_rho_=None,Deltamax_=None,\\\n rep_degree_=None,epsilon_=None,verbose_=None,eta1_=None,eta2_=None,gamma1_=None,\\\n gamma2_=None,gamma3_=None,interpol_TR_=None,factor_CV_=None,Lambda_XN_=None,\\\n Lambda_CP_=None,factor_FPU_=None,factor_FPR_=None,Lambda_FP_=None,\\\n criterion_S_=None,criterion_FP_=None,criterion_CP_=None,mu_=None,theta_=None,\\\n eps_TR_=None,eps_L_=None,lSolver_=None,stratLam_=None,eps_current_=None,\\\n vstatus_=None,xstatus_=None,sstatus_=None,dstatus_=None,ndummyY_=None,\\\n sspace_save_=None,xspace_save_=None,xfix_=None,fxmax_=None,poised_model_=None,\\\n M_=None,kappa_ill_=None,kappa_th_=None,eps_bnd_=None,poised_=None,Y_radius_=None,\\\n c_=None,level_=None,whichmodel_=None,hardcons_=None,noisy_=None,scaleX_=None,\\\n scalefacX_=None,CNTsin_=None,shrink_Delta_=None,scale_=None,shift_Y_=None,\\\n info_=None,options_=None,values_=None,*args,**kwargs):\n\t\n ###############################################################################\n # Main optimization loop for SQPDFO.\n ###############################################################################\n\n func=copy(func_)\n n=copy(n_)\n nb=copy(nb_)\n mi=copy(mi_)\n me=copy(me_)\n lm=copy(lm_)\n nitold=copy(nitold_)\n nit=copy(nit_)\n i_xbest=copy(i_xbest_)\n lb=copy(lb_)\n ub=copy(ub_)\n m=copy(m_)\n X=copy(X_)\n fX=copy(fX_)\n ciX=copy(ciX_)\n ceX=copy(ceX_)\n ind_Y=copy(ind_Y_)\n QZ=copy(QZ_)\n RZ=copy(RZ_)\n delta=copy(delta_)\n 
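    # (continued) each input argument is deep-copied so that the caller's data
    # is never modified in place; the pattern appears to stem from this code's
    # MATLAB origin (cf. the sqpdfo.runtime helpers), where arguments are
    # passed by value.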
cur_degree=copy(cur_degree_)\n neval=copy(neval_)\n maxeval=copy(maxeval_)\n maxit=copy(maxit_)\n fcmodel=copy(fcmodel_)\n gx=copy(gx_)\n normgx=copy(normgx_)\n show_errg=copy(show_errg_)\n pquad=copy(pquad_)\n pdiag=copy(pdiag_)\n plin=copy(plin_)\n stallfact=copy(stallfact_)\n eps_rho=copy(eps_rho_)\n Deltamax=copy(Deltamax_)\n rep_degree=copy(rep_degree_)\n epsilon=copy(epsilon_)\n verbose=copy(verbose_)\n eta1=copy(eta1_)\n eta2=copy(eta2_)\n gamma1=copy(gamma1_)\n gamma2=copy(gamma2_)\n gamma3=copy(gamma3_)\n interpol_TR=copy(interpol_TR_)\n factor_CV=copy(factor_CV_)\n Lambda_XN=copy(Lambda_XN_)\n Lambda_CP=copy(Lambda_CP_)\n factor_FPU=copy(factor_FPU_)\n factor_FPR=copy(factor_FPR_)\n Lambda_FP=copy(Lambda_FP_)\n criterion_S=copy(criterion_S_)\n criterion_FP=copy(criterion_FP_)\n criterion_CP=copy(criterion_CP_)\n mu=copy(mu_)\n theta=copy(theta_)\n eps_TR=copy(eps_TR_)\n eps_L=copy(eps_L_)\n lSolver=copy(lSolver_)\n stratLam=copy(stratLam_)\n eps_current=copy(eps_current_)\n vstatus=copy(vstatus_)\n xstatus=copy(xstatus_)\n sstatus=copy(sstatus_)\n dstatus=copy(dstatus_)\n ndummyY=copy(ndummyY_)\n sspace_save=copy(sspace_save_)\n xspace_save=copy(xspace_save_)\n xfix=copy(xfix_)\n fxmax=copy(fxmax_)\n poised_model=copy(poised_model_)\n M=copy(M_)\n kappa_ill=copy(kappa_ill_)\n kappa_th=copy(kappa_th_)\n eps_bnd=copy(eps_bnd_)\n poised=copy(poised_)\n Y_radius=copy(Y_radius_)\n c=copy(c_)\n level=copy(level_)\n whichmodel=copy(whichmodel_)\n hardcons=copy(hardcons_)\n noisy=copy(noisy_)\n scaleX=copy(scaleX_)\n scalefacX=copy(scalefacX_)\n CNTsin=copy(CNTsin_)\n shrink_Delta=copy(shrink_Delta_)\n scale=copy(scale_)\n shift_Y=copy(shift_Y_)\n info=copy(info_)\n options=copy(options_)\n values=copy(values_)\n\n # Initialization\n \n old_delta = copy(delta); # for printing\n sigma = 1; # initial penalty parameter\n rho_factor = 0.3; # it is imposed that pred/vred >= rho_factor*sigmab \n # (must be in (0,1))\n tau1 = copy(gamma2); # trust radius reduction factor if out of domain\n tau2 = copy(gamma3); # good trust radius augmentation factor \n # when active and rho is >= eta1\n tau3 = 5; # extra trust radius augmentation factor \n # when active and rho is >= eta2\n \n nbr_slacks = glob.get_nbr_slacks()\n sl = glob.get_slacks()\n slplus = zeros_like(sl)\n\n constrained_pbl=copy(me)\n null_step=0\n ce = info.ce\n if nbr_slacks:\n merit=info.f + sigma * \\\n norm_(ce - concatenate((zeros((len(ce)-nbr_slacks,1)),sl**2)))\n else:\n merit=info.f + sigma * norm_(info.ce)\n msg='Unexpected message from sqpdfo_main'\n m=size_(X,2)-1\n indfree=find_(vstatus == c.free)\n indfix=find_(vstatus >= c.fixed)\n nfix=length_(indfix)\n Y=X[indfree,ind_Y]\n x=X[indfree,i_xbest]\n \n if not isempty_(indfree):\n indfree=indfree.reshape(-1)\n if not isempty_(indfix):\n indfix=indfix.reshape(-1)\n \n n=size_(x,1)\n fY=fX[ind_Y]\n fx=fX[i_xbest]\n itype=' '\n pc=0\n s=zeros((size_(x)))\n norms=0\n pred=0\n ciplus=array([])\n ceplus=array([])\n \n if mi > 0:\n ciY=copy(ciX[:,ind_Y])\n else:\n ciY=array([])\n gci=array([])\n \n if me > 0:\n ceY=copy(ceX[:,ind_Y])\n else:\n ceY=array([])\n gce=array([])\n \n if options.final_degree == values.quadratic:\n pfinal = pquad\n elif options.final_degree == values.diagonal:\n pfinal = pdiag\n elif options.final_degree == values.linear:\n pfinal = plin\n\n ##########################################################################\n # Begin main loop\n ##########################################################################\n\n radius_has_been_rejected=copy(False)\n \n 
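    # Each pass through the loop: (1) check the stopping criteria (gradient of
    # the Lagrangian, feasibility, complementarity) and repair or augment the
    # interpolation set Y if the model may be inaccurate; (2) solve the
    # trust-region subproblem for a trial step; (3) evaluate the functions at
    # the trial point and accept or reject it from the ratio rho = ared/pred
    # of the merit function, updating the trust-region radius and the Hessian
    # approximation accordingly.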
while 1:\n \n # Stop on counter.\n\n if info.niter >= options.miter:\n info.flag=values.stop_on_max_iter\n\n # Final printout\n\n sqpdfo_iter_printout_(info,old_delta,norms,pc,itype,values,nb,mi,\\\n options,constrained_pbl,merit)\n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n if info.nsimul[1] >= options.msimul:\n \n info.flag=values.stop_on_max_simul\n\n # Final printout\n\n sqpdfo_iter_printout_(info,old_delta,norms,pc,itype,values,nb,mi,\\\n options,constrained_pbl,merit)\n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n # if radius_has_been_rejected == false:\n\n xk=copy(x)\n\n #--------------------------------------------------------------------\n # Check stopping criteria.\n #--------------------------------------------------------------------\n \n # Compute feasibility and complementarity.\n \n lbounds=- inf * ones_(size_(x))\n ubounds=inf * ones_(size_(x))\n ilb=(abs(lb[indfree] - x) < 1e-05).reshape(-1)\n iub=(abs(ub[indfree] - x) < 1e-05).reshape(-1)\n lbounds[ilb]=lb[indfree[ilb]]\n ubounds[iub]=ub[indfree[iub]]\n \n lm,info=\\\n sqpdfo_compute_multiplier_(x,lbounds,ubounds,info,options,values,nargout=2)\n \n feas,comp,info=\\\n sqpdfo_optimality_(x,lm,lb[indfree],ub[indfree],info,options,nargout=3)\n \n if info.flag:\n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n info.glagn=norm_(info.glag,inf)\n info.feasn=norm_(feas,inf)\n info.compl=norm_(comp,inf)\n \n if (info.niter > 0) and (options.verbose >= 3):\n \n fprintf_(options.fout,'\\nOptimality:\\n')\n \n if constrained_pbl:\n fprintf_(options.fout,' |grad Lag| = %12.5e\\n'%(info.glagn))\n fprintf_(options.fout,' feasibility = %12.5e\\n'%(info.feasn))\n else:\n fprintf_(options.fout,' |grad f| = %12.5e\\n'%(info.glagn))\n\n # Stop on convergence.\n\n if ((info.glagn <= options.tol_grad) and (info.feasn <= options.tol_feas) \\\n and (info.compl <= options.tol_bnds)) or (delta<1e-10) or (pred == - 1.0):\n\n # check for accuracy and improve if necessary\n\n augment=rep_degree - cur_degree\n\n # No additional interpolation point is required for non-terminal repair.\n\n if (augment <= 0):\n\n # Compute poisedness of the interpolation set Y\n\n poised,Y_radius=\\\n bcdfo_poisedness_Y_(QZ,RZ,Y,eps_L,x,lSolver,whichmodel,hardcons,\\\n lb,ub,indfree,stratLam,scale,shift_Y,nargout=2)\n \n poisedness_known=1\n\n # Compute gradient accuracy \n\n errg=poised * Y_radius / factor_CV\n \n if options.verbose >= 3:\n disp_('error on gradient before set improvement = ',str(errg))\n \n # Check whether convergence tolerances are satisfied\n \n if (((info.glagn <= options.tol_grad) \\\n and (info.feasn <= options.tol_feas) \\\n and (info.compl <= options.tol_bnds) and errg <= epsilon)) \\\n and level=='toplevel':\n \n info.niter=info.niter + 1\n \n itype='conv'\n\n # Final printout\n\n sqpdfo_iter_printout_(info,old_delta,norms,pc,itype,values,\\\n nb,mi,options,constrained_pbl,merit)\n \n info.flag=values.success\n \n msg='Convergence in '+str(neval)+\\\n ' evaluations of the objective function.'\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n 
cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n else:\n \n info.niter=info.niter + 1\n\n #iteration printout\n \n sqpdfo_iter_printout_(info,old_delta,norms,pc,itype,values,\\\n nb,mi,options,constrained_pbl,merit)\n\n # Not at a solution: improve the interpolation set.\n\n if options.verbose >= 3:\n disp_('not immediately converged - improve set!')\n \n itype='impr'\n\n # Reduce eps_current if repair degree is reached.\n\n if (augment <= 0):\n \n if (pred == -1):\n \n # in case of not yet converged feasibility problem\n if (info.feasn > options.tol_feas):\n eps_current = mu * delta\n \n # else improve in mu * eps_current\n else:\n eps_current = min_(mu * eps_current, epsilon)\n \n else:\n eps_current = epsilon\n\n # Rebuild a poised model in the eps_current ball, and ...\n\n if (normgx <= epsilon or pred == -1):\n\n # ... remove far points (with strict criterion: farfact = 1, forcing all\n # new interpolation points to be within distance epsilon)\n\n effective_FPR=1\n else:\n\n # ... remove far points (with looser criterion)\n\n effective_FPR=copy(factor_FPR)\n\n # One needs to add new interpolation points to reach the desired degree.\n # (only entered if rep_degree is higher than linear!)\n\n if (augment > 0):\n \n itype='augm'\n \n # If gradient small, find a new point in the epsilon-environment, \n # not in Delta (distinguish between infty-norm and 2-norm local solver)\n \n if (info.glagn <= epsilon):\n \n if (lSolver == 2):\n delta=epsilon / sqrt_(n)\n eps_current=epsilon / sqrt_(n)\n else:\n delta=copy(epsilon)\n eps_current=copy(epsilon)\n\n # Pick a random interpolation point.\n\n ynew=- delta * ones_(n,1) + 2 * delta * rand_(n,1)\n \n # add the random point to the set Y\n \n cur_degree,QZ,RZ,Y,xbase,scale=\\\n bcdfo_augment_Y_(ynew,Y[:,0:cur_degree],whichmodel,\\\n shift_Y,delta,normgx,kappa_ill,nargout=6)\n \n ind_Y[cur_degree-1]=cur_degree-1\n \n # Find an optimal new point to replace the random point\n\n if (hardcons):\n ynew,improvement=\\\n bcdfo_find_new_yj_bc_(QZ,RZ,Y,cur_degree,delta,eps_L,xbase,\\\n lSolver,whichmodel,xl,xu,indfree,stratLam,\\\n scale,shift_Y,nargout=2)\n else:\n ynew,improvement=\\\n bcdfo_find_new_yj_(QZ,RZ,Y,cur_degree,delta,eps_L,xbase,\\\n lSolver,whichmodel,scale,shift_Y,nargout=2)\n \n # replace the random point by the new point in Y \n \n QZ,RZ,Y,xbase,scale=\\\n bcdfo_replace_in_Y_(QZ,RZ,ynew,Y,cur_degree,xbase,whichmodel,\\\n scale,shift_Y,delta,normgx,kappa_ill,nargout=5)\n \n replaced=array([cur_degree-1])\n\n # The current interpolation set has the requested degree.\n\n else:\n\n # If gradient small, repair in epsilon-radius, else in Delta\n # (distinguish between infty-norm and 2-norm local solver)\n\n if pred == -1:\n radius = min_(delta, eps_current)\n elif (info.glagn <= factor_CV * epsilon):\n if (lSolver == 2):\n radius=min_(delta / sqrt_(n),epsilon / sqrt_(n))\n else:\n radius=min_(delta, epsilon)\n else:\n radius=max_(delta,eps_current)\n \n # Check that the trust-region radius has not become so small that a \n # repair step of this size will not be meaningful.\n \n if (radius < stallfact * norm_(x) or radius < epsilon * 1e-5):\n \n msg='Algorithm stopped after '+str(neval)+\\\n ' evaluations of the objective function because Delta small.'\n \n info.flag=values.stop_on_small_trust_region\n \n # final printout\n \n sqpdfo_iter_printout_(info,radius,norms,pc,itype,values,nb,mi,\\\n options,constrained_pbl,merit)\n \n return 
nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n\n # repair Y\n \n QZ,RZ,Y,replaced,poised,Y_radius,x,scale=\\\n bcdfo_repair_Y_(QZ,RZ,Y,radius,effective_FPR,Lambda_FP,Lambda_CP,\\\n eps_L,x,lSolver,whichmodel,hardcons,lb,ub,indfree,\\\n stratLam,scale,shift_Y,normgx,kappa_ill,nargout=8)\n \n if options.verbose >= 3:\n disp_('improve interpolation set (in radius = ',str(radius),\\\n ') : replaced = ',str(replaced),', poised = ',str(poised),\\\n ', Y_radius = ',str(Y_radius))\n \n if (options.verbose >= 4):\n poised,Y_radius=\\\n bcdfo_poisedness_Y_(QZ,RZ,Y,eps_L,x,lSolver,whichmodel,\\\n hardcons,lb,ub,indfree,stratLam,scale,shift_Y,nargout=2)\n \n disp_(' poisedness(Y) = ',str(poised))\n \n poised_model=1\n\n # Compute the corresponding function values.\n\n for i in range(0,length_(replaced)):\n \n j=int(replaced[i])\n\n # Set index of new point and update status of the old point\n\n m=m + 1\n xstatus[ind_Y[j]]=c.unused\n ind_Y[j]=m\n \n try:\n xstatus[m]=c.inY\n except IndexError:\n concatenate_([xstatus,[m]],axis=1)\n\n # Update X and evaluate function\n\n X,fX,ciX,ceX,neval,xstatus,sstatus,dstatus,info,outdic=\\\n sqpdfo_augmX_evalf_(func,Y[:,[j]],m,X,fX,ciX,ceX,nfix,xfix,\\\n indfix,indfree,fxmax,neval,xstatus,c.inY,\\\n sstatus,dstatus,scaleX,scalefacX,info,\\\n options,values,nargout=10)\n \n fY[j]=fX[m]\n \n if mi > 0:\n ciY[:,j]=copy(ciX[:,m].T)\n if me > 0:\n ceY[:,j]=copy(ceX[:,m].T)\n \n poised_model=0\n \n if msg=='Error':\n \n if level=='toplevel':\n disp_(msg)\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n\n # Move to the best point found, if different from x.\n\n i_xold=copy(i_xbest)\n x,fx,QZ,RZ,Y,fY,ciY,ceY,ind_Y,i_xbest,scale,info=\\\n sqpdfo_find_smallf_(c,QZ,RZ,Y,fY,ciY,ceY,ind_Y,i_xbest,cur_degree,\\\n indfree,x,lb,ub,fx,dstatus,whichmodel,scale,shift_Y,\\\n delta,normgx,kappa_ill,sigma,info,nargout=12)\n\n # Compute new model(s).\n\n fcmodel=bcdfo_computeP_(QZ,RZ,Y,concatenate_([fY.reshape(1,-1),ciY,ceY]),\\\n whichmodel,fcmodel,ind_Y,i_xold,m,gx,scale,shift_Y)\n\n # Compute the gradient(s) of the new model(s).\n\n gx=bcdfo_gradP_(fcmodel[[0],:],x,x,scale,shift_Y)\n normgx,_=bcdfo_projgrad_(n,x,gx,lb[indfree],ub[indfree])\n \n if mi > 0:\n gci=zeros_(mi,n)\n for i in range(0,mi):\n gci[i,:]=bcdfo_gradP_(fcmodel[[1 + i],:],x,x,scale,shift_Y).T\n if me > 0:\n gce=zeros_(me,n)\n for i in range(0,me):\n gce[i,:]=bcdfo_gradP_(fcmodel[[1 + mi + i],:],x,x,scale,shift_Y).T\n\n # Update Hessian approximation (and gradients in info.g, info.ai, info.ae)\n \n M,pc,info=\\\n sqpdfo_computeHessian_(func,x,null_step,constrained_pbl,lm,M,n,me,mi,s,\\\n gx,gci,gce,info,options,values,fcmodel,Y,fY,ciY,\\\n ceY,sigma,scale,shift_Y,QZ,RZ,whichmodel,ind_Y,\\\n i_xbest,m,nargout=3)\n\n # Terminate if the solution has been found.\n \n # Compute feasibility and complementarity.\n \n lbounds=- inf * ones_(size_(x))\n ubounds=inf * ones_(size_(x))\n ilb=(abs(lb[indfree] - x) < 1e-05).reshape(-1)\n iub=(abs(ub[indfree] - x) < 1e-05).reshape(-1)\n lbounds[ilb]=lb[indfree[ilb]]\n ubounds[iub]=ub[indfree[iub]]\n \n # compute multiplier\n \n lm,info=\\\n sqpdfo_compute_multiplier_(x,lbounds,ubounds,info,options,values,nargout=2)\n \n # compute optimality\n \n feas,comp,info=\\\n 
sqpdfo_optimality_(x,lm,lb[indfree],ub[indfree],info,options,nargout=3)\n \n if info.flag:\n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n info.glagn=norm_(info.glag,inf)\n info.feasn=norm_(feas,inf)\n info.compl=norm_(comp,inf)\n \n if (info.niter > 0) and (options.verbose >= 3):\n fprintf_(options.fout,'\\nOptimality:\\n')\n \n if constrained_pbl:\n fprintf_(options.fout,' |grad Lag| = %12.5e\\n'%(info.glagn))\n fprintf_(options.fout,' feasibility = %12.5e\\n'%(info.feasn))\n else:\n fprintf_(options.fout,' |grad f| = %12.5e\\n'%(info.glagn))\n\n # Stop on convergence.\n\n errg=poised * Y_radius / factor_CV\n \n if options.verbose >= 3:\n disp_('error on gradient after set improvement = ',str(errg))\n \n # check whether convergence tolerances are satisfied \n \n if (info.glagn / factor_CV <= options.tol_grad)\\\n and (info.feasn / factor_CV <= options.tol_feas)\\\n and (info.compl / factor_CV <= options.tol_bnds) and errg <= epsilon\\\n and cur_degree >= rep_degree and level=='toplevel':\n \n info.niter=info.niter + 1\n \n itype='conv' \n \n sqpdfo_iter_printout_(info,old_delta,norms,pc,itype,values,nb,mi,\\\n options,constrained_pbl,merit)\n \n msg='Convergence in '+str(neval)+\\\n ' evaluations of the objective function.'\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n if options.verbose >= 3:\n disp_('not converged after improvement of interpolation set')\n\n # Reset the radius to a multiple of ||gx||.\n\n delta=copy(radius)\n\n #if ( augment <= 0 ):\n # delta = min( min( theta * normgx, epsilon ), Deltamax )\n\n #-----------------------------------------------------------------------\n # Globalization\n #-----------------------------------------------------------------------\n \n if not(radius_has_been_rejected):\n f0 = info.f; # memorize f at the current iterate, \n # useful in the sufficient decrease condition\n if nbr_slacks: \n ce0 = copy(info.ce) - \\\n concatenate((zeros((len(info.ce)-nbr_slacks,1)),sl**2))\n else:\n ce0 = info.ce; # memorize ce at the current iterate in\n # case of step rejection\n ce0n = norm_(ce0);\n merit0 = f0 + sigma * ce0n; # inital value of the merit function, \n # for the given sigma\n prec_r = options.tol_feas/10; # initial precision for the restoration\n # problem\n prec_t = options.tol_grad/10; # initial precision for the tangent \n # problem\n\n if options.verbose >= 5:\n fprintf_(options.fout,'\\nStep computation: merit = %12.5e\\n'%(merit0))\n \n if options.verbose == 4:\n fprintf_(options.fout,' radius |r| |t| |s|',\\\n ' sigma rho\\n')\n \n info.niter=info.niter + 1\n\n #-----------------------------------------------------------------------\n # Iteration printout\n #-----------------------------------------------------------------------\n \n sqpdfo_iter_printout_(info,old_delta,norms,pc,itype,values,nb,mi,\\\n options,constrained_pbl,merit)\n \n if options.verbose >= 5:\n fprintf_(options.fout,' Trust radius = %8.2e\\n'%(delta))\n\n # -----------------------------------------------\n # Compute new trial point in TR and inside bounds\n # -----------------------------------------------\n\n # Compute Hessian of the interpolation model\n \n #M = bcdfo_hessP( fcodel(1,:), x, x, scale, shift_Y );\n \n # solve trust-region subproblem in delta\n \n 
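        # sqpdfo_solve_TR_bc_ computes the trial point with a composite step:
        # a restoration part (solved to precision prec_r) that reduces the
        # constraint violation and a tangent part (precision prec_t) that
        # reduces the model of the objective, both within the trust region and
        # the bounds. It also returns rpred, the predicted decrease in
        # constraint violation, used below to keep the penalty parameter
        # sigma >= (g's + s'Ms/2) / ((1 - rho_factor)*rpred).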
old_delta=copy(delta)\n\n xnew,deltaTR,rpred,active_r,active_t,lm_computed,lm,info,slplus=\\\n sqpdfo_solve_TR_bc_(func,x,lb,ub,delta,mi,me,M,\\\n prec_r,prec_t,info,options,values,\\\n radius_has_been_rejected,lm,ceY,ciY,gx,indfree,\\\n nargout=9)\n \n # check for error\n\n if info.flag == values.fail_unexpected:\n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n\n # ---------------------\n # Full step computation\n # ---------------------\n\n #s=xnew - xk\n #x=copy(xk)\n s=xnew-x\n \n if nbr_slacks:\n norms=norm_(concatenate((s,slplus-sl)))\n else:\n norms=norm_(s)\n \n if options.verbose >= 3:\n fprintf_(options.fout,' Full step:\\n |s| = %8.2e\\n'%(norms))\n\n # -----------------------------\n # Compute new penalty parameter\n # -----------------------------\n\n qcost=info.g.T.dot(s) + 0.5 * (s.T.dot(M.dot(s)))\n \n if rpred == 0:\n sigmab=0.0\n else:\n sigmab=qcost / ((1 - rho_factor) * rpred)\n \n if sigma < sigmab: \n sigma=max_(sigmab,1.5 * sigma)\n \n if sigma > 1e+299:\n fprintf_(options.fout,'\\n### sqpdfo_main: Penalty parameter (sigma): %15.8e '\\\n 'is too big\\n\\n'%(sigma))\n \n info.flag=values.fail_unexpected\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n # re-evaluate the merit function at x, since sigma has changed\n merit0=f0 + sigma * ce0n\n \n if options.verbose >= 4:\n fprintf_(options.fout,' Penalty parameter = %8.2e (threshold %8.2e)\\n'\\\n %(sigma,sigmab))\n\n # -----------------------------------------\n # Evaluate function values at the new point\n # -----------------------------------------\n\n if (interpol_TR == 1): # save for computing backtracking interpolation of TR\n gTs=gx.T.dot(s)\n \n # compute new trial point \n \n xplus=x + s\n\n # Set index of new point\n\n m=m + 1\n\n # Include point in X and evaluate f\n # (xstatus(m) is set to 0 but is updated later on)\n \n X,fX,ciX,ceX,neval,xstatus,sstatus,dstatus,info,retval=\\\n sqpdfo_augmX_evalf_(func,xplus,m,X,fX,ciX,ceX,nfix,xfix,indfix,indfree,\\\n fxmax,neval,xstatus,0,sstatus,dstatus,scaleX,\\\n scalefacX,info,options,values,nargout=10)\n \n if (info.flag):\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n else:\n \n fxplus=copy(fX[m])\n \n if ceX.any():\n ceX[ceX>=1e25] = 10*max_(ceX[ceX<1e25])\n ceX[ceX<=-1e25] = 10*min_(ceX[ceX>-1e25])\n ceplus = copy(array([ceX[:,m]]).T)\n\n # ---------------\n # Step validation\n # ---------------\n \n if retval:\n \n if retval == 1:\n \n if options.verbose >= 5:\n fprintf_(options.fout,' Step rejected (out of an implicit domain)\\n')\n \n elif retval == 2:\n \n if options.verbose > 1:\n fprintf_(options.fout,'\\n### sqpdfo_main: evaluation of the function stopped\\n\\n')\n \n info.flag=values.stop_on_simul\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n else: # unexpected\n \n if options.verbose > 1:\n fprintf_(options.fout,'\\n### sqpdfo_main: error during evaluation of the function')\n \n info.flag=values.fail_on_simul\n \n return 
nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n itype='xfail'\n \n radius_has_been_rejected=copy(True)\n\n # Recover ce at the current iterate\n\n info.ce=ce0\n\n # Shrink trust region radius\n\n #delta = tau1*norms;\n delta=tau1 * delta\n \n if options.verbose == 3 or options.verbose >= 5:\n fprintf_(options.fout,' Step rejected due to failure in function'\\\n ' evaluation\\n')\n \n else:\n \n # -------------------------------------\n # Compute merit function and ratio rho\n # -------------------------------------\n \n if nbr_slacks:\n merit=fxplus + sigma * norm_(ceplus - \\\n concatenate((zeros((len(ceplus)-nbr_slacks,1)),slplus**2))) \n else:\n merit=fxplus + sigma * norm_(ceplus)\n \n if np.isinf(merit):\n fprintf_(options.fout,' Merit function: %15.8e -> %15.8e\\n'\\\n %(merit0,merit))\n \n info.flag=values.fail_unexpected\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n\n if options.verbose >= 3:\n fprintf_(options.fout,' Merit function: %15.8e -> %15.8e\\n'\\\n %(merit0,merit))\n \n ared=merit0 - merit\n pred=- qcost + sigma * rpred\n \n if rpred < 0:\n # inaccurate model is assumed, thus an improvement step is performed\n # at the next iteration\n pred = -1.0\n \n if pred < 0: \n # should not occur, since here s ~= 0\n # an improvement step is performed at the next iteration\n pred=-1.0\n \n if options.verbose >= 3:\n fprintf_(options.fout,'\\n### sqpdfo_main: pred = %9.2e should be positive\\n\\n'%(pred))\n \n elif pred == 0: \n # here, stationarity is assumed but model maybe inaccurate to state \n # convergence thus, an improvement step is performed next iteration\n pred=- 1.0\n \n if options.verbose >= 3:\n disp_('### sqpdfo_main : Warning : predicted reduction is 0 ###')\n \n rho=ared / pred\n \n if pred == - 1.0:\n rho=- 1.0\n \n if (rho >= eta1):\n succ=1\n else:\n succ=0\n \n if options.verbose == 4: \n fprintf_(options.fout,' %8.2e %7.1e %7.1e %9.2e\\n'\\\n %(delta,norms,sigma,rho))\n# fprintf_(options.fout,' %8.2e %7.1e %7.1e %7.1e %7.1e %9.2e\\n'\\\n# %(delta,norm_r,norm_(t),norms,sigma,rho))\n\n ###################################################################\n # Include the new point in the interpolation set Y \n ###################################################################\n\n i_xold=copy(i_xbest) # save to compute min-frob-norm model\n pos=-1\n \n # --------------------------------------\n # Successful iteration (accept the step)\n # --------------------------------------\n \n if (rho >= eta1):\n \n if options.verbose >= 3:\n fprintf_(options.fout,' Step accepted (rho = %9.2e;'\\\n ' ared = %9.2e, pred = %9.2e)\\n'%(rho,ared,pred))\n \n if (merit >= merit0):\n info.flag=values.fail_on_non_decrease\n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n \n # Augment interpolation set if not fully quadratic yet\n\n if (cur_degree < pfinal or (whichmodel == 3 \\\n and cur_degree < pfinal + pfinal)):\n cur_degree,QZ,RZ,Y,xbase,scale=\\\n bcdfo_augment_Y_(xplus,Y[:,0:cur_degree],whichmodel,shift_Y,\\\n delta,normgx,kappa_ill,nargout=6)\n \n pos=copy(cur_degree)-1\n \n # Include xplus in the interpolation set, by replacing another point 
if\n # the model is already fully quadratic.\n \n else:\n QZ,RZ,Y,pos,x,scale=\\\n bcdfo_include_in_Y_(xplus,QZ,RZ,Y,arange(0,cur_degree),Lambda_XN,\\\n criterion_S,x,whichmodel,succ,scale,shift_Y,\\\n delta,normgx,kappa_ill,nargout=6)\n \n if (pos >= 0):\n xstatus[ind_Y[pos]]=c.unused\n \n # If xplus could/should be included in the interpolation set\n\n if (pos >= 0):\n \n itype='succ'\n \n if (options.verbose >= 3):\n disp_(' replacing/including interpolation point ',\\\n str(pos),' (successful)')\n \n xstatus[m]=c.inY\n \n try:\n ind_Y[pos]=m\n fY[pos]=copy(fxplus)\n except IndexError:\n ind_Y=concatenate_([ind_Y,[m]],axis=1)\n fY=concatenate_([fY, [fxplus]],axis=1)\n \n if me > 0:\n try:\n info.ce=copy(ceplus)\n ceY[:,pos]=copy(info.ce.T)\n except IndexError:\n info.ce=copy(ceplus)\n ceY=concatenate_([ceY, info.ce],axis=1)\n \n if nbr_slacks:\n # move slack variable away from zero if inequality\n # value gets above zero (here trial use of 0.1)\n \n for i in range(0,nbr_slacks):\n if slplus[i]==0 and info.ce[me-nbr_slacks+i]>0.01:\n slplus[i] = sqrt_(info.ce[me-nbr_slacks+i])\n \n sl = slplus\n glob.set_slacks(slplus)\n \n # Move it in the first position, redefining the base point.\n\n QZ,RZ,Y,ind_Y,fY,ciY,ceY,x,scale=\\\n sqpdfo_swap_in_Y_(0,pos,QZ,RZ,Y,ind_Y,fY,ciY,ceY,x,whichmodel,\\\n scale,shift_Y,delta,normgx,kappa_ill,nargout=9)\n \n fx=copy(fxplus)\n i_xbest=copy(m)\n \n if (not shift_Y):\n x=copy(Y[:,[0]])\n \n poised_model=0\n \n # Compute the associated polynomial interpolation models.\n\n fcmodel=\\\n bcdfo_computeP_(QZ,RZ,Y,concatenate_(\\\n [fY.reshape(1,-1),ciY,ceY]),whichmodel,\\\n fcmodel[[0],:],ind_Y,i_xold,m,gx,scale,shift_Y)\n \n # Compute model gradients for objective and constraint\n # functions.\n \n gx=bcdfo_gradP_(fcmodel[[0],:],x,x,scale,shift_Y)\n normgx,_=bcdfo_projgrad_(n,x,gx,lb[indfree],ub[indfree])\n \n if mi > 0:\n gci=zeros_(mi,n)\n for i in range(0,mi):\n gci[i,:]=\\\n bcdfo_gradP_(fcmodel[[1 + i],:],x,x,scale,shift_Y).T\n if me > 0:\n gce=zeros_(me,n)\n for i in range(0,me):\n gce[i,:]=\\\n bcdfo_gradP_(fcmodel[[1 + mi + i],:],x,x,scale,shift_Y).T\n \n # Update the trust-region radius.\n \n if rho >= eta2:\n if (active_r or active_t) and delta<5.0:\n delta=delta * tau3\n else:\n delta=min_(max_(tau2 * norms,delta),Deltamax)\n else:\n if rho >= eta1:\n if (active_r or active_t):\n delta=delta * tau2\n else:\n delta=min_(max_(tau2 * norms,delta),Deltamax)\n \n radius_has_been_rejected=copy(False)\n \n # Re-compute Lagrange multipliers (if not already done)\n\n if lm_computed == 0:\n lbounds=- inf * ones_(size_(x))\n ubounds=inf * ones_(size_(x))\n ilb=(abs(lb[indfree] - x) < 1e-05).reshape(-1)\n iub=(abs(ub[indfree] - x) < 1e-05).reshape(-1)\n lbounds[ilb]=lb[indfree[ilb]]\n ubounds[iub]=ub[indfree[iub]]\n \n lm,info=\\\n sqpdfo_compute_multiplier_(x,lbounds,ubounds,info,options,\\\n values,nargout=2)\n \n # Update Hessian approximation\n \n M,pc,info=\\\n sqpdfo_computeHessian_(func,x,null_step,constrained_pbl,lm,M,\\\n n,me,mi,s,gx,gci,gce,info,options,values,\\\n fcmodel,Y,fY,ciY,ceY,sigma,scale,shift_Y,\\\n QZ,RZ,whichmodel,ind_Y,i_xbest,m,nargout=3)\n \n # ---------------------------------------\n # Unsuccessful iteration - step rejection\n # ---------------------------------------\n \n # model accuracy is questionable - go to next iteration \n # and choose new interpolation set\n \n if pred == - 1.0:\n pos=0\n rho=1\n \n if (rho < eta1) or (pos == -1):\n \n itype='repD,repF,repC,redD'\n itype='unsuc'\n \n radius_has_been_rejected=copy(True)\n \n 
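# --- Illustrative aside (not sqpdfo source code): the accept/reject decision
# above is the classical trust-region ratio test. A minimal standalone sketch;
# eta1, eta2, gamma2, tau2 and Deltamax are assumed example constants, not the
# module's actual configuration.
def trust_region_step(merit0, merit, pred, delta, norms,
                      eta1=0.01, eta2=0.9, gamma2=0.5, tau2=2.0, Deltamax=1e5):
    """Return (new_delta, accepted) from actual vs. predicted merit reduction."""
    ared = merit0 - merit                    # actual reduction
    rho = ared / pred if pred > 0 else -1.0  # guard: pred <= 0 means a dubious model
    if rho >= eta2:                          # very successful: let the radius grow
        return min(max(tau2 * norms, delta), Deltamax), True
    if rho >= eta1:                          # successful: keep the radius
        return delta, True
    return gamma2 * delta, False             # unsuccessful: shrink the region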
if options.verbose == 3 or options.verbose >= 5:\n fprintf_(options.fout,' Step rejected (rho = %9.2e;'\\\n ' ared = %9.2e, pred = %9.2e)\\n'%(rho,ared,pred))\n \n # The model is not fully quadratic yet: add (if possible)\n # the new point to the interpolation set and recompute the model.\n \n if (((cur_degree < pfinal) or (whichmodel == 3 \\\n and cur_degree < pfinal + pfinal)) and (rho < eta1)):\n \n cur_degree,QZ,RZ,Y,xbase,scale=\\\n bcdfo_augment_Y_(xplus,Y[:,0:cur_degree],whichmodel,shift_Y,\\\n delta,normgx,kappa_ill,nargout=6)\n \n if (options.verbose >= 3):\n disp_(' including interpolation point ',\\\n str(cur_degree-1),' (augm)')\n \n # Update status and position of the new point\n\n xstatus[m]=c.inY\n \n try:\n ind_Y[cur_degree-1]=m\n fY[cur_degree-1]=copy(fxplus)\n except IndexError:\n ind_Y=concatenate_([ind_Y,[m]],axis=1)\n fY=concatenate_([fY, [fxplus]],axis=1)\n \n if mi > 0:\n try:\n ciY[:,cur_degree-1]=copy(ciplus.T)\n except IndexError:\n ciY=concatenate_([ciY, ciplus],axis=1)\n if me > 0:\n try:\n ceY[:,cur_degree-1]=copy(ceplus.T)\n except IndexError:\n ceY=concatenate_([ceY, ceplus],axis=1) \n \n poised_model=0\n \n # Compute new model(s).\n\n fcmodel=bcdfo_computeP_(QZ,RZ,Y,concatenate_([fY.reshape(1,-1),\\\n ciY,ceY]),whichmodel,fcmodel,ind_Y,i_xold,\\\n m,gx,scale,shift_Y)\n \n # Compute the gradient(s) of the new model(s).\n \n gx=bcdfo_gradP_(fcmodel[[0],:],x,x,scale,shift_Y)\n normgx,_=bcdfo_projgrad_(n,x,gx,lb[indfree],ub[indfree])\n\n if mi > 0:\n gci=zeros_(mi,n)\n for i in range(0,mi):\n gci[i,:]=\\\n bcdfo_gradP_(fcmodel[[1 + i],:],x,x,scale,shift_Y).T\n if me > 0:\n gce=zeros_(me,n)\n for i in range(0,me):\n gce[i,:]=\\\n bcdfo_gradP_(fcmodel[[1 + mi + i],:],x,x,scale,shift_Y).T\n itype='augm'\n pos=copy(m)\n \n # Shrink trust region in unsuccessful iteration\n\n if (shrink_Delta == 1 and delta > epsilon):\n #delta = gamma2 * norms;\n\n delta=gamma2 * delta\n\n # Enter if the model is already fully quadratic *or* \n # xplus could not yet be included in the set. \n # The decision to include xplus here depends on possibly eliminating \n # another point.\n \n if (cur_degree >= pfinal or pos == -1):\n \n if ((pos == -1) and (poised_model == 0 or delta <= eps_current)):\n \n # Compute the distance of the interpolation points to the\n # current iterate. 
(Distinguish between the badly conditioned\n # successful and the unsuccessful case!)\n \n d=zeros(cur_degree)\n \n if (rho >= eta1):\n for j in range(0,cur_degree):\n if (lSolver == 1):\n d[j]=norm_(Y[:,[j]] - xplus)\n else:\n d[j]=norm_(Y[:,[j]] - xplus,inf)\n else:\n for j in range(1,cur_degree): \n if (lSolver == 1):\n d[j]=norm_(Y[:,[j]] - x)\n else:\n d[j]=norm_(Y[:,[j]] - x,inf)\n \n # Compute the basic distance used to define far/close points.\n\n FPlength=factor_FPU * (1 + eps_TR) * delta\n \n # Replace a far interpolation point.\n\n if (rho >= eta1):\n criterion_FPn='weighted' # use weighted measure, \n # not furthest point\n else:\n criterion_FPn=copy(criterion_FP)\n \n QZ,RZ,Y,pos,x,scale=\\\n bcdfo_include_in_Y_(xplus,QZ,RZ,Y,find_(d > FPlength),\\\n Lambda_FP,criterion_FPn,x,whichmodel,\\\n succ,scale,shift_Y,delta,normgx,\\\n kappa_ill,nargout=6)\n \n if (pos >= 0):\n \n itype='repF'\n \n if (options.verbose >= 3):\n disp_(' replacing interpolation point ',pos,' (far)')\n \n # Update status and position of the new point\n\n xstatus[ind_Y[pos]]=c.unused\n xstatus[m]=c.inY\n ind_Y[pos]=copy(m)\n fY[pos]=copy(fxplus)\n \n if mi > 0:\n ciY[:,pos]=copy(ciplus.T)\n if me > 0:\n ceY[:,pos]=copy(ceplus.T)\n \n # Swap points if included a successful point\n\n if (rho >= eta1):\n \n QZ,RZ,Y,ind_Y,fY,ciY,ceY,x,scale=\\\n sqpdfo_swap_in_Y_(0,pos,QZ,RZ,Y,ind_Y,fY,ciY,ceY,\\\n x,whichmodel,scale,shift_Y,delta,\\\n normgx,kappa_ill,nargout=9)\n \n fx=copy(fxplus)\n info.f=fx\n \n if mi > 0:\n info.ci=copy(ciY[:,[0]])\n if me > 0:\n info.ce=copy(ceY[:,[0]])\n \n if nbr_slacks:\n # set slacks if any\n for i in range(0,nbr_slacks):\n if slplus[i]==0 \\\n and info.ce[me-nbr_slacks+i]>0.01:\n slplus[i] = sqrt_(info.ce[\\\n me-nbr_slacks+i])\n sl = slplus\n glob.set_slacks(slplus)\n \n i_xbest=copy(m)\n \n if (not shift_Y):\n x=Y[:,[0]]\n \n poised_model=0\n \n if (options.verbose >= 3):\n disp_(' swapped point to position 1')\n \n itype='repFs'\n \n # Update the trust-region radius.\n\n delta=min_(max_(gamma3 * norms,delta),Deltamax)\n \n else:\n # Shrink trust region in unsuccessful iteration\n\n if (shrink_Delta == 1 and delta > epsilon):\n \n #delta = gamma2 * norms;\n delta=gamma2 * delta\n \n # Compute the associated polynomial interpolation model.\n\n fcmodel=bcdfo_computeP_(QZ,RZ,Y,concatenate_\\\n ([fY.reshape(1,-1),ciY,ceY]),\\\n whichmodel,fcmodel,ind_Y,i_xold,\\\n m,gx,scale,shift_Y)\n \n # Compute the gradient(s) of the new model(s).\n \n gx=bcdfo_gradP_(fcmodel[[0],:],x,x,scale,shift_Y)\n normgx,_=bcdfo_projgrad_(n,x,gx,lb[indfree],ub[indfree])\n \n if mi > 0:\n gci=zeros_(mi,n)\n for i in range(0,mi):\n gci[i,:]=\\\n bcdfo_gradP_(fcmodel[[1 + i],:],x,x,scale,shift_Y).T\n if me > 0:\n gce=zeros_(me,n)\n for i in range(0,me):\n gce[i,:]=\\\n bcdfo_gradP_(fcmodel[[1 + mi +\\\n i],:],x,x,scale,shift_Y).T\n \n # Replace a close interpolation point.\n\n if (pos == -1):\n \n if (rho >= eta1):\n criterion_CPn='standard' # find best improvement\n else:\n criterion_CPn=copy(criterion_CP)\n \n if (rho >= eta1):\n Lambda_CPn=1e-15 # try hard to include a successful \n # point\n else:\n Lambda_CPn=copy(Lambda_CP)\n d[0]=2 * FPlength # excludes the current iterate\n \n QZ,RZ,Y,pos,x,scale=\\\n bcdfo_include_in_Y_(xplus,QZ,RZ,Y,find_(d <= FPlength),\\\n Lambda_CPn,criterion_CPn,x,whichmodel,\\\n succ,scale,shift_Y,delta,normgx,\\\n kappa_ill,nargout=6)\n \n if (pos >= 0):\n \n itype='repC' \n \n # Safeguard i_x for frobenius model type 4 when \n # replacing point 1\n\n if (pos == 0):\n 
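# --- Illustrative aside (assumed standalone helper, not the bcdfo_* API): the
# far/close classification above compares each point's distance to the iterate
# against FPlength = factor_FPU * (1 + eps_TR) * delta; the factor_FPU and
# eps_TR defaults here are placeholders.
import numpy as np

def split_far_close(Y, x, delta, factor_FPU=10.0, eps_TR=1e-4):
    """Y: n-by-p array of interpolation points (columns); x: current iterate (n,)."""
    fp_length = factor_FPU * (1.0 + eps_TR) * delta
    d = np.linalg.norm(Y - x.reshape(-1, 1), axis=0)  # distance of each column to x
    return np.flatnonzero(d > fp_length), np.flatnonzero(d <= fp_length)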
i_xold=ind_Y[1] \n \n if (options.verbose >= 3):\n disp_(' replacing interpolation point ',\\\n str(pos-1),' (close)')\n \n # Update status and position of the new point\n\n xstatus[ind_Y[pos]]=c.unused\n xstatus[m]=c.inY\n ind_Y[pos]=copy(m)\n fY[pos]=copy(fxplus)\n \n if mi > 0:\n ciY[:,pos]=copy(ciplus.T)\n if me > 0:\n ceY[:,pos]=copy(ceplus.T)\n \n # Swap points if included a successful point\n\n if (rho >= eta1):\n QZ,RZ,Y,ind_Y,fY,ciY,ceY,x,scale=\\\n sqpdfo_swap_in_Y_(0,pos,QZ,RZ,Y,ind_Y,fY,ciY,\\\n ceY,x,whichmodel,scale,shift_Y,\\\n delta,normgx,kappa_ill,nargout=9)\n \n fx=copy(fxplus)\n info.f=fx\n \n if mi > 0:\n info.ci=ciY[:,[0]]\n \n if me > 0:\n info.ce=ceY[:,[0]]\n \n if nbr_slacks:\n # set slacks if any\n for i in range(0,nbr_slacks):\n if slplus[i]==0 \\\n and info.ce[me-nbr_slacks+i]>0.01:\n slplus[i] = sqrt_(info.ce[\\\n me-nbr_slacks+i])\n sl = slplus\n glob.set_slacks(slplus)\n \n i_xbest=copy(m)\n \n if (not shift_Y):\n x=copy(Y[:,[0]])\n \n poised_model=0\n \n if (options.verbose >= 3):\n disp_(' swapped point to position 1')\n \n itype='repCs'\n \n # Update the trust-region radius.\n\n delta=min_(max_(gamma3 * norms,delta),Deltamax)\n \n else:\n # Shrink trust region in unsuccessful iteration\n\n if (shrink_Delta == 1 and delta > epsilon):\n #delta = gamma2 * norms;\n delta=gamma2 * delta\n \n # Compute associated polynomial interpolation model.\n\n fcmodel=bcdfo_computeP_(QZ,RZ,Y,concatenate_\\\n ([fY.reshape(1,-1),ciY,ceY]),\\\n whichmodel,fcmodel,ind_Y,\\\n i_xold,m,gx,scale,shift_Y)\n \n # Compute the gradient(s) of the new model(s).\n \n gx=bcdfo_gradP_(fcmodel[[0],:],x,x,scale,shift_Y)\n normgx,_=bcdfo_projgrad_(n,x,gx,lb[indfree],ub[indfree])\n \n if mi > 0:\n gci=zeros_(mi,n)\n for i in range(0,mi):\n gci[i,:]=\\\n bcdfo_gradP_(fcmodel[[1 +\\\n i],:],x,x,scale,shift_Y).T\n if me > 0:\n gce=zeros_(me,n)\n for i in range(0,me):\n gce[i,:]=\\\n bcdfo_gradP_(fcmodel[[1 + mi +\\\n i],:],x,x,scale,shift_Y).T\n \n # Decrease the radius.\n\n if (pos == -1):\n \n if (options.verbose >= 3):\n disp_(' decreasing the TR radius')\n \n # Set status of the new point\n\n xstatus[m]=c.unused\n \n # Compute new trust-region radius\n\n if (interpol_TR == 1):\n curvature=- pred - gTs\n gam_inter=(eta2 - 1) * gTs / (fxplus - fx - gTs - \\\n eta2 * curvature)\n delta=max_(gamma1,min_(gam_inter,gamma2)) * \\\n min_(delta,norms)\n else:\n# delta = gamma2 * delta;\n delta=gamma2 * norms\n \n itype='redD'\n \n # Check that the trust-region radius has not become so small \n # that a step of this size will not be significant.\n \n if (delta < stallfact * norm_(x) or delta < epsilon * 1e-5):\n msg='Algorithm stopped after '+str(neval)+\\\n ' evaluations of the objective function because Delta small.'\n \n info.flag=values.stop_on_small_trust_region\n \n # final printout\n \n sqpdfo_iter_printout_(info,delta,norms,pc,itype,\\\n values,nb,mi,options,constrained_pbl,merit)\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,\\\n eps_current,cur_degree,fcmodel,gx,normgx,vstatus,\\\n xstatus,sstatus,dstatus,M,ndummyY,sspace_save,\\\n xspace_save,msg,CNTsin,neval,lm,info\n \n # Recover ce at the current iterate\n\n #info.ce=ce0\n info.f=f0\n\n # Recompute Lagrange multiplier\n\n lbounds=- inf * ones_(size_(x))\n ubounds=inf * ones_(size_(x))\n ilb=(abs(lb[indfree] - x) < 1e-05).reshape(-1)\n iub=(abs(ub[indfree] - x) < 1e-05).reshape(-1)\n lbounds[ilb]=lb[indfree[ilb]]\n ubounds[iub]=ub[indfree[iub]]\n lm,info=\\\n sqpdfo_compute_multiplier_(x,lbounds,ubounds,info,options,values,\\\n 
nargout=2)\n \n # Compute / update Hessian\n \n M,pc,info=\\\n sqpdfo_computeHessian_(func,x,null_step,constrained_pbl,lm,M,n,me,mi,\\\n s,gx,gci,gce,info,options,values,fcmodel,Y,fY,\\\n ciY,ceY,sigma,scale,shift_Y,QZ,RZ,whichmodel,\\\n ind_Y,i_xbest,m,nargout=3)\n \n return nit,i_xbest,x,fx,m,X,fX,ciX,ceX,ind_Y,delta,eps_current,\\\n cur_degree,fcmodel,gx,normgx,vstatus,xstatus,sstatus,dstatus,M,\\\n ndummyY,sspace_save,xspace_save,msg,CNTsin,neval,lm,info\n\n","repo_name":"DLR-SC/sqpdfo","sub_path":"sqpdfo/sqpdfo_main.py","file_name":"sqpdfo_main.py","file_ext":"py","file_size_in_byte":64100,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"28"} +{"seq_id":"24204790097","text":"'''\n\tindex.py \tI shall do nothing but render the interface\n'''\n\nimport os\n\nimport wsgiref.handlers\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\n\nimport web2hunter as WH\n\nclass MainHandler(webapp.RequestHandler):\n\ttPath = os.path.join( os.path.dirname (__file__), \"templates\" )\n\n\tdef get(self):\n\t\ttodo = self.request.get('getname');\n\t\t\n\t\tif (todo == 'true'):\n\t\t\tif (self.request.headers.get('Referer') != None):\n\t\t\t\t# stop others from stealing your bandwidth\n\t\t\t\t#if (self.request.headers.get('Referer').find('web2hunter.appspot.com')!=-1):\n\n\t\t\t\t\t# return a possible domain name\n\t\t\t\t\tpossiblename = WH.genName()\n\t\t\t\n\t\t\t\t\twhile (possiblename == \"\"):\n\t\t\t\t\t\tpossiblename = WH.genName()\n\t\t\t\n\t\t\t\t\t# lets sanitize the possible name\n\t\t\t\t\t# for sometime we get the response header for some reason -\n\t\t\t\t\t# Content-Type: text/html; charset=utf-8 Cache-Control: ... blabla\n\t\t\t\t\t# well a foo.split()[-1] would help\n\n\t\t\t\t\t# food for javascript + sanitization\n\t\t\t\t\tself.response.out.write (possiblename.split()[-1])\n\t\t\t\t#else:\n\t\t\t\t\t#self.response.out.write (\"nothing for you\");\n\t\t\telse:\n\t\t\t\tself.response.out.write (\"nothing for you\");\n\t\telse:\n\t\t\t# render the template\n\t\t\toutstr = template.render (\n\t\t\t\tself.tPath + '/index.html', None )\n\t\n\t\t\tself.response.out.write (outstr)\n\t\t\n\ndef main():\n\tapplication = webapp.WSGIApplication( [('/.*',MainHandler)], debug=True )\n\twsgiref.handlers.CGIHandler().run(application)\n\nif __name__ == '__main__':\n\tmain()\n\n","repo_name":"ideamonk/Web2Hunter-GAE","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"44553709440","text":"from fastapi import APIRouter, Depends\n\nfrom app.user_roles import Roles\nfrom core.factory.cloudauth import get_current_auth\nfrom core.factory.web import get_current_app\nfrom web.core.tenants import workspace_header\n\nrouter_tests_questions = APIRouter()\nrouter_questions = APIRouter()\n\n\ndef init_router():\n app = get_current_app()\n auth = get_current_auth()\n\n app.include_router(\n router_tests_questions,\n prefix=\"/feed/tests/questions\",\n tags=[\"[M03] Feed Tests / Questions\"],\n dependencies=[\n Depends(workspace_header),\n Depends(auth.allow(Roles.ADMINS_AND_PATIENT, \"Feed Tests / Questions\")),\n ]\n )\n\n app.include_router(\n router_questions,\n prefix=\"/feed/questions\",\n tags=[\"[M03] Feed / Questions\"],\n dependencies=[\n Depends(workspace_header),\n Depends(auth.allow(Roles.ADMINS_AND_PATIENT, \"Feed / Questions\")),\n ]\n 
)\n","repo_name":"stdTG/healthy-care-backend","sub_path":"app/feed/web/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"10353684280","text":"import gzip\nimport os\nimport pickle\n\nimport h5py\nimport numpy as np\n\n\ndef savepklz(data_to_dump, dump_file_full_name, force_run=False):\n ''' Saves a pickle object and gzips it '''\n\n if not force_run:\n raise RuntimeError(\"This function should no longer be used!\")\n\n with gzip.open(dump_file_full_name, 'wb') as out_file:\n pickle.dump(data_to_dump, out_file)\n\n\ndef loadpklz(dump_file_full_name, force_run=False):\n ''' Loads a gzipped pickle object '''\n\n if not force_run:\n raise RuntimeError(\"This function should no longer be used!\")\n\n with gzip.open(dump_file_full_name, 'rb') as in_file:\n dump_data = pickle.load(in_file)\n\n return dump_data\n\n\ndef saveh5(dict_to_dump, dump_file_full_name):\n ''' Saves a dictionary as h5 file '''\n\n with h5py.File(dump_file_full_name, 'w') as h5file:\n writeh5(dict_to_dump, h5file)\n\n\ndef writeh5(dict_to_dump, h5node):\n ''' Recursive function to write dictionary to h5 nodes '''\n\n for _key in dict_to_dump.keys():\n if isinstance(dict_to_dump[_key], dict):\n h5node.create_group(_key)\n cur_grp = h5node[_key]\n writeh5(dict_to_dump[_key], cur_grp)\n else:\n h5node[_key] = dict_to_dump[_key]\n\n\ndef loadh5(dump_file_full_name):\n ''' Loads a h5 file as dictionary '''\n\n with h5py.File(dump_file_full_name, 'r') as h5file:\n dict_from_file = readh5(h5file)\n\n return dict_from_file\n\n\ndef readh5(h5node):\n ''' Recursive function to read h5 nodes as dictionary '''\n\n dict_from_file = {}\n for _key in h5node.keys():\n if isinstance(h5node[_key], h5py._hl.group.Group):\n dict_from_file[_key] = readh5(h5node[_key])\n else:\n dict_from_file[_key] = h5node[_key].value\n\n return dict_from_file\n\n\ndef loadMonitor(dump_file_full_name_no_ext):\n ''' Loads the monitor file '''\n\n # Check h5 file, load that if it exists, else use the old one\n if os.path.exists(dump_file_full_name_no_ext + '.h5'):\n with h5py.File(dump_file_full_name_no_ext + '.h5', 'r') as h5file:\n monitor = {}\n for _key in h5file.keys():\n monitor[_key] = h5file[_key].value\n # turn into lists if it was a list\n if _key.endswith('_list'):\n monitor[_key] = list(monitor[_key])\n\n elif os.path.exists(dump_file_full_name_no_ext + '.pklz'):\n monitor = loadpklz(dump_file_full_name_no_ext + '.pklz')\n else:\n raise RuntimeError('Dump file for {} does not exist!'.format(\n dump_file_full_name_no_ext))\n\n return monitor\n\n\ndef saveMonitor(monitor, dump_file_full_name_no_ext):\n ''' Saves the monitor file '''\n\n with h5py.File(dump_file_full_name_no_ext + '.h5', 'w') as h5file:\n for _key in monitor.keys():\n val = monitor[_key]\n # If it's a list, turn it into np array\n\n # NOTE: the lists in the monitor are just to have\n # appending operation not allocate every time an element\n # is added. 
In fact, they can be easily transformed into\n # an ndarray\n if _key.endswith('_list'):\n val = np.asarray(val)\n\n h5file[_key] = val\n\n#\n# dump_utils.py ends here\n","repo_name":"bme-2020/fbpconv_tf","sub_path":"dump_tools.py","file_name":"dump_tools.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"8098668036","text":"from trading import database\nimport argparse\nimport sys\nfrom ib_insync import util, IB\nfrom configparser import ConfigParser, ExtendedInterpolation\nimport os\nimport logging\nimport numpy as np\nimport pandas as pd\nimport pandas\nimport time\nimport dateutil\nimport threading\n\n\n\n\ndef runProg(args):\n \"\"\"run program\"\"\"\n\n pd.set_option('display.width', 200)\n\n # log to a file\n util.logToFile(f'createTraingData.log')\n\n # load the config file\n configFile = args.configFile\n config = ConfigParser(interpolation=ExtendedInterpolation(), defaults=os.environ)\n config.read(configFile)\n\n # load data from configFile\n host = config.get('InteractiveBrokers', 'host')\n port = config.getint('InteractiveBrokers', 'port')\n DBType = config.get('DataBase', 'DBType')\n DBFileName = config.get('DataBase', 'DBFileName')\n clientId = config.get('InteractiveBrokers', 'clientId')\n\n # override configFile if clientId is given on the command line\n if args.clientId is not None:\n clientId = args.clientId\n\n if 1:\n # faster way for now\n ib = IB()\n ib.connect(host=host, port=port, clientId=clientId)\n pass\n\n pass\n\n # create database class\n mydbSQLite = database.tradingDB(DBType=DBType, DBFileName=DBFileName)\n # load existing database\n mydbSQLite.instantiateExistingTablesAndClasses(ib=ib)\n # set log level\n mydbSQLite._loggerSQLAlchemy.setLevel(logging.ERROR)\n\n # create database class\n mydbMySQL = database.tradingDB(DBType='mysql', DBFileName=DBFileName)\n # load existing database\n mydbMySQL.instantiateExistingTablesAndClasses(ib=ib)\n # set log level\n mydbMySQL._loggerSQLAlchemy.setLevel(logging.ERROR)\n\n\n tblsSQLiteORM = mydbSQLite.MarketDataInfoTableDataFrame['tableORM']\n tblsMySQLORM = mydbMySQL.MarketDataInfoTableDataFrame['tableORM']\n\n nTables = len(tblsMySQLORM)\n\n ssnSQLite = mydbSQLite.Session()\n ssnMySQL = mydbMySQL.Session()\n\n for i in range(0,nTables):\n tt1 = time.time()\n\n tblSQLiteSchema = tblsSQLiteORM.iloc[i].__table__\n tblMySQLSchema = tblsMySQLORM.iloc[i].__table__\n\n\n print(tblSQLiteSchema.name)\n print(tblMySQLSchema)\n\n qs = ssnSQLite.query(tblSQLiteSchema)\n df_read = pd.read_sql(qs.statement, qs.session.bind)\n\n mydbMySQL.upsertDataFrame(df_read,tblMySQLSchema)\n ssnMySQL.commit()\n\n tt2 = time.time()\n ttdiff = tt2 - tt1\n print(f'bla {tblSQLiteSchema.name}: {ttdiff}')\n pass\n\n ssnSQLite.close()\n ssnMySQL.close()\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('-c', '--configFile', help='Config File Name', required=True, type=str)\nparser.add_argument('--clientId', help='clientId to connect to TWS/gateway', required=False, default=None, type=int)\nif __name__ == '__main__':\n args = parser.parse_args()\n sys.exit(runProg(args))\n","repo_name":"snhuber/trading","sub_path":"trading/scripts/copyTablesFromSQLiteToMySQL.py","file_name":"copyTablesFromSQLiteToMySQL.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"74784596873","text":"from 
src.db_manager import DBManager\nfrom src.hh import HH\n\ndef main():\n print('Приветствую Вас! Сейчас мы поработаем с вакансиями сайта hh.ru!')\n my_db = DBManager('vacancies')\n print('Сoздаем базу данных и таблицы')\n print()\n my_db.create_database()\n print('База данных создана.')\n print()\n data_hh= HH()\n employers = data_hh.get_data_vacancies()\n my_db.insert(employers)\n print('Вакансии успешно загружены!')\n\n\n print('Выберите цифру из предложенного меню:')\n print(f\"\"\"\n 1. Получить список всех компаний и количество вакансий у каждой компании\n 2. Получить список всех вакансий с указанием названия компании, названия вакансии и зарплаты и ссылки на вакансию\n 3. Получить среднюю зарплату по вакансиям\n 4. Получить список всех вакансий, у которых зарплата выше средней по всем вакансиям\n 5. получает список всех вакансий, в названии которых содержится ключевое слово\n 0. Выход из программы\"\"\")\n print()\n while True:\n user_input = input('Введите цифру: ')\n if user_input == '1':\n my_db.get_companies_and_vacancies_count()\n elif user_input == '2':\n my_db.get_all_vacancies()\n elif user_input == '3':\n my_db.get_avg_salary()\n elif user_input == '4':\n my_db.get_vacancies_with_higher_salary()\n elif user_input == '5':\n user_keyword = input('Введите слово: ')\n my_db.get_vacancies_with_keyword(user_keyword)\n elif user_input == '0':\n break\n else:\n print('Некорректный ввод')\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"svetlanaromanenko06/course_work_5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19334163201","text":"import pyaudio\n\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 2\nRATE = 44100\n\np = pyaudio.PyAudio()\nstream = p.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK\n)\nframes = []\nframes.append(stream.read(CHUNK)) # capture one chunk from the microphone\n\nimport scipy.io.wavfile as wavfile\nimport numpy as np\nimport pylab as pl\nwav_rate, wav_data = wavfile.read('FILE.wav') # wav samples, kept separate from the mic frames\nt = np.arange(len(wav_data[:,0]))*1.0/wav_rate\npl.plot(t, wav_data[:,0])\npl.show()\n\nstream.stop_stream()\nstream.close()\np.terminate()\n","repo_name":"pangolingo/hue-colorpicker","sub_path":"audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"70003764557","text":"from pychron.dvc import analysis_path, dvc_load, dvc_dump\n\n\ndef get_runlist():\n return [\n \"66573-01A\",\n \"66573-01B\",\n \"66573-01C\",\n \"66573-01D\",\n \"66573-01E\",\n \"66573-01F\",\n \"66573-01G\",\n \"66573-01H\",\n \"66573-01I\",\n \"66573-01J\",\n \"66573-01K\",\n \"66573-01L\",\n \"66573-01M\",\n \"66572-01A\",\n \"66572-01B\",\n \"66572-01C\",\n \"66572-01D\",\n \"66572-01E\",\n \"66572-01F\",\n \"66572-01G\",\n \"66572-01H\",\n \"66572-01I\",\n \"66572-01J\",\n \"66572-01K\",\n \"66572-01L\",\n \"66572-01M\",\n ]\n\n\ndef fix():\n repository = \"Saifuddeen01097\"\n root = \"/Users/ross/PychronDev/data/.dvc/repositories\"\n runlist = get_runlist()\n for runid in runlist:\n for modifier in (\"intercepts\",):\n fix_run(runid, repository, root, modifier)\n fix_iso_list(runid, repository, root)\n\n\ndef fix_iso_list(runid, repository, root):\n path = analysis_path(runid, repository, root=root)\n # print('asdf', path)\n obj = dvc_load(path)\n isotopes = obj[\"isotopes\"]\n try:\n v = isotopes.pop(\"PHHCbs\")\n 
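# --- Illustrative aside (generic helper, not part of pychron): the fix around
# this point renames a mis-labelled isotope key by popping the old entry and
# re-inserting it under the correct name. The bare pattern in isolation:
def rename_key(d, old, new):
    """Rename d[old] to d[new] in place; return False if old is absent."""
    try:
        d[new] = d.pop(old)
    except KeyError:
        return False
    return True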
v[\"name\"] = \"Ar39\"\n isotopes[\"Ar39\"] = v\n obj[\"isotopes\"] = isotopes\n dvc_dump(obj, path)\n except KeyError:\n return\n\n\ndef fix_run(runid, repository, root, modifier):\n path = analysis_path(runid, repository, root=root, modifier=modifier)\n # print('asdf', path)\n obj = dvc_load(path)\n # print('ff', obj)\n try:\n v = obj.pop(\"PHHCbs\")\n obj[\"Ar39\"] = v\n dvc_dump(obj, path)\n msg = \"fixed\"\n except KeyError:\n msg = \"skipped\"\n\n print(runid, msg)\n\n\nif __name__ == \"__main__\":\n fix()\n# ============= EOF =============================================\n","repo_name":"NMGRL/pychron","sub_path":"pychron/dvc/fix/fix_isotopes.py","file_name":"fix_isotopes.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"28"} +{"seq_id":"14145968187","text":"def lin():\r\n print('-'*55)\r\n\r\n\r\nvoto = 0\r\nmoradores = 0\r\nlindomar = 0\r\nclaudio = 0\r\nelmar = 0\r\nwhile(voto < 100):\r\n lin()\r\n print('Insira de 1 a 3 os seus canditatos a sindico:\\n- Lindomar (VOTE-01)\\n- Claudio (VOTE-02)\\n- Elmar(VOTE-03)')\r\n insert = int(input('Insira o voto: '))\r\n if (insert > 3 or insert < 1):\r\n print('ERRO NOS VALORES DO VOTO, INSIRA NOVAMENTE.')\r\n else:\r\n if(insert == 1):\r\n lindomar +=1\r\n print('VOTO - Lindomar!')\r\n elif(insert == 2):\r\n claudio +=1\r\n print('VOTO - Claudio.')\r\n else:\r\n elmar +=1\r\n print('VOTO - Elmar.')\r\n voto += 1\r\n # print(f'Porcentagem de votos SIM: {sim / (sim + nao) * 100: .1f} %.')\r\n print(f'Porcentagem: {(lindomar / voto)*100: .1f}%')\r\n print(f'Porcentagem: {(claudio / voto)*100: .1f}%')\r\n print(f'Porcentagem: {(elmar / voto )*100: .1f}%')","repo_name":"Gus027/MENU_DE_VOTO_PYTHON","sub_path":"voto.py","file_name":"voto.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"2284980774","text":"def maxProfit(a):\n min_so_far=a[0] # we assume the minimum value of stock is a[0]\n max_profit=0 \n for i in range(0,len(a)):\n min_so_far=min(min_so_far,a[i]) # for each a[i] we take min of min so far and a[i]\n profit=a[i]-min_so_far #this is important for eacj a[i]we check how much profit will it give from the minimum so far\n max_profit=max(profit,max_profit)\n \n return max_profit\n\n\na=[7,1,5,3,6,4]\nprint(maxProfit(a))","repo_name":"apurvsharma2108/data_structures","sub_path":"dsa 450/arrays/stock_buy_sell.py","file_name":"stock_buy_sell.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"34981295854","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 29 13:10:35 2022\n\n@author: Ola\n\"\"\"\n\nimport numpy as np\nfrom proj import *\n\nplik = \"wsp_inp.txt\"\ntablica = np.genfromtxt(plik, delimiter=',', skip_header = 4)\n\nT_grs80 = Transformacje(model = \"grs80\")\nwsp_out = f'Konwersja współrzędnych geodezyjnych \\\\ Aleksandra Skolimowska\\n'\n\nwhile True:\n print('\\nWybierz opcję')\n print('\\nA - Transformacja współrzędnych ortokartezjańskich X, Y, Z na współrzędne geodezyjne fi, lambda, h')\n print('B - Transformacja współrzędnych geodezyjnych fi, lambda, h na współrzędne ortokartezjańskie X, Y, Z')\n print('C - Transformacja współrzednych geodezyjnych do układu 1992')\n print('D - Transformacja współrzednych geodezyjnych do układu 2000')\n print('E - Transformacja współrzędnych ortokartezjańskich X, Y, Z na współrzędne 
topocentryczne N, E, U')\n print('F - Obliczenie odległosci 2D i 3D')\n print('G - Obliczenie kąta azymutu i kąta elewacji')\n print('H - Zakończ')\n \n opcja = input('Wybierz opcję = ')\n \n if opcja == 'H':\n break\n \n elif opcja == 'A':\n print('\\nTransformacja współrzędnych ortokartezjańskich X, Y, Z na współrzędne geodezyjne fi, lambda, h')\n print(' fi | lambda | h')\n wsp_out += f' fi | lam | h \\n'\n blh = []\n for i in range(len(tablica)):\n b, l, h = T_grs80.xyz2blh_hirvonen(tablica[i][0], tablica[i][1], tablica[i][2])\n blh.append([b, l, h])\n blh = np.array(blh)\n for i in range(len(blh)):\n wsp_out += f'{blh[i][0]} | {blh[i][1]} | {blh[i][2]} \\n'\n print(blh)\n \n elif opcja == 'B':\n print('\\nTransformacja współrzędnych geodezyjnych fi, lambda, h na współrzędne ortokartezjańskie X, Y, Z')\n print(' X | Y | Z')\n wsp_out += f' X | Y | Z \\n'\n XYZ = []\n for i in range(len(tablica)):\n b, l, h = T_grs80.xyz2blh_hirvonen(tablica[i][0], tablica[i][1], tablica[i][2])\n X, Y, Z = T_grs80.blh2xyz_odwrotny(b, l, h)\n XYZ.append([X, Y, Z])\n XYZ = np.array(XYZ)\n for i in range(len(XYZ)):\n wsp_out += f'{XYZ[i][0]} | {XYZ[i][1]} | {XYZ[i][2]} \\n'\n print(XYZ)\n \n elif opcja == 'C':\n print('\\nTransformacja współrzednych geodezyjnych do układu 1992')\n print(' x92 | y92 ')\n wsp_out += f' x92 | y92 \\n'\n u92 = []\n for i in range(len(tablica)):\n b, l, h = T_grs80.xyz2blh_hirvonen(tablica[i][0], tablica[i][1], tablica[i][2])\n x92, y92 = T_grs80.u1992(b, l)\n u92.append([x92, y92])\n u92 = np.array(u92)\n for i in range(len(u92)):\n wsp_out += f'{u92[i][0]} | {u92[i][1]} \\n'\n print(u92)\n \n elif opcja == 'D':\n print('\\nTransformacja współrzednych geodezyjnych do układu 2000')\n print(' x00 | y00 ')\n wsp_out += f' x00 | y00 \\n'\n u00 = []\n for i in range(len(tablica)):\n b, l, h = T_grs80.xyz2blh_hirvonen(tablica[i][0], tablica[i][1], tablica[i][2])\n x00, y00 = T_grs80.u2000(b, l)\n u00.append([x00, y00])\n u00 = np.array(u00)\n for i in range(len(u00)):\n wsp_out += f'{u00[i][0]} | {u00[i][1]} \\n'\n print(u00)\n \n elif opcja == 'E':\n print('\\nTransformacja współrzędnych ortokartezjańskich X, Y, Z na współrzędne topocentryczne N, E, U')\n print(' N | E | U')\n wsp_out += f' N | E | U \\n'\n NEU = []\n X0 = 0\n Y0 = 0\n Z0 = 0\n for i in range(len(tablica)):\n n,e,u = T_grs80.NEU(X0, Y0, Z0, tablica[i][0], tablica[i][1],tablica[i][2])\n NEU.append([n,e,u])\n NEU = np.array(NEU)\n for i in range(len(NEU)):\n wsp_out += f'{NEU[i][0]} | {NEU[i][1]} | {NEU[i][2]} \\n'\n \n print(NEU)\n \n elif opcja == 'F':\n print('\\nObliczenie odległosci 2D i 3D')\n print(' 2D | 3D ')\n wsp_out += f' 2D | 3D \\n'\n X0 = 0\n Y0 = 0\n Z0 = 0\n d2_d3 = []\n for i in range(len(tablica)):\n d2, d3 = T_grs80.odl_2d_3d(X0, Y0, Z0, tablica[i][0], tablica[i][1],tablica[i][2])\n d2_d3.append([d2, d3])\n d2_d3 = np.array(d2_d3)\n for i in range(len(d2_d3)):\n wsp_out += f'{d2_d3[i][0]} | {d2_d3[i][1]} \\n'\n print(d2_d3)\n \n elif opcja == 'G':\n print('\\nObliczenie kąta azymutu i kąta elewacji')\n print(' Azymut | Elewacja ')\n wsp_out += f' Azymut | Elewacja \\n'\n X0 = 0\n Y0 = 0\n Z0 = 0\n az_el = []\n for i in range(len(tablica)):\n az, el = T_grs80.azym_elew(X0, Y0, Z0, tablica[i][0], tablica[i][1],tablica[i][2])\n az_el.append([az, el])\n az_el = np.array(az_el)\n for i in range(len(az_el)):\n wsp_out += f'{az_el[i][0]} | {az_el[i][1]} \\n'\n print(az_el)\n else:\n print('\\nPodano złą opcję, wybierz jeszcze raz.')\n\n \nzapis_plik = open('wsp_out.txt', 
'a')\nzapis_plik.write(wsp_out)\nzapis_plik.close()\n","repo_name":"askol01/projekt1","sub_path":"projekt.py","file_name":"projekt.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"26134192596","text":"import pandas as pd\n\n\ndf = pd.read_excel(\"10m50sc_pinout.xls\",converters={'Bank Number':str, 'E144 (2)':int})\ndf.style.hide_index()\n#print(df)\nbank_arr = [\"1A\",\"1B\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\"]\n\n#bank = \"1A\"\nbank=input(\"input bank: {1A,1B,2,3,4,5,6,7,8}\\n\")\nfiltered_bank = df.loc[df['Bank Number'] == bank]\nfiltered_cols = filtered_bank[[\"Bank Number\",\"E144 (2)\"]]\n\nnum_of_pins = filtered_cols.shape[0]\nfiltered_cols = filtered_cols.to_string(index=False)\nprint(filtered_cols)\nprint(\"# of pins: {}\".format(num_of_pins))\n\n\n\n","repo_name":"chengyanwu/SLAB_Mousecam","sub_path":"DataSheets/gen_FPGA_symbols.py","file_name":"gen_FPGA_symbols.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"37297824490","text":"from random import sample\nfrom algorithms.collaborative_filtering.neighborhood import NeighborhoodCF\nfrom data_structures import DynamicArray\nfrom utils import knn\n\n\nclass Clustering(NeighborhoodCF):\n \"\"\"\n Description\n Clustering algorithm which extends NeighborhoodCF.\n \"\"\"\n def __init__(\n self, neighbors=[], n_neighbors=5, treshold=0.5, clusters=[],\n centroids=[], cluster_map=[]):\n \"\"\"\n Description\n Clustering's constructor.\n\n Arguments\n :param neighbors: The neighborhood model.\n :type neighbors: list\n :param treshold: A minimum similarity which pairs need to have for\n clusters.\n :type treshold: float\n :param clusters: The cluster model.\n :type clusters: list\n :param centroids: The centroids model.\n :type centroids: list\n :param cluster_map: The inverted index of elements to their cluster\n :type cluster_map: dictionary\n \"\"\"\n self.th = treshold\n self.centroids = self._init_model(centroids, self._init_centroids)\n self.clusters = self._init_model(clusters, self._init_clusters)\n self.cluster_map = self._init_model(\n cluster_map, self._init_cluster_map)\n super().__init__(neighbors, n_neighbors)\n\n def _init_centroids(self, elements):\n \"\"\"\n Description\n A function which computes and returns an initial centroid.\n\n Arguments\n :param elements: The candidates to centroids.\n :type elements: set\n \"\"\"\n if len(elements) == 0:\n return []\n return sample(elements, 1)\n\n def _init_clusters(self, elements):\n \"\"\"\n Description\n A function which computes and returns the initial cluster.\n\n Arguments\n :param elements: The set to form clusters by.\n :type elements: set\n \"\"\"\n clusters = [set() for centroid in self.centroids]\n for element in elements:\n sims = [self.similarity_between(\n element, centroid) for centroid in self.centroids]\n max_sim = max(sims)\n if max_sim < self.th:\n self.centroids.append(element)\n clusters.append({element})\n else:\n centroid_index = sims.index(max_sim)\n clusters[centroid_index].add(element)\n return clusters\n\n def _init_cluster_map(self, elements):\n \"\"\"\n Description\n A function which computes and returns an inverted index\n which maps elements to their clusters.\n\n Arguments\n :param elements: The set to form the inverted index by.\n :type elements: set\n \"\"\"\n cluster_map = dict()\n for element in elements:\n for index, cluster in 
enumerate(self.clusters):\n if element in cluster:\n cluster_map[element] = index\n break\n return cluster_map\n\n def _init_neighborhood(self):\n \"\"\"\n Description\n A function which computes and returns the neighborhood\n model which is a DynamicArray object.\n \"\"\"\n neighbors = DynamicArray(\n default_value=lambda: DynamicArray(default_value=lambda: list()))\n for cluster in self.clusters:\n cluster_neighborhood = self._init_neighborhood_cluster(cluster)\n neighbors.append(cluster_neighborhood)\n return neighbors\n\n def _init_neighborhood_cluster(self, candidate_set):\n \"\"\"\n Description\n A function which computes and returns the neighborhood\n for a cluster which is a DynamicArray object.\n\n Argument\n :param candidate_set: The cluster.\n :type candidate_set: DynamicArray\n \"\"\"\n neighbors = DynamicArray(\n [self._neighborhood(\n ide, candidate_set\n ) for ide in candidate_set], default_value=lambda: list())\n return neighbors\n\n def _neighborhood(self, ident, candidate_set):\n \"\"\"\n Description\n A function which computes and returns the neighborhood\n of an element inside a cluster which is a DynamicArray object.\n\n Argument\n :param ident: The element to calculate the neighborhood for.\n :type ident: int\n :param candidate_set: The cluster.\n :type candidate_set: DynamicArray\n \"\"\"\n candidates = candidate_set.difference({ident})\n return knn(ident, candidates, self.n_neighbors,\n self.similarity_between)\n\n def neighborhood_of(self, identifier):\n \"\"\"\n Description\n A function which returns the neighborhood of an\n element.\n\n Argument\n :param ident: Element of which we want to return the neighborbood.\n :type ident: int\n \"\"\"\n try:\n cluster_index = self.cluster_map[identifier]\n position = list(self.clusters[cluster_index]).index(identifier)\n return self.neighbors[cluster_index][position]\n except KeyError:\n return []\n\n def increment(self, identifier):\n \"\"\"\n Description\n A function which increments the current cluster model\n for a new entry.\n\n Arguments\n :param identifier: An element of a rating.\n :type identifier: int\n \"\"\"\n sims = [self.similarity_between(\n identifier, centroid) for centroid in self.centroids]\n try:\n max_sim = max(sims)\n except ValueError:\n max_sim = 0\n if max_sim < self.th:\n self.centroids.append(identifier)\n self.clusters.append({identifier})\n self.cluster_map[identifier] = len(self.clusters) - 1\n else:\n centroid_index = sims.index(max_sim)\n self.clusters[centroid_index].add(identifier)\n self.cluster_map[identifier] = centroid_index\n cluster = self.clusters[centroid_index]\n self.neighbors[centroid_index] = self._init_neighborhood_cluster(\n cluster)\n","repo_name":"andre-b-fernandes/FEUP-DISS","sub_path":"algorithms/collaborative_filtering/neighborhood/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":6273,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"} +{"seq_id":"44540727500","text":"# DATA\ndataset='Tusimple'\ndata_root = '../tusimple/'\n\n# TRAIN\nepoch = 100\nbatch_size = 32\noptimizer = 'Adam' #['SGD','Adam']\n# learning_rate = 0.1\nlearning_rate = 4e-4\nweight_decay = 1e-4\nmomentum = 0.9\nforce_lr_value = False # custom\n\nscheduler = 'cos' #['multi', 'cos']\n# steps = [50,75]\ngamma = 0.1\nwarmup = 'linear'\nwarmup_iters = 100\n\n# NETWORK\nbackbone = '26'\ngriding_num = 100\nuse_aux = True\nbackbone_checkpoint = 'checkpoints RedNet/rednet26-4948f75f.pth' # custom\nfinetune_backbone = False # 
custom\n\n# LOSS\nsim_loss_w = 1.0\nshp_loss_w = 0.0\n\n# EXP\nnote = ''\n\nlog_path = '../checkpoints/tusimple/'\n\n# FINETUNE or RESUME MODEL PATH\nfinetune = None\nresume = None\n\n# TEST\ntest_model = None\ntest_work_dir = None\n\nnum_lanes = 4","repo_name":"stdrr/AML-Final_Project","sub_path":"code/UltraFastLaneDetection/configs/tusimple.py","file_name":"tusimple.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"73162441356","text":"from copy import deepcopy\nfrom unittest import TestCase\n\nfrom hamcrest import is_\nfrom hamcrest.core import assert_that\n\n\nclass Heap:\n def __init__(self, items=None, comparator=int.__gt__):\n self._data = [] if items is None else deepcopy(items)\n self._comparator = comparator\n for i in range(len(self._data) // 2 - 1, -1, -1):\n self._heapify_down(i)\n\n def size(self) -> int:\n return len(self._data)\n\n def __len__(self):\n return self.size()\n\n def __getitem__(self, item):\n return self._data[item]\n\n def add(self, item) -> None:\n self._data.append(item)\n self._heapify_up()\n\n def pop(self):\n result = self._data[0]\n self._data[0] = self._data[-1]\n del self._data[-1]\n self._heapify_down(0)\n return result\n\n def _heapify_up(self, index=None):\n if index is None:\n index = len(self._data) - 1\n if index > 0:\n parent = (index - 1) // 2\n if index < len(self) and self._comparator(self[index], self[parent]):\n self._data[parent], self._data[index] = self[index], self[parent]\n self._heapify_up(parent)\n\n def _heapify_down(self, index=0):\n left = index * 2 + 1\n right = index * 2 + 2\n largest = index\n if left < len(self) and self._comparator(self[left], self[index]):\n largest = left\n if right < len(self) and self._comparator(self[right], self[largest]):\n largest = right\n if largest != index:\n self._data[largest], self._data[index] = self[index], self[largest]\n self._heapify_down(largest)\n\n\nclass TestHeap(TestCase):\n def test_size_of_empty_heap(self):\n heap = Heap()\n assert_that(heap.size(), is_(0))\n assert_that(len(heap), is_(0))\n\n def test_add_item(self):\n self._assert_heap_add([10], [10])\n self._assert_heap_add([10, 9], [10, 9])\n self._assert_heap_add([9, 10], [10, 9])\n self._assert_heap_add([10, 9, 8], [10, 9, 8])\n self._assert_heap_add([8, 9, 10], [10, 8, 9])\n self._assert_heap_add([10, 9, 8, 7], [10, 9, 8, 7])\n self._assert_heap_add([7, 8, 9, 10], [10, 9, 8, 7])\n self._assert_heap_add([7, 8, 9, 10, 11], [11, 10, 8, 7, 9])\n\n def _assert_heap_add(self, input, expected):\n assert_that(self._build_heap(input)._data, is_(expected))\n\n def _build_heap(self, input):\n heap = Heap()\n for item in input:\n heap.add(item)\n return heap\n\n def test_getitem(self):\n heap = self._build_heap([3, 2, 1])\n self.assertRaises(TypeError, lambda: heap['asdf'])\n assert_that(heap[0], is_(3))\n assert_that(heap[1], is_(2))\n assert_that(heap[2], is_(1))\n assert_that(heap[-1], is_(1))\n\n def test_pop_item(self):\n self._assert_heap_pop([5, 4, 2, 3, 1], [4, 3, 2, 1])\n self._assert_heap_pop([7, 5, 6, 4, 3, 2, 1], [6, 5, 2, 4, 3, 1])\n\n def _assert_heap_pop(self, heap_data, expected_after_pop):\n heap = Heap()\n heap._data = deepcopy(heap_data)\n max_item = heap.pop()\n assert_that(heap._data, is_(expected_after_pop))\n assert_that(max_item, is_(heap_data[0]))\n\n def test_build_heap_from_list(self):\n heap = Heap([1, 2, 3, 4, 5])\n assert_that(heap._data, is_([5, 4, 3, 1, 2]))\n","repo_name":"csfulop/algorithms_python","sub_path":"algorithms/data_structure/test_heap.py","file_name":"test_heap.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16236055065","text":"#!/usr/bin/env python3\nimport os\nfrom zipfile import ZipFile, ZIP_DEFLATED\n\n\ndef main():\n with ZipFile('anki_roam_import.ankiaddon', mode='w', compression=ZIP_DEFLATED) as zip_file:\n for path in '__init__.py', 'anki_roam_import', 'config.json', 'config.md', 'manifest.json':\n add_to_zip(zip_file, path)\n\n\ndef add_to_zip(zip_file: ZipFile, path: str) -> None:\n if '__pycache__' in path:\n return\n\n zip_file.write(path)\n\n if os.path.isdir(path):\n for child_path in os.listdir(path):\n add_to_zip(zip_file, os.path.join(path, child_path))\n\n\nmain()\n","repo_name":"gmcmanus/anki-roam-import","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"28"} +{"seq_id":"4277106002","text":"import numpy as np\nimport math\n\ndef shuffle(X, y, seed=None):\n \"\"\"\n Shuffles the batch from a given set of datapoints and labels.\n \"\"\"\n if seed is not None:\n np.random.seed(seed)\n\n index = np.arange(X.shape[0])\n np.random.shuffle(index)\n\n return X[index], y[index]\n\n\ndef batch_iterator(X, y=None, batch_size=32):\n \"\"\"\n Batch generator\n\n Args:\n - X: X data\n - y: labels for each X data\n - batch_size: Batch size you want to generate. Defaults to 32\n \"\"\"\n number_of_samples = X.shape[0]\n\n for i in np.arange(0, number_of_samples, batch_size):\n start, end = i, min(i + batch_size, number_of_samples)\n if y is not None:\n yield X[start:end], y[start:end]\n else:\n yield X[start:end]\n\n\ndef to_categorical(x, n_col=None):\n \"\"\"\n Performs one-hot encoding for the data labels\n\n Args:\n - ``X``: Numpy Array containing your data points\n - ``n_col``: Number of columns for your data. 
If not explicitly mentioned, it's automatically calculated.\n\n Example::\n >>> import numpy as np\n >>> def to_categorical(x, n_col=None):\n >>> if not n_col:\n >>> n_col = np.amax(x) + 1\n\n >>> one_hot = np.zeros((x.shape[0], n_col))\n >>> one_hot[np.arange(x.shape[0]), x] = 1\n >>> return one_hot\n\n >>> x = np.array([2, 3, 4, 1, 2, 3])\n >>> z = to_categorical(x, 5)\n >>> print(z)\n\n >>> x = np.array([1, 2, 3, 4, 6])\n >>> z = to_categorical(x, 7)\n >>> print(z)\n \"\"\"\n if not n_col:\n n_col = np.amax(x) + 1\n\n one_hot = np.zeros((x.shape[0], n_col))\n one_hot[np.arange(x.shape[0]), x] = 1\n return one_hot\n\n\ndef diag(x):\n \"\"\"\n Vector to diagonal matrix conversion.\n \"\"\"\n diagonal = np.zeros((len(x), len(x)))\n for i in range(len(diagonal[0])):\n diagonal[i, i] = x[i]\n\n return diagonal\n\n\ndef train_test_split(X, y, test_size=0.5, makeshuffle=True, seed=None):\n \"\"\"\n Make train test split on dataset.\n\n Args:\n - ``X``: X dataset of numpy array\n - ``y``: y labels for that array\n - ``test_size``: How much percentage of data you want for your test dataset\n - ``makeshuffle``: do you want to shuffle the data before splitting?\n - ``seed``: mention a random seed for reproducing results\n\n Returns:\n ``X_train``, ``X_test``, ``y_train``, ``y_test``\n\n \"\"\"\n if makeshuffle:\n X, y = shuffle(X, y, seed)\n\n split_i = len(y) - int(len(y) // (1 / test_size))\n X_train, X_test = X[:split_i], X[split_i:]\n y_train, y_test = y[:split_i], y[split_i:]\n\n return X_train, X_test, y_train, y_test\n\n\ndef euclidean_distance(x1, x2):\n \"\"\"\n Calculates the l2 distance between two vectors\n \"\"\"\n distance = 0\n # Squared distance between each coordinate\n for i in range(len(x1)):\n distance += pow((x1[i] - x2[i]), 2)\n return math.sqrt(distance)\n\n\ndef normalize(X, axis=-1, order=2):\n \"\"\" Normalize the dataset X \"\"\"\n l2 = np.atleast_1d(np.linalg.norm(X, order, axis))\n l2[l2 == 0] = 1\n return X / np.expand_dims(l2, axis)\n","repo_name":"theroyakash/AKDPRFramework","sub_path":"AKDPRFramework/utils/dataops.py","file_name":"dataops.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"21906524039","text":"import json\nimport random\nimport os\nimport time\nimport base64\nfrom pysimplebase import SimpleBase, DBSession\nimport requests\nfrom ru.travelfood.simple_ui import NoSQL as noClass\n\n\nBASIC_BIRDS = [\n {\n \"name\": \"tucan\",\n \"feathers_color\": \"rainbow\"\n },\n {\n \"name\": \"parrot\",\n \"feathers_color\": \"red\"\n },\n {\n \"name\": \"eagle\",\n \"feathers_color\": \"desert\"\n },\n {\n \"name\": \"stork\",\n \"feathers_color\": \"white\"\n },\n {\n \"name\": \"cock\",\n \"feathers_color\": \"pink\"\n },\n {\n \"name\": \"jackdaw\",\n \"feathers_color\": \"black\"\n },\n {\n \"name\": \"albatross\",\n \"feathers_color\": \"white\"\n },\n {\n \"name\": \"rook\",\n \"feathers_color\": \"black\"\n },\n {\n \"name\": \"goose\",\n \"feathers_color\": \"white\"\n },\n {\n \"name\": \"lark\",\n \"feathers_color\": \"white\"\n },\n {\n \"name\": \"woodpecker\",\n \"feathers_color\": \"black\"\n }\n]\n\n\ndef customcards_on_open(hashMap, _files=None, _data=None):\n\n j = {\"customcards\": {\n \"options\": {\n \"search_enabled\": True,\n \"save_position\": True\n },\n \"layout\": {\n \"type\": \"LinearLayout\",\n \"orientation\": \"vertical\",\n \"height\": \"match_parent\",\n \"width\": \"match_parent\",\n \"weight\": \"0\",\n \"Elements\": [\n {\n 
\"type\": \"LinearLayout\",\n \"orientation\": \"horizontal\",\n \"height\": \"wrap_content\",\n \"width\": \"match_parent\",\n \"weight\": \"0\",\n \"Elements\": [\n {\n \"type\": \"LinearLayout\",\n \"orientation\": \"vertical\",\n \"height\": \"wrap_content\",\n \"width\": \"match_parent\",\n \"weight\": \"1\",\n \"Elements\": [\n {\n \"type\": \"TextView\",\n \"show_by_condition\": \"\",\n \"Value\": \"@string1\",\n \"NoRefresh\": False,\n \"document_type\": \"\",\n \"mask\": \"\",\n \"Variable\": \"\"\n },\n {\n \"type\": \"TextView\",\n \"show_by_condition\": \"\",\n \"Value\": \"@string2\",\n \"NoRefresh\": False,\n \"document_type\": \"\",\n \"mask\": \"\",\n \"Variable\": \"\"\n }\n ]\n }\n ]\n },\n {\n \"type\": \"TextView\",\n \"show_by_condition\": \"\",\n \"Value\": \"@descr\",\n \"NoRefresh\": False,\n \"document_type\": \"\",\n \"mask\": \"\",\n \"Variable\": \"\",\n \"TextSize\": \"-1\",\n \"TextColor\": \"#6F9393\",\n \"TextBold\": False,\n \"TextItalic\": True,\n \"BackgroundColor\": \"\",\n \"width\": \"wrap_content\",\n \"height\": \"wrap_content\",\n \"weight\": 0\n }\n ]\n }\n\n }\n }\n\n j[\"customcards\"][\"cardsdata\"] = []\n ncl = noClass(\"birds_nosql\")\n birds = json.loads(ncl.get(\"birds\"))\n for bird in birds:\n\n unit = {\n \"key\": bird[\"name\"],\n\n \"descr\": bird[\"name\"],\n \"val\": bird[\"name\"],\n \"string1\": bird[\"name\"],\n \"string2\": bird[\"feathers_color\"]\n }\n\n j[\"customcards\"][\"cardsdata\"].append(unit)\n\n if not hashMap.containsKey(\"cards\"):\n hashMap.put(\"cards\", json.dumps(\n j, ensure_ascii=False).encode('utf8').decode())\n\n return hashMap\n\n\ndef customcards_touch(hashMap, _files=None, _data=None):\n hashMap.put(\"toast\", \"res=\"+str(hashMap.get(\"listener\")+\"/\" +\n str(hashMap.get(\"layout_listener\"))\n + \"/\"+str(hashMap.get(\"card_data\"))))\n return hashMap\n\n\ndef refresh_nosql_bd(hashMap, _files=None, _data=None):\n if hashMap.get(\"listener\") == \"refresh_button\":\n ncl = noClass(\"birds_nosql\")\n\n url = 'http://127.0.0.1:5000/birds'\n\n try:\n response = requests.get(url, timeout=2)\n if response.status_code == 200:\n json_data = response.json()\n else:\n json_data = []\n except Exception as exc:\n hashMap.put(\"toast\", f\"{str(exc)} external data isn't availaible\")\n return hashMap\n\n for bird in json_data:\n ncl.put(\"birds\", json.dumps(bird, ensure_ascii=False), True)\n hashMap.put(\"toast\", str(ncl.get(\"birds\")))\n\n return hashMap\n\n\ndef birds_on_create(hashMap, _files=None, _data=None):\n if not hashMap.containsKey(\"bname\"):\n hashMap.put(\"bname\", \"Например: parrot\")\n if not hashMap.containsKey(\"bfeathers_color\"):\n hashMap.put(\"bfeathers_color\", \"Например: orange\")\n return hashMap\n\n\ndef input_new_bird(hashMap, _files=None, _data=None):\n ncl = noClass(\"birds_nosql\")\n bird_data = {\"name\": hashMap.get(\"bname\"),\n \"feathers_color\": hashMap.get(\"bfeathers_color\")}\n if hashMap.get(\"listener\") == \"accept_inp_bird\":\n ncl.put(\"birds\", json.dumps(bird_data,\n ensure_ascii=False),\n True)\n try:\n url = 'http://127.0.0.1:5000/birds'\n requests.post(url, json=bird_data, timeout=2)\n except Exception as exc:\n hashMap.put(\"toast\", f\"{str(exc)} external data isn't availaible\")\n return hashMap\n\n hashMap.put(\"toast\", str(ncl.get(\"birds\")))\n return 
hashMap\n","repo_name":"keyayeten/birds_management","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"73442156555","text":"#1.kutuphaneler\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sbn\r\n\r\n#tenis oynanır yada oynanmaz\r\nveriler = pd.read_csv(\"odev_tenis.txt\")\r\n\r\ntemp = veriler.iloc[:,1:3].values\r\n#test\r\n\r\nfrom sklearn import preprocessing\r\n\r\nveriler2 = veriler.apply(preprocessing.LabelEncoder().fit_transform)# bütün verileri otomatik encod eder yani 0 1\r\n\r\n#havanın label encode olmasını istemiyorduk bu yüzden ayırdık ve onehotencoding uyguladık\r\nhava = veriler2.iloc[:,:1]\r\n\r\nohe = preprocessing.OneHotEncoder()\r\nhava = ohe.fit_transform(hava).toarray()\r\nprint(hava)\r\n\r\n#numpy dizileri dataframe dönüşümleri\r\nhavadurumu = pd.DataFrame(data=hava, index = range(14), columns=['sunny','overcast','rainy'])\r\nsonveriler = pd.concat([havadurumu,veriler.iloc[:,1:3]],axis=1)\r\nsonveriler = pd.concat([veriler2.iloc[:,-2:],sonveriler],axis=1)\r\n\r\n#verilerin test için bölünmesi\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(sonveriler.iloc[:,:-1],sonveriler.iloc[:,-1:],test_size=0.33,random_state=0)\r\n\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\n\r\nregressor.fit(x_train,y_train)\r\n\r\ny_pred = regressor.predict(x_test)\r\n\r\n\r\n\r\n#GERİ ELEME (Backward elimination)\r\nimport statsmodels.api as sm\r\n\r\nX = np.append(arr = np.ones((14,1)).astype(int), values=sonveriler.iloc[:,:-1], axis=1) #axis = 1 kolon olarak eklemesini sağlar\r\n\r\nX_l = sonveriler.iloc[:,[0,1,2,3,4,5]].values\r\nX_l = np.array(X_l,dtype=float)\r\nmodel = sm.OLS(sonveriler.iloc[:,-1:],X_l).fit()\r\nprint(model.summary()) #Rapor\r\n#rapordaki P>|t| değerlerine bakıyoruz ve en yüksek p değerine sahip olanı elicez çünkü modeli kötü etkiliyor\r\n\r\nsonveriler = sonveriler.iloc[:,1:]\r\n\r\nX = np.append(arr = np.ones((14,1)).astype(int), values=sonveriler.iloc[:,:-1], axis=1) #axis = 1 kolon olarak eklemesini sağlar\r\n\r\nX_l = sonveriler.iloc[:,[0,1,2,3,4]].values\r\nX_l = np.array(X_l,dtype=float)\r\nmodel = sm.OLS(sonveriler.iloc[:,-1:],X_l).fit()\r\nprint(model.summary()) #Rapor\r\n\r\nx_train = x_train.iloc[:,1:]\r\nx_test = x_test.iloc[:,1:]\r\n\r\nregressor.fit(x_train,y_train)\r\n\r\ny_pred = regressor.predict(x_test)\r\n","repo_name":"berkayberatsonmez/Prediction","sub_path":"Multiple Linear Regression/odevtenis.py","file_name":"odevtenis.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"8939739499","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 13 21:34:55 2022.\n\n@author: fabian\n\"\"\"\n\n\nimport logging\nimport tempfile\nfrom dataclasses import dataclass\n\nfrom linopy.io import read_netcdf\n\nparamiko_present = True\ntry:\n import paramiko\nexcept ImportError:\n paramiko_present = False\nlogger = logging.getLogger(__name__)\n\ncommand = \"\"\"\nimport linopy\n\nm = linopy.read_netcdf(\"{model_unsolved_file}\")\nm.solve({solve_kwargs})\nm.to_netcdf(\"{model_solved_file}\")\n\"\"\"\n\n\n@dataclass\nclass RemoteHandler:\n \"\"\"\n Handler class for solving models on a remote machine via an SSH connection.\n\n 
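    In paramiko terms the round trip boils down to a put/exec/get cycle. A
    compact sketch with placeholder host and paths, shown for orientation
    only (the class itself drives an interactive shell, as described below)::

        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.connect("host.example.com", username="me")
        sftp = client.open_sftp()
        sftp.put("model.nc", "/tmp/model.nc")            # ship the model
        _, out, _ = client.exec_command("python /tmp/solve.py")
        assert out.channel.recv_exit_status() == 0       # wait for the solve
        sftp.get("/tmp/solved.nc", "solved.nc")          # fetch the result
        client.close()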
The basic idea of the handler is to provide a workflow that:\n\n 1. defines a model on the local machine\n 2. saves it to a file on the local machine\n 3. copies that file to the remote machine\n 4. loads, solves and writes out the model, all on the remote machine\n 5. copies the solved model to the local machine\n 6. loads the solved model on the local machine\n\n\n The Handler opens an interactive shell in which the commands are executed.\n All standard outputs of the remote are directly displayed in the local prompt.\n You can directly set a connected SSH client for the RemoteHandler if you\n don't want to use the default connection parameters `host`, `username` and\n `password`.\n\n If the SSH keys are stored in a default location, the keys are autodetected\n and the RemoteHandler does not require a password argument.\n\n Parameters\n ----------\n hostname : str\n Name of the server to connect to. This is used if client is None.\n port : int\n The server port to connect to. This is used if client is None.\n username : str\n The username to authenticate as (defaults to the current local username).\n This is used if client is None.\n password : str\n Used for password authentication; is also used for private key\n decryption. Not necessary if ssh keys are auto-detectable.\n This is used if client is None.\n client : paramiko.SSHClient\n Already connected client to use instead of initializing a one with\n the above arguments.\n python_script : callable\n Format function which takes the arguments `model_unsolved_file`,\n `solve_kwargs` and `model_solved_files`. Defaults to\n `linopy.remote.command.format`, where `linopy.remote.command` is the\n string of the python command.\n python_executable : str\n Python executable to use on the remote machine.\n python_file : str\n Path where to store the python script on the remote machine.\n model_unsolved_file : str\n Path where to temporarily store the unsolved model on the local machine\n before copying it over.\n model_solved_file : str\n Path where to temporarily store the solved model on the remote machine.\n\n\n Example\n -------\n\n >>> import linopy\n >>> from linopy import Model\n >>> from numpy import arange\n >>> from xarray import DataArray\n >>>\n >>> N = 10\n >>> m = Model()\n >>> coords = [arange(N), arange(N)]\n >>> x = m.add_variables(coords=coords)\n >>> y = m.add_variables(coords=coords)\n >>> con1 = m.add_constraints(x - y >= DataArray(arange(N)))\n >>> con2 = m.add_constraints(x + y >= 0)\n >>> obj = m.add_objective((2 * x + y).sum())\n >>>\n >>> host = \"my-remote-machine.com\"\n >>> username = \"my-username\"\n >>> handler = linopy.remote.RemoteHandler(host, username=username) # doctest: +SKIP\n >>>\n >>> # optionally activate a conda environment\n >>> handler.execute(\"conda activate my-linopy-env\") # doctest: +SKIP\n >>>\n >>> m = handler.solve_on_remote(m) # doctest: +SKIP\n \"\"\"\n\n hostname: str = None\n port: int = 22\n username: str = None\n password: str = None\n client: \"paramiko.SSHClient\" = None\n\n python_script: callable = command.format\n python_executable: str = \"python\"\n python_file: str = \"/tmp/linopy-execution.py\"\n\n model_unsolved_file: str = \"/tmp/linopy-unsolved-model.nc\"\n model_solved_file: str = \"/tmp/linopy-solved-model.nc\"\n\n def __post_init__(self):\n assert paramiko_present, \"The required paramiko package is not installed.\"\n\n if self.client is None:\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.connect(self.hostname, self.port, self.username, 
self.password)\n self.client = client\n\n logger.info(\"Open interactive shell session.\")\n self.channel = self.client.invoke_shell()\n self.stdin = self.channel.makefile(\"wb\", -1)\n self.stdout = self.channel.makefile(\"r\", -1)\n self.stderr = self.channel.makefile(\"r\", -1)\n\n logger.info(\"Open an SFTP session on the SSH server\")\n self.sftp_client = self.client.open_sftp()\n\n def __del__(self):\n self.client.close()\n\n def write_python_file_on_remote(self, **solve_kwargs):\n \"\"\"\n Write the python file of the RemoteHandler on the remote machine under\n `self.python_file`.\n \"\"\"\n logger.info(f\"Saving python script at {self.python_file} on remote\")\n script_kwargs = dict(\n model_unsolved_file=self.model_unsolved_file,\n solve_kwargs=f\"**{solve_kwargs}\",\n model_solved_file=self.model_solved_file,\n )\n with self.sftp_client.open(self.python_file, \"w\") as fn:\n fn.write(self.python_script(**script_kwargs))\n\n def write_model_on_remote(self, model):\n \"\"\"\n Write a model on the remote machine under `self.model_unsolved_file`.\n \"\"\"\n logger.info(f\"Saving unsolved model at {self.model_unsolved_file} on remote\")\n with tempfile.NamedTemporaryFile(prefix=\"linopy\", suffix=\".nc\") as fn:\n model.to_netcdf(fn.name)\n self.sftp_client.put(fn.name, self.model_unsolved_file)\n\n def execute(self, cmd):\n \"\"\"\n Execute a shell command on the remote machine.\n \"\"\"\n cmd = cmd.strip(\"\\n\")\n self.stdin.write(cmd + \"\\n\")\n finish = \"End of stdout. Exit Status\"\n echo_cmd = f\"echo {finish} $?\"\n self.stdin.write(echo_cmd + \"\\n\")\n self.stdin.flush()\n\n print_stdout = False\n exit_status = 0\n for line in self.stdout:\n line = str(line).strip(\"\\n\").strip()\n if line.endswith(cmd):\n # up to now everything was login and stdin\n print_stdout = True\n elif line.startswith(finish):\n exit_status = int(line.rsplit(maxsplit=1)[1])\n break\n elif finish not in line and print_stdout:\n print(line)\n\n if exit_status:\n raise OSError(\"Execution on remote raised an error, see above.\")\n\n def solve_on_remote(self, model, **kwargs):\n \"\"\"\n Solve a linopy model on the remote machine.\n\n This function\n\n 1. saves the model to a file on the local machine.\n 2. copies that file to the remote machine.\n 3. loads, solves and writes out the model, all on the remote machine.\n 4. copies the solved model to the local machine.\n 5. 
loads and returns the solved model.\n\n Parameters\n ----------\n model : linopy.model.Model\n **kwargs :\n Keyword arguments passed to `linopy.model.Model.solve`.\n\n Returns\n -------\n linopy.model.Model\n Solved model.\n \"\"\"\n self.write_python_file_on_remote(**kwargs)\n self.write_model_on_remote(model)\n\n command = f\"{self.python_executable} {self.python_file}\"\n\n logger.info(\"Solving model on remote.\")\n self.execute(command)\n\n logger.info(\"Retrieve solved model from remote.\")\n with tempfile.NamedTemporaryFile(prefix=\"linopy\", suffix=\".nc\") as fn:\n self.sftp_client.get(self.model_solved_file, fn.name)\n solved = read_netcdf(fn.name)\n\n self.sftp_client.remove(self.python_file)\n self.sftp_client.remove(self.model_solved_file)\n\n return solved\n","repo_name":"PyPSA/linopy","sub_path":"linopy/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"28"} +{"seq_id":"18436776506","text":"\"\"\" Data processors and helpers \"\"\"\nimport csv\nimport json\nimport logging\nimport os\n\nimport html\nimport sys\n\nfrom tqdm import tqdm\nfrom transformers import glue_processors, glue_output_modes\nfrom transformers.file_utils import is_tf_available\nfrom transformers.data.processors.utils import DataProcessor, InputExample, InputFeatures\nfrom sklearn.model_selection import train_test_split\n\nsys.path.append(\"../../\")\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))\n\n\nif is_tf_available():\n import tensorflow as tf\n\nlogger = logging.getLogger(__name__)\n\n\ndef convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n):\n \"\"\"\n Loads a data file into a list of ``InputFeatures``\n\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: GLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n\n Returns:\n If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``\n containing the task-specific features. 
If the input is a list of ``InputExamples``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.\n\n \"\"\"\n is_tf_dataset = False\n if is_tf_available() and isinstance(examples, tf.data.Dataset):\n is_tf_dataset = True\n\n if task is not None:\n # processor = glue_processors[task]()\n processor = processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n len_examples = 0\n if is_tf_dataset:\n example = processor.get_example_from_tensor_dict(example)\n example = processor.tfds_map(example)\n len_examples = tf.data.experimental.cardinality(examples)\n else:\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,\n truncation = True)\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n # if ex_index < 5:\n # logger.info(\"*** Example ***\")\n # logger.info(\"guid: %s\" % (example.guid))\n # logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n # logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n # logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n # logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n InputFeatures(\n input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label\n )\n )\n\n if is_tf_available() and is_tf_dataset:\n\n def gen():\n for ex in features:\n yield (\n {\n \"input_ids\": ex.input_ids,\n \"attention_mask\": ex.attention_mask,\n \"token_type_ids\": ex.token_type_ids,\n },\n ex.label,\n )\n\n return tf.data.Dataset.from_generator(\n gen,\n ({\"input_ids\": tf.int32, \"attention_mask\": tf.int32, \"token_type_ids\": tf.int32}, tf.int64),\n 
(\n {\n \"input_ids\": tf.TensorShape([None]),\n \"attention_mask\": tf.TensorShape([None]),\n \"token_type_ids\": tf.TensorShape([None]),\n },\n tf.TensorShape([]),\n ),\n )\n\n return features\n\nclass Sst2Processor(DataProcessor):\n \"\"\"Processor for the SST-2 data set (GLUE version).\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence\"].numpy().decode(\"utf-8\"),\n None,\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n if not os.path.isfile(os.path.join(data_dir, \"train_42.json\")):\n self._train_dev_split(data_dir)\n with open(os.path.join(data_dir, \"train_42.json\")) as json_file:\n [X_train, y_train] = json.load(json_file)\n train_examples = self._create_examples(zip(X_train, y_train), \"train\")\n return train_examples\n # return self._create_examples(self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n if not os.path.isfile(os.path.join(data_dir, \"dev_42.json\")):\n self._train_dev_split(data_dir)\n with open(os.path.join(data_dir, \"dev_42.json\")) as json_file:\n [X_val, y_val] = json.load(json_file)\n dev_examples = self._create_examples(zip(X_val, y_val), \"dev\")\n return dev_examples\n # return self._create_examples(self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n if os.path.isfile(os.path.join(data_dir, \"test.json\")):\n with open(os.path.join(data_dir, \"test.json\")) as json_file:\n [X_test, y_test] = json.load(json_file)\n else:\n\n lines = self._read_tsv(os.path.join(data_dir, \"dev.tsv\"))\n X_test = []\n y_test = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n X_test.append(line[0])\n y_test.append(line[1])\n\n # Write the dev set into a json file (for this seed)\n with open(os.path.join(data_dir, \"test.json\"), \"w\") as f:\n json.dump([X_test, y_test], f)\n\n test_examples = self._create_examples(zip(X_test, y_test), \"test\")\n return test_examples\n\n def get_augm_examples(self, X, y):\n return self._create_examples(zip(X,y), \"augm\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"augm\":\n if type(line[0]) is not str:\n text_a = line[0][0]\n else:\n text_a = line[0]\n else:\n if i == 0:\n continue\n text_a = line[0]\n label = line[1]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n def _train_dev_split(self, data_dir, seed=42):\n \"\"\"Splits train set into train and dev sets.\"\"\"\n lines = self._read_tsv(os.path.join(data_dir, \"train.tsv\"))\n X = []\n Y = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n X.append(line[0])\n Y.append(line[1])\n\n X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.1, stratify=Y, random_state=seed)\n\n # Write the train set into a json file (for this seed)\n with open(os.path.join(data_dir, \"train_{}.json\".format(seed)), \"w\") as f:\n json.dump([X_train , Y_train], f)\n\n # Write the dev set into a json file (for this seed)\n with open(os.path.join(data_dir, \"dev_{}.json\".format(seed)), \"w\") as f:\n json.dump([X_val , Y_val], 
f)\n\n return\n\nglue_tasks = [\"sst-2\"]\n\nclass ImdbProcessor(DataProcessor):\n \"\"\"Processor for the IMDB data set.\"\"\"\n\n def get_example_from_tensor_dict(self, tensor_dict):\n \"\"\"See base class.\"\"\"\n return InputExample(\n tensor_dict[\"idx\"].numpy(),\n tensor_dict[\"sentence\"].numpy().decode(\"utf-8\"),\n None,\n str(tensor_dict[\"label\"].numpy()),\n )\n\n def _read_csv(self, input_file, quotechar=None):\n \"\"\"Reads a comma separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))\n\n def get_train_examples(self, data_dir):\n if not os.path.isfile(os.path.join(data_dir, \"train_42.json\")):\n self._train_dev_split(data_dir)\n with open(os.path.join(data_dir, \"train_42.json\")) as json_file:\n [X_train, y_train] = json.load(json_file)\n # X, Y = [], []\n # lines = self._read_csv(os.path.join(data_dir, \"train\", \"train.tsv\"))\n # for line in lines[1:]:\n # # X.append(\",\".join(line[1:]).rstrip())\n # X.append(\",\".join(line[:-1]).rstrip())\n # # Y.append(line[0])\n # Y.append(line[-1])\n train_examples = self._create_examples(zip(X_train, y_train), \"train\")\n return train_examples\n\n def get_dev_examples(self, data_dir):\n if not os.path.isfile(os.path.join(data_dir, \"dev_42.json\")):\n self._train_dev_split(data_dir)\n with open(os.path.join(data_dir, \"dev_42.json\")) as json_file:\n [X_val, y_val] = json.load(json_file)\n dev_examples = self._create_examples(zip(X_val, y_val), \"dev\")\n return dev_examples\n\n # def get_contrast_examples(self, file=None, ori=False, data_dir=IMDB_CONTR_DATA_DIR):\n # # if not os.path.isfile(os.path.join(data_dir, \"dev_42.json\")):\n # # self._train_dev_split(data_dir)\n # # with open(os.path.join(data_dir, \"dev_42.json\")) as json_file:\n # # [X_val, y_val] = json.load(json_file)\n # prefix='original' if ori else 'contrast'\n # X, Y = [], []\n # lines = self._read_csv(os.path.join(data_dir, \"{}_{}.tsv\".format(file, prefix)))\n # labelname2int={\"Positive\":\"1\", \"Negative\":\"0\"}\n # for i, line in enumerate(lines):\n # if i == 0:\n # continue\n # # X.append(\",\".join(line[1:]).rstrip())\n # X.append(\",\".join(line).rstrip().split('\\t')[1])\n # # Y.append(line[0])\n # Y.append(labelname2int[\",\".join(line).rstrip().split('\\t')[0]])\n # dev_examples = self._create_examples(zip(X, Y), \"{}_{}\".format(file, prefix))\n # return dev_examples\n\n def get_test_examples(self, data_dir):\n if os.path.isfile(os.path.join(data_dir, \"test.json\")):\n with open(os.path.join(data_dir, \"test.json\")) as json_file:\n [X_test, y_test] = json.load(json_file)\n else:\n\n X_test, y_test = [], []\n lines = self._read_csv(os.path.join(data_dir, \"test\", \"test.tsv\"))\n for line in lines[1:]:\n # X.append(\",\".join(line[1:]).rstrip())\n X_test.append(\",\".join(line[:-1]).rstrip())\n # Y.append(line[0])\n y_test.append(line[-1])\n\n # Write the test set into a json file\n with open(os.path.join(data_dir, \"test.json\"), \"w\") as f:\n json.dump([X_test, y_test], f)\n\n test_examples = self._create_examples(zip(X_test, y_test), \"test\")\n\n return test_examples\n\n def get_augm_examples(self, X, y):\n return self._create_examples(zip(X,y), \"augm\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the given data set.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n if 
set_type == \"augm\":\n if type(line) is not str:\n text_a = line[0][0]\n else:\n text_a = line[0]\n label = line[1]\n else:\n text_a, label = line\n # if dom != -1:\n # label = [label, str(dom)]\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n def _train_dev_split(self, data_dir, seed=42):\n \"\"\"Splits train set into train and dev sets.\"\"\"\n X, Y = [], []\n lines = self._read_csv(os.path.join(data_dir, \"train\", \"train.tsv\"))\n for line in lines[1:]:\n # X.append(\",\".join(line[1:]).rstrip())\n X.append(\",\".join(line[:-1]).rstrip())\n # Y.append(line[0])\n Y.append(line[-1])\n\n X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.1, stratify=Y, random_state=seed)\n\n # Write the train set into a json file (for this seed)\n with open(os.path.join(data_dir, \"train_{}.json\".format(seed)), \"w\") as f:\n json.dump([X_train , Y_train], f)\n\n # Write the dev set into a json file (for this seed)\n with open(os.path.join(data_dir, \"dev_{}.json\".format(seed)), \"w\") as f:\n json.dump([X_val , Y_val], f)\n\n return\n\nprocessors = {\n \"sst-2\": Sst2Processor,\n \"imdb\": ImdbProcessor,\n}\n\noutput_modes = {\n \"sst-2\": \"classification\",\n \"imdb\": \"classification\",\n}\n","repo_name":"kaushalrai7797/SCALe-Supervised-Contrastive-approach-for-Active-Learning","sub_path":"contrastive-active-learning-(Katerina et. al.)/utilities/preprocessors.py","file_name":"preprocessors.py","file_ext":"py","file_size_in_byte":16067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"15122951889","text":"import math\nimport numpy as np \nfrom numpy import linalg as lin\n\ndef dict_to_matrix(A, B):\n B.append(A)\n matrix = np.transpose(B)\n return matrix\n\ndef determinar_coordenadas(sistema):\n det_sistema = calcula_determinante(sistema, 3)\n b1 = calcula_determinante(sistema, 0)/det_sistema\n b2 = calcula_determinante(sistema, 1)/det_sistema\n b3 = calcula_determinante(sistema, 2)/det_sistema\n return b1,b2, b3\n\ndef calcula_determinante(matrix, index):\n matrix_aux = [['', '', ''], ['', '', ''], ['', '', '']]\n for i_index, i in enumerate(matrix):\n for j_index, j in enumerate(i):\n if(j_index == 3):\n continue\n elif(j_index == index):\n matrix_aux[i_index][j_index] = eval(matrix[i_index][3])\n else:\n matrix_aux[i_index][j_index] = eval(j)\n return np.linalg.det(np.array(matrix_aux))\n\ndef ler_entradas():\n A_aux = input('Insira aqui o vetor A, na forma (X;Y;Z): ').replace('(', '').replace(')', '').replace(' ', '')\n A = A_aux.split(';')\n B = []\n for i in range(3):\n vetor_aux = input(f'Insira o vetor B{i+1}, na forma(X;Y;Z): ').replace('(', '').replace(')', '').replace(' ', '')\n vetor_aux = vetor_aux.split(';')\n for index, element in enumerate(vetor_aux):\n vetor_aux[index] = f'{element}b{i+1}'\n B.append(vetor_aux)\n #print(vetor_aux)\n return(A, B)\n\ndef resolve_sistema():\n A, B = ler_entradas()\n #A, B = ['7', '8', '6'], [['2', '3', '7'],['18', '-1', '8'],['-5', '6', '9']]\n sistema = dict_to_matrix(A, B)\n b1, b2, b3 = determinar_coordenadas(sistema)\n #print(f'A[B] = [{b1} {b2} {b3}]')\n return b1, b2, b3\n\ndef getFuncoes():\n return [['Coordenadas do vetor', 'prova2.resolve_sistema()']]\n\nif __name__ == \"__main__\":\n 
resolve_sistema()\n","repo_name":"ebmeurer/algebra-linear","sub_path":"prova2.py","file_name":"prova2.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6907119093","text":"__metaclass__ = type\n\n__all__ = [\n 'ISourcePackageFormatSelection',\n 'ISourcePackageFormatSelectionSet',\n ]\n\nfrom zope.interface import (\n Attribute,\n Interface,\n )\n\n\nclass ISourcePackageFormatSelection(Interface):\n \"\"\"A source package format allowed within a DistroSeries.\"\"\"\n\n id = Attribute(\"ID\")\n distroseries = Attribute(\"Target series\")\n format = Attribute(\"Permitted source package format\")\n\n\nclass ISourcePackageFormatSelectionSet(Interface):\n \"\"\"Set manipulation tools for the SourcePackageFormatSelection table.\"\"\"\n\n def getBySeriesAndFormat(distroseries, format):\n \"\"\"Return the ISourcePackageFormatSelection for the given series and\n format.\"\"\"\n\n def add(distroseries, format):\n \"\"\"Allow the given source package format in the given series.\"\"\"\n","repo_name":"pombredanne/launchpad-3","sub_path":"lib/lp/soyuz/interfaces/sourcepackageformat.py","file_name":"sourcepackageformat.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"72894502795","text":"file = open(\"data/day6.txt\")\ndata = file.read()\nlines = [x for x in data.split('\\n')]\n\ntimes = lines[0].split()\ndist = lines[1].split()\n\np2_time, p2_dist = '', ''\nnum_ways = 1\nfor i in range(1, len(times)):\n # Part 1 calculation\n count = 0\n tmp_time = 0\n while tmp_time < int(times[i]):\n if(tmp_time * ((int(times[i])) - tmp_time) > int(dist[i])):\n count += 1\n tmp_time += 1\n num_ways *= count\n\n # Part 2 calculation\n p2_time = p2_time + times[i]\n p2_dist = p2_dist + dist[i]\n\ntmp_time = 0\ncount = 0\nwhile tmp_time < int(p2_time):\n if(tmp_time * ((int(p2_time)) - tmp_time) > int(p2_dist)):\n count += 1\n tmp_time += 1\n\nprint(f'Part 1 = {num_ways}')\nprint(f'Part 2 = {count}')\nfile.close()","repo_name":"the-bigmike/AdventOfCode","sub_path":"2023/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"37204968376","text":"import pandas as pd\nimport numpy as np\nimport os\nimport sys\n\n# obj_num = int(sys.argv[1])\n# filename_err = str(sys.argv[2])\nout_num = 0 #int(sys.argv[3])\n\ncc_dir = './dat/'\n# cross_object_cc_dir = '/mnt/c/Users/psyko/Physics/gamma-optical/2001/output/cross_object_gam+opt/'\ncc_file_num = 0\n\n# object_nums = np.array([7020,7021,7022])\nobject_nums = np.array([7030,7031,7032])\n\n\n# for ii in range(0,len(object_arr)):\n\t# obj_num = object_arr[ii]\t\n\nfor ii in range(0,len(object_nums)):\n\tobj_num = object_nums[ii]\n\t\n\t###\n\t###\n\t### Getting full time array\n\t###\n\t###\n\n\t# cross_corr_err_tag = 1\n\t# try:\n\n\tfilename_err = cc_dir+'object'+str(obj_num).zfill(4)+'_stats'+str(cc_file_num).zfill(3)+'.dat'\n\tread_in = np.loadtxt(filename_err)\n\n\ttime_err = read_in[0]\n\tdata_err = read_in[1:]\n\n\tcor_mean = data_err[0]\n\tcor_err_d = data_err[1]\n\tcor_err_u = data_err[2]\n\tprint(cor_mean[7500:7510],cor_err_d[7500:7510], cor_err_u[7500:7510])\n\t# cor_err_d = cor_mean-data_err[1]\n\t# cor_err_u = data_err[2]-cor_mean\n\n\tcor_err = (cor_err_d + cor_err_u)/2.\n\n\tcor_err[cor_mean == -1.1] = 1.\n\tcor_mean[cor_mean == -1.1] = 
0.\n\n\tcor_err[np.isnan(cor_mean)] = 1.\n\tcor_mean[np.isnan(cor_mean)] = 0.\n\t\t\n\t\t\n\t# except:\n\t\t# cross_corr_err_tag = 0\n\t\t# print('No cc for object '+str(obj_num))\n\t\t# exit()\n\t\t# # continue\n\n\n\t###\n\t###\n\t### Bayesian Block analysis\n\t###\n\t###\n\n\t## Log likelihood from SCARGLE et al. 2013 equation 41\n\tdef fitness(y, yerr):\n\t\ta_k = 1/2*np.sum(1/yerr**2)\n\t\tb_k = -np.sum(y/yerr**2)\n\t\treturn b_k**2/4/a_k\n\n\tncp_prior = 1.32+0.577*np.log10(len(cor_mean))\n\tprint('ncp_prior = '+str(ncp_prior))\n\n\tbest = np.zeros(len(cor_mean))\n\tlast = np.zeros(len(cor_mean))\n\n\tbest[0] = fitness(cor_mean[0], cor_err[0]) - ncp_prior\n\tlast[0] = 0\n\n\tfor ii in range(1,len(cor_mean)):\n\t\tA = np.zeros(ii+1)\n\t\tfor r in range(0,ii+1):\n\t\t\tA[r] = fitness(cor_mean[r:ii+1], cor_err[r:ii+1]) - ncp_prior\n\t\t\tif r == 0:\n\t\t\t\tA[r]+= 0\n\t\t\telse:\n\t\t\t\tA[r]+= best[r-1]\n\t\t# print(A)\n\t\tlast[ii] = A.argmax()\n\t\tbest[ii] = A.max()\n\t\t# print('last = '+str(last[ii]))\n\t\t# print('best = '+str(best[ii]))\n\t\t\n\t##find change points\n\tindex = int(last[-1])\n\tchange_points = np.array([index], dtype=int)\n\twhile index > 0:\n\t\tchange_points = np.append(change_points, int(last[index-1]))\n\t\tindex = int(last[index-1])\n\t\t\n\tchange_points = change_points[::-1]\n\tprint(change_points)\n\n\tdef norm_bin(y, yerr):\n\t\ta_k = 1/2*np.sum(1/yerr**2)\n\t\tb_k = -np.sum(y/yerr**2)\n\t\t# print(a_k,b_k)\n\t\treturn -b_k/2/a_k\n\n\tbin_norms = np.zeros(len(change_points))\n\tfor ii in range(0,len(change_points)):\n\t\tif ii == len(change_points)-1:\n\t\t\tbin_norms[ii] = norm_bin(cor_mean[change_points[ii]:], cor_err[change_points[ii]:])\n\t\telse:\n\t\t\tbin_norms[ii] = norm_bin(cor_mean[change_points[ii]:change_points[ii+1]], cor_err[change_points[ii]:change_points[ii+1]])\n\n\toutput_arr = np.array([change_points, bin_norms])\n\n\toutput_filename = './change_points/object'+str(obj_num).zfill(4)+'_cc_change_points_'+str(out_num).zfill(3)+'.dat'\n\tnp.savetxt(output_filename, output_arr)\n\tprint('Finished object '+str(obj_num))\n\n\n","repo_name":"bjbuckman/blazar_analysis","sub_path":"calc_cc_bayesian_block/calc_bayesian_block_cc_v1.py","file_name":"calc_bayesian_block_cc_v1.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"18264686286","text":"from core.views.menu import GetMenuPrincipal\nfrom fiche_famille.utils.utils_famille import LISTE_ONGLETS as LISTE_ONGLETS_FAMILLES\nfrom fiche_individu.utils.utils_individu import LISTE_ONGLETS as LISTE_ONGLETS_INDIVIDUS\n\n\ndef GetPermissionsPossibles(organisateur=None):\n \"\"\" List of the commands used to build the user permissions \"\"\"\n liste_permissions = []\n\n # Menu commands\n menu_principal = GetMenuPrincipal(organisateur=organisateur)\n for menu in menu_principal.GetChildren():\n for sous_menu in menu.GetChildren():\n for commande in sous_menu.GetChildren():\n liste_permissions.append((commande.code, \"%s | %s\" % (commande.parent.parent.titre, commande.titre)))\n\n # Family record\n for commande in LISTE_ONGLETS_FAMILLES:\n liste_permissions.append((\"famille_%s\" % commande[\"code\"], \"Fiche famille | %s\" % commande[\"label\"]))\n\n # Individual record\n for commande in LISTE_ONGLETS_INDIVIDUS:\n liste_permissions.append((\"individu_%s\" % commande[\"code\"], \"Fiche individuelle | %s\" % commande[\"label\"]))\n\n return 
liste_permissions\n\n","repo_name":"Noethys/Noethysweb","sub_path":"noethysweb/core/utils/utils_permissions.py","file_name":"utils_permissions.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"fr","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"30341578558","text":"#!/usr/bin/env python3\n\nimport sys\nimport logging\nimport json\n\n# import requests under python 2 or 3\ntry:\n import urllib.request as urllib_request # for Python 3\nexcept ImportError:\n import urllib2 as urllib_request # for Python 2\nimport gzip\n\ntry:\n from tqdm import tqdm, trange\nexcept:\n print(\"Please `pip3 install tqdm`.\")\n sys.exit(1)\n\nLOG_LEVELS = [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]\nDEFAULT_LOG_LEVEL = \"WARNING\"\n\nREQUEST_DEBUGGING = False\n\nif REQUEST_DEBUGGING:\n # These two lines enable debugging at httplib level (requests->urllib3->http.client)\n # You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.\n # The only thing missing will be the response.body which is not logged.\n try:\n import http.client as http_client\n except ImportError:\n # Python 2\n import httplib as http_client\n http_client.HTTPConnection.debuglevel = 1\n\n # You must initialize logging, otherwise you'll not see debug output.\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True\n\n\ndef get_json(an_url, log_level=3):\n req = urllib_request.Request(\n an_url,\n data=None,\n headers={\n \"accept-encoding\": \"gzip,deflate\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\",\n },\n )\n\n if log_level == 0:\n tqdm.write(\"Fetching %s... 
\" % an_url)\n\n response = urllib_request.urlopen(req)\n result = gzip.decompress(response.read()).decode(\"utf-8\")\n output = json.loads(result)\n return output\n\n\ndef get_push_pending_jobs(project, push_id, platform_filter=None, log_level=3):\n # TODO: check push health, if complete, we can exit here.\n\n ##### phase 2: get jobs for each push\n\n # data['results']\n\n # TODO: don't hardcode these (the field legend is provided at the end of all requests)\n # filtering for android-hw: fields 2, 15, 22\n # 2 is build platform\n # 15 is job_type_name (worker/test)\n # 22 is platform\n # - seems like they all work\n #\n # 23 is pushid\n # field 26 is result (success, ???)\n # field 30 is state (completed, ???)\n key_pushid = 23\n key_platform = 22\n key_state = 30\n key_job_type_name = 15\n\n # https://treeherder.mozilla.org/api/project/mozilla-central/jobs/?return_type=list&count=2000&push_id=443884\n\n # https://treeherder.mozilla.org/api/project/mozilla-central/jobs/?return_type=list&count=2000&push_id=443884&offset=2000\n\n pending_jobs = 0\n iteration = 0\n while True:\n if iteration == 0:\n res = get_json(\n \"https://treeherder.mozilla.org/api/project/%s/jobs/?return_type=list&count=2000&push_id=%s\"\n % (project, push_id),\n log_level,\n )\n else:\n offset = iteration * 2000\n res = get_json(\n \"https://treeherder.mozilla.org/api/project/%s/jobs/?return_type=list&count=2000&offset=%s&push_id=%s\"\n % (project, offset, push_id),\n log_level,\n )\n result_count = len(res[\"results\"])\n for item in res[\"results\"]:\n # tqdm.write(item[30])\n if item[key_state] == \"pending\":\n if platform_filter:\n if platform_filter in item[key_platform]:\n pending_jobs += 1\n if log_level == 0:\n tqdm.write(\n \" - %s: %s\"\n % (item[key_platform], item[key_job_type_name])\n )\n else:\n # tqdm.write(item[key_job_type_name])\n pending_jobs += 1\n iteration += 1\n # tqdm.write(result_count)\n if result_count != 2000:\n return pending_jobs\n # never reached!?!\n return pending_jobs\n\n\ndef get_pending_jobs(projects, filter=None, pages=4, page_size=50, early_exit=True):\n ####### phase 1: get try pushes\n last_seen_commit = \"\"\n results_dict = {}\n\n # TODO: multithread?\n proj_iterator = tqdm(projects, desc=\"projects\")\n for project in proj_iterator:\n proj_iterator.set_postfix(project=project)\n pending_job_total = 0\n results_dict[project] = 0\n\n if log_level <= 1:\n tqdm.write(\"%s ---------------------------------------------\" % project)\n\n # eventually?\n # while True:\n push_pbar = tqdm(total=page_size * pages, desc=\"pushes\")\n for i in range(0, pages):\n pending_jobs_this_page = 0\n # TODO: figure out how to avoid overlap\n # url = \"https://treeherder.mozilla.org/api/project/mozilla-central/push/\"\n # https://treeherder.mozilla.org/api/project/mozilla-central/push/?full=true&count=10&fromchange=63bd1994e17c43e699c23f11ca01266d48e61d1e\n # https://treeherder.mozilla.org/api/project/mozilla-central/push/?full=true&count=11&push_timestamp__lte=1552211644\n if i != 0:\n url = (\n \"https://treeherder.mozilla.org/api/project/%s/push/?full=true&count=%s&tochange=%s\"\n % (project, page_size + 1, last_seen_commit)\n )\n else:\n url = (\n \"https://treeherder.mozilla.org/api/project/%s/push/?full=true&count=%s\"\n % (project, page_size)\n )\n\n output = get_json(url, log_level)\n # tqdm.write(output)\n\n results = output[\"results\"]\n for result in results:\n last_seen_commit = result[\"revision\"]\n #\n count = get_push_pending_jobs(project, result[\"id\"], filter, log_level)\n # 
tqdm.write(count)\n pending_jobs_this_page += count\n pending_job_total += count\n #\n push_pbar.update(1)\n if log_level <= 1:\n tqdm.write(\n \"%s:%s (%s): %s pending jobs\"\n % (result[\"id\"], result[\"revision\"], result[\"author\"], count)\n )\n\n results_dict[project] += pending_jobs_this_page\n\n if log_level == 0:\n tqdm.write(\"pending jobs this page: %s\" % pending_jobs_this_page)\n # don't print this message if we're on the last page already\n if early_exit and i + 1 != pages and pending_jobs_this_page == 0:\n tqdm.write(\n \"%s: page %s: no pending jobs found on this page, stopping search early.\"\n % (project, i + 1)\n )\n break\n pending_jobs_this_page = 0\n # TODO: print a summary of this project's pending jobs?\n push_pbar.close()\n return results_dict\n\n\nif __name__ == \"__main__\":\n import argparse\n\n PAGE_SIZE = 20\n PAGES = 3\n\n parser = argparse.ArgumentParser(\n usage=\"%(prog)s [options]\",\n description=\"Scan treeherder to get a count of pending jobs.\",\n )\n # TODO: make this take a csv vs a single\n parser.add_argument(\n \"--project\",\n \"-p\",\n help=\"a single project to inspect for pending jobs (defaults to use mozilla-inbound, autoland, try, and mozilla-central)\",\n )\n parser.add_argument(\n \"--filter\", \"-f\", help=\"require pending jobs to match this string\"\n )\n parser.add_argument(\n \"--page-size\",\n default=PAGE_SIZE,\n dest=\"page_size\",\n type=int,\n help=\"how many results per page to fetch (default is %s)\" % PAGE_SIZE,\n )\n parser.add_argument(\n \"--pages\",\n default=PAGES,\n type=int,\n help=\"how many pages of results should we inspect (default is %s)\" % PAGES,\n )\n parser.add_argument(\n \"-n\",\n \"--no-early-exit\",\n dest=\"no_early_exit\",\n action=\"store_true\",\n help=\"don't exit early if no pending jobs found on a page\",\n )\n # handle multiple -v args (like -vvv)\n parser.add_argument(\n \"--verbose\", \"-v\", action=\"append_const\", dest=\"log_level\", const=-1\n )\n\n args = parser.parse_args()\n log_level = LOG_LEVELS.index(DEFAULT_LOG_LEVEL)\n\n early_exit = True\n if args.no_early_exit:\n early_exit = False\n\n # For each \"-q\" and \"-v\" flag, adjust the logging verbosity accordingly\n # making sure to clamp off the value from 0 to 4, inclusive of both\n for adjustment in args.log_level or ():\n log_level = min(len(LOG_LEVELS) - 1, max(log_level + adjustment, 0))\n\n log_level_name = LOG_LEVELS[log_level]\n # tqdm.write(log_level)\n # sys.exit()\n\n # # TODO: sanity check args.project\n if args.project:\n projects = [args.project]\n else:\n projects = [\"try\", \"mozilla-inbound\", \"autoland\", \"mozilla-central\"]\n\n # args.filter may be None, in which case all pending jobs are counted\n results_dict = get_pending_jobs(\n projects, args.filter, args.pages, args.page_size, early_exit\n )\n\n # display a final summary of results\n tqdm.write(\"\")\n grand_total = 0\n filter_string = \"\"\n if args.filter:\n filter_string = \"'%s' \" % args.filter\n for key in results_dict:\n grand_total += results_dict[key]\n tqdm.write(\"%s: pending %sjobs: %s\" % (key, filter_string, results_dict[key]))\n if len(projects) > 1:\n tqdm.write(\"total: pending %sjobs: %s\" % (filter_string, 
grand_total))\n","repo_name":"Mozilla-GitHub-Standards/11fc81912df7fbfbdc8e57af12280b1f8273063a15918345dff656fa9c8ff5fb","sub_path":"get_pending_jobs/get_pending_jobs.py","file_name":"get_pending_jobs.py","file_ext":"py","file_size_in_byte":9773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"9584510892","text":"import pandas as pd\nfrom PIL import Image\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision import transforms as tfms\n\n\nclass AnimalsDataset(Dataset):\n def __init__(self, csv_file, transforms=None):\n # csv_file is passed in as a pandas DataFrame of file paths and labels\n self.length = len(csv_file)\n self.filepaths = csv_file[[\"file\"]].to_numpy().flatten()\n self.labels = csv_file[[\"label\"]].to_numpy().flatten()\n\n # save transforms for later\n if transforms is None:\n transforms = tfms.Compose([\n tfms.ToTensor(),\n tfms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n ])\n self.transforms = transforms\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n filepath = self.filepaths[index]\n img = Image.open(filepath)\n img = self.transforms(img)\n\n label = self.labels[index]\n\n return img, label\n","repo_name":"j-adamczyk/Pattern_recognition","sub_path":"lab4_CNN_intro/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"1425469288","text":"#!/usr/bin/python3\n\n#Basic imports\nimport time, sys\nimport numpy as np\nfrom tqdm import tqdm\nfrom random import random\nfrom ase.constraints import FixAtoms\nfrom ase.io import read, write\nfrom ase.optimize import BFGS\nfrom ase.calculators.lammpsrun import LAMMPS\nfrom ase.calculators.emt import EMT\n\n#GA imports\nfrom ase.ga.data import PrepareDB\nfrom ase.ga.startgenerator import StartGenerator\nfrom ase.ga.utilities import closest_distances_generator\nfrom ase.ga.utilities import get_all_atom_types\nfrom ase.ga.data import DataConnection\nfrom ase.ga.population import Population\nfrom ase.ga.standard_comparators import InteratomicDistanceComparator\nfrom ase.ga.cutandsplicepairing import CutAndSplicePairing\nfrom ase.ga.offspring_creator import OperationSelector\nfrom ase.ga.standardmutations import MirrorMutation\nfrom ase.ga.standardmutations import RattleMutation\nfrom ase.ga.standardmutations import PermutationMutation\nfrom ase.ga.parallellocalrun import ParallelLocalRun\n\nstart_time = time.time()\n\n#define calc and adsorbate count\ns = int(sys.argv[1])\nc = sys.argv[2]\n\nprint('\\nRunning GA with', str(s), 'adsorbates - using', c, 'calculator')\n\ndb_file = 'DBs/working.db'\n\n#Create the molecule and surface\nunit = [('Pd',s)]\nslab = read('best.vasp')\nslab.pbc = False\nslab.set_constraint(FixAtoms(mask=len(slab)*[True]))\nbox = [(6.,6.,6.), ((8.,0.,0.), (0.,8.,0.), (0.,0.,8.))]\natom_numbers = [46]*s\n\n#Define the closest distance two atoms of a given species can be to each other\nunique_atom_types = get_all_atom_types(slab, atom_numbers)\nblmin = closest_distances_generator(atom_numbers=unique_atom_types, ratio_of_covalent_radii=1.0)\n\n#Create the starting population\nsg = StartGenerator(slab, unit, blmin, box_to_place_in=box)\npopulation_size = 1000\nprint('[Initializing Candidates]')\nstarting_population = [sg.get_new_candidate() for i in tqdm(range(population_size))]\n\n#Create the database to store information in\nd = PrepareDB(db_file_name=db_file, simulation_cell=slab, stoichiometry=atom_numbers)\nfor a in starting_population: 
d.add_unrelaxed_candidate(a)\n\n#Initialize the different components of the GA\nda = DataConnection(db_file)\natom_numbers_to_optimize = da.get_atom_numbers_to_optimize()\nn_to_optimize = len(atom_numbers_to_optimize)\nall_atom_types = get_all_atom_types(slab, atom_numbers_to_optimize)\n\n#Comparatot, Cross-over, and Mutation options\ncomp = InteratomicDistanceComparator(n_top=n_to_optimize, pair_cor_cum_diff=0.03,\n pair_cor_max=0.7, dE=0.05, mic=False)\n\npairing = CutAndSplicePairing(slab, n_to_optimize, blmin)\nmutations = OperationSelector([1., 1., 0.0],\n [MirrorMutation(blmin, n_to_optimize),\n RattleMutation(blmin, n_to_optimize),\n PermutationMutation(n_to_optimize)])\n\n#PARAMETERIZATION\n\n#load specific calculator\nif c == 'EMT': calc = EMT()\n\nelif c == 'LAMMPS':\n #currently assumes AuPd\n files = ['AuPd.set']\n parameters = {'pair_style': 'eam/alloy', 'pair_coeff': ['* * AuPd.set Au Pd']}\n calc = LAMMPS(files=files, parameters=parameters)\n\nelse: print('bad calc argument for initial GA'); exit(1)\n\nn_to_test = 1000\nmutation_probability = 0.4\nfmax = 0.01\nsteps = 2\n\n#MAIN ALGORITHM\n\n#Relax all unrelaxed structures\nprint(\"Relaxing Initial Candidates\")\nwhile da.get_number_of_unrelaxed_candidates() > 0:\n a = da.get_an_unrelaxed_candidate() \n a.calc = calc\n dyn = BFGS(a, trajectory=None, logfile=None)\n dyn.run(fmax=fmax, steps=steps)\n a.info['key_value_pairs']['raw_score'] = -a.get_potential_energy()\n da.add_relaxed_step(a)\n\n#create the population\npopulation = Population(data_connection=da, population_size=population_size, comparator=comp)\nprint('\\nPopulation initialized')\n\n#Test n_to_test new candidates\nprint(\"Testing Candidates\")\nprint('fmax=', fmax, ' steps=', steps)\n\nfor i in tqdm(range(n_to_test)):\n a1, a2 = population.get_two_candidates()\n a3, desc = pairing.get_new_individual([a1, a2])\n if a3 is None: continue\n da.add_unrelaxed_candidate(a3, description=desc)\n\n #check if we want to do a mutation\n if random() < mutation_probability:\n a3_mut, desc = mutations.get_new_individual([a3])\n if a3_mut is not None:\n da.add_unrelaxed_step(a3_mut, desc)\n a3 = a3_mut\n\n #relax the new candidate\n a3.calc = calc\n dyn = BFGS(a3, trajectory=None, logfile=None)\n dyn.run(fmax=fmax, steps=steps)\n a3.info['key_value_pairs']['raw_score'] = -a3.get_potential_energy()\n da.add_relaxed_step(a3)\n population.update()\n\nwrite('DBs/GA0.db', da.get_all_relaxed_candidates())\n\nprint('DONE - initial GA - '+str(round(time.time()-start_time,3)))\n##","repo_name":"anywallsocket/Professional-Projects","sub_path":"BasicExample/Adsorption/GRAMS/initial_GA.py","file_name":"initial_GA.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"10223385975","text":"import configparser\n\nparser = configparser.ConfigParser()\nparser[\"CONSTANTS\"] = {\n \"HOST\": \"postgres01-1.comp.nus.edu\",\n \"PORT\": 5432,\n \"DBNAME\": \"cs2102\",\n}\nparser[\"CREDENTIALS\"] = {\n \"username\": \"\",\n \"password\": \"\",\n}\n\nparser[\"DIRECTORIES\"] = {\n \"schemas\": \"./schema\",\n \"functions\": \"./functions\",\n \"triggers\": \"./triggers\",\n \"views\": \"./views\",\n}\n\ndef generate_config(file_path: str):\n with open(file_path, \"w\") as file:\n 
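# write the assembled sections (CONSTANTS, CREDENTIALS, DIRECTORIES) to the given path in INI format\n 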
parser.write(file)","repo_name":"Jh123x/cs2102-project","sub_path":"SQL/testfiles/generate_config.py","file_name":"generate_config.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"1195076168","text":"from config import PROJECT_ID\nimport asana\n# ASANA UTILS\ndef get_pending_tasks(client):\n result = client.tasks.find_by_project(PROJECT_ID,opt_fields=['completed','notes','name'])\n return [task for task in result if task['completed'] == False]\n\ndef get_attachments(client,gid):\n img_url = []\n att = client.attachments.find_by_task(gid)\n for a in att:\n details = client.attachments.get_attachment(a['gid'])\n img_url.append(details['download_url'])\n\n return img_url\ndef append_task_desc(client, gid, new_desc, sep=\"*\" * 100):\n \"\"\"Appends new description to a given task\"\"\"\n desc = client.tasks.get_task(gid)['notes']\n final_desc = f\"{desc}\\n{sep}\\n{new_desc}\"\n return client.tasks.update_task(gid, {'notes': final_desc})","repo_name":"azazjhony1/FreeLance-everything","sub_path":"Raspador de articulos inverso (2)/asana_utils.py","file_name":"asana_utils.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"38497935623","text":"import tensorflow as tf\nimport numpy as np\nfrom collections import OrderedDict\nimport math\nimport zutils.tf_math_funcs as tmf\nimport nets.distribution.generic\nimport nets.distribution.category\n\nCategoryFactory = nets.distribution.category.Factory\n\nGenericFactory = nets.distribution.generic.Factory\n\nepsilon = tmf.epsilon\n\n\nclass Factory(GenericFactory):\n\n def __init__(self, tau=0., **kwargs):\n self.categ_dist = CategoryFactory(pi=[0.5, 0.5], tau=tau)\n\n @staticmethod\n def param_num():\n return 1\n\n @staticmethod\n def param_dict(p=0.5):\n return OrderedDict(p=p)\n\n @classmethod\n def transform2param(cls, input_tensor, latent_dim):\n \"\"\" Create network for converting input_tensor to distribution parameters\n\n :param input_tensor: (posterior phase) input tensor for the posterior\n :param latent_dim: dimension of the latent_variables\n :return: param_tensor - distribution parameters\n \"\"\"\n param_tensor = tf.sigmoid(input_tensor)\n return param_tensor\n\n @classmethod\n def parametrize(cls, param_tensor, latent_dim):\n \"\"\" Create network for converting parameter_tensor to parameter dictionary\n\n :param param_tensor: (posterior phase) input tensor for the posterior\n :param latent_dim: dimension of the latent_variables\n :return: dist_param - distribution parameters\n \"\"\"\n dist_param = cls.param_dict(\n p=param_tensor,\n )\n return dist_param\n\n @classmethod\n def deparametrize(cls, dist_param):\n param_tensor = tmf.expand_minimum_ndim(dist_param[\"p\"], axis=0)\n return param_tensor\n\n def nll(self, dist_param, samples):\n \"\"\" Compute negative log likelihood on given sample and distribution parameter\n\n :param samples: samples for evaluating PDF\n :param dist_param: input for the posterior\n :return: likelihood - likelihood to draw such samples from given distribution\n :return: is_atomic - is atomic, scalar or the same size as likelihood\n \"\"\"\n likelihood = tf.where(samples > 0.5, dist_param[\"p\"], 1.0-dist_param[\"p\"])\n bernoulli_nll = -tf.log(likelihood+epsilon)\n return bernoulli_nll, True\n\n def sampling(self, dist_param, batch_size, latent_dim):\n \"\"\" Create network for VAE latent variables 
(sampling only)\n\n :param dist_param: input for the posterior\n :param batch_size: batch size\n :param latent_dim: dimension of the latent_variables\n :return: samples - random samples from either posterior or prior distribution\n \"\"\"\n\n # generate random samples\n if self.categ_dist.tau > 0.0:\n # soft sampling\n p = self.deparametrize(dist_param)\n categ_dist_param = OrderedDict()\n categ_dist_param[\"K\"] = 2\n t_p = tf.reshape(p, [batch_size, 1, latent_dim])\n categ_dist_param[\"pi\"] = tf.concat([1.0-t_p, t_p], axis=1)\n categ_samples = self.categ_dist.sampling(categ_dist_param, batch_size, latent_dim) # sample from the two-way categorical built above\n return categ_samples[:,1]\n else:\n # hard sampling\n rho = tf.random_uniform([batch_size, latent_dim])\n return self.inv_cdf(dist_param, rho)\n\n def inv_cdf(self, dist_param, rho):\n p = self.deparametrize(dist_param)\n return tf.to_float(rho > 1.0-p)\n\n @staticmethod\n def self_entropy(dist_param):\n p = dist_param[\"p\"]\n se = -p*tf.log(p+epsilon) - (1.0-p)*tf.log(1.0-p+epsilon)\n return se\n\n @classmethod\n def kl_divergence(cls, dist_param, ref_dist_param, ref_dist_type=None):\n if not isinstance(ref_dist_type, cls) and ref_dist_type is not None: # handle hybrid distribution\n return None\n\n p = dist_param[\"p\"]\n p0 = ref_dist_param[\"p\"]\n homo_kl = p*tf.log(p/(p0+epsilon)+epsilon) + (1.0-p)*tf.log((1.0-p)/(1.0-p0+epsilon)+epsilon)\n return homo_kl\n\n @classmethod\n def cross_entropy(cls, dist_param, ref_dist_param, ref_dist_type=None):\n if not isinstance(ref_dist_type, cls) and ref_dist_type is not None: # handle hybrid distribution\n return None\n\n p = dist_param[\"p\"]\n p0 = ref_dist_param[\"p\"]\n homo_ce = -p*tf.log(tf.clip_by_value(p0+epsilon, clip_value_min=epsilon, clip_value_max=1.)) - \\\n (1.0-p)*tf.log(tf.clip_by_value(1.0-p0+epsilon, clip_value_min=epsilon, clip_value_max=1.))\n return homo_ce\n\n @staticmethod\n def mean(dist_param):\n p = dist_param[\"p\"]\n return p\n","repo_name":"YutingZhang/lmdis-rep","sub_path":"nets/distribution/bernoulli.py","file_name":"bernoulli.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"28"} +{"seq_id":"5399653688","text":"import asyncio\nimport queue\nimport threading\nimport time\n\n\nasync def network_call(wait_for: int):\n await asyncio.sleep(wait_for)\n return wait_for\n\n\nasync def main():\n tasks = []\n for i in range(5):\n tasks.append(asyncio.create_task(network_call(wait_for=i)))\n\n await asyncio.wait(tasks)\n\n for task in tasks:\n print(task.result())\n\n\nif __name__ == '__main__':\n start_time = time.time()\n asyncio.run(main())\n print(f\"Completed at {time.time() - start_time}\")\n","repo_name":"Goshan220/slurm-education","sub_path":"8.API/threads_and_asyncio/asyncio_example.py","file_name":"asyncio_example.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19290310653","text":"\n\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nres = requests.get(\"http://forecast.weather.gov/MapClick.php?lat=21.3049&lon=-157.8579\")\nsoup = BeautifulSoup(res.content, 'html.parser')\n\n#temp = soup.find(id_ = \"detailed-forecast-body\")\n\ntemp = soup.find(\"div\", {\"id\": \"detailed-forecast-body\"})\n\ntemp1= temp.find_all(\"div\", {\"class\": \"row row-even row-forecast\"})\ntemp2= temp.find_all(\"div\", {\"class\": \"row row-odd row-forecast\"})\n\n\nfor i in range(7):\n \n quando2 = temp2[i].find(class_ = \"col-sm-2 
forecast-label\").getText()\n info2 = temp2[i].find(class_ = \"col-sm-10 forecast-text\").getText()\n print(\"%s: %s\"%(quando2, info2))\n\n quando1 = temp1[i].find(class_ = \"col-sm-2 forecast-label\").getText()\n info1 = temp1[i].find(class_ = \"col-sm-10 forecast-text\").getText()\n print(\"%s: %s\"%(quando1, info1))\n\n","repo_name":"SandroVonlanthen/notebook2","sub_path":"exe4.2.py","file_name":"exe4.2.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"15331308617","text":"import tkinter as sister\nfrom tkinter import messagebox\nmain_window = sister.Tk()\nmain_window.geometry(\"5000x5000\")\nmain_window.configure(bg=\"white\")\nmain_window.title(\"mth assignment\")\n\ndef AREA():\n RADIUS = int(big.get())\n AREA = 3.14 * RADIUS**2\n messagebox.showinfo(\"AREA\", \"the area is \" + str(AREA))\n\ndef CIRCUMFERENCE():\n RADIUS = int(big.get())\n CIRCUMFERENCE = 2 * RADIUS * 3.14\n messagebox.showinfo(\"CIRCUMFERENCE\", \"the circumference is \" + str(CIRCUMFERENCE))\n\nsmall = sister.Label(text=\"radius:\", font=(\"Algerian\", 20))\nsmall.grid(row=0, column=0, padx=5, pady=5)\nbig =sister.Entry(font=(\"Algerian\", 20))\nbig.grid(row=0, column=1)\n\n\ncalc_1 = sister.Button(text=\"AREA\", font=(\"Algerian\", 25), command=AREA)\ncalc_1.grid(row=2, column=3, padx=5, pady=5)\ncalc_2 = sister.Button(text=\"CIRCUMFERENCE\", font=(\"Algerian\", 25), command=CIRCUMFERENCE)\ncalc_2.grid(row=3, column=3, padx=5, pady=5)\n\n\n\n\nmain_window.mainloop()\n","repo_name":"zye-nom/chona","sub_path":"thirdclass.py","file_name":"thirdclass.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"11017514324","text":"from modules.validation import Validate\nfrom modules.data import Data\nfrom modules.log_experiments import MlFlow\nfrom modules.gridsearch import GridSeach\nfrom modules.models import Models \n \nclass Pipeline:\n\n def __init__(self):\n\n self.data = Data()\n self.models = Models()\n self.grid_search = GridSeach()\n self.validate = Validate()\n self.mlFlow = MlFlow()\n\n def train(self,model,X_train,y_train):\n\n model.fit(X_train,y_train)\n\n return model\n\n\n def initialize_model(self,model_name,parameter):\n\n if model_name in self.models.emsemble_names:\n for decision_tree_parameter in self.grid_search.iterate_by_name(\"DecisionTreeClassifier\"):\n model = self.models.instantiate(model_name,(parameter,decision_tree_parameter)) \n \n else:\n model = self.models.instantiate(model_name,parameter)\n \n parameter[\"model_name\"] = model_name\n\n return model\n\n \n def fit(self):\n \n self.datasets = self.data.generate()\n for X, y in self.datasets:\n X_train, X_test, y_train, y_test = self.data.transform.split(X, y)\n X_train, X_test = self.data.transform.normalize(X_train,X_test)\n for model_name,parameter in self.grid_search.generate_parameter():\n model = self.initialize_model(model_name,parameter)\n model = self.train(model,X_train,y_train)\n metrics = self.validate.eval(model,X_test,X_train,y_train,y_test)\n self.mlFlow.log_result(parameter,metrics)\n\n self.mlFlow.save_results()\n\n\nif __name__ == \"__main__\":\n\n pipeline = Pipeline()\n pipeline.fit()\n\n\n\n\n\n 
\n\n\n\n\n\n","repo_name":"GuintherKovalski/MLops","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"37298367705","text":"from flask import Blueprint, render_template, request, redirect, url_for\nfrom flask_login import login_required\nfrom logging import getLogger\nfrom MediaMined.db import mongo \nimport MediaMined.reddit as reddit\n\nmain = Blueprint('main', __name__)\nlogger = getLogger(__name__)\n\n@main.route('/')\ndef index():\n return render_template('index.html')\n\n@main.route('/reddit')\n@login_required\ndef reddits():\n total_posts = mongo.reddit_get_total_posts_count()\n posts = mongo.reddit_get_all_posts()\n average_upvotes = 0.7\n return render_template('reddit.html', posts=posts, total_posts=total_posts, average_upvotes = average_upvotes)\n\n@main.route('/reddit/')\n@login_required\ndef reddit_post(post_id):\n logger.info(\"access reddit posts\")\n # Fetch the post details from the database\n post = mongo.reddit_find_post(post_id)\n # Fetch the comments associated with this post\n comments = mongo.reddit_find_comments_by_post(post_id)\n return render_template('reddit_post.html', post=post, comments=comments)\n\n@main.route('/submit_reddit_url', methods=['POST'])\n@login_required\ndef submit_reddit_url():\n url = request.form['url']\n\n reddit.get_content(url);\n \n return redirect(url_for('main.reddits')) # Redirect back to the index page\n\n# @main.route('/youtube')\n# @login_required\n# def youtubes():\n# # total_videos = mongo.get_total_videos_count()\n# # videos = mongo.get_all_videos()\n# # average_likes = 0.9 # Example, adjust as needed\n# return render_template('youtube.html', videos=videos, total_videos=total_videos, average_likes=average_likes)\n\n# @main.route('/youtube/')\n# @login_required\n# def youtube_video(video_id):\n# logger.info(\"access YouTube video\")\n# video = mongo.find_video(video_id)\n# comments = mongo.find_comments_by_video(video_id)\n# return render_template('youtube_video.html', video=video, comments=comments)\n","repo_name":"Tengs-Penkwe/LLM_analyzer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"25196993819","text":"import builder\nimport config\n\nfrom objects import (iwadchooser, iwadcombobox, iwadnamedialog, iwadnameentry,\n iwadlist, iwadpathentry, portchooser, portcombobox, portnamedialog,\n portnameentry, portlist, portpathentry)\n\n# Add Source Port\n\ndef addportbuttonclicked(button):\n portchooser.show()\n\ndef removeportbuttonclicked(button):\n index = portcombobox.get_active()\n iter = portcombobox.get_active_iter()\n portlist.remove(iter)\n\n if index:\n portcombobox.set_active(index-1)\n else:\n portcombobox.set_active(0)\n\n del config.jsoncfg['ports'][index]\n config.writechanges()\n\ndef portchooserselectionchanged(chooser):\n portpathentry.set_text(chooser.get_filename())\n\ndef portcancelbuttonclicked(button):\n portpathentry.set_text(\"\")\n portchooser.hide()\n\ndef portselectbuttonclicked(button):\n portnamedialog.show()\n\ndef portnamecancelbuttonclicked(button):\n portnamedialog.hide()\n\ndef portnameokbuttonclicked(button):\n portnamedialog.hide()\n portpathentry.set_text(\"\")\n portchooser.hide()\n\n newport = {\n \"name\": portnameentry.get_text(),\n \"path\": portchooser.get_filename()\n }\n\n 
config.jsoncfg['ports'].append(newport)\n portlist.append([newport['name'], newport['path']])\n config.writechanges()\n\n# Add IWAD\n\ndef addiwadbuttonclicked(button):\n iwadchooser.show()\n\ndef removeiwadbuttonclicked(button):\n index = iwadcombobox.get_active()\n iter = iwadcombobox.get_active_iter()\n iwadlist.remove(iter)\n\n if index:\n iwadcombobox.set_active(index-1)\n else:\n iwadcombobox.set_active(0)\n\n del config.jsoncfg['iwads'][index]\n config.writechanges()\n\ndef iwadchooserselectionchanged(chooser):\n if chooser.get_filename():\n iwadpathentry.set_text(chooser.get_filename())\n\ndef iwadcancelbuttonclicked(button):\n iwadpathentry.set_text(\"\")\n iwadchooser.hide()\n\ndef iwadselectbuttonactiate(button):\n iwadnamedialog.show()\n\ndef iwadnamecancelbuttonclicked(button):\n iwadnamedialog.hide()\n\ndef iwadnameokbuttonclicked(button):\n iwadnamedialog.hide()\n iwadpathentry.set_text(\"\")\n iwadchooser.hide()\n\n newiwad = {\n \"name\": iwadnameentry.get_text(),\n \"path\": iwadchooser.get_filename()\n }\n\n config.jsoncfg['iwads'].append(newiwad)\n\n iwadlist.append([newiwad['name'], newiwad['path']])\n config.writechanges()","repo_name":"iccolon818/MPDL","sub_path":"dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"30330433802","text":"import os, sys\nimport re\nimport requests\nimport ssl\nfrom urllib.parse import urljoin, urlparse\nfrom bs4 import BeautifulSoup\n\nGV_FLG = False # Flag to identify whether to use global values\nif GV_FLG: import webGlobal as gv\nURL_RCD = gv.URL_LIST if GV_FLG else 'urllist.txt' # file to save url list\nRST_DIR = gv.DATA_DIR 
if GV_FLG else 'datasets'\nURL_FN = gv.INFO_RCD_NAME if GV_FLG else 'info.txt' # url file name \nPORT = 443 # port to download the server certificate; most servers use 443.\n\n#-----------------------------------------------------------------------------\n#-----------------------------------------------------------------------------\nclass urlDownloader(object):\n    \"\"\" Download the webpage components based on the input urls.\"\"\"\n    def __init__(self, imgFlg=True, linkFlg=True, scriptFlg=True, caFlg=True):\n        self.soup = None\n        self.imgFlg = imgFlg\n        self.linkFlg = linkFlg\n        self.scriptFlg = scriptFlg\n        self.caFlg = caFlg\n        self.linkType = ('css', 'png', 'ico', 'jpg', 'jpeg', 'mov', 'ogg', 'gif', 'xml','js')\n        self.session = requests.Session()\n        self.resultDir = RST_DIR\n    \n    def setResutlDir(self, resutlDir):\n        self.resultDir = resutlDir\n\n    #-----------------------------------------------------------------------------\n    def savePage(self, url, pagefileDir='page', txtMD=True):\n        \"\"\" Save the web page components based on the input url and dir name.\n        Args:\n            url (str): web url string.\n            pagefileDir (str, optional): path to save the web components.\n            txtMD (bool, optional): flag to identify whether to save the url in a txt file.\n        Returns:\n            [bool]: whether the components are saved successfully.\n        \"\"\"\n        if 'http' not in url:\n            print(\"> savePage(): The input url is not valid: %s\" %str(url))\n            return False\n        try:\n            response = self.session.get(url)\n            self.soup = BeautifulSoup(response.text, features=\"lxml\")\n            pagefolder = os.path.join(self.resultDir, pagefileDir) # page contents\n            if not os.path.exists(pagefolder): os.mkdir(pagefolder)\n            if self.imgFlg: self._soupfindnSave(url, pagefolder, tag2find='img', inner='src')\n            if self.linkFlg: self._soupfindnSave(url, pagefolder, tag2find='link', inner='href')\n            if self.scriptFlg: self._soupfindnSave(url, pagefolder, tag2find='script', inner='src')\n            if self.caFlg: self.saveServCA(url, pagefolder)\n            with open(os.path.join(pagefolder, pagefileDir+'.html'), 'wb') as file:\n                file.write(self.soup.prettify('utf-8'))\n            if txtMD: \n                # record the page url under text mode: \n                with open(os.path.join(pagefolder, URL_FN), \"a+\", encoding='ISO-8859-1') as f:\n                    f.write(url)\n            return True\n        except Exception as e:\n            print(\"> savePage(): Create files failed: %s.\" % str(e))\n            return False\n\n    #-----------------------------------------------------------------------------\n    def saveServCA(self, url, pagefolder):\n        \"\"\" Parse the host name from the URL then try to download the host's SSL \n            certificate. \n        Args:\n            url (str): web url string.\n            pagefolder (str): path to save the web components.\n        Returns:\n            [bool]: whether the certificate is saved successfully.\n        \"\"\"\n        if 'https' in url:\n            certfolder = os.path.join(pagefolder, 'cert')\n            if not os.path.exists(certfolder): os.mkdir(certfolder)\n            caFilepath = os.path.join(certfolder, 'cert.der')\n            hostname = urlparse(url).hostname\n            with open(caFilepath, 'wb') as f:\n                cert = None\n                try:\n                    cert = ssl.get_server_certificate((hostname, PORT))\n                except:\n                    print('>> Error: host: %s is invalid.' 
% str(hostname))\n                    # rsplit the host name to drop the trailing country section such as 'sg'\n                    hostname = hostname.rsplit('.', 1)[0]\n                    cert = ssl.get_server_certificate((hostname, PORT))\n                if cert: f.write(ssl.PEM_cert_to_DER_cert(cert)) # write the cert info.\n            return True\n        else:\n            print(\">> The url is not a https url, no ssl CA available\")\n            return False\n\n    #-----------------------------------------------------------------------------\n    def _soupfindnSave(self, url, pagefolder, tag2find='img', inner='src'):\n        \"\"\" Saves all tag2find objects into the specified pagefolder. \"\"\"\n        pagefolder = os.path.join(pagefolder, tag2find)\n        if not os.path.exists(pagefolder): os.mkdir(pagefolder)\n        for res in self.soup.findAll(tag2find):   # images, css, etc..\n            try:\n                if not res.has_attr(inner): continue # check if inner tag (file object) exists\n                # clean special chars such as '@, # ? <>'\n                filename = re.sub(r'\W+', '.', os.path.basename(res[inner]))\n                # print(\"> filename:\", filename)\n                # Added the '.html' for the html file in the href\n                if tag2find == 'link' and (not any(ext in filename for ext in self.linkType)):\n                    filename += '.html'\n                fileurl = urljoin(url, res.get(inner))\n                filepath = os.path.join(pagefolder, filename)\n                # rename html ref so can move html and folder of files anywhere\n                res[inner] = os.path.join(os.path.basename(pagefolder), filename)\n                # create the file.\n                if not os.path.isfile(filepath):\n                    with open(filepath, 'wb') as file:\n                        filebin = self.session.get(fileurl)\n                        if len(filebin.content) > 0: # filter the empty file (image not found)\n                            file.write(filebin.content)\n            except Exception as exc:\n                print(exc, file=sys.stderr)\n\n\n#-----------------------------------------------------------------------------\n#-----------------------------------------------------------------------------\ndef main():\n    soup = urlDownloader(imgFlg=True, linkFlg=True, scriptFlg=True, caFlg=True)\n    count = failCount = 0\n    if not os.path.exists(RST_DIR): os.mkdir(RST_DIR)\n    print(\"> load url record file %s\" %URL_RCD)\n    with open(URL_RCD) as fp:\n        urllines = fp.readlines()\n        for line in urllines:\n            if line[0] in ['#', '', '\\n', '\\r', '\\t']: continue # jump comments/empty lines.\n            count += 1\n            print(\"> Process URL {}: {}\".format(count, line.strip()))\n            if ('http' in line):\n                line = line.strip()\n                domain = str(urlparse(line).netloc)\n                folderName = \"_\".join((str(count), domain))\n                #print(domain)\n                result = soup.savePage(line, folderName)\n                # soup.savePage('https://www.google.com', 'www_google_com')\n                if result: \n                    print('Finished.')\n                else:\n                    failCount += 1\n    print(\"\\n> Download result: download %s url, %s fail\" %(str(count), str(failCount)))\n\n#-----------------------------------------------------------------------------\nif __name__ == '__main__':\n    main()\n","repo_name":"LiuYuancheng/Windows_User_Simulator","sub_path":"src/actionScheduler/UtilsFunc/webDownload.py","file_name":"webDownload.py","file_ext":"py","file_size_in_byte":7440,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"} +{"seq_id":"27068429676","text":"\nfrom connect import get_current\nfrom create_IMPT_plan import CreateIMPTPlan\nfrom create_virtual_ct_from_cbct import CreateConvertedImage\nfrom evaluation import EvaluationSummedDose, EvaluationPlanningDose, NeedsAdaptation\nfrom Patients import Patient\nimport pandas as pd\nfrom os.path import join\nimport json\n\n\ndef find_dose_on_examination(examination_name):\n    case = 
get_current(\"Case\")\n for doe in case.TreatmentDelivery.FractionEvaluations[0].DoseOnExaminations:\n if doe.OnExamination.Name == examination_name:\n dose_on_examination = doe\n return dose_on_examination\n\ndef delete_all_dose_evaluations():\n case = get_current(\"Case\")\n for doe in case.TreatmentDelivery.FractionEvaluations[0].DoseOnExaminations:\n n_evals = len(doe.DoseEvaluations)\n if n_evals != 0:\n for i in range(len(doe.DoseEvaluations)-1,-1,-1):\n dose_eval = doe.DoseEvaluations[i]\n dose_eval.DeleteEvaluationDose()\n\ndef read_model_param(model_name):\n _f = open('model_parameters.json')\n properties = json.load(_f)\n _f.close()\n return properties[model_name]\n\ndef evaluate_initial_planning(plan_name):\n eval = EvaluationPlanningDose(plan_name)\n planning_results = eval.evaluate_dose_statistics()\n return planning_results\n\ndef main():\n try:\n patient = get_current(\"Patient\")\n patient.Save()\n except:\n print(\"No patient loaded\")\n\n #patient_list = ['ANON6','ANON12','ANON16','ANON29','ANON34','ANON38','ANON43','ANON18','ANON26','ANON37']\n patient_list = ['ANON37']\n model_list = [\"0_NoAdapt\",\"1_AutoRS_def_rois\", \"2_MimClin_rr_rois\",\"3_MimDef_def_rois\"]\n #model_list = model_list = [\"0_NoAdapt\",\"1_AutoRS_def_rois\", \"2_MimClin_rr_rois\",\"3_MimDef_def_rois\"]\n\n\n for patient_name in patient_list:\n\n pat = Patient(patient_name)\n RS_Patient = pat.loadPatient()\n\n RS_Patient.Cases[0].SetCurrent()\n\n case = get_current(\"Case\")\n patient = get_current(\"Patient\")\n pct_name = \"pCT\"\n initial_plan = \"ML_IMPT_plan\"\n adaptation_strategy_init = \"Last_plan\" #Best_plan or Last_plan\n results_planning = evaluate_initial_planning(initial_plan)\n\n treatment_schedule_folder = \"C:\\\\Elena\\\\results\\\\treatment_schedules\"\n stats_folder = join(\"C:\\\\Elena\\\\results\\\\different_ttmt_schedules\",adaptation_strategy_init)\n\n #export initial planning results\n results_planning_file = join(stats_folder, patient.Name + \"_initial_planning.xlsx\")\n results_planning.to_excel(results_planning_file, engine='openpyxl')\n\n plan_names = [plan.Name for plan in case.TreatmentPlans]\n exam_names = [exam.Name for exam in case.Examinations]\n\n oars_model = [r\"Brainstem\", r\"SpinalCord\",\n r\"Parotid_R\", r\"Parotid_L\", r\"Submandibular_L\", r\"Submandibular_R\",\n r\"Oral_Cavity\", r\"PharConsSup\", r\"PharConsMid\", r\"PharConsInf\",\n r\"Esophagus\", r\"BODY\"]\n\n targets_model = [r\"CTV_5425\", r\"CTV_7000\", r\"CTV_all\",\n r\"CTVp_7000\", r\"CTV_7000+10mm\", r\"CTV54.25-CTV70+10mm\"]\n\n model_rois = targets_model + oars_model\n\n file = patient.Name + \"_ttmt_schedule.xlsx\"\n schedule = pd.read_excel(join(treatment_schedule_folder, file))\n schedule = schedule.loc[:, ['#Fraction', 'CBCT_name',\n 'Needs_adaptation', 'Needs_adaptation_0_1']]\n\n #dataframe initialisations\n df_timing = pd.DataFrame(columns=[\"#Fraction\", \"Plan_image\", \"Plan_name\", \"t_plan_generation (min)\", \"t_plan_optimization (min)\"])\n all_results = pd.DataFrame(columns=[\"Patient\", \"Plan_name\", \"ClinicalGoal\", \"Value\"])\n all_patients_results = all_results\n\n for model in model_list:\n delete_all_dose_evaluations()\n model_paramters = read_model_param(model)\n \n possible_plans = []\n delivery_schedule_df = pd.DataFrame(columns=['#Fraction', 'CBCT_name', \"Delivered_plan_name\", \"Needs_new_plan\"])\n\n for i in range(len(schedule)):\n\n n_fx = schedule.loc[i, '#Fraction']\n adapt_image_name = schedule.loc[i, 'CBCT_name']\n cbct_name = adapt_image_name[-7:]\n 
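# Added note (assumption): adapt_image_name is expected to end in a token like 'CBCT_01', so the\n                # [-7:] slice recovers the bare CBCT name reused below for the corrected image and plan names.\n                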
needs_adapt = schedule.loc[i, 'Needs_adaptation_0_1']\n \n if model.startswith(\"0\"):\n needs_adapt = 0\n \n if adaptation_strategy_init == \"Daily_adapt\":\n needs_adapt = 1\n\n \n print(n_fx, adapt_image_name, needs_adapt)\n\n case.Examinations[adapt_image_name].ImportFraction = int(adapt_image_name[-2:])\n\n #adaption image generation\n if \"Corrected \" + cbct_name not in exam_names:\n converter = CreateConvertedImage(pct_name, cbct_name, model_rois)\n adapt_image_name = converter.create_corrected_cbct()\n else:\n print(\"The adaptation image already exist: \", adapt_image_name)\n \n auto_plan_name = model_paramters['Alias'] + cbct_name\n needs_new_plan = 0\n\n \n #initialisation\n auto_planning = CreateIMPTPlan(adapt_image_name, auto_plan_name, \n model_paramters['ModelName'], \n model_paramters['ModelStrategy'], \n model_paramters['ROI_mapping'], \n model_paramters['DoseGrid'], \n model_paramters['Needs_reference_dose'])\n \n adaptation_strategy = adaptation_strategy_init\n print('Adaption strategy: ', adaptation_strategy)\n\n if needs_adapt == 1:\n\n print(\"Adaptation is needed for \", adapt_image_name)\n print(possible_plans)\n\n if len(possible_plans) < 1:\n adaptation_strategy = 'Daily_adapt'\n\n if adaptation_strategy == 'Last_plan':\n print('Lets evaluate the LAST adapted plan')\n last_plan_name = possible_plans[-1]\n init_adapt = NeedsAdaptation(adapt_image_name, last_plan_name)\n adapt_needed = init_adapt.check_adaptation_needed()\n print(adapt_needed, type(adapt_needed))\n\n if adapt_needed == True:\n delivered_plan = auto_plan_name\n case.TreatmentPlans[auto_plan_name].BeamSets[0].ComputeDoseOnAdditionalSets(OnlyOneDosePerImageSet=False, AllowGridExpansion=True, ExaminationNames=[adapt_image_name], FractionNumbers=[0], ComputeBeamDoses=True)\n else:\n delivered_plan = last_plan_name\n print('We keep the LAST plan ', last_plan_name, ' as the delivered plan for adaptation image : ', adapt_image_name )\n \n elif adaptation_strategy == 'Best_plan':\n print('Lets find the BEST plan')\n init_adapt = NeedsAdaptation(adapt_image_name, possible_plans[0])\n best_plan_name, adapt_needed = init_adapt.find_best_plan_and_check_adapt(possible_plans)\n\n print('The best plan is : ', best_plan_name, ' Adaptation is needed : ', adapt_needed, type(adapt_needed))\n\n if adapt_needed == True:\n delivered_plan = auto_plan_name\n case.TreatmentPlans[auto_plan_name].BeamSets[0].ComputeDoseOnAdditionalSets(OnlyOneDosePerImageSet=False, AllowGridExpansion=True, ExaminationNames=[adapt_image_name], FractionNumbers=[0], ComputeBeamDoses=True)\n else:\n delivered_plan = best_plan_name\n print('We keep the BEST plan ', best_plan_name, ' as the delivered plan for adaptation image : ', adapt_image_name )\n \n elif adaptation_strategy == 'Daily_adapt':\n delivered_plan = auto_plan_name\n if auto_plan_name not in plan_names:\n #run planning\n t_plan_generation, t_plan_optimization = auto_planning.create_run_and_approve_IMPT_plan()\n df_timing = df_timing.append({'#Fraction': n_fx, 'Plan_image': adapt_image_name, 'Plan_name': auto_plan_name,\n 't_plan_generation (min)': t_plan_generation/60, 't_plan_optimization (min)': t_plan_optimization/60}, ignore_index=True)\n plan_names.append(auto_plan_name)\n \n case.TreatmentPlans[auto_plan_name].BeamSets[0].ComputeDoseOnAdditionalSets(OnlyOneDosePerImageSet=False, AllowGridExpansion=True, ExaminationNames=[adapt_image_name], FractionNumbers=[0], ComputeBeamDoses=True)\n\n elif needs_adapt == 0:\n delivered_plan = initial_plan\n print(\"Adaptation is not needed 
for \", adapt_image_name,\". I will recompute the initial plan\")\n \n case.TreatmentPlans[initial_plan].BeamSets[0].ComputeDoseOnAdditionalSets(\n OnlyOneDosePerImageSet=False, AllowGridExpansion=True, ExaminationNames=[adapt_image_name], FractionNumbers=[0], ComputeBeamDoses=True)\n \n #run DIR for dose deformation\n auto_planning.run_DIR_pCT_adapt_image()\n \n if delivered_plan == auto_plan_name:\n needs_new_plan = 1\n \n \n if delivered_plan not in possible_plans and delivered_plan.startswith(model_paramters['Alias']):\n possible_plans.append(delivered_plan)\n \n delivery_schedule_df = delivery_schedule_df.append({'#Fraction': n_fx, 'CBCT_name': adapt_image_name, 'Delivered_plan_name': delivered_plan,'Needs_new_plan': needs_new_plan}, ignore_index=True)\n print(delivery_schedule_df)\n print(possible_plans)\n \n # fraction on dose\n dose_on_examination = find_dose_on_examination(adapt_image_name)\n fx_dose = dose_on_examination.DoseEvaluations[0]\n\n # map fraction dose\n try:\n dir_reg_name = 'HybridDefReg'+adapt_image_name[-2:]+str(1)\n case.MapDose(FractionNumber=0, SetTotalDoseEstimateReference=True, DoseDistribution=fx_dose,\n StructureRegistration=case.StructureRegistrations[dir_reg_name], ReferenceDoseGrid=None)\n except:\n print(\"There is no dose to evaluate in \", adapt_image_name)\n\n # accumulate all fraction doses on the planning ct\n doses_to_sum = []\n weights = []\n\n dose_on_examination_pct = find_dose_on_examination(pct_name)\n for dose_eval in dose_on_examination_pct.DoseEvaluations:\n doses_to_sum.append(dose_eval)\n weights.append(1)\n\n #create summed dose\n summed_dose_name = model_paramters['Alias'] + \"Summed dose\"\n case.CreateSummedDose(DoseName=model_paramters['Alias'] + \"Summed dose\", FractionNumber=0,\n DoseDistributions=doses_to_sum,\n Weights=weights)\n \n patient.Save()\n oa_strategy = model_paramters['OAStrategy']\n\n #evaluate summed dose\n evaluation = EvaluationSummedDose(dose_on_examination_pct,oa_strategy,summed_dose_name)\n results = evaluation.evaluate_dose_statistics()\n \n #write results\n results_file = join(stats_folder, patient.Name + \"_\" + oa_strategy + \".xlsx\")\n results.to_excel(results_file, engine='openpyxl')\n\n all_results = all_results.append(results)\n\n #write delivery schedule\n delivery_file = join(stats_folder, patient.Name + \"_delivery_for_\" + oa_strategy + \".xlsx\")\n delivery_schedule_df.to_excel(delivery_file, engine='openpyxl')\n\n # export timing strategies and all fractions\n print('Timing data frame: ', df_timing)\n if len(df_timing) != 0:\n export_file = join(stats_folder, patient.Name + '_' + oa_strategy + \"_timing.xlsx\")\n df_timing.to_excel(export_file, engine='openpyxl')\n \n # export results for all strategies and sum over fractions\n all_results_file = join(stats_folder, patient.Name + \"_all_strategies\" + \".xlsx\")\n all_results.to_excel(all_results_file, engine='openpyxl')\n\n all_patients_results = all_patients_results.append(all_results)\n\n #export results for all patients, all strategies and sum over fractions\n all_patient_results_file = join(stats_folder,\"results_all_strategies_all_patients\" + \".xlsx\")\n all_patients_results.to_excel(all_patient_results_file, engine='openpyxl')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ElenaBorderias/RS_adaptive","sub_path":"adaptive_workflows_diff_schedule/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} 
+{"seq_id":"33491490631","text":"#Trace the following central conic : x^2 + y^2 + xy + x + y = 1\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef ellipse_gen(a,b) :\n y_el=np.zeros((2,50))\n rad=np.linspace(0,2*np.pi,50)\n y_el[0,]=a*np.cos(rad)\n y_el[1,]=b*np.sin(rad)\n return y_el\n\nV=np.array([1,0.5,0.5,1]).reshape(2,2)\nu=np.array([0.5,0.5])\nf=-1\nlam,P = np.linalg.eig(V)\nD=np.diag(lam)\nV_inv=np.linalg.inv(V)\nc=-V_inv@u\na=np.sqrt((np.transpose(u)@V_inv@u - f)/lam[0])\nb=np.sqrt((np.transpose(u)@V_inv@u - f)/lam[1])\ny_el=ellipse_gen(a,b)\n\nprint(\"a (minor axis) : {}\".format(a))\nprint(\"b (major axis) : {}\".format(a))\nprint(\"center : {}\".format(a))\n\nx_el=np.empty((1,2))\nfor i in y_el.T:\n x_el=np.vstack((x_el,P@i.T +c))\nx_el=x_el.T\n\nplt.plot(y_el[0,],y_el[1,],label='Standard Ellipse')\nplt.plot(x_el[0,],x_el[1,],label='Actual Ellipse')\nplt.annotate(\"(-0.3,-0.3)\", (c[0], c[1]), (c[0]-0.4, c[1]-0.25))\nplt.scatter(c[0],c[1],color='red')\nplt.scatter(0,0,color='black')\nplt.legend(loc='best')\nplt.grid()\nplt.axis('equal')\nplt.show()\n","repo_name":"gadepall/school","sub_path":"loney/solutions/41/18/Assignment7.py","file_name":"Assignment7.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"3572315248","text":"import logging; logger = logging.getLogger(\"morse.\" + __name__)\nfrom morse.core.services import service\nfrom morse.core import status, blenderapi, mathutils\nfrom morse.blender.main import reset_objects as main_reset, close_all as main_close, quit as main_terminate\nfrom morse.core.abstractobject import AbstractObject\nfrom morse.core.exceptions import *\nimport json\n\ndef get_structured_children_of(blender_object):\n \"\"\" Returns a nested dictionary of the given objects children, recursively.\n The retun format is as follows:\n\n {blender_object.name: [children_dictionary, position, orientation]}\n\n where children_dictionary is another of the same format, but with the keys\n being the children of blender_object. 
This continues down the entire tree\n structure.\n\n :param KX_GameObject blender_object: The Blender object to return children\n for.\n \"\"\"\n children = blender_object.children\n orientation = blender_object.worldOrientation.to_quaternion()\n position = blender_object.worldPosition\n structure = { blender_object.name: [{},\n (position.x, position.y, position.z),\n (orientation.x, orientation.y,\n orientation.z, orientation.w)\n ]\n }\n for c in children:\n structure[blender_object.name][0].update(\n get_structured_children_of(c) )\n return structure\n\ndef get_obj_by_name(name):\n \"\"\"\n Return object in the scene associated to :param name:\n If it does not exists, throw a MorseRPCInvokationError\n \"\"\"\n scene = blenderapi.scene()\n if name not in scene.objects:\n raise MorseRPCInvokationError(\n \"Object '%s' does not appear in the scene.\" % name)\n return scene.objects[name]\n\nclass Supervision(AbstractObject):\n def __init__(self):\n AbstractObject.__init__(self)\n\n def name(self):\n return \"simulation\"\n\n @service\n def list_robots(self):\n \"\"\" Return a list of the robots in the current scenario\n\n Uses the list generated during the initialisation of the scenario\n \"\"\"\n return [obj.name for obj in blenderapi.persistantstorage().robotDict.keys()]\n\n @service\n def reset_objects(self):\n \"\"\" Restore all simulation objects to their original position\n\n Upon receiving the request using sockets, call the\n 'reset_objects' function located in morse/blender/main.py\n \"\"\"\n contr = blenderapi.controller()\n main_reset(contr)\n return \"Objects restored to initial position\"\n\n @service\n def quit(self):\n \"\"\" Cleanly quit the simulation\n \"\"\"\n contr = blenderapi.controller()\n main_close(contr)\n main_terminate(contr)\n\n @service\n def terminate(self):\n \"\"\" Terminate the simulation (no finalization done!)\n \"\"\"\n contr = blenderapi.controller()\n main_terminate(contr)\n\n @service\n def activate(self, component_name):\n \"\"\" Enable the functionality of the component specified\n \"\"\"\n try:\n blenderapi.persistantstorage().componentDict[component_name]._active = True\n except KeyError as detail:\n logger.warn(\"Component %s not found. Can't activate\" % detail)\n raise MorseRPCTypeError(\"Component %s not found. Can't activate\" % detail)\n\n @service\n def deactivate(self, component_name):\n \"\"\" Stop the specified component from calling its default_action method\n \"\"\"\n try:\n blenderapi.persistantstorage().componentDict[component_name]._active = False\n except KeyError as detail:\n logger.warn(\"Component %s not found. Can't deactivate\" % detail)\n raise MorseRPCTypeError(\"Component %s not found. 
Can't deactivate\" % detail)\n\n @service\n def suspend_dynamics(self):\n \"\"\" Suspends physics for all object in the scene.\n \"\"\"\n\n scene = blenderapi.scene()\n for object in scene.objects:\n object.suspendDynamics()\n\n return \"Physics is suspended\"\n\n @service\n def restore_dynamics(self):\n \"\"\" Resumes physics for all object in the scene.\n \"\"\"\n\n scene = blenderapi.scene()\n for object in scene.objects:\n object.restoreDynamics()\n\n return \"Physics is resumed\"\n\n @service\n def details(self):\n \"\"\"Returns a structure containing all possible details\n about the simulation currently running, including\n the list of robots, the list of services and datastreams,\n the list of middleware in use, etc.\n \"\"\"\n\n simu = blenderapi.persistantstorage()\n details = {}\n\n # Retrieves the list of services and associated middlewares\n services = {}\n services_iface = {}\n for n, i in simu.morse_services.request_managers().items():\n services.update(i.services())\n for cmpt in i.services():\n services_iface.setdefault(cmpt, []).append(n)\n\n def cmptdetails(c):\n c = simu.componentDict[c.name]\n cmpt = {\"type\": type(c).__name__,}\n if c.name() in services:\n cmpt[\"services\"] = services[c.name()]\n cmpt[\"service_interfaces\"] = services_iface[c.name()]\n\n if c.name() in simu.datastreams:\n streams = simu.datastreams[c.name()]\n cmpt[\"stream_interfaces\"] = [(stream[0], stream[2]) for stream in streams]\n\n return cmpt\n\n def robotdetails(r):\n robot = {\"name\": r.name(),\n \"type\": type(r).__name__,\n \"components\": {c.name:cmptdetails(c) for c in r.components},\n }\n if r.name() in services:\n robot[\"services\"] = services[r.name()]\n robot[\"services_interfaces\"] = services_iface[r.name()]\n return robot\n\n for n, i in simu.stream_managers.items():\n pass\n\n\n details['robots'] = [robotdetails(r) for n, r in simu.robotDict.items()]\n details['environment'] = blenderapi.getssr()['environment_file']\n return details\n\n\n @service\n def set_log_level(self, component, level):\n \"\"\"\n Allow to change the logger level of a specific component\n\n :param string component: the name of the logger you want to modify\n :param string level: the desired level of logging\n \"\"\"\n\n my_logger = logging.getLogger('morse.' 
+ component)\n        try:\n            my_logger.setLevel(level)\n        except ValueError as exn:\n            raise MorseRPCInvokationError(str(exn))\n\n\n    @service\n    def get_scene_objects(self):\n        \"\"\" Returns a hierarchical dictionary structure of all objects in the scene\n        along with their positions and orientations, formatted as a Python string\n        representation.\n        The structure:\n        {object_name: [dict_of_children, position_tuple, quaternion_tuple],\n         object_name: [dict_of_children, position_tuple, quaternion_tuple],\n         ...}\n        \"\"\"\n\n        scene = blenderapi.scene()\n        # Special Morse items to remove from the list\n        remove_items = ['Scene_Script_Holder', 'CameraFP', '__default__cam__',\n                        'MORSE.Properties', '__morse_dt_analyser']\n        top_levelers = [o for o in scene.objects\n                        if o.parent is None and\n                        o.name not in remove_items]\n\n        objects = {}\n        for obj in top_levelers:\n            objects.update(get_structured_children_of(obj))\n\n        return objects\n\n    @service\n    def set_object_visibility(self, object_name, visible, do_children):\n        \"\"\" Set the visibility of an object in the simulation.\n\n        Note: The object will still have physics and dynamics despite being invisible.\n\n        :param string object_name: The name of the object to change visibility of.\n        :param visible boolean: Make the object visible (True) or invisible (False)\n        :param do_children boolean: If True then the visibility of all children of\n        object_name is also set.\"\"\"\n\n        blender_object = get_obj_by_name(object_name)\n        blender_object.setVisible(visible, do_children)\n        return visible\n\n    @service\n    def set_object_dynamics(self, object_name, state):\n        \"\"\" Enable or disable the dynamics for an individual object.\n\n        Note: When turning on dynamics, the object will continue with the velocities\n        it had when it was turned off.\n\n        :param string object_name: The name of the object to change.\n        :param state boolean: Turn on dynamics (True), or off (False)\n        \"\"\"\n\n        blender_object = get_obj_by_name(object_name)\n        if state:\n            blender_object.restoreDynamics()\n        else:\n            blender_object.suspendDynamics()\n        return state\n\n    @service\n    def set_camarafp_far_clip(self, far_clip):\n        \"\"\" Set the CamaraFP (MORSE' environment camera) far clip distance\n\n        :param far_clip: The camera's far clip distance.\n        :type far_clip: float\n        \"\"\"\n        if far_clip > 0:\n            blender_object = get_obj_by_name('CameraFP')\n            blender_object.far = far_clip\n            return far_clip\n        return 0\n\n    @service\n    def set_camarafp_position(self, position):\n        \"\"\" Set the CamaraFP (MORSE' environment camera) world position. [x, y, z]\n\n        :param position: The camera's world position. [x, y, z].\n        :type position: list(float)\n        \"\"\"\n        blender_object = get_obj_by_name('CameraFP')\n        blender_object.worldPosition = position\n        return position\n\n    @service\n    def get_camarafp_position(self):\n        \"\"\" Get the CamaraFP (MORSE' environment camera) world position. [x, y, z]\n\n        :returns: The camera's world position. [x, y, z].\n        \"\"\"\n        blender_object = get_obj_by_name('CameraFP')\n        return blender_object.worldPosition\n\n    @service\n    def get_camarafp_transform(self):\n        \"\"\" Get the CamaraFP (MORSE' environment camera) world space transform matrix.\n\n        :returns: The camera's world space transform matrix. 
4x4 Matrix [[float]]\n \"\"\"\n blender_object = get_obj_by_name('CameraFP')\n return blender_object.worldTransform\n\n @service\n def set_camarafp_transform(self, transform):\n \"\"\" Set the CamaraFP (MORSE' environment camera) world space transform matrix.\n\n :param transform: The camera's world space transform matrix.\n :type transform: 4x4 Matrix [[float]]\n \"\"\"\n try:\n blender_object = get_obj_by_name('CameraFP')\n blender_object.worldTransform = mathutils.Matrix(transform)\n return transform\n except SystemError: # if the matrix is not 4x4 numpy raises a SystemError\n raise MorseRPCInvokationError( \"The Matrix must be 4x4 [[float]]\" )\n\n @service\n def set_camarafp_projection_matrix(self, projection_matrix):\n \"\"\" Set the CamaraFP (MORSE' environment camera) 4x4 projection matrix\n\n :param projection_matrix: The camera's 4x4 projection matrix.\n :type projection_matrix: 4x4 Matrix [[float]]\n \"\"\"\n try:\n blender_object = get_obj_by_name('CameraFP')\n blender_object.projection_matrix = mathutils.Matrix(projection_matrix)\n return projection_matrix\n except SystemError: # if the matrix is not 4x4 numpy raises a SystemError\n raise MorseRPCInvokationError( \"The Matrix must be 4x4 [[float]]\" )\n\n @service\n def get_camarafp_projection_matrix(self):\n \"\"\" Get the CamaraFP (MORSE' environment camera) 4x4 projection matrix\n \"\"\"\n blender_object = get_obj_by_name('CameraFP')\n return [list(vec) for vec in blender_object.projection_matrix]\n\n @service\n def set_object_position(self, object_name, position, orientation = None):\n \"\"\" Set the position (and optionally orientation of an object in\n the simulation. [x, y, z]\n\n :param position: The objects's world position. [x, y, z].\n :type position: list(float)\n :param orientation: (optional) The object's world\n orientation [roll pitch yaw] in radians\n :type: orientation: list(float)\n \"\"\"\n blender_object = get_obj_by_name(object_name)\n blender_object.worldPosition = position\n if orientation:\n blender_object.worldOrientation = orientation\n\n def action(self):\n pass\n","repo_name":"morse-simulator/morse","sub_path":"src/morse/services/supervision_services.py","file_name":"supervision_services.py","file_ext":"py","file_size_in_byte":12618,"program_lang":"python","lang":"en","doc_type":"code","stars":340,"dataset":"github-code","pt":"28"} +{"seq_id":"27359156728","text":"from flask import url_for\r\nfrom pydub import AudioSegment\r\nimport random\r\nimport string\r\nimport pandas as pd\r\nimport os\r\n\r\nglobal prev_overlay\r\n\r\ndef process_labels(labels):\r\n print(labels)\r\n list_of_sounds = []\r\n df = pd.read_csv(os.path.join('summerjams/media/','music_labels.csv'))\r\n tags = df['tags'].tolist()\r\n location = df['file'].tolist()\r\n\r\n for label in labels:\r\n if label.lower() in tags:\r\n print(label)\r\n pos = int(tags.index(label.lower()))\r\n list_of_sounds.append(location[pos])\r\n return (list_of_sounds)\r\n\r\ndef sound_manipulation(theme_type = \"Piano\", sounds = []):\r\n if (theme_type == \"Acoustic\"):\r\n theme = AudioSegment.from_wav(os.path.join('summerjams/media/background_themes/','Acoustic.wav'))\r\n elif (theme_type == \"Electronic\"):\r\n theme = AudioSegment.from_wav(os.path.join('summerjams/media/background_themes/','Electronic.wav'))\r\n else:\r\n theme = AudioSegment.from_wav(os.path.join('summerjams/media/background_themes/','Piano.wav'))\r\n\r\n\r\n if (len(sounds)!= 0):\r\n audio_files = {}\r\n # limit the number of sounds to a max of 4\r\n if (len(sounds)>4):\r\n 
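# Added note: random.sample picks four distinct indices without replacement,\r\n            # so no clip is overlaid twice in the mix.\r\n            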
chosen_sounds = random.sample(range(len(sounds)), 4)\r\n            for x in chosen_sounds:\r\n                audio_files[\"sound_{0}\".format(chosen_sounds.index(x))] = AudioSegment.from_wav(os.path.join('summerjams/media/sound files/',str(sounds[x])))\r\n        else:\r\n            for x in range(len(sounds)):\r\n                audio_files[\"sound_{0}\".format(x)] = AudioSegment.from_wav(os.path.join('summerjams/media/sound files/', str(sounds[x])))\r\n\r\n        overlay1 = theme.overlay(audio_files[\"sound_0\"],\r\n                                 position = 0.1 * len(theme),\r\n                                 gain_during_overlay=-2)\r\n        prev_overlay = overlay1\r\n\r\n        for i in range(len(audio_files)):\r\n            current_overlay = overlay_music(prev_overlay, audio_files[\"sound_\"+str(i)], i*0.3, -2)\r\n            prev_overlay = current_overlay\r\n        wave_obj = current_overlay.fade_out(duration = 3000)\r\n    else:\r\n        wave_obj = theme\r\n\r\n    file_name = nameGenerator()\r\n    wave_obj.export(os.path.join(\"summerjams/media/generated_files/\",file_name), format = \"wav\")\r\n    # return value - file name to be used to play the exported file\r\n    return (file_name)\r\n\r\ndef overlay_music(prev_sound, file, pos, volume):\r\n    overlay_clip = prev_sound.overlay(file,\r\n                                      position = (pos*len(prev_sound))+500,\r\n                                      gain_during_overlay = volume)\r\n    return overlay_clip\r\n\r\n# Generates a random file name - every time a new music file is created\r\ndef nameGenerator(string_length = 8):\r\n    character = string.ascii_lowercase + string.digits\r\n    return ((''.join(random.choice(character) for i in range(string_length)))+\".wav\")\r\n","repo_name":"manya-girdhar/Summer-Jams","sub_path":"summerjams/create_music.py","file_name":"create_music.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"31083510066","text":"import threading\nimport numpy as np\n\nn = int(input(\"Enter the number: \"))\nfib = np.zeros(n, dtype=int)\n\ndef runn(num):\n\tif num == 0:\n\t\tfib[num] = 0\n\telif num == 1:\n\t\tfib[num] = 1\n\telse:\n\t\tfib[num] = fib[num-1] + fib[num-2]\n\nif __name__ == \"__main__\":\n\tfor i in range(n):\n\t\tt1 = threading.Thread(target=runn, args=(i,))\n\t\tt1.start()\n\t\tt1.join() # join right away so fib[i-1] and fib[i-2] are filled in before the next thread starts\n\tprint(\"Fibonacci Sequence:\",fib)","repo_name":"Vatsalparsaniya/operating-system","sub_path":"Threading/FIBO.py","file_name":"FIBO.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"23559445732","text":"def master_and_visa(self): # boolean function\n    if self[0] == \"4\": # visa\n        if len(self) == 13 or len(self) == 16:\n            return True\n        else:\n            return False\n    elif self[:2] == \"51\" or self[:2] == \"52\" or self[:2] == \"53\" or self[:2] == \"54\" or self[:2] == \"55\": # master\n        if len(self) == 16:\n            return True\n        else:\n            return False\n    else:\n        return False\n\ndef check_digit(self): # boolean function\n    try:\n        check_digit = int(self[-1])\n        number = list(self[:-1])\n        number.reverse() # Luhn: work leftwards, doubling every second digit starting next to the check digit\n        for i in range(0,len(number),2):\n            number[i] = str(2 * int(number[i]))\n        for i in range(len(number)):\n            if len(number[i]) > 1:\n                number[i] = int(number[i][0]) + int(number[i][1]) # add the individual digits of two-digit numbers\n        for i in range(len(number)):\n            number[i] = int(number[i])\n        sum_digit = int(str(int(sum(number)) * 9)[-1]) # obtaining the correct check digit\n        if sum_digit == check_digit:\n            return True\n        else:\n            return False\n    except:\n        return False\n\ndef master_or_visa(self):\n    if master_and_visa(self) 
and check_digit(self):\n if self[0] == \"4\":\n print(\"VISA\")\n else:\n print(\"MASTERCARD\")\n else:\n print(\"INVALID CARD NUMBER\")\n\n\nnumber = input(\"Input Credit Card Number: \")\nmaster_or_visa(number)\n","repo_name":"blacksleek/python2013lab1","sub_path":"master_or_visa.py","file_name":"master_or_visa.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"5208841390","text":"# -*- coding: utf-8 -*-\n\nfrom typing import Dict, Mapping\n\nimport os\nimport pkg_resources\n\nfrom bag.design.module import Module\n\n\n# noinspection PyPep8Naming\nclass bag2_digital__flipflop_D_inv(Module):\n \"\"\"Module for library bag2_digital cell flipflop_D_inv.\n\n Fill in high level description here.\n \"\"\"\n yaml_file = pkg_resources.resource_filename(__name__,\n os.path.join('netlist_info',\n 'flipflop_D_inv.yaml'))\n\n\n def __init__(self, database, parent=None, prj=None, **kwargs):\n Module.__init__(self, database, self.yaml_file, parent=parent, prj=prj, **kwargs)\n\n @classmethod\n def get_params_info(cls) -> Mapping[str,str]:\n # type: () -> Dict[str, str]\n \"\"\"Returns a dictionary from parameter names to descriptions.\n\n Returns\n -------\n param_info : Optional[Dict[str, str]]\n dictionary from parameter names to descriptions.\n \"\"\"\n return dict(\n tgate_params_list = 'List of parameters for each transmission gate. Master0 and 1, then slave 0 and 1',\n inv_params_list = 'List of parameters for each inverter. master0 and 1, then slave 0 and 1.',\n buf_params_list = 'List of inverter parameters for the clock buffer.'\n )\n\n def design(self, **params):\n \"\"\"To be overridden by subclasses to design this module.\n\n This method should fill in values for all parameters in\n self.parameters. 
To design instances of this module, you can\n call their design() method or any other ways you coded.\n\n To modify schematic structure, call:\n\n rename_pin()\n delete_instance()\n replace_instance_master()\n reconnect_instance_terminal()\n restore_instance()\n array_instance()\n \"\"\"\n tgate_params_list = params['tgate_params_list']\n inv_params_list = params['inv_params_list']\n buf_params_list = params['buf_params_list']\n\n assert len(tgate_params_list) == 4, f'There are 4 transmission gates, not {len(tgate_params_list)}'\n assert len(inv_params_list) == 4, f'There are 4 inverters, not {len(inv_params_list)}'\n assert len(buf_params_list) > 1, f'Must have at least 2 inverters in clock buffer (currently {len(buf_params_list)})'\n\n idx_inst_map = ['M<0>', 'M<1>', 'S<0>', 'S<1>']\n\n for i, suffix in enumerate(idx_inst_map):\n self.instances[f'XTG_{suffix}'].design(mos_type='both', **(tgate_params_list[i]))\n self.instances[f'XINV_{suffix}'].design(stack_n=1, stack_p=1, **(inv_params_list[i]))\n\n for buf_params in buf_params_list:\n assert buf_params['stack_n'] == 1, f'Clock buffers should not be stacked'\n assert buf_params['stack_p'] == 1, f'Clock buffers should not be stacked'\n\n self.instances['XBUF'].design(dual_output=True, inv_param_list=buf_params_list)","repo_name":"PisterLab/bag2_digital","sub_path":"BagModules/bag2_digital/flipflop_D_inv.py","file_name":"flipflop_D_inv.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"25026829890","text":"import argparse\nimport copy\n\nimport yaml\n\n\nclass NestedDictAction(argparse.Action):\n \"\"\"Action class to append items to dict object.\n\n Examples:\n >>> parser = argparse.ArgumentParser()\n >>> _ = parser.add_argument('--conf', action=NestedDictAction,\n ... default={'a': 4})\n >>> parser.parse_args(['--conf', 'a=3', '--conf', 'c=4'])\n Namespace(conf={'a': 3, 'c': 4})\n >>> parser.parse_args(['--conf', 'c.d=4'])\n Namespace(conf={'a': 4, 'c': {'d': 4}})\n >>> parser.parse_args(['--conf', 'c.d=4', '--conf', 'c=2'])\n Namespace(conf={'a': 4, 'c': 2})\n >>> parser.parse_args(['--conf', '{d: 5, e: 9}'])\n Namespace(conf={'d': 5, 'e': 9})\n\n \"\"\"\n\n _syntax = \"\"\"Syntax:\n {op} =\n {op} .=\n {op} \n {op} \ne.g.\n {op} a=4\n {op} a.b={{c: true}}\n {op} {{\"c\": True}}\n {op} {{a: 34.5}}\n\"\"\"\n\n def __init__(\n self,\n option_strings,\n dest,\n nargs=None,\n default=None,\n choices=None,\n required=False,\n help=None,\n metavar=None,\n ):\n super().__init__(\n option_strings=option_strings,\n dest=dest,\n nargs=nargs,\n default=copy.deepcopy(default),\n type=None,\n choices=choices,\n required=required,\n help=help,\n metavar=metavar,\n )\n\n def __call__(self, parser, namespace, values, option_strings=None):\n # --{option} a.b=3 -> {'a': {'b': 3}}\n if \"=\" in values:\n indict = copy.deepcopy(getattr(namespace, self.dest, {}))\n key, value = values.split(\"=\", maxsplit=1)\n if not value.strip() == \"\":\n value = yaml.load(value, Loader=yaml.Loader)\n if not isinstance(indict, dict):\n indict = {}\n\n keys = key.split(\".\")\n d = indict\n for idx, k in enumerate(keys):\n if idx == len(keys) - 1:\n d[k] = value\n else:\n if not isinstance(d.setdefault(k, {}), dict):\n # Remove the existing value and recreates as empty dict\n d[k] = {}\n d = d[k]\n\n # Update the value\n setattr(namespace, self.dest, indict)\n else:\n try:\n # At the first, try eval(), i.e. Python syntax dict.\n # e.g. 
--{option} \"{'a': 3}\" -> {'a': 3}\n # This is workaround for internal behaviour of configargparse.\n value = eval(values, {}, {})\n if not isinstance(value, dict):\n syntax = self._syntax.format(op=option_strings)\n mes = f\"must be interpreted as dict: but got {values}\\n{syntax}\"\n raise argparse.ArgumentTypeError(self, mes)\n except Exception:\n # and the second, try yaml.load\n value = yaml.load(values, Loader=yaml.Loader)\n if not isinstance(value, dict):\n syntax = self._syntax.format(op=option_strings)\n mes = f\"must be interpreted as dict: but got {values}\\n{syntax}\"\n raise argparse.ArgumentError(self, mes)\n\n d = getattr(namespace, self.dest, None)\n if isinstance(d, dict):\n d.update(value)\n else:\n # Remove existing params, and overwrite\n setattr(namespace, self.dest, value)\n","repo_name":"espnet/espnet","sub_path":"espnet2/utils/nested_dict_action.py","file_name":"nested_dict_action.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","stars":7371,"dataset":"github-code","pt":"28"} +{"seq_id":"21947791327","text":"import json\ndef returnProfit(jsonObj):\n\n profit = 0\n busSlots = jsonObj[\"BusParkingSlots\"]\n carSlots = jsonObj[\"CarParkingSlots\"]\n busCharge, carCharge, bikeCharge = jsonObj[\"ParkingCharges\"].values()\n numBuses = jsonObj[\"Buses\"]\n numCars = jsonObj[\"Cars\"]\n numBikes = jsonObj[\"Bikes\"]\n\n # carBaseCharge = carCharge / 5\n # busBaseCharge = busCharge / 12\n\n # if bikeCharge > carBaseCharge:\n # best = \"Bike\"\n # else:\n # best = \"Car\"\n # # chargeAndSpaces = [(carBaseCharge, 5, numCars), (bikeCharge, 1, numBikes)].sort()\n\n # busSpace = busSlots * 12\n # carSpace = carSlots * 5\n\n # while carSpace != 0:\n # if best == \"Bike\":\n # if numBikes != 0:\n # profit += bikeCharge\n # carSpace -= 1\n # numBikes -= 1\n # elif carSpace >= 5 and numCars != 0:\n # profit += carCharge\n # carSpace -= 5\n # numCars -= 1\n # else:\n # break\n \n # else:\n # if carSpace >= 5 and numCars != 0:\n # profit += carCharge\n # carSpace -= 5\n # numCars -= 1\n # elif numBikes != 0:\n # profit += bikeCharge\n # carSpace -= 1\n # numBikes -= 1\n # else:\n # break\n \n # tempProfit = profit\n # tempNumBikes = numBikes\n # tempNumCars = numCars\n\n # if carSpace != 0:\n # while carSpace < 5:\n # carSpace += 1\n # tempProfit -= bikeCharge\n # tempNumBikes += 1\n # tempProfit += carCharge\n # carSpace -= 5\n # tempNumCars -=1\n\n # if tempProfit > profit:\n # numBikes = tempNumBikes\n # numCars = tempNumCars\n # profit = tempProfit\n \n # while busSpace != 0:\n # if busSpace <= chargeAndSpaces[0][1] and chargeAndSpaces[0][3] != 0:\n # profit += chargeAndSpaces[0][0] * chargeAndSpaces[0][1]\n # busSpace -= chargeAndSpaces[0][1]\n # chargeAndSpaces[0][3] = chargeAndSpaces[0][3] - 1\n \n # elif busSpace <= chargeAndSpaces[1][1] and chargeAndSpaces[1][3] != 0:\n # profit += chargeAndSpaces[1][0] * chargeAndSpaces[1][1]\n # busSpace -= chargeAndSpaces[1][1]\n # chargeAndSpaces[1][3] = chargeAndSpaces[1][3] - 1\n \n # elif busSpace <= chargeAndSpaces[2][1] and chargeAndSpaces[2][3] != 0:\n # profit += chargeAndSpaces[2][0] * chargeAndSpaces[2][1]\n # busSpace -= chargeAndSpaces[2][1]\n # chargeAndSpaces[2][3] = chargeAndSpaces[2][3] - 1\n # else:\n # break\n\n # return profit\n\n max_bikes = carSlots * 5 + busSlots * 12\n max_cars = carSlots + busSlots*2\n\n profits = []\n for buses_parked in range(min(numBuses + 1, busSlots + 1)):\n for cars_parked in range(min(numCars + 1, max_cars + 1)):\n for bikes_parked in 
range(min(numBikes + 1, max_bikes + 1)):\n                # Check if the combination exceeds the available slots (capacity in bike-units: each bus slot holds 12, each car slot holds 5)\n                if buses_parked*12 + cars_parked*5 + bikes_parked > busSlots*12 + carSlots*5:\n                    continue\n                \n                # Calculate the total profit for this combination\n                bus_profit = buses_parked * busCharge\n                car_profit = cars_parked * carCharge\n                bike_profit = bikes_parked * bikeCharge\n                total_profit = bus_profit + car_profit + bike_profit\n                \n                print(buses_parked, cars_parked, bikes_parked, total_profit)\n                # Append the combination and its profit to the list\n                profits.append((total_profit, buses_parked, cars_parked, bikes_parked))\n    \n    # Sort the list of profits in descending order\n    profits.sort(reverse=True)\n    \n    # Find the combination with the highest profit\n    if len(profits) > 0:\n        max_profit, buses_assigned, cars_assigned, bikes_assigned = profits[0]\n    else:\n        max_profit, buses_assigned, cars_assigned, bikes_assigned = [0, 0, 0, 0]\n    \n    # Calculate the rejected vehicles\n    buses_rejected = max(numBuses - buses_assigned, 0)\n    cars_rejected = max(numCars - cars_assigned, 0)\n    bikes_rejected = max(numBikes - bikes_assigned, 0)\n    \n    # Return the results\n    return {\"Answer\": {\"Profit\":max_profit, \"BusRejections\":buses_rejected, \"CarRejections\":cars_rejected, \"BikeRejections\":bikes_rejected}}","repo_name":"thekaranvs/UBS-CtrlAltElite-Python","sub_path":"solutions/parkingLot.py","file_name":"parkingLot.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"14722343648","text":"import re\nfrom bs4 import BeautifulSoup\nfrom requesting_urls import get_html\nimport os\n\ndef extract_events(url):\n    \"\"\"\n    Extract date, venue and discipline for competitions.\n\n    Args:\n        url (str): The url to extract events from.\n    Returns:\n        table_info (list of lists): A nested list where the rows\n        represent each race date, and the columns are\n        [venue, date, discipline].\n\n    \"\"\"\n    disciplines = {\n        \"DH\": \" Downhill \",\n        \"SL\": \" Slalom \",\n        \"GS\": \" Giant Slalom \",\n        \"SG\": \" Super Giant Slalom \",\n        \"AC\": \" Alpine Combined \",\n        \"PG\": \" Parallel Giant Slalom \"}\n\n    # get the html\n    html = get_html(url)\n\n    # make soup\n    soup = BeautifulSoup(html, \"html.parser\")\n    # Find the tag that contains the Calendar header span\n    calendar_header = soup.find(id=\"Calendar\")\n\n    # Find the following table\n    calendar_table = calendar_header.find_all_next(\"table\")[0]\n\n    # Find the rows of the first table\n    rows = calendar_table.find_all(\"tr\")\n\n    events = []\n    for row in rows:\n        cells = row.find_all(\"td\")\n        # Discards all uninteresting cells\n        if len(cells) in {9, 10, 11}:\n            date = cells[2].text.strip()\n\n            if len(cells) == 11:\n                venue = cells[3].text.strip()\n                for cell in cells:\n                    # Check if the cell contains a two-letter discipline code\n                    match = re.match(r'[A-Z]{2}', cell.text.strip())\n                    if match:\n                        events.append((venue, date, disciplines[match.group()]))\n            else:\n                for cell in cells:\n                    match = re.match(r'[A-Z]{2}', cell.text.strip())\n                    if match:\n                        events.append((venue, date, disciplines[match.group()]))\n    return events\n\n\ndef create_betting_slip(events, save_as):\n    \"\"\" Saves a markdown format betting slip to the location\n    './datetime_filter/<save_as>.
md'.\n    Args:\n        events (list): a list of 3-tuples containing venue, date and discipline for each event\n        save_as (string): filename to save the markdown betting slip as.\n    \"\"\"\n    # ensure directory exists\n    os.makedirs(\"datetime_filter\", exist_ok = True)\n\n    with open(f\"./datetime_filter/{save_as}.md\", \"w\") as out_file:\n        out_file.write(f\"# BETTING SLIP ({save_as})\\n\\nName:\\n\\n\")\n        out_file.write(\"| Date | Venue | Discipline | Who wins? |\\n\")\n        out_file.write(\"| --- | --- | --- | --- |\\n\")\n        for e in events:\n            venue, date, discipline = e\n            out_file.write(f\"| {date} | {venue} | {discipline} | |\\n\")\n\n\nif __name__ == '__main__':\n    url = 'https://en.wikipedia.org/wiki/2021%E2%80%9322_FIS_Alpine_Ski_World_Cup'\n\n    events = extract_events(url)\n    save_as = 'FIS Betting sheet'\n    create_betting_slip(events, save_as)\n","repo_name":"JouvalSomer/IN3110","sub_path":"assignment5/time_planner.py","file_name":"time_planner.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"15933524210","text":"\n# coding: utf-8\n\n# In[48]:\n\n\nimport tensorflow as tf\n\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# normalize the data -> scale the values into [0, 1]\nx_train = tf.keras.utils.normalize(x_train, axis=1)\nx_test = tf.keras.utils.normalize(x_test, axis=1)\n\n# 2 types of models; one is Sequential\nmodel = tf.keras.models.Sequential()\n#add layer\n#flatten layer\nmodel.add(tf.keras.layers.Flatten())\n# Dense layer: 128 units (neurons) with ReLU activation\nmodel.add(tf.keras.layers.Dense(128, activation = tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(128, activation = tf.nn.relu))\n#output layer\nmodel.add(tf.keras.layers.Dense(10, activation = tf.nn.softmax))\n\n#parameters for training model\nmodel.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy',\n             metrics = ['accuracy'])\nmodel.fit(x_train, y_train, epochs = 3)\n\n#calculate the validation loss\nval_loss, val_accuracy = model.evaluate(x_test, y_test)\nprint(val_loss, val_accuracy) # after 3 epochs, loss = 0.086, accuracy = 0.97\n\n\n\n# In[49]:\n\n\n#save model\nmodel.save('number_epic.model')\n#read model\nnew_model = tf.keras.models.load_model('number_epic.model')\n\n\n# In[50]:\n\n\n#prediction\npredictions = new_model.predict([x_test])\nprint(predictions)\n\n\n# In[56]:\n\n\n# raw output is not friendly --> let's use numpy\nimport numpy as np\nprint(np.argmax(predictions[1]))\n\n\n# In[55]:\n\n\n# show the predicted image\nplt.imshow(x_test[1])\nplt.show()\n\n\n# In[51]:\n\n\nimport matplotlib.pyplot as plt\n\n#print(x_train[0])\n\n# if it's an image\nplt.imshow(x_train[0])\nplt.show()\n\n# if it's binary\nplt.imshow(x_train[0], cmap = plt.cm.binary)\n\n\n# In[52]:\n\n\nprint(x_train[0])\n\n","repo_name":"fiona8231/Tensorflow_keras1","sub_path":"Tensorflow1.py","file_name":"Tensorflow1.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"71984176716","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    # @return a ListNode\n    def removeNthFromEnd(self, head, n):\n        if head is None: return None\n        \n        nodes = []\n        p = head\n        while p:\n            nodes.append(p)\n            p = p.next\n        \n        if n == len(nodes):\n            return nodes[1] if len(nodes) > 1 else None\n        else:\n            nodes[-n-1].next = nodes[-n+1] if n > 1 else 
None\n return head\n \n ","repo_name":"xiaonanln/myleetcode-python","sub_path":"src/Remove Nth Node From End of List.py","file_name":"Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"33757625700","text":"import sys\nimport inro.modeller as _modeller\nimport inro.emme.desktop.app as _app\nimport inro.emme.core.exception as _exception\nimport itertools as _itertools\nimport datetime\nimport os\nfrom shutil import copyfile\n\nclass BKRCastExportNetwork(_modeller.Tool()):\n '''\n this tool is to populate AM, MD and PM peak hour network and their associated network\n input files in EMME punch file format.\n Files will be produced:\n base network file, link shape file, turn file, and transit lines for AM, MD, PM and NI.\n 1.1.0: populate vdf functions for four TOD.\n 1.1.1: populate sc_headway.csv\n 1.1.2: remove future bike links with modes == \"wk\" and @biketype == 0\n 1.3.0: upgrade to python 3.7, compatible with EMME 4.5.1\n '''\n version = \"1.3.0\" # this is the version\n default_path = \"\"\n tool_run_message = \"\"\n outputFolder = _modeller.Attribute(object)\n\n def page(self):\n pb = _modeller.ToolPageBuilder(self, title=\"BKRCast Network Interface\",\n description=\"Populate networks from master network\",\n branding_text=\"Modeling and Analysis Group -- City of Bellevue Transportation\")\n pb.add_select_file(\"outputFolder\", \"directory\", \"\", self.default_path, title = \"Select the directory for output files\")\n\n if self.tool_run_message != \"\":\n pb.tool_run_status(self.tool_run_msg_status)\n\n return pb.render()\n\n @_modeller.method(return_type=str)\n def tool_run_msg_status(self):\n return self.tool_run_message\n\n @property\n def current_scenario(self):\n return _modeller.Modeller().desktop.data_explorer().primary_scenario.core_scenario\n\n @property\n def current_emmebank(self):\n return self.current_scenario.emmebank\n\n def run(self):\n self.tool_run_message = \"\"\n try:\n self.__call__()\n run_message = \"Network exported\"\n self.tool_run_message += _modeller.PageBuilder.format_info(run_message)\n except Exception as e:\n self.tool_run_message += _modeller.PageBuilder.format_exception(exception = e, chain = False)\n\n @_modeller.logbook_trace(name=\"BKRCast Export Network\", save_arguments=True)\n def __call__(self):\n\n ## total number of scenarios allowed\n tot_scn_spaces = self.current_emmebank.dimensions['scenarios']\n scens = self.current_emmebank.scenarios()\n current_scen = self.current_scenario\n _modeller.logbook_write(\"Version\", self.version)\n\n with _modeller.logbook_trace(name = 'Remove future non-motorized-only links', value = \"\"):\n self.removeExtraBikeLinks(current_scen)\n\n num_scns = 0;\n for scen in scens:\n num_scns = num_scns + 1\n print(\"Total allowed scenarios \" + str(tot_scn_spaces))\n print(\"Total scenarios \" + str(num_scns))\n\n if tot_scn_spaces < num_scns + 4:\n self.tool_run_message += _modeller.PageBuilder.format_info(\"Does not have enough space for scenarios. 
Please increase dimension to accommodate at least three more scenarios\")\n exit(1)\n\n am_net_name = os.path.join(self.outputFolder, \"am_roadway.in\")\n md_net_name = os.path.join(self.outputFolder, \"md_roadway.in\")\n pm_net_name = os.path.join(self.outputFolder, \"pm_roadway.in\")\n ni_net_name = os.path.join(self.outputFolder, \"ni_roadway.in\")\n am_shape = os.path.join(self.outputFolder, \"am_linkshapes.in\")\n md_shape = os.path.join(self.outputFolder, \"md_linkshapes.in\")\n pm_shape = os.path.join(self.outputFolder, \"pm_linkshapes.in\")\n ni_shape = os.path.join(self.outputFolder, \"ni_linkshapes.in\")\n am_turn_name = os.path.join(self.outputFolder, \"am_turns.in\")\n md_turn_name = os.path.join(self.outputFolder, \"md_turns.in\")\n pm_turn_name = os.path.join(self.outputFolder, \"pm_turns.in\")\n ni_turn_name = os.path.join(self.outputFolder, \"ni_turns.in\")\n am_transit_name = os.path.join(self.outputFolder, \"am_transit.in\")\n md_transit_name = os.path.join(self.outputFolder, \"md_transit.in\")\n pm_transit_name = os.path.join(self.outputFolder, \"pm_transit.in\")\n ni_transit_name = os.path.join(self.outputFolder, \"ni_transit.in\")\n am_vdf_name = os.path.join(self.outputFolder, \"vdfs6to9.txt\")\n md_vdf_name = os.path.join(self.outputFolder, \"vdfs9to1530.txt\")\n pm_vdf_name = os.path.join(self.outputFolder, \"vdfs1530to1830.txt\")\n ni_vdf_name = os.path.join(self.outputFolder, \"vdfs1830to6.txt\")\n headway_name = os.path.join(self.outputFolder, 'sc_headways.csv')\n \n with _modeller.logbook_trace(name = \"Export headway file\", value = \"\"):\n self.exportTransitLineHeadway(current_scen, headway_name)\n\n with _modeller.logbook_trace(name = \"Export temporary transit network\", value = \"\"):\n self.tLineNetCalculator(\"hdw\", \"ut1\", 'all')\n self.exportTransit(am_transit_name, current_scen, \"not hdw = 999\")\n self.tLineNetCalculator(\"hdw\", \"ut2\", 'all')\n self.exportTransit(md_transit_name, current_scen, \"not hdw = 999\")\n self.tLineNetCalculator(\"hdw\", \"ut3\", 'all')\n self.exportTransit(pm_transit_name, current_scen, \"not hdw = 999\")\n self.tLineNetCalculator(\"hdw\", \"@nihdwy\", 'all')\n self.exportTransit(ni_transit_name, current_scen, \"not hdw = 999\")\n\n\n with _modeller.logbook_trace(name = \"Create scenario for time periods\", value = \"\"):\n today = datetime.date.today().strftime(\"%m%d%Y\")\n amScen = self.copyScenario(current_scen, 224, \"AMPK BKRCast \" + today, True, True, True)\n mdScen = self.copyScenario(current_scen, 225, \"MDPK BKRCast \" + today, True, True, True)\n pmScen = self.copyScenario(current_scen, 226, \"PMPK BKRCast \" + today, True, True, True)\n niScen = self.copyScenario(current_scen, 227, \"NIPK BKRCast \" + today, True, True, True)\n\n _modeller.Modeller().desktop.data_explorer().replace_primary_scenario(amScen)\n self.linkNetCalculator(\"ul1\", \"@revlane_cap\", \"@revlane = 1,4\")\n self.linkNetCalculator(\"ul2\", \"0.01\", \"@revlane = 1,4\")\n self.linkNetCalculator(\"ul2\", \"60\", \"@revlane = 2 or @revlane = 4 and vdf = 1\")\n self.linkNetCalculator(\"ul2\", \"35\", \"@revlane = 2 or @revlane = 4 and vdf = 3\")\n\n _modeller.Modeller().desktop.data_explorer().replace_primary_scenario(mdScen)\n self.linkNetCalculator(\"ul1\", \"@revlane_cap * 0.5\", \"@revlane = 1,4\")\n self.linkNetCalculator(\"ul2\", \"60\", \"@revlane = 1,4 and vdf = 1\")\n self.linkNetCalculator(\"ul2\", \"35\", \"@revlane = 1,4 and vdf = 3\")\n \n _modeller.Modeller().desktop.data_explorer().replace_primary_scenario(pmScen)\n 
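# Added note (inference from the AM block above): in the PM scenario the reversible lanes\n            # coded @revlane = 1 or 3 serve the peak direction at free-flow speed, while the opposing\n            # reversible lanes stay closed (ul2 = 0.01).\n            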
self.linkNetCalculator(\"ul1\", \"@revlane_cap\", \"@revlane = 1,4\")\n self.linkNetCalculator(\"ul2\", \"0.01\", \"@revlane = 1,4\")\n self.linkNetCalculator(\"ul2\", \"60\", \"@revlane = 1 or @revlane = 3 and vdf = 1\")\n self.linkNetCalculator(\"ul2\", \"35\", \"@revlane = 1 or @revlane = 3 and vdf = 3\")\n\n _modeller.Modeller().desktop.data_explorer().replace_primary_scenario(niScen)\n self.linkNetCalculator(\"ul1\", \"@revlane_cap\", \"@revlane = 1,4\")\n self.linkNetCalculator(\"ul2\", \"0.01\", \"@revlane = 1,4\")\n self.linkNetCalculator(\"ul2\", \"60\", \"@revlane = 1 or @revlane = 3 and vdf = 1\")\n self.linkNetCalculator(\"ul2\", \"35\", \"@revlane = 1 or @revlane = 3 and vdf = 3\")\n\n _modeller.Modeller().desktop.data_explorer().replace_primary_scenario(current_scen)\n \n # create transit lines for AM, MD, PM and NI. headways are saved in ut1 ~ ut3 and @nihdwy\n\n self.deleteTransitLines(amScen, \"all\")\n self.loadTransitLines(amScen, am_transit_name, True)\n self.deleteTransitLines(mdScen, \"all\")\n self.loadTransitLines(mdScen, md_transit_name, True)\n self.deleteTransitLines(pmScen, \"all\")\n self.loadTransitLines(pmScen, pm_transit_name, True)\n self.deleteTransitLines(niScen, \"all\")\n self.loadTransitLines(niScen, ni_transit_name, True)\n\n #export base network\n with _modeller.logbook_trace(name = \"Export base network\", value = \"\"):\n self.exportBaseNetwork(amScen, \"all\", \"all\", am_net_name, False, \" \", \"PROMPT_DATA_FORMAT\")\n self.exportBaseNetwork(mdScen, \"all\", \"all\", md_net_name, False, \" \", \"PROMPT_DATA_FORMAT\")\n self.exportBaseNetwork(pmScen, \"all\", \"all\", pm_net_name, False, \" \", \"PROMPT_DATA_FORMAT\")\n self.exportBaseNetwork(niScen, \"all\", \"all\", ni_net_name, False, \" \", \"PROMPT_DATA_FORMAT\")\n \n # export link shapes\n with _modeller.logbook_trace(name = \"Export link shapes\", value = \"\"):\n self.exportLinkShapes(amScen, \"all\", am_shape, \" \", False)\n self.exportLinkShapes(mdScen, \"all\", md_shape, \" \", False)\n self.exportLinkShapes(pmScen, \"all\", pm_shape, \" \", False)\n self.exportLinkShapes(niScen, \"all\", ni_shape, \" \", False)\n\n # export turns\n with _modeller.logbook_trace(name = \"Export turns\", value = \"\"):\n self.exportTurns(amScen, \"all\", am_turn_name, \" \", False, \"PROMPT_DATA_FORMAT\")\n self.exportTurns(mdScen, \"all\", md_turn_name, \" \", False, \"PROMPT_DATA_FORMAT\")\n self.exportTurns(pmScen, \"all\", pm_turn_name, \" \", False, \"PROMPT_DATA_FORMAT\")\n self.exportTurns(niScen, \"all\", ni_turn_name, \" \", False, \"PROMPT_DATA_FORMAT\")\n \n #export transit lines\n with _modeller.logbook_trace(name = \"Export transit network\", value = \"\"):\n self.exportTransit(am_transit_name, amScen, \"not hdw = 999\")\n self.exportTransit(md_transit_name, mdScen, \"not hdw = 999\")\n self.exportTransit(pm_transit_name, pmScen, \"not hdw = 999\")\n self.exportTransit(ni_transit_name, niScen, \"not hdw = 999\")\n\n #export vdf functions (all functions, overwrite if file exists)\n with _modeller.logbook_trace(name = \"Export vdfs\", value = \"\"):\n self.exportVDF(am_vdf_name)\n self.exportVDF(md_vdf_name)\n self.exportVDF(pm_vdf_name)\n self.exportVDF(ni_vdf_name) \n\n def exportTransit(self, tempFileName, scen, selection):\n NAMESPACE = \"inro.emme.data.network.transit.export_transit_lines\"\n export_transitlines = _modeller.Modeller().tool(NAMESPACE)\n emmebank_dir = os.path.dirname(_modeller.Modeller().emmebank.path)\n line_file = os.path.join(emmebank_dir, tempFileName)\n 
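# Editor's note (assumption): the export file is written next to the emmebank so the loadTransitLines calls in __call__ can read the same path back later.\n 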
export_transitlines(export_file = line_file, selection = selection, scenario = scen)\n \n def tLineNetCalculator(self, result, expression, sel):\n NAMESPACE = \"inro.emme.network_calculation.network_calculator\"\n specs = {\n \"type\": \"NETWORK_CALCULATION\",\n \"result\": result,\n \"expression\": expression,\n \"selections\": {\n \"transit_line\": sel }\n }\n netcalc = _modeller.Modeller().tool(NAMESPACE)\n report = netcalc(specs)\n\n def copyScenario(self, fromScen, toScenID, title, copyStrategy, copyShape, overwrite):\n NAMESPACE = \"inro.emme.data.scenario.copy_scenario\"\n copy_scenario = _modeller.Modeller().tool(NAMESPACE)\n toScen = copy_scenario(from_scenario = fromScen, scenario_id = toScenID, scenario_title = title, copy_strategies = copyStrategy,\n copy_linkshapes = copyShape, overwrite = overwrite)\n return toScen\n\n def linkNetCalculator(self, result, expression, selectors):\n NAMESPACE = \"inro.emme.network_calculation.network_calculator\"\n specs = {\n \"type\": \"NETWORK_CALCULATION\",\n \"result\": result,\n \"expression\": expression,\n \"selections\": {\n \"link\": selectors }\n }\n\n netCalc = _modeller.Modeller().tool(NAMESPACE)\n report = netCalc(specs)\n\n def loadTransitLines(self, scen, transitFile, revertOnError):\n NAMESPACE = \"inro.emme.data.network.transit.transit_line_transaction\"\n load_transit = _modeller.Modeller().tool(NAMESPACE)\n load_transit(scenario = scen, transaction_file = transitFile, revert_on_error = revertOnError)\n\n def exportBaseNetwork(self, scen, node_selector, link_selector, exportname, append, seperator, exportformat):\n NAMESPACE = \"inro.emme.data.network.base.export_base_network\"\n export_base = _modeller.Modeller().tool(NAMESPACE)\n export_base(scenario = scen, selection = {\"link\": link_selector,\n \"node\": node_selector}, export_file = exportname, append_to_file = append,\n field_separator = seperator, export_format = exportformat)\n\n def exportLinkShapes(self, scen, selector, exportfile, seperator, append):\n NAMESPACE = \"inro.emme.data.network.base.export_link_shape\"\n export_shape = _modeller.Modeller().tool(NAMESPACE)\n export_shape(scenario = scen, export_file = exportfile, selection = selector, \n field_separator = seperator, append_to_file = append)\n\n def exportTurns(self, scen, selector, exportfile, seperator, append, exportformat):\n NAMESPACE = \"inro.emme.data.network.turn.export_turns\"\n export_turns = _modeller.Modeller().tool(NAMESPACE)\n export_turns(scenario = scen, selection = selector, export_file = exportfile, field_separator = seperator,\n append_to_file = append, export_format = exportformat)\n\n def deleteTransitLines(self, scen, selector):\n NAMESPACE = \"inro.emme.data.network.transit.delete_transit_lines\"\n delete_tline = _modeller.Modeller().tool(NAMESPACE)\n tot = delete_tline(scenario = scen, selection = selector)\n return tot\n\n def exportVDF(self, exportfile):\n NAMESPACE = \"inro.emme.data.function.export_functions\"\n export_function = _modeller.Modeller().tool(NAMESPACE)\n export_function(export_file = exportfile, append_to_file = False)\n\n def exportTransitLineHeadway(self, curScen, exportfile):\n network = curScen.get_network()\n tlines = network.transit_lines()\n\n with open(exportfile, mode = 'w') as f:\n f.write('LineID,hdw_6to9,hdw_9to1530,hdw_1530to1830,hdw_1830to6,id\\n')\n for tline in tlines:\n f.write('{0:d}, {1:.0f}, {2:.0f}, {3:.0f}, {4:.0f}, {5:d}\\n'.format(int(tline.id), tline.data1, tline.data2, tline.data3, tline['@nihdwy'], int(tline.id)))\n\n # remove future 
non-motorized links where modes == \"wk\" and @biketype == 0 (for a non-motorized-only link, @biketype has to be 1).\n def removeExtraBikeLinks(self, curScen):\n network = curScen.get_network()\n links = list(network.links()) # materialize the iterator so links can be deleted while looping\n bikemodeset = set([network.mode('w'), network.mode('k')])\n\n emmebank_dir = os.path.dirname(_modeller.Modeller().emmebank.path)\n extraBikeLinks = os.path.join(emmebank_dir, 'removed_bike_links.dat')\n with open(extraBikeLinks, mode = 'w') as f:\n for link in links: \n if (link.modes == bikemodeset) and (link['@biketype'] == 0):\n print('link ', link.id, ' is removed from network')\n f.write('link {0} is removed from network\\n'.format(link.id))\n network.delete_link(link.i_node, link.j_node)\n\n curScen.publish_network(network)\n","repo_name":"bellevuewa/BKRCast","sub_path":"scripts/modeller/Master_network_only/PopulateNetwork.py","file_name":"PopulateNetwork.py","file_ext":"py","file_size_in_byte":15568,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"} +{"seq_id":"28940866052","text":"# [Reference]\n# https://www.tensorflow.org/lite/performance/post_training_quantization\n\nimport sys\nimport os\nimport glob\nfrom argparse import ArgumentParser, SUPPRESS, RawTextHelpFormatter\nimport tensorflow as tf\nimport cv2\nimport numpy as np\n\ndef build_argparser():\n parser = ArgumentParser(add_help=False, formatter_class=RawTextHelpFormatter)\n parser.add_argument('-h', '--help', action='help', default=SUPPRESS, \n help='Show this help message and exit.')\n parser.add_argument(\"-i\", \"--input\", type=str, default=\"../../SampleExports/TensorFlowSavedModel/\",\n help=\"Input data directory (SavedModel directory)\")\n parser.add_argument(\"-o\", \"--output\", type=str, default=\"./converted_model.tflite\",\n help=\"Output file (tflite file)\")\n parser.add_argument(\"-d\", \"--data_dir\", type=str, default=\"./data\",\n help=\"Calibration data directory\")\n parser.add_argument(\"--width\", type=int, default=224,\n help=\"Input data width\")\n parser.add_argument(\"--hight\", type=int, default=224,\n help=\"Input data height\")\n parser.add_argument('--quantize', nargs='*',\n help=\"Quantization type \")\n return parser\n\n# Calibration data generator routine\ndef representative_dataset_gen():\n # Declare globals\n global data_dir\n global input_width\n global input_height\n\n # Build the list of file names\n files = glob.glob(os.path.join(data_dir,'**/*.jpg'), recursive=True)\n\n for filename in files :\n input_file = os.path.abspath(filename)\n assert os.path.isfile(input_file), \"Specified input file doesn't exist\"\n \n # Load the image\n image = cv2.imread(input_file)\n # Convert to a format the NN can accept\n resized_image = cv2.resize(image, (input_width, input_height)) # input size of coco ssd mobilenet?\n resized_image = np.expand_dims(resized_image, axis=0) # 3D -> 4D\n resized_image = resized_image.astype(np.float32) # type conversion uint8 -> float32 \n \n yield [resized_image] # wrap in a list and return\n\ndef main():\n # Declare globals\n # (kept global so the calibration data generator routine can reference them)\n global data_dir\n global input_width\n global input_height\n \n # Parse command-line arguments\n args = build_argparser().parse_args()\n print(args)\n quantize = args.quantize\n # If the quantize option is not given, use an empty list\n if not quantize :\n quantize = []\n \n # Set globals\n data_dir = args.data_dir\n input_width = args.width\n input_height = args.hight\n \n tf.compat.v1.enable_eager_execution()\n \n # Full Integer Quantization - Input/Output=int8\n # export_model = os.path.splitext(args.output)[0] + \"_full_integer_quant\" + os.path.splitext(args.output)[1] # output file name\n export_model = args.output\n converter = 
tf.lite.TFLiteConverter.from_saved_model(args.input)\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset_gen\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.uint8\n converter.inference_output_type = tf.uint8\n tflite_quant_model = converter.convert()\n open(export_model, \"wb\").write(tflite_quant_model)\n print(\"==== Full Integer Quantization complete! ====\")\n \n if \"weight\" in quantize :\n # Weight Quantization - Input/Output=float32\n export_model = os.path.splitext(args.output)[0] + \"_weight_quant\" + os.path.splitext(args.output)[1] # output file name\n converter = tf.lite.TFLiteConverter.from_saved_model(args.input)\n converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\n tflite_quant_model = converter.convert()\n open(export_model, \"wb\").write(tflite_quant_model)\n print(\"==== Weight Quantization complete! ====\")\n \n if \"integer\" in quantize :\n # Integer Quantization - Input/Output=float32\n export_model = os.path.splitext(args.output)[0] + \"_integer_quant\" + os.path.splitext(args.output)[1] # output file name\n converter = tf.lite.TFLiteConverter.from_saved_model(args.input)\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset_gen\n tflite_quant_model = converter.convert()\n open(export_model, \"wb\").write(tflite_quant_model)\n print(\"==== Integer Quantization complete! ====\")\n \n\nif __name__ == '__main__':\n sys.exit(main() or 0)\n\n# How to write the function assigned to converter.representative_dataset\n\n'''\nsaved_model_dir=\"./TensorFlowSavedModel\"\nexport_model = \"converted_model.tflite\"\nexport_model_size = \"converted_model_size.tflite\"\n\nprint(\"**** DEFAULT ****\")\nconverter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\n# converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\ntflite_quant_model = converter.convert()\nopen(export_model, \"wb\").write(tflite_quant_model)\n\nprint(\"**** SIZE ****\")\nconverter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\nconverter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\ntflite_quant_model = converter.convert()\nopen(export_model_size, \"wb\").write(tflite_quant_model)\n\n\n'''\n","repo_name":"ippei8jp/CustomVisionExport","sub_path":"tflite/convert_model/saved_model2tflite.py","file_name":"saved_model2tflite.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"43600666646","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests for the ADVANCED calculator\"\"\"\n\nimport unittest\nfrom proyecto.avanzada import CalculadoraAvanzada\n\n\nclass TestCalculadoraAvanzada(unittest.TestCase):\n \"\"\"ADVANCED calculator tests\"\"\"\n\n ca = CalculadoraAvanzada()\n\n def test_pow(self):\n \"\"\"Test the POWER operation\"\"\"\n r = self.ca.pow(3)\n self.assertEqual(r, 9)\n\n def test_pown(self):\n \"\"\"Test the POWER N operation\"\"\"\n r = self.ca.pown(3, 3)\n self.assertEqual(r, 27)\n\n def test_sqrt(self):\n \"\"\"Test the SQUARE ROOT operation\"\"\"\n r = self.ca.sqrt(9)\n self.assertEqual(r, 3.0)\n\n\nif __name__ == '__main__':\n 
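# Allows running the suite directly, e.g.: python test_avanzada.py\n 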
unittest.main()","repo_name":"danydlhm/danke_challenge","sub_path":"tests/test_avanzada.py","file_name":"test_avanzada.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"7651022286","text":"#Make a two-player Rock-Paper-Scissors game.\n# (Hint: Ask for player plays (using input), compare them, print out a message of congratulations to the winner,\n# and ask if the players want to start a new game)\n# rock > scissors\n# scissors > paper\n# paper > rock\n\nplayer1w = 0\nplayer2w = 0\ngames = 0\n\noption1 = \"Rock\"\noption2 = \"Paper\"\noption3 = \"Scissors\"\n\nwhile games < 3:\n player1 = input(f\"Player 1 pick one: {option1}, {option2}, {option3}\")\n player2 = input(f\"Player 2 pick one: {option1}, {option2}, {option3}\")\n if player1.upper() == option1.upper():\n if player2.upper() == option3.upper():\n print(\"Player 1 wins\")\n player1w += 1\n elif player2.upper() == option2.upper():\n print(\"Player 2 wins\")\n player2w += 1\n elif player2.upper() == option1.upper():\n print(\"It is a draw\")\n elif player1.upper() == option2.upper():\n if player2.upper() == option1.upper():\n print(\"Player 1 wins\")\n player1w += 1\n elif player2.upper() == option3.upper():\n print(\"Player 2 wins\")\n player2w += 1\n elif player2.upper() == option2.upper():\n print(\"It is a draw\")\n elif player1.upper() == option3.upper():\n if player2.upper() == option2.upper():\n print(\"Player 1 wins\")\n player1w += 1\n elif player2.upper() == option1.upper():\n print(\"Player 2 wins\")\n player2w += 1\n elif player2.upper() == option3.upper():\n print(\"It is a draw\")\n else:\n print('Not a valid option')\n games += 1\n\nif player1w > player2w:\n print(\"Player 1 is best of 3\")\nelif player2w > player1w:\n print(\"Player 2 is best of 3\")\nelse:\n print(\"The series ended in a tie\")\n\n\n\n","repo_name":"wreinert/Python-Exercises","sub_path":"Rock Paper Scissors.py","file_name":"Rock Paper Scissors.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"27680598195","text":"import tkinter as tk\nimport tkinter.messagebox\nimport customtkinter\nfrom pages.simplePage import *\nfrom pages.contentBasedPage import *\nfrom pages.collaborativePage import *\nfrom pages.hybridPage import *\n\ncustomtkinter.set_appearance_mode(\"Light\") # Modes: \"System\" (standard), \"Dark\", \"Light\"\ncustomtkinter.set_default_color_theme(\"green\") # Themes: \"blue\" (standard), \"green\", \"dark-blue\"\n\n\nclass App(customtkinter.CTk):\n\n WIDTH = 780\n HEIGHT = 520\n\n def __init__(self):\n super().__init__()\n\n self.title(\"Movie ReSys\")\n self.geometry(f\"{App.WIDTH}x{App.HEIGHT}\")\n self.protocol(\"WM_DELETE_WINDOW\", self.on_closing) # call .on_closing() when app gets closed\n # ============ create two frames ============\n\n # configure grid layout (2x1)\n self.grid_columnconfigure(1, weight=1)\n self.grid_rowconfigure(0, weight=1)\n\n self.frame_left = customtkinter.CTkFrame(master=self,\n width=180,\n corner_radius=0)\n self.frame_left.grid(row=0, column=0, sticky=\"nswe\")\n\n self.frame_right = customtkinter.CTkFrame(master=self)\n self.frame_right.grid(row=0, column=1, sticky=\"nswe\", padx=20, pady=20)\n\n self.frame_right.rowconfigure((0, 1, 2, 3), weight=1)\n self.frame_right.rowconfigure(7, weight=10)\n self.frame_right.columnconfigure((0, 1), weight=1)\n self.frame_right.columnconfigure(2, weight=0)\n\n # Add page\n self.frames = {}\n self.frames[\"ContentBasedPage\"] = 
ContentBasedPage(parent=self.frame_right)\n self.frames[\"ContentBasedPage\"].grid(row=0, column=1, sticky=\"nsew\")\n self.frames[\"CollaborativeBasedPage\"] = CollaborativeBasedPage(parent=self.frame_right)\n self.frames[\"CollaborativeBasedPage\"].grid(row=0, column=1, sticky=\"nsew\")\n self.frames[\"HybridPage\"] = HybridPage(parent=self.frame_right)\n self.frames[\"HybridPage\"].grid(row=0, column=1, sticky=\"nsew\")\n self.frames[\"SimplePage\"] = SimplePage(parent=self.frame_right)\n self.frames[\"SimplePage\"].grid(row=0, column=1, sticky=\"nsew\")\n\n # ============ frame_left ============\n\n # configure grid layout (1x11)\n self.frame_left.grid_rowconfigure(0, minsize=10) # empty row with minsize as spacing\n self.frame_left.grid_rowconfigure(2, minsize=10) # empty row with minsize as spacing\n self.frame_left.grid_rowconfigure(7, weight=1) # empty row as spacing\n self.frame_left.grid_rowconfigure(8, minsize=20) # empty row with minsize as spacing\n self.frame_left.grid_rowconfigure(11, minsize=10) # empty row with minsize as spacing\n\n self.app_name = customtkinter.CTkLabel(master=self.frame_left,\n text=\"Movie Recommender System\",\n text_font=(\"Roboto Medium\", 16)) # font name and size in px\n self.app_name.grid(row=1, column=0, pady=10, padx=10)\n\n self.simple_btn = customtkinter.CTkButton(master=self.frame_left,\n text=\"Simple Recommender\",\n command=lambda: self.show_frame(\"SimplePage\"))\n self.simple_btn.grid(row=3, column=0, pady=10, padx=20, sticky='nesw')\n \n self.content_btn = customtkinter.CTkButton(master=self.frame_left,\n text=\"Content Based Recommender\",\n command=lambda: self.show_frame(\"ContentBasedPage\"))\n self.content_btn.grid(row=4, column=0, pady=10, padx=20, sticky='nesw')\n\n self.collab_btn = customtkinter.CTkButton(master=self.frame_left,\n text=\"Collaborative Based Recommender\",\n command=lambda: self.show_frame(\"CollaborativeBasedPage\"))\n self.collab_btn.grid(row=5, column=0, pady=10, padx=20, sticky='nesw')\n\n self.hybrid_btn = customtkinter.CTkButton(master=self.frame_left,\n text=\"Hybrid Recommender\",\n command=lambda: self.show_frame(\"HybridPage\"))\n self.hybrid_btn.grid(row=6, column=0, pady=10, padx=20, sticky='nesw')\n\n # Change theme\n self.theme_label = customtkinter.CTkLabel(master=self.frame_left, text=\"Appearance Mode:\")\n self.theme_label.grid(row=9, column=0, pady=0, padx=20, sticky=\"w\")\n\n self.theme_option = customtkinter.CTkOptionMenu(master=self.frame_left,\n values=[\"Light\", \"Dark\", \"System\"],\n command=self.change_appearance_mode)\n self.theme_option.grid(row=10, column=0, pady=10, padx=20, sticky=\"w\")\n\n \n\n def show_frame(self, page_name):\n frame = self.frames[page_name]\n frame.tkraise()\n\n\n def change_appearance_mode(self, new_appearance_mode):\n customtkinter.set_appearance_mode(new_appearance_mode)\n self.state('zoomed')\n\n def on_closing(self, event=0):\n self.destroy()\n\n\nif __name__ == \"__main__\":\n app = App()\n app.state('zoomed')\n app.mainloop()","repo_name":"cowienduckie/movie-resys","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"18714981346","text":"\"\"\"A simple error handling extension. 
Should work with any discord.ext-based bot.\"\"\"\n\nfrom discord.ext import commands\nfrom core.mysql import delete_data_entry\n\n\nclass IsNotHuman(commands.CommandError):\n \"\"\"Raised if a bot attempts to invoke one of this bot's commands.\"\"\"\n pass\n\n\ndef setup(bot):\n \"\"\"Set up the cog.\"\"\"\n\n @bot.check\n def is_human(ctx):\n \"\"\"Prevent the bot from responding to other bots.\"\"\"\n if ctx.author.bot:\n raise IsNotHuman(\"User is not human\")\n return True\n\n @bot.listen(\"on_command_error\")\n async def handle_error(ctx, exc):\n \"\"\"Simple error handler.\"\"\"\n if isinstance(exc, commands.MissingRequiredArgument):\n param = exc.param.replace(\"_\", \" \")\n await ctx.send(f\"A {param} needs to be specified for this command to work.\")\n elif not isinstance(exc, (commands.CommandNotFound, IsNotHuman)):\n await ctx.send(exc)\n\n @bot.listen(\"on_message\")\n async def shhh(ctx):\n wlist = [110373943822540800, 264445053596991498]\n bot_num = len([a for a in ctx.guild.members if a.bot])\n percent = ((bot_num)/len(ctx.guild.members)) * 100\n if len(ctx.guild.members) > 40 and percent > 90 and ctx.guild.id not in wlist:\n return print(\"returned\")\n else:\n pass\n \n\n @bot.listen(\"on_guild_remove\")\n async def delguild(guild):\n wew = [\"greet-message\", \"leave-message\", \"join-leave-channel\", \"join-role\"]\n for owo in wew:\n delete_data_entry(guild.id, owo)\n print(f\"Deleted {owo} in {guild.id}\")\n\n\nasync def on_command_error(error, ctx):\n if isinstance(error, commands.MissingRequiredArgument):\n return\n elif isinstance(error, commands.BadArgument):\n return\n\n\ndef is_owner():\n async def predicate(ctx):\n if str(ctx.author.id) in ctx.bot.config['owner']:\n return True\n else:\n return False\n return commands.check(predicate)\n\n\ndef is_guild_owner():\n async def predicate(ctx):\n if ctx.author == ctx.guild.owner:\n return True\n else:\n return False\n return commands.check(predicate)\n","repo_name":"wafflemelon/ema","sub_path":"extensions/owner/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"25"} +{"seq_id":"72286919104","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'Invaders'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^index$', views.index, name='index'),\n url(r'^register/$', views.register, name='register'),\n url(r'^do_register/$', views.do_register, name='do_register'),\n url(r'^register_success/$', views.register_success, name='register_success'),\n url(r'^register_fail/$', views.register_fail, name='register_fail'),\n url(r'^login/$', views.login, name='login'),\n url(r'^do_login/$', views.do_login, name='do_login'),\n url(r'^logout/$', views.do_logout, name='do_logout'),\n url(r'^update_high_score/$', views.update_high_score, name='update_high_score'),\n]\n","repo_name":"addliu/Games","sub_path":"Invaders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"25"} +{"seq_id":"9777636667","text":"\"\"\" Linear search algorithm |||\nSequentially iterate over the elements of the list/array\nand compare each element with the target value.\nIf a match is found, the search must be stopped\nand the index of the element in the array/list printed.\"\"\"\nimport random as r\n\nmy_list = []\nfor i in range(r.randint(5, 100)): # the number of iterations is random\n my_list.append(r.randint(-100, 100))\n\nprint(f'List: {my_list}.')\n\n# linear search algorithm\nres = None\nkey = int(input('Enter the value to search for: '))\n\nfor i in range(len(my_list)): # number of iterations = length of the list\n if my_list[i] == key: # if the key and the list element are the same number\n res = i # store the index of the found element in the result\n break # stop early\n\nif res is not None: # if res holds something other than None\n print(f'Element found! ID={res}.')\nelse:\n print('Element was not found!')\n","repo_name":"100ballovby/monday_start_2122","sub_path":"05_lesson_0111/01_linear_search.py","file_name":"01_linear_search.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"74649462466","text":"import json, requests, webbrowser, warnings\nfrom urllib import parse\nwarnings.filterwarnings(\"ignore\")\nprint(\"=== COVID-19 Vaccination Advance Reservation System ===\")\nprint(\"Please have your NetFunnel Key ready before 20:00 as instructed. If it is not ready, a waiting queue may occur after identity verification.\")\nhead = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}\nwhile True:\n name = input(\"Vaccinee name : \")\n name = parse.quote(name)\n birthday = input(\"Vaccinee date of birth (please write it as yyyymmdd) : \")\n ntv = int(input(\"Select whether the vaccinee is a national or a foreigner.\\n1. Korean national 2. Foreigner\\nChoice : \"))\n ntvFrnrCd = ''\n if ntv == 1:\n ntvFrnrCd = 'L'\n elif ntv == 2:\n ntvFrnrCd = 'F'\n sex = int(input(\"Select sex.\\n1. Male, 2. Female\\nChoice : \"))\n sexCd = ''\n if sex == 1:\n sexCd = 'M'\n elif sex == 2:\n sexCd = 'F'\n print(\"Select your mobile carrier. \\n1. SKT 2. KT 3. LG U+ 4. MVNO (SK network) 5. MVNO (KT network) 6. MVNO (U+ network)\")\n telecom = int(input(\"Choice : \"))\n telComCd = \"0\" + str(telecom)\n telNo = input(\"Enter the mobile phone number for identity verification 
: \")\n data = \"svcGb=P&name=\" + name + \"&birthday=\" + birthday + \"&sexCd=\" + sexCd + \"&ntvFrnrCd=\" + ntvFrnrCd + \"&telComCd=\" + telComCd + \"&telNo=\" + telNo + \"&agree1=Y&agree2=Y&agree3=Y&agree4=Y\"\n if telecom > 4:\n data = data + \"&agree5=Y\"\n res = requests.post(\"https://ncvr2.kdca.go.kr/svc/kcb/callKcb\", data=data, headers=head, verify = False)\n res.request\n se = res.json()\n if se['rsltCd'] == \"B000\":\n break\n print(\"Identity verification failed\")\n print(\"Failure details : \" + se['rsltMsg'])\ntxSeqNo = se['txSeqNo']\nprint(\"A verification code has been sent to the phone number you entered.\")\nwhile True:\n otpNo = input(\"Enter the 6-digit verification code you received : \")\n data = \"svcGb=R&txSeqNo=\" + txSeqNo + \"&telNo=\" + telNo + \"&otpNo=\" + otpNo\n res = requests.post(\"https://ncvr2.kdca.go.kr/svc/kcb/callKcb\", data=data, headers=head, verify = False)\n res.request\n se = res.json()\n if se['rsltCd'] == \"CONFLICT\":\n print(\"Duplicate access blocked\")\n print(\"The same person can connect again only 10 minutes after the previous connection.\")\n exit()\n if se['rsltCd'] == \"B000\":\n break\n print(\"Identity verification failed\")\n print(\"Failure details \" + se['rsltMsg'])\nreqId = se['reqId']\nprint(\"Identity verification is complete.\")\nprint(\"If a page error occurs after connecting, or you closed the tab by mistake, please go to https://ncvr2.kdca.go.kr/svc/waiting?reqId=\" + reqId + \".\\nWhen using this address you do not need to wait 10 minutes for re-verification.\")\nprint(\"You need the NetFunnel Key prepared with the separately provided instructions.\")\nprint(\"To start waiting for the reservation system in your browser after identity verification without a NetFunnel Key, type only p and press Enter\")\nnfKey = input(\"NetFunnel Key : \")\nif nfKey.upper() == \"P\" or nfKey == \"\":\n webbrowser.open(\"https://ncvr2.kdca.go.kr/svc/waiting?reqId=\" + reqId)\nelse:\n webbrowser.open(\"https://ncvr2.kdca.go.kr/svc/complete?reqId=\" + reqId + \"&nfKey=\" + nfKey)\n","repo_name":"lhs9842/COVID19VaccineReservationPass","sub_path":"Covid19VaccineRegister.py","file_name":"Covid19VaccineRegister.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"25"} +{"seq_id":"21059711867","text":"\"\"\" MetaProFi setup module\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom Cython.Build import cythonize\nimport numpy as np\nfrom metaprofi.version import __version__\n\nwith open(\"README.md\", \"r\") as readme:\n long_description = readme.read()\n\nsetup(\n name=\"MetaProFi\",\n version=__version__,\n license=\"GPLv2+\",\n author=\"Sanjay Kumar Srikakulam\",\n maintainer=\"Sanjay Kumar Srikakulam\",\n description=\"MetaProFi is a protein-based Bloom filter tool for storing and querying sequence data for accurate identification of functionally relevant genetic variants\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"metagenome protein finder bloom filters index sequence search\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)\",\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: POSIX :: Linux\",\n \"Natural Language :: English\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n ],\n include_package_data=True,\n python_requires=\">=3.8\",\n packages=find_packages(),\n install_requires=[\n \"Cython==0.29.28\",\n \"numpy==1.22.3\",\n \"zarr==2.11.1\",\n \"pyfastx==0.8.4\",\n \"bitarray==2.4.0\",\n \"humanfriendly==9.1\",\n \"pyyaml==5.4.1\",\n \"zstd==1.5.1.0\",\n \"psutil==5.9.0\",\n \"tqdm==4.61.2\",\n \"SharedArray==3.2.1\",\n \"indexed-gzip==1.6.4\",\n \"lmdb==1.3.0\",\n \"msgpack==1.0.3\",\n \"msgspec==0.5.0\",\n ],\n 
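# Editor's note: the Cython extensions below are compiled at install time; boundscheck/wraparound are disabled for speed, which assumes the .pyx code never indexes out of range.\n 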
ext_modules=cythonize(\n [\n \"metaprofi/lib/bloomfilter_cython.pyx\",\n \"metaprofi/lib/utilities_cython.pyx\",\n ],\n compiler_directives={\n \"language_level\": \"3\",\n \"boundscheck\": False,\n \"wraparound\": False,\n },\n ),\n include_dirs=[np.get_include()],\n entry_points={\n \"console_scripts\": [\"metaprofi = metaprofi.metaprofi_main:main\"],\n },\n zip_safe=False,\n)\n","repo_name":"kalininalab/metaprofi","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"25"} +{"seq_id":"13188249912","text":"import tkinter as tk\nimport subprocess\n\nclass PortForwardingApp:\n def __init__(self, master):\n self.master = master\n self.master.title(\"Port Forwarding App\")\n\n self.host_label = tk.Label(self.master, text=\"Local IP address:\")\n self.host_label.grid(row=0, column=0, padx=5, pady=5)\n self.host_entry = tk.Entry(self.master)\n self.host_entry.grid(row=0, column=1, padx=5, pady=5)\n\n self.local_port_label = tk.Label(self.master, text=\"Local port:\")\n self.local_port_label.grid(row=1, column=0, padx=5, pady=5)\n self.local_port_entry = tk.Entry(self.master)\n self.local_port_entry.grid(row=1, column=1, padx=5, pady=5)\n\n self.remote_host_label = tk.Label(self.master, text=\"Remote IP address:\")\n self.remote_host_label.grid(row=2, column=0, padx=5, pady=5)\n self.remote_host_entry = tk.Entry(self.master)\n self.remote_host_entry.grid(row=2, column=1, padx=5, pady=5)\n\n self.remote_port_label = tk.Label(self.master, text=\"Remote port:\")\n self.remote_port_label.grid(row=3, column=0, padx=5, pady=5)\n self.remote_port_entry = tk.Entry(self.master)\n self.remote_port_entry.grid(row=3, column=1, padx=5, pady=5)\n\n self.forward_button = tk.Button(self.master, text=\"Start forwarding\", command=self.start_forwarding)\n self.forward_button.grid(row=4, column=0, padx=5, pady=5)\n\n self.stop_button = tk.Button(self.master, text=\"Stop forwarding\", command=self.stop_forwarding)\n self.stop_button.grid(row=4, column=1, padx=5, pady=5)\n\n def start_forwarding(self):\n host = self.host_entry.get()\n local_port = self.local_port_entry.get()\n remote_host = self.remote_host_entry.get()\n remote_port = self.remote_port_entry.get()\n\n command = f\"netsh interface portproxy add v4tov4 listenaddress={host} listenport={local_port} connectaddress={remote_host} connectport={remote_port}\"\n subprocess.run(command, shell=True)\n\n def stop_forwarding(self):\n host = self.host_entry.get()\n local_port = self.local_port_entry.get()\n\n command = f\"netsh interface portproxy delete v4tov4 listenaddress={host} listenport={local_port}\"\n subprocess.run(command, shell=True)\n\nif __name__ == '__main__':\n root = tk.Tk()\n app = PortForwardingApp(root)\n root.mainloop()\n","repo_name":"liangqishou/mypython","sub_path":"ChatGPT_test/port_forwarding.py","file_name":"port_forwarding.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"72044011264","text":"#!/usr/bin/python3\ndef safe_print_list(my_list=[], x=0):\n x_x = 0\n\n while x_x < int(x):\n try:\n print(f\"{my_list[x_x]}\", end=\"\")\n x_x = x_x + 1\n except IndexError:\n break\n\n print(\"\")\n return 
x_x\n","repo_name":"dakhamohammed/alx-higher_level_programming","sub_path":"0x05-python-exceptions/0-safe_print_list.py","file_name":"0-safe_print_list.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"39346374819","text":"#!/usr/bin/python\n\nimport argparse\nimport csv\nimport json\nimport requests\nimport time\n\n\nMULTI_VALUE_SEPARATOR = '|'\n\nEXAMPLE_TEXT = '''example:\n\n python %(prog)s.py --srv https://qa.magnum.genestack.com/ --study study_metadata.tsv --samples samples_metadata.tsv --token \n \n python %(prog)s.py --srv https://qa.magnum.genestack.com/ --study study_metadata.tsv --samples samples_metadata.tsv --template GSF000065 --token \n '''\n\ndef get_rest_endpoint(host):\n return host + '/frontend/rs/genestack/'\n\n\ndef get_app_endpoint(host):\n return host + '/frontend/endpoint/application/invoke/genestack/'\n\n\ndef get_request_headers(token):\n return {'Genestack-API-Token': token,\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'}\n\n\ndef update_study(study_accession, study_metadata, host, token):\n url = get_rest_endpoint(host) + 'studyCurator/default-released/studies/' + study_accession\n r = requests.patch(url=url, headers=get_request_headers(token), data=json.dumps(study_metadata))\n # print(r)\n\n\ndef read_study_metadata(study_filename):\n with open(study_filename) as f:\n reader = csv.reader(f, delimiter='\\t')\n headers = next(reader)\n row = next(reader)\n result = dict()\n for key, value in zip(headers, row):\n values = value.split(MULTI_VALUE_SEPARATOR)\n result[key] = values\n return result\n\n\ndef create_study(study_filename, template_accession, number_of_samples, host, token):\n study_metadata = read_study_metadata(study_filename)\n study_name = study_metadata.get('Study Title', ['Unknown study'])[0]\n number_of_initial_samples = number_of_samples\n\n url_authenticate = get_app_endpoint(host) + 'signin/authenticateByApiToken'\n url_new_study = get_app_endpoint(host) + 'study-metainfo-editor/createStudy'\n\n s = requests.Session()\n session = s.post(url_authenticate, json=[token])\n res = json.loads(s.post(url=url_new_study, json=[study_name, template_accession, number_of_initial_samples]).text)\n if 'result' not in res:\n print(res)\n raise Exception('study has not been created')\n study_accession = res['result']\n update_study(study_accession, study_metadata, host, token)\n return study_accession\n\n\ndef replace_samples(study_accession, samples_parent, data, host, token):\n url_authenticate = get_app_endpoint(host) + 'signin/authenticateByApiToken'\n url_replace_samples = get_app_endpoint(host) + 'study-metainfo-editor/replaceSamples'\n s = requests.Session()\n session = s.post(url_authenticate, json=[token])\n res = json.loads(s.post(url=url_replace_samples, data=json.dumps([samples_parent, data])).text)\n if 'result' not in res:\n print(res)\n raise Exception('There is a problem while invoking replaceSamples')\n\n\ndef read_samples_metadata(samples_filename):\n new_samples = []\n with open(samples_filename) as f:\n reader = csv.reader(f, delimiter='\\t')\n headers = next(reader)\n for row in reader:\n new_sample = []\n for key, value in zip(headers, row):\n values = value.split(MULTI_VALUE_SEPARATOR)\n new_sample.append({\"attributeName\": key, \"values\": values})\n new_samples.append(new_sample)\n return new_samples\n\n\ndef get_samples_parent(study_accession, host, token):\n url_authenticate = get_app_endpoint(host) + 
'signin/authenticateByApiToken'\n url_get_study_container_descriptor = get_app_endpoint(host) + 'study-metainfo-editor/getStudyContainerDescriptor'\n\n s = requests.Session()\n session = s.post(url_authenticate, json=[token])\n res = json.loads(s.post(url=url_get_study_container_descriptor, data=json.dumps([study_accession])).text)\n if 'result' not in res:\n print(res)\n raise Exception('There is a problem while invoking getStudyContainerDescriptor')\n check_samples = res['result']['metadataDescriptors']\n return [item['id'] for item in check_samples if item[u'typeId'] == 'sampleGroup'][0]\n\n\ndef create_studies_and_samples(study_filename, samples_filename, template_accession, host, token):\n if samples_filename is None:\n study_accession = create_study(study_filename, template_accession, 0, host, token)\n else:\n new_samples = read_samples_metadata(samples_filename)\n study_accession = create_study(study_filename, template_accession, 1, host, token)\n time.sleep(3)\n samples_parent = get_samples_parent(study_accession, host, token)\n replace_samples(study_accession, samples_parent, new_samples, host, token)\n return study_accession\n\n\ndef select_template(host, token, template):\n url_authenticate = get_app_endpoint(host) + 'signin/authenticateByApiToken'\n url_get_templates = get_app_endpoint(host) + 'study-metainfotemplateeditor/listTemplates'\n s = requests.Session()\n session = s.post(url_authenticate, json=[token])\n res = json.loads(s.post(url=url_get_templates).text)['result']\n for item in res:\n if template is None and item['isDefault']:\n return item['accession']\n else:\n if template == item['accession']:\n return template\n raise Exception('Unknown template accession: {}'.format(template))\n\ndef main():\n parser = argparse.ArgumentParser(prog='upload_local_study_to_odm',\n description='Upload study and samples metadata to ODM',\n epilog=EXAMPLE_TEXT,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--srv', type=str, help='url of the server', required=True)\n parser.add_argument('--study', type=str, help='Path to the study TSV-file', required=True)\n parser.add_argument('--samples', type=str, help='Path to the samples TSV-file', required=False)\n parser.add_argument('--template', type=str, help='template accession (or default template will be selected)', required=False)\n parser.add_argument('--token', type=str, help='token', required=True)\n args = parser.parse_args()\n host = args.srv[:-1] if args.srv.endswith('/') else args.srv\n template_accession = select_template(host, args.token, args.template)\n study_accession = create_studies_and_samples(args.study, args.samples, template_accession, host, args.token)\n print(study_accession)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"genestack/doc-odm-user-guide","sub_path":"scripts/import/upload_local_study_to_odm.py","file_name":"upload_local_study_to_odm.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"25"} +{"seq_id":"27068988861","text":"#import libraries\nimport streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.express as px\n#import plotly.figure_factory as ff\n\n#look for more information here https://docs.streamlit.io/library/cheatsheet\n#adding title\nst.title(\"Basketball over the years\")\n\n#adding a description to your website\nst.write('All about basketball')\n\n\nst.header('Team Members')\n\nst.markdown(\"- Gemma\")\nst.markdown(\"- Cameron 
Estell\")\nst.markdown(\"- Cameron Conely\")\nst.markdown(\"- Livia\")\nst.markdown(\"- Natalia\")\nst.markdown(\"- Melanie\")\n\n#SHOWING THE DATA\n#dataset Header\nst.header('Dataset')\n\n#add your dataset (delete dataset this is an example)\ndf = pd.read_csv(\"cbb.csv\")\n\n#showing dataset\nst.table(df.head())\n\n\n## Data Description\n\nst.header('Dataset Column Description')\n\nst.write(\"\"\"\n0. TEAM : Team Name \n1. CONF : The Athletic Conference in which the school participates in \n2.G : Number of Games playesd\n3.W : Number of Games Won \n4. ADJOE : Points scored per 100 possessions \n5. ADJDE : Adjusted Defensive Efficiency \n6. BARTHAG : Power Rating \n7. EFG_O : Effective Field Goal Percentage Shot\n8. EFG_D : Effective Field Goal Percentage Allowed\n9.TOR : Turnover Percentage Allowed\n10. TORD : Turnover Percentage Committed\n11. ORB : Offensive Rebound Rate \n12. DRB : Defensive Rebound Rate \n13. FTR : Free Throw Rate \n14. FTRD : Free throw Rate Allowed \n15. 2P_O : Two Point Shooting Percentage \n16. 2P_D : Two Point Shooting Percentage allowed \n17. 3P_O: 3 Point shooting percentage \n18. 3P_D: 3 point shooting perecentage allowed \n19. ADJ_T : Adjusted Tempo \n20. WAB : Wins above bubble \n21. POSTSEASON : Round where the given team was eliminated or where their season ended\n22. SEED : Seed in the NCAA Tournament\n23. Year : Season \"\"\")\n#Adding images to make your streamlit look visually better!\n# st.image('pro.png')\n# st.text('You can add photos with descriptions')\n\n\n#Adding 3-6 Visualizations using photos collected and made from your graph\n#adding images\n#adding graphs by images\n\nst.header('How are the number of games won distributed? ')\n\n## Box Plot \ndf_plot=df['G']\n\nfig = px.box(df_plot, y=\"G\")\n \nfig.update_layout(xaxis_tickangle=-90)\nst.plotly_chart(fig)\n\nst.write('The 75th percentile (or upper quantile) is 37 , the median is 31 and the lower quantile is 26.')\n## Histogram \n#df.hist(bin=5)\n\nfig=px.histogram(df_plot,'G')\n\nst.plotly_chart(fig)\n\nst.write(\" Most teams have played games between 24 and 40 games\")\n\n###### Scatter Plot \nst.header('Relationship between games won and ADJOE')\n\ndf_plot=df[['W','ADJOE']]\nfig=px.scatter(df_plot,x='W',y='ADJOE')\nfig.update_layout(xaxis_tickangle=-90)\nst.plotly_chart(fig)\n\nst.write('If in a game a team have a higher amount of points scored it is highly likely that team will win the game. 
')\n\n\n###### Bar Chart Plot \nst.header('Defensive Rebound rate of the top 15 teams')\n\ndf_plot=df[['TEAM','DRB']]\n## Average the defensive rebound rate for each team.\navgdrb = df_plot.groupby(\"TEAM\")[[\"DRB\"]].mean().reset_index()\navgdrb = avgdrb.sort_values(by=\"DRB\", ascending=False).head(15)\nfig = px.bar(avgdrb, x=\"TEAM\", y=\"DRB\")\nfig.update_layout(xaxis_tickangle=-90)\nst.plotly_chart(fig)\n\nst.write(\"Fairleigh Dickinson is the most defensive team.\")\n\n\n#### Pie Chart \n\nst.header('Which conference has the highest number of games?')\n\ndf_plot=df[['CONF','G']]\n\ntotConf=df_plot.groupby(\"CONF\")[['G']].sum().reset_index()\nfig = px.pie(totConf,values=\"G\",names=\"CONF\")\nst.plotly_chart(fig)\n\n\nst.write('The three conferences with the most games played are ACC, SEC, and A10')\n\n\n### Heatmap \n\n# st.header('Team vs Conference Games Won Heatmap ')\n\n# df_plot=df[['TEAM','CONF','W']]\n\n# gamesWon=df_plot.groupby(['TEAM','CONF'])[['W']].sum().reset_index()\n\n# gamesWon=pd.pivot_table(gamesWon, values = 'W', index=['TEAM'], columns = 'CONF').reset_index()\n\n# fig = px.imshow(gamesWon)\n# st.plotly_chart(fig)\n\n\nst.header('How many games has Gonzaga won over different seasons?')\n\n## Select the team Gonzaga from our dataset \ndf_plot=df[df['TEAM']=='Gonzaga']\n## We want to sum the number of wins by the team Gonzaga for each year.\ncount = df_plot.groupby(\"YEAR\")[[\"W\"]].sum().reset_index()\ncount = count.sort_values(by=\"W\", ascending=False).head(15)\nfig = px.bar(count, x=\"YEAR\", y=\"W\")\nfig.update_layout(xaxis_tickangle=-90)\nst.plotly_chart(fig)\n\nst.markdown(\"- The most games Gonzaga has won in a season is 37. \")\nst.markdown(\"- The fewest games they have won is 27 \")\nst.markdown(\"- The season they won the most was 2017\")\n\nst.header(\"What was North Carolina's power rating over the years?\")\n\ndf_plot=df[df['TEAM']=='North Carolina']\npr = df_plot.groupby(\"YEAR\")[[\"BARTHAG\"]].sum().reset_index()\npr.sort_values(by=\"BARTHAG\", ascending=False).head(15)\nfig = px.bar(pr, x=\"YEAR\", y=\"BARTHAG\")\nfig.update_layout(xaxis_tickangle=-90)\nst.plotly_chart(fig)\n\n\nst.write(\"North Carolina had the same power rating in the years 2015, 2016, 2017 and 2019\")\n\n\nst.header('What was the 2-point shooting percentage over the seasons for Wisconsin?')\n\ndf_plot=df[df['TEAM']=='Wisconsin']\n\ndf_plot=df_plot[['YEAR','2P_O']]\n\ntwopo=df_plot.groupby('YEAR')[['2P_O']].mean().reset_index()\n\nfig=px.bar(twopo,x=\"YEAR\",y='2P_O')\nst.plotly_chart(fig)\n\nst.markdown(\"- Wisconsin had very similar 2-point shooting percentages in 2017 and 2018 \")\n\n\nst.markdown(\"- Wisconsin had the highest 2-point shooting percentage in 2015, while in the very next season they had the lowest 2-point shooting percentage. 
\")\n\nst.header('What is the relationship between offensive rebound rate and defensive rebound rate?')\n\ndf_plot=df[['ORB','DRB']]\nfig=px.scatter(df_plot,x='ORB',y='DRB')\nfig.update_layout(xaxis_tickangle=-90)\nst.plotly_chart(fig)\n\nst.write('The linear relationship between offensive rebound rate and defensive rebound rate is very weak.')\n\nst.header('Which team has the highest power rating in 2019?')\n\ndf_plot=df[df['YEAR']==2019]\npower_rating=df_plot.groupby('TEAM')['BARTHAG'].max().reset_index()\npower_rating=power_rating.sort_values(by='BARTHAG',ascending=False).head(15)\nfig=px.bar(power_rating,x='TEAM',y='BARTHAG')\nfig.update_layout(xaxis_tickangle=-90)\nst.plotly_chart(fig)\n\n\nst.write('Gonzaga has the highest power rating in 2019')\n\n#adding graphs by making plotly_chart\n# Plot!\n#st.plotly_chart(BostonHousing, use_container_width=True)\n#st.text('Description')\n\n\n#adding conclusions\nst.header('Summary')\nst.markdown('- The median number of games played is around 31')\nst.markdown('- There is a strong linear relationship between ADJOE and the number of games won')\nst.markdown('- Fairleigh Dickinson is the most defensive team in the roster')\nst.markdown('- ACC is the conference with the highest number of games played')\nst.markdown('- Gonzaga had its peak season in 2017')\nst.markdown('- Gonzaga also has the highest power rating among all teams in the roster.')","repo_name":"ghoshdebapratim1/WebWolvesStreamlit3","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"30677863267","text":"#powered by newsapi.org\nimport requests\n\ndef getTopNews():\n\turl = ('https://newsapi.org/v2/top-headlines?'\n\t\t 'country=se&'\n\t\t 'apiKey=6a17c0ed413c46efb97152947735a0d7')\n\tresponse = requests.get(url).json()\n\n\tl = []\n\tfor i in response['articles']:\n\t\tl.append(i['title'])\n\n\treturn l","repo_name":"efriden/pappasvader","sub_path":"TestApp/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"33527420705","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom matplotlib import image as mpimg\nfrom scipy.ndimage import convolve\n\nrose = mpimg.imread('rose-piano.jpg')\n\nrose1 = rose / 255\n\nblur = [[[1/9, 1/9, 1/9],\n [1/9, 1/9, 1/9],\n [1/9, 1/9, 1/9]]]\n\nblur = np.array(blur)\na = convolve(rose1, blur, output=None, mode='reflect', cval=0.0, origin=0)\n\nb = cv2.GaussianBlur(rose, (5, 5), sigmaX=0)\n\nedge = [\n [-1, -1, -1],\n [-1, 8, -1],\n [-1, -1, -1]\n]\nedge = np.array(edge)\nrose1 = np.mean(rose1, axis=2)\nc = convolve(rose1, edge)\nc = np.where(c > 0.25, 1, 0)\n\nd = cv2.Laplacian(rose, cv2.CV_8U, ksize=3)\n\ne = cv2.Laplacian(b, cv2.CV_8U, ksize=3)\n\n#plt.imsave('exercise5a.jpg', a)\nplt.imsave('exercise5b.jpg', b)\nplt.imsave('exercise5c.jpg', c)\nplt.imsave('exercise5d.jpg', d)\nplt.imsave('exercise5e.jpg', e)\n\nplt.imshow(a)\nplt.show()","repo_name":"OrangePear13/python-vision-exercises","sub_path":"e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"70277072065","text":"print('Hello World')\n# NOTE (editor, assumption): this module uses names that are never defined in the file\n# (os, urllib, requests, the DRF helpers, plus a pre-configured tokenizer `tk`,\n# a language guesser `guess` and a `base_url`); the imports below cover the standard ones,\n# while `tk`, `guess` and `base_url` are still assumed to be provided elsewhere.\nimport os\nimport urllib.request\nimport requests\nfrom rest_framework.decorators import api_view\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\ndef downloadFile(file,filename,sha):\n if os.path.exists(os.path.join(base_url,sha)):\n fileDirectory = 
os.path.join(base_url,sha,filename)\n else:\n os.makedirs(os.path.join(base_url,sha))\n fileDirectory = os.path.join(base_url,sha,filename)\n urllib.request.urlretrieve(file,fileDirectory)\n return fileDirectory\n\ndef get_repo_data(request):\n if request.method == 'POST':\n data = JSONParser().parse(request)\n if not data:\n return Response({'message':'Json Data Missing'}, status=status.HTTP_400_BAD_REQUEST)\n userName = data.get('userName')\n req = requests.get(f'https://api.github.com/users/{userName}/repos')\n if req.status_code == 200:\n res = req.json()\n finalData = []\n for singleRes in res:\n finalData.append(singleRes.get('name'))\n return Response({'data':finalData},status=status.HTTP_200_OK)\n else:\n return Response({'data':req.json()},status=status.HTTP_200_OK)\n\n@api_view(['POST'])\ndef get_branches_data(request):\n if request.method == 'POST':\n data = JSONParser().parse(request)\n if not data:\n return Response({'message':'Json Data Missing'}, status=status.HTTP_400_BAD_REQUEST)\n userName = data.get('userName')\n repoName = data.get('repoName')\n req = requests.get(f'https://api.github.com/repos/{userName}/{repoName}/branches')\n if req.status_code == 200:\n res = req.json()\n finalData = []\n for singleRes in res:\n finalData.append(singleRes.get('name'))\n return Response({'data':finalData},status=status.HTTP_200_OK)\n else:\n return Response({'data':req.json()},status=status.HTTP_200_OK)\n\n@api_view(['POST'])\ndef get_commit_data(request):\n if request.method == 'POST':\n data = JSONParser().parse(request)\n if not data:\n return Response({'message':'No Data Received'}, status=status.HTTP_400_BAD_REQUEST)\n userName = data.get('userName')\n repoName = data.get('repoName')\n branchName = data.get('branchName')\n req = requests.get(f'https://api.github.com/repos/{userName}/{repoName}/branches/{branchName}')\n if req.status_code == 200:\n res = req.json()\n commitData = res.get('commit')\n parentData= commitData.get('parents')[0]\n parentSha = parentData.get('sha')\n reqData = requests.get(f'https://api.github.com/repos/{userName}/{repoName}/commits/{parentSha}')\n allParentPath = {}\n if reqData.status_code == 200:\n resData = reqData.json().get('files')\n for singleres in resData:\n filePath = downloadFile(singleres.get('raw_url'),singleres.get('filename'),f'{userName}/{repoName}/parent')\n allParentPath.update({singleres.get('filename'):singleres.get('raw_url')})\n parentKeys = allParentPath.keys()\n print('*********parentKeys*******')\n print(parentKeys)\n childSha = commitData.get('sha')\n reqData = requests.get(f'https://api.github.com/repos/{userName}/{repoName}/commits/{childSha}')\n if reqData.status_code == 200:\n resData = reqData.json().get('files')\n allFilesData = []\n for singleres in resData:\n patchData = singleres.get('patch')\n print(patchData)\n finalLines = tk.tokenize(patchData)\n linedeleted = []\n lineadded = []\n for singleline in finalLines[1:]:\n if '-' == singleline[0]:\n linedeleted.append(singleline[1:])\n elif '+' == singleline[0]:\n lineadded.append(singleline[1:])\n send_data = []\n if (linedeleted and lineadded):\n if len(linedeleted) > len(lineadded):\n count = len(linedeleted) - len(lineadded)\n for i in range(len(lineadded)):\n send_data.append({'line_deleted':linedeleted[i],'line_added':lineadded[i],'lineLanguage':guess.language_name(lineadded[i])})\n for i in linedeleted[-count:]:\n send_data.append({'line_deleted':i})\n elif len(lineadded) > len(linedeleted):\n count = len(lineadded) - len(linedeleted)\n for i in 
range(len(linedeleted)):\n send_data.append({'line_deleted':linedeleted[i],'line_added':lineadded[i],'lineLanguage':guess.language_name(lineadded[i])})\n for i in lineadded[-count:]:\n send_data.append({'line_added':i})\n else:\n for i in range(len(lineadded)):\n send_data.append({'line_deleted':linedeleted[i],'line_added':lineadded[i],'lineLanguage':guess.language_name(lineadded[i])})\n else:\n if linedeleted:\n for i in linedeleted:\n send_data.append({'line_deleted':i,'lineLanguage':guess.language_name(i)})\n elif lineadded:\n for i in lineadded:\n send_data.append({'line_added':i,'lineLanguage':guess.language_name(i)})\n fileExtData = '\\n'.join(lineadded)\n codeLang = guess.language_name(fileExtData)\n\n\n if singleres.get('filename') in parentKeys:\n childfilePath = downloadFile(singleres.get('raw_url'),singleres.get('filename'),f'{userName}/{repoName}/child')\n parentfilePath = downloadFile(allParentPath[singleres.get('filename')],singleres.get('filename'),f'{userName}/{repoName}/parent')\n childCounter = 0\n parentCounter = 0\n childFile = open(childfilePath,\"r\")\n childContent = childFile.read()\n childCoList = childContent.split(\"\\n\")\n parentFile = open(parentfilePath,\"r\")\n parentContent = parentFile.read()\n parentCoList = parentContent.split(\"\\n\")\n for i in childCoList:\n if i:\n childCounter += 1\n for i in parentCoList:\n if i:\n parentCounter += 1\n print('***************childCounter********************')\n print(childCounter)\n print('***************parentCounter********************')\n print(parentCounter)\n finalCounter = childCounter - parentCounter\n indivChanges = singleres.get('changes')\n if finalCounter:\n indivPercent = indivChanges / finalCounter\n else:\n indivPercent = 1\n print('****************indivPercent******************')\n print(indivPercent)\n if codeLang in ['HTML','CSS']:\n codeProb = (indivPercent / 70) * 100\n else:\n codeProb = (indivPercent / 100) * 100\n else:\n codeProb = (singleres.get('changes') / 70) * 100\n if codeProb > 1:\n codeProb = 100\n else:\n codeProb = codeProb * 100\n allFilesData.append({\"filename\": singleres.get('filename'),\"additions\": singleres.get('additions'),\\\n \"deletions\": singleres.get('deletions'),\"changes\": singleres.get('changes'),'codeLanguage':codeLang,\\\n 'changePercent':codeProb,'lines':send_data})\n return Response({'files_data':allFilesData},status=status.HTTP_200_OK)\n else:\n return Response({'data':reqData.json()},status=status.HTTP_200_OK)\n else:\n return Response({'data':req.json()},status=status.HTTP_200_OK)\n","repo_name":"k19034866/code_contribution_analysis","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"32784020290","text":"'''\n\nNASA NextGen NAS ULI Information Fusion\n \n@organization: PARA Lab, Arizona State University (PI Dr. Yongming Liu)\n@author: Hari Iyer\n@date: 01/19/2018\n\nCommand call to interface NATS module with PARA-ATM to fetch generated trajectories.\n\n'''\n\nfrom PARA_ATM import *\nimport imp\n\nclass Command:\n '''\n Class Command wraps the command methods and functions to be executed. For user-defined commands, this name \n should be kept the same (Command).\n '''\n \n #Here, the database connector and the parameter are passed as arguments. 
This can be changed as per need.\n def __init__(self, cursor, *args):\n self.NATS_DIR = str(Path(__file__).parent.parent.parent) + '/NATS'\n self.cursor = cursor\n pass\n \n #Method name executeCommand() should not be changed. It executes the query and displays/returns the output.\n def executeCommand(self):\n pid=os.fork()\n parentPath = str(Path(__file__).parent.parent.parent)\n if pid==0:\n host_port = 'localhost:2017'\n server_response = os.system('curl -s ' + host_port) >> 8\n if server_response == 52 or server_response == 0:\n exit()\n else:\n os.system(\"cd \" + parentPath + \"/NATS/Server && ./run &\")\n exit()\n print(pid)\n if pid!=0:\n host_port = 'localhost:2017'\n while True:\n server_response = os.system('curl -s ' + host_port) >> 8\n if server_response == 0 or server_response == 52:\n time.sleep(15)\n break\n else:\n time.sleep(5)\n CSVData = None\n #try:\n parentPath = str(Path(__file__).parent.parent.parent)\n os.system('cd ' + parentPath + '/NATS/Client && pwd')\n open_file,file_name,description = imp.find_module('DEMO_Gate_To_Gate_Simulation_SFO_PHX_beta1.0', [parentPath+'/NATS/Client/'])\n DEMO_Gate_To_Gate_Simulation_SFO_PHX_beta1 = imp.load_module('DEMO_Gate_To_Gate_Simulation_SFO_PHX_beta1.0.py',open_file,file_name,description)\n with open(parentPath + \"/NATS/Server/DEMO_Gate_To_Gate_SFO_PHX_trajectory.csv\", 'r') as trajectoryFile:\n CSVData = trajectoryFile.read()\n #except:\n #print('killing NATS process')\n #os.system(\"ps -a -o pid= | xargs -I sig kill -9 sig\")\n \n return [\"NATS_GateToGateSim\", CSVData]\n","repo_name":"ymlasu/NASA_ULI_InfoFusion","sub_path":"src/PARA_ATM/Commands/NATS_GateToGateSim.py","file_name":"NATS_GateToGateSim.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"25"} +{"seq_id":"41096763289","text":"# Merge sort. A matrix (list) of lists.\n# N. Flower beds\n# https://contest.yandex.ru/contest/24734/problems/N/\n\n# n = int(input())\n# arr = []\n# for _ in range(0, n):\n# arr.append(list(input().split()))\n\nnums = [[7,9],[7,8],[2,3],[6,10]]\nn = 4\ndef merge_sort(nums): \n if len(nums) > 1: \n mid = len(nums)//2\n left = nums[:mid] \n right = nums[mid:]\n merge_sort(left) \n merge_sort(right) \n l = r = k = 0\n while l < len(left) and r < len(right): \n if left[l] < right[r]: \n nums[k] = left[l] \n l+=1\n else: \n nums[k] = right[r] \n r+=1\n k+=1\n while l < len(left): \n nums[k] = left[l] \n l+=1\n k+=1\n while r < len(right): \n nums[k] = right[r] \n r+=1\n k+=1\n\ndef solution(nums):\n result = [nums[0]]\n for i in range(1, len(nums)):\n if nums[i][0] == result[-1][0]:\n result[-1][0] = result[i][0]\n elif nums[i][0] > result[-1][0] and nums[i][1] > result[-1][1]:\n result.append(nums[i])\n return result\n \nmerge_sort(nums)\nprint(nums)\nresult = solution(nums)\nfor i in result:\n print(*i, sep=' ')\n","repo_name":"ZOMini/algorithms","sub_path":"3-hard-recursion_sort/N-.py","file_name":"N-.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"32670521118","text":"\"\"\"\nStudents:\nEst. Melvin Josué Pereira Amaya SMIS010221\nEst. Melvin Adiel Vásquez Mejía SMIS 001021\nEst. 
Melvin Josué Pérez García SMIS007021\n\n\"\"\"\nciclo = \"yes\"\nwhile ciclo == \"yes\":\n cadena = input(\"Enter a letter: \").lower()\n tipo = cadena.isalpha()\n longitud = len(cadena)\n\n if tipo == True:\n \n if longitud == 1:\n \n if cadena == \"a\" or cadena == \"e\" or cadena == \"i\" or cadena == \"o\" or cadena == \"u\":\n print(\"The letter \"+cadena.upper()+\" is a vowel...\")\n else:\n print(\"The letter \"+cadena.upper()+\" is a consonant...\")\n\n else:\n print(\"You must enter only one letter...\")\n\n else:\n print(\"You must enter a letter...\")\n\n\n while True:\n\n ciclo = input(\"Do you want to continue? \").lower()\n \n if ciclo == \"no\" or ciclo == \"yes\":\n break\n else:\n print(\"Unknown answer, please answer yes or no...\") ","repo_name":"MelvinAmaya/Actividad-Grupal-semana-5.2","sub_path":"Validar letras.py","file_name":"Validar letras.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"3585539115","text":"from rl.agents.dqn import DQNAgent\nimport gym\nfrom processors.keras_atari import AtariProcessor\nfrom rl.memory import SequentialMemory\nfrom rl.policy import EpsGreedyQPolicy, LinearAnnealedPolicy\nfrom networks.keras_network import Network\nfrom agents.dqn_agent import create_agent\nfrom callbacks.keras_callbacks import SubTensorBoard, TestCallback\nfrom rl.callbacks import ModelIntervalCheckpoint\nimport numpy as np\nfrom keras.optimizers import Adam\nfrom agents.test import print_metrics\n\n\ndef create_agent(\n network,\n processor,\n nb_actions,\n policy,\n memory,\n batch_size=32,\n nb_steps_warmup=32,\n gamma=0.99,\n target_model_update=10000,\n train_interval=4,\n delta_clip=1.0,\n enable_double_dqn=False,\n enable_dueling_network=False,\n):\n\n return DQNAgent(\n model=network,\n processor=processor,\n nb_actions=nb_actions,\n policy=policy,\n memory=memory,\n batch_size=batch_size,\n enable_double_dqn=enable_double_dqn,\n enable_dueling_network=enable_dueling_network,\n nb_steps_warmup=nb_steps_warmup,\n gamma=gamma,\n target_model_update=target_model_update,\n train_interval=train_interval,\n delta_clip=delta_clip,\n )\n\n\ndef create_dqn_agent(\n memory_capacity=500000,\n exploration_max=1.0,\n exploration_min=0.1,\n exploration_test=0,\n exploration_steps=1e6,\n frame_shape=(84, 84),\n window_length=4,\n):\n env = gym.make(\"Tutankham-v4\")\n nb_actions = env.action_space.n\n processor = AtariProcessor(frame_shape)\n memory = SequentialMemory(limit=memory_capacity, window_length=window_length)\n policy = LinearAnnealedPolicy(\n EpsGreedyQPolicy(),\n attr=\"eps\",\n value_max=exploration_max,\n value_min=exploration_min,\n value_test=exploration_test,\n nb_steps=exploration_steps,\n )\n network = Network(frame_shape, window_length, nb_actions)\n model = network.create_model()\n\n return create_agent(model, processor, nb_actions, policy, memory)\n\n\ndef train_dqn(\n agent, optimizer, train_episodes, episode_len, logdir, checkpoint, verbose_flag=1\n):\n tb_callback = [SubTensorBoard(logdir=logdir)]\n tb_callback += [ModelIntervalCheckpoint(checkpoint, 10000)]\n agent.compile(optimizer)\n env = gym.make(\"Tutankham-v4\")\n agent.fit(\n env,\n visualize=False,\n nb_steps=train_episodes,\n verbose=verbose_flag,\n nb_max_episode_steps=episode_len,\n callbacks=tb_callback,\n )\n\n\ndef test_dqn(agent, num_episodes, episode_len, visualize):\n env = gym.make(\"Tutankham-v4\")\n test_callback = TestCallback()\n agent.test(\n env,\n 
callbacks=[test_callback],\n nb_episodes=num_episodes,\n visualize=visualize,\n nb_max_episode_steps=episode_len,\n verbose=0,\n )\n\n rewards_list = np.array(test_callback.rewards_list)\n keys_list = np.array(test_callback.keys_list)\n keys_reward_list = np.array(test_callback.keys_reward_list)\n timesteps_list = np.array(test_callback.timesteps_list)\n\n print_metrics(rewards_list, keys_list, keys_reward_list, timesteps_list)\n\n\ndef train_test_dqn(\n exploration_steps,\n epochs_train,\n checkpoint,\n epochs_test,\n episode_len_train=1000,\n episode_len_test=1000,\n test_visualize=False,\n memory_capacity=500000,\n exploration_max=1.0,\n exploration_min=0.1,\n exploration_test=0,\n logdir=\"board/DQN\",\n):\n\n agent = create_dqn_agent(\n memory_capacity=memory_capacity,\n exploration_max=exploration_max,\n exploration_min=exploration_min,\n exploration_test=exploration_test,\n exploration_steps=exploration_steps,\n )\n\n optimizer = Adam(lr=0.00025)\n train_dqn(agent, optimizer, epochs_train, episode_len_train, logdir, checkpoint)\n test_dqn(agent, epochs_test, episode_len_test, test_visualize)\n","repo_name":"klaudialemiec/tutankham","sub_path":"agents/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"36645109258","text":"'''\nThe cyclic_shift() function\nImplement a cyclic_shift() function, with type annotations, that takes\ntwo arguments in the following order:\n\nnumbers — a list of integers or floats\nstep — an integer\n\nThe function must modify the given list in place, cyclically shifting\nits elements by step positions, and return None. If step is a positive\nnumber the shift goes to the right, if negative — to the left.\n\nNote 1. Use the built-in types (list, tuple, ...) rather than the types\nfrom the typing module. Likewise, use the | notation rather than the\nUnion type from the typing module.\n\nNote 2. Submit to the grader a program that contains only the required\ncyclic_shift() function, without the code that calls it.\n\nNote 3. The test data is available at the links:\n\nArchive with the tests\nhttps://stepik.org/media/attachments/lesson/655394/tests_2745410.zip\nGitHub\nhttps://github.com/python-generation/Professional/tree/main/Module_9/Module_9.6/Module_9.6.16\n\nSample Input 1:\nnumbers = [1, 2, 3, 4, 5]\ncyclic_shift(numbers, 1)\nprint(numbers)\nSample Output 1:\n[5, 1, 2, 3, 4]\n\nSample Input 2:\nnumbers = [1, 2, 3, 4, 5]\ncyclic_shift(numbers, -2)\nprint(numbers)\nSample Output 2:\n[3, 4, 5, 1, 2]\n'''\n# def cyclic_shift(numbers: list[int|float], step: int) -> None:\n# for _ in range(abs(step)):\n# numbers.insert(\n# (len(numbers)-1, 0)[step > 0], \n# numbers.pop((0, -1)[step > 0])\n# )\n\ndef cyclic_shift(numbers: list[int | float], step: int) -> None:\n # Python's % is always non-negative for a positive modulus, e.g. (-2) % 5 == 3,\n # so one right-rotation loop covers both shift directions\n for _ in range(step % len(numbers)):\n numbers.insert(0, numbers.pop())\n\nif __name__ == '__main__':\n numbers = [1, 2, 3, 4, 5]\n cyclic_shift(numbers, 1)\n print(numbers)\n\n numbers = [1, 2, 3, 4, 5]\n cyclic_shift(numbers, -2)\n print(numbers)","repo_name":"MER-GROUP/COURSES","sub_path":"Python. 
Поколение Python - курс для профессионалов/Module 009/033.py","file_name":"033.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"25"} +{"seq_id":"12327915383","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#cangye@hotmail.com\n\"\"\"\nSingle-image detection task\n\"\"\"\nimport sys\nimport cv2\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nsys.path.append(\"../\")\nfrom Detection.MtcnnDetector import MtcnnDetector\nfrom Detection.detector import Detector\nfrom Detection.fcn_detector import FcnDetector\nfrom train_models.mtcnn_model import P_Net, R_Net, O_Net\nfrom train_data.loader import TestLoader\n\ntest_mode = \"ONet\"\nthresh = [0.9, 0.6, 0.7]\nmin_face_size = 24\nstride = 2\nslide_window = False\nshuffle = False\ndetectors = [None, None, None]\nprefix = ['../data/MTCNN_model/PNet_landmark/PNet', '../data/MTCNN_model/RNet_landmark/RNet', '../data/MTCNN_model/ONet_landmark/ONet']\n\nepoch = [18, 14, 16]\nbatch_size = [2048, 256, 16]\nmodel_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]\n# load pnet model\nif slide_window:\n PNet = Detector(P_Net, 12, batch_size[0], model_path[0])\nelse:\n PNet = FcnDetector(P_Net, model_path[0])\ndetectors[0] = PNet\n\n# load rnet model\nif test_mode in [\"RNet\", \"ONet\"]:\n RNet = Detector(R_Net, 24, batch_size[1], model_path[1])\n detectors[1] = RNet\n\n# load onet model\nif test_mode == \"ONet\":\n ONet = Detector(O_Net, 48, batch_size[2], model_path[2])\n detectors[2] = ONet\n\nmtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,\n stride=stride, threshold=thresh, slide_window=slide_window)\ngt_imdb = []\n#gt_imdb.append(\"35_Basketball_Basketball_35_515.jpg\")\n#imdb_ = dict()\n#imdb_['image'] = im_path\n#imdb_['label'] = 5\npath = \"valid-data\"\nfor item in os.listdir(path):\n if('jpg' not in item):\n continue\n gt_imdb.append(os.path.join(path,item))\n\nprint(gt_imdb)\ntest_data = TestLoader(gt_imdb)\nall_boxes,landmarks = mtcnn_detector.detect_face(test_data)\ncount = 0\nfor imagepath in gt_imdb:\n image = cv2.imread(imagepath)\n\n for bbox,landmark in zip(all_boxes[count],landmarks[count]):\n \n cv2.putText(image,str(np.round(bbox[4],2)),(int(bbox[0]),int(bbox[1])),cv2.FONT_HERSHEY_TRIPLEX,1,color=(255,0,255))\n cv2.rectangle(image, (int(bbox[0]),int(bbox[1])),(int(bbox[2]),int(bbox[3])),(0,0,255), 7)\n \n for landmark in landmarks[count]:\n for i in range(int(len(landmark)/2)):\n cv2.circle(image, (int(landmark[2*i]),int(landmark[2*i+1])), 3, (0,0,255))\n \n count = count + 1\n #cv2.imwrite(\"result_landmark/%d.png\" %(count),image)\n\n cv2.imshow(\"lala\",image)\n cv2.waitKey(0) \n","repo_name":"luckyluckydadada/MTCNN_tf","sub_path":"test/one_image_test.py","file_name":"one_image_test.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"25"} +{"seq_id":"34380731812","text":"state_abb = {\n \"NSW\": \"New South Wales\",\n \"ACT\": \"Australian Capital Territory\",\n \"NT\": \"Northern Territory\",\n \"QLD\": \"Queensland\",\n \"SA\": \"South Australia\",\n \"TAS\": \"Tasmania\",\n \"VIC\": \"Victoria\",\n \"WA\": \"Western Australia\"\n}\n\n# write code to print out Queensland and 
Victoria.\nprint(state_abb[\"QLD\"])\nprint(state_abb[\"VIC\"])","repo_name":"DuGuanhuan/Python","sub_path":"Lab10/quiz1.py","file_name":"quiz1.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"25"} +{"seq_id":"15030415713","text":"import cv2 as cv\nfrom feature_matcher import get_features\nimport sys\nimport numpy as np\nfrom math import sqrt\n\n\ndef calc_homography(src_points, dst_points):\n\n # two DLT equations per point correspondence\n iter_num = len(src_points)\n A = np.zeros((2 * iter_num, 9))\n for i, (first, second) in enumerate(zip(src_points, dst_points)):\n u1 = first[0]\n v1 = first[1]\n\n u2 = second[0]\n v2 = second[1]\n\n A[2 * i, 0] = u1\n A[2 * i, 1] = v1\n A[2 * i, 2] = 1.\n A[2 * i, 3] = 0.\n A[2 * i, 4] = 0.\n A[2 * i, 5] = 0.\n A[2 * i, 6] = -u2 * u1\n A[2 * i, 7] = -u2 * v1\n A[2 * i, 8] = -u2\n\n A[2 * i + 1, 0] = 0.\n A[2 * i + 1, 1] = 0.\n A[2 * i + 1, 2] = 0.\n A[2 * i + 1, 3] = u1\n A[2 * i + 1, 4] = v1\n A[2 * i + 1, 5] = 1.\n A[2 * i + 1, 6] = -v2 * u1\n A[2 * i + 1, 7] = -v2 * v1\n A[2 * i + 1, 8] = -v2\n\n _, _, eigen_vecs = cv.eigen(A.T @ A)\n # print(eigen_vecs[8, :])\n # print(eigen_vecs)\n\n H = eigen_vecs[8, :].reshape(3, 3)\n\n H /= H[-1, -1]\n\n # print(H)\n\n return H\n\n\ndef normalize(src_points, dst_points):\n # pts = np.concatenate((src_points, dst_points), axis=-1)\n normalized_src_points = np.copy(src_points[:, :2])\n masspoint = np.sum(normalized_src_points, axis=0)\n masspoint /= normalized_src_points.shape[0]\n # Hartley normalization: centre the points first, then scale so the mean\n # distance from the origin is sqrt(2)\n normalized_src_points -= masspoint\n avg_distance = np.average(np.linalg.norm(normalized_src_points, axis=1))\n ratio = sqrt(2) / avg_distance\n normalized_src_points *= ratio\n T1 = np.eye(3)\n T1[0, 0] = ratio\n T1[1, 1] = ratio\n T1[0, 2] = -ratio * masspoint[0]\n T1[1, 2] = -ratio * masspoint[1]\n\n normalized_dst_points = np.copy(dst_points[:, :2])\n masspoint = np.sum(normalized_dst_points, axis=0)\n masspoint /= normalized_dst_points.shape[0]\n # same centre-then-scale normalization for the destination points\n normalized_dst_points -= masspoint\n avg_distance = np.average(np.linalg.norm(normalized_dst_points, axis=1))\n ratio = sqrt(2) / avg_distance\n normalized_dst_points *= ratio\n T2 = np.eye(3)\n T2[0, 0] = ratio\n T2[1, 1] = ratio\n T2[0, 2] = -ratio * masspoint[0]\n T2[1, 2] = -ratio * masspoint[1]\n return normalized_src_points, normalized_dst_points, T1, T2\n\n\ndef num_inlier(src_points, dst_points, H, threshold) -> int:\n # print(H.shape,src_points.shape)\n proj_points = (H @ src_points.T).T\n error = np.sqrt(\n np.sum(np.square(dst_points -\n (proj_points / proj_points[:, -1].reshape(-1, 1))),\n axis=1))\n\n return np.sum(error < threshold)\n\n\ndef ransac(src_points, dst_points, normalized_src_points,\n normalized_dst_points, T1, T2, threshold, max_iter):\n best_inlier_count = -1\n best_homography = None\n for _ in range(max_iter):\n indices = np.random.choice(len(src_points), 4, replace=False)\n selected_src_points = normalized_src_points[indices]\n selected_dst_points = normalized_dst_points[indices]\n H = calc_homography(src_points=selected_src_points,\n dst_points=selected_dst_points)\n H = np.linalg.inv(T2) @ H @ T1\n\n inliers = num_inlier(src_points=src_points,\n dst_points=dst_points,\n H=H,\n threshold=threshold)\n if inliers > best_inlier_count:\n best_inlier_count = inliers\n best_homography = H\n\n return best_homography, best_inlier_count\n\n\ndef transform_img(img: np.ndarray, new_img: np.ndarray, tr: np.ndarray,\n is_perspective):\n\n inv_tr = np.linalg.inv(tr)\n width, height, _ = img.shape\n 
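# inverse warping: each destination pixel is mapped back through inv_tr and sampled from the source, which avoids the holes a forward mapping would leave\n 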
new_w, new_h, _ = new_img.shape\n img = np.copy(img)\n\n for x in range(new_w):\n for y in range(new_h):\n pt = np.array([x, y, 1.0])\n ptr = inv_tr @ pt\n\n if is_perspective:\n ptr = (1.0 / ptr[2]) * ptr\n\n new_x = round(ptr[0])\n new_y = round(ptr[1])\n\n if 0 <= new_x < width and 0 <= new_y < height:\n new_img[x, y] = np.copy(img[new_x, new_y])\n\n\nif __name__ == \"__main__\":\n img1 = cv.imread(sys.argv[1])\n img2 = cv.imread(sys.argv[2])\n\n point_pairs = get_features(img1, img2)\n\n src_points = np.array([(pair[0][1], pair[0][0], 1)\n for pair in point_pairs])\n dst_points = np.array([(pair[1][1], pair[1][0], 1)\n for pair in point_pairs])\n\n normalized_src_points, normalized_dst_points, T1, T2 = normalize(\n src_points=src_points, dst_points=dst_points)\n\n H, inlier_count = ransac(src_points=src_points,\n dst_points=dst_points,\n normalized_src_points=normalized_src_points,\n normalized_dst_points=normalized_dst_points,\n T1=T1,\n T2=T2,\n threshold=1,\n max_iter=5000)\n print(inlier_count)\n\n new_shape = (img1.shape[0] * 2, img1.shape[1] * 2, img1.shape[2])\n new_img = np.zeros(new_shape, dtype=img2.dtype)\n\n transform_img(img2, new_img, np.eye(3), True)\n transform_img(img1, new_img, H, True)\n\n cv.namedWindow(\"Display window\")\n cv.imshow(\"Display window\", new_img)\n cv.imwrite(f\"{sys.argv[1]}+{sys.argv[2]}.png\", new_img)\n cv.waitKey(0)\n","repo_name":"gyaur/3D_Computer_Vision","sub_path":"Practice/Assignment_3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"34175479368","text":"import time\nimport random\nfrom faker import Faker\nfrom libs.api_helpers.employee_api import EmployeeApi\nfrom libs.builders.employee_builder import EmployeeBuilder\nfrom assertpy import assert_that\nfrom text_resources.employee import EmployeeTexts\nfrom http import HTTPStatus\n\n\nfake = Faker()\n\nclient = EmployeeApi()\n\nclass TestCreateEmployee:\n def test_create_employee_should_return_201_when_all_fields_are_valid(self):\n # Arrange\n employee_payload = EmployeeBuilder()\\\n .setName(fake.name())\\\n .setAge(str(random.randint(18, 100)))\\\n .setSalary(str(random.randint(500, 1000000)))\\\n .build()\n # Act\n create_empl_res = client.create_employee(employee_payload)\n # Assertion\n assert_that(create_empl_res.status_code).is_equal_to(HTTPStatus.OK)\n assert_that(create_empl_res.json()['message']).is_equal_to(EmployeeTexts.created_item)\n assert_that(create_empl_res.json()['status']).is_equal_to('success') # Can use enum if we have multiple statuses, skip for now\n assert_that(create_empl_res.json()['data']).is_equal_to(employee_payload, include=['name', 'age', 'salary'])\n\n # Get created employee's id\n time.sleep(60) # Sleep 60 secs to avoid a too-many-requests error from back-to-back API calls\n employee_id = create_empl_res.json()['data']['id']\n get_empl_res = client.get_employee(employee_id)\n assert_that(get_empl_res.status_code).is_equal_to(HTTPStatus.OK)\n assert_that(get_empl_res.json()['status']).is_equal_to('success')\n assert_that(get_empl_res.json()['data']).is_none()\n assert_that(get_empl_res.json()['message']).is_equal_to(EmployeeTexts.item_fetched)\n\n # Clean up data (delete the created employee) in a setup/teardown fixture (or reset the db after all test 
runs)","repo_name":"longnv1995/rest_api_project","sub_path":"test_suites/employees/test_create_employee.py","file_name":"test_create_employee.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"30318619306","text":"import sys\nfrom PySide2 import QtGui, QtWidgets, QtCore, Qt\nfrom PySide2.QtWidgets import (\n QApplication,\n QMainWindow,\n QPushButton,\n QToolTip,\n QMessageBox,\n QLabel,\n QDialog,\n)\nfrom library.db import Bicycle_db\nimport time\nimport math\n\n\nclass Communicate(QtCore.QObject):\n\n closeApp = QtCore.Signal()\n\n\nclass GoodsForm(QMainWindow):\n def __init__(\n self,\n table=False,\n new_good=False,\n values=False,\n category_widget=False,\n course=False,\n ):\n super().__init__()\n\n self.values = values\n self.new_good = new_good\n self.category_widget = category_widget\n self.table = table\n self.values_for_new_good_window = {}\n\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(544, 297)\n self.c = Communicate()\n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(160, 80, 53, 16))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(160, 130, 71, 16))\n self.label_2.setObjectName(\"label_2\")\n self.spinBox = QtWidgets.QSpinBox(Form)\n self.spinBox.setGeometry(QtCore.QRect(160, 150, 61, 21))\n self.spinBox.setObjectName(\"spinBox\")\n self.label_3 = QtWidgets.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(160, 180, 71, 16))\n self.label_3.setObjectName(\"label_3\")\n self.lineEdit = QtWidgets.QLineEdit(Form)\n self.lineEdit.setGeometry(QtCore.QRect(160, 100, 371, 21))\n self.lineEdit.setObjectName(\"lineEdit\")\n self.lineEdit_2 = QtWidgets.QLineEdit(Form)\n self.lineEdit_2.setGeometry(QtCore.QRect(160, 200, 81, 21))\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n self.label_4 = QtWidgets.QLabel(Form)\n self.label_4.setGeometry(QtCore.QRect(250, 180, 71, 16))\n self.label_4.setObjectName(\"label_4\")\n self.comboBox = QtWidgets.QComboBox(Form)\n self.comboBox.setGeometry(QtCore.QRect(250, 200, 81, 21))\n self.comboBox.setEditable(False)\n self.comboBox.setObjectName(\"comboBox\")\n self.comboBox.addItem(\"\")\n self.lineEdit_3 = QtWidgets.QLineEdit(Form)\n self.lineEdit_3.setGeometry(QtCore.QRect(340, 200, 81, 21))\n self.lineEdit_3.setText(\"\")\n self.lineEdit_3.setObjectName(\"lineEdit_3\")\n self.label_5 = QtWidgets.QLabel(Form)\n self.label_5.setGeometry(QtCore.QRect(340, 180, 71, 16))\n self.label_5.setObjectName(\"label_5\")\n self.lineEdit_4 = QtWidgets.QLineEdit(Form)\n self.lineEdit_4.setGeometry(QtCore.QRect(440, 200, 91, 21))\n self.lineEdit_4.setText(\"\")\n self.lineEdit_4.setObjectName(\"lineEdit_4\")\n self.label_6 = QtWidgets.QLabel(Form)\n self.label_6.setGeometry(QtCore.QRect(440, 180, 81, 20))\n self.label_6.setObjectName(\"label_6\")\n self.label_7 = QtWidgets.QLabel(Form)\n self.label_7.setGeometry(QtCore.QRect(450, 130, 81, 20))\n self.label_7.setObjectName(\"label_7\")\n self.lineEdit_5 = QtWidgets.QLineEdit(Form)\n self.lineEdit_5.setGeometry(QtCore.QRect(440, 150, 91, 21))\n self.lineEdit_5.setText(\"\")\n self.lineEdit_5.setObjectName(\"lineEdit_5\")\n self.treeWidget = QtWidgets.QTreeWidget(Form)\n self.treeWidget.setGeometry(QtCore.QRect(0, 50, 151, 241))\n self.treeWidget.setObjectName(\"treeWidget\")\n self.pushButton = QtWidgets.QPushButton(Form)\n self.pushButton.setGeometry(QtCore.QRect(160, 240, 121, 51))\n 
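# pushButton is the Cancel button; pushButton_2 below is the Confirm button (texts set in retranslateUi)\n 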
self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(Form)\n self.pushButton_2.setGeometry(QtCore.QRect(420, 240, 111, 51))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.label_8 = QtWidgets.QLabel(Form)\n self.label_8.setGeometry(QtCore.QRect(0, 30, 171, 16))\n font = QtGui.QFont()\n font.setFamily(\"Cantarell\")\n font.setPointSize(12)\n font.setBold(False)\n font.setWeight(50)\n self.label_8.setFont(font)\n self.label_8.setObjectName(\"label_8\")\n self.label_9 = QtWidgets.QLabel(Form)\n self.label_9.setGeometry(QtCore.QRect(150, 10, 51, 16))\n font = QtGui.QFont()\n font.setFamily(\"Cantarell\")\n font.setPointSize(12)\n font.setBold(False)\n font.setWeight(50)\n self.label_9.setFont(font)\n self.label_9.setObjectName(\"label_9\")\n self.USD_value = QtWidgets.QLabel(Form)\n self.USD_value.setGeometry(QtCore.QRect(200, 10, 65, 16))\n font = QtGui.QFont()\n font.setFamily(\"Cantarell\")\n font.setPointSize(12)\n font.setBold(False)\n font.setWeight(50)\n self.USD_value.setFont(font)\n self.USD_value.setObjectName(\"USD_value\")\n self.label_10 = QtWidgets.QLabel(Form)\n self.label_10.setGeometry(QtCore.QRect(300, 20, 61, 21))\n font = QtGui.QFont()\n font.setFamily(\"Cantarell\")\n font.setPointSize(12)\n font.setBold(False)\n font.setWeight(50)\n self.label_10.setFont(font)\n self.label_10.setObjectName(\"label_10\")\n self.lineEdit_6 = QtWidgets.QLineEdit(Form)\n self.lineEdit_6.setGeometry(QtCore.QRect(380, 20, 81, 25))\n self.lineEdit_6.setObjectName(\"lineEdit_6\")\n self.category_title = QtWidgets.QLabel(Form)\n self.category_title.setGeometry(QtCore.QRect(180, 50, 171, 16))\n font = QtGui.QFont()\n font.setFamily(\"Cantarell\")\n font.setPointSize(12)\n font.setBold(False)\n font.setWeight(50)\n self.category_title.setFont(font)\n self.category_title.setObjectName(\"category_title\")\n self.comboBox.hide()\n self.label_4.hide()\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n self.additional_actions(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(\"добавить новый товар\")\n if self.values:\n Form.setWindowTitle(\"изменить товар\")\n self.label.setText(_translate(\"Form\", \"Название\"))\n self.label_2.setText(_translate(\"Form\", \"В наличии\"))\n self.label_3.setText(_translate(\"Form\", \"Закупка\"))\n self.label_4.setText(_translate(\"Form\", \"Валюта\"))\n self.comboBox.setItemText(0, _translate(\"Form\", \"USD\"))\n self.label_5.setText(_translate(\"Form\", \"Наценка\"))\n self.label_6.setText(_translate(\"Form\", \"Продаж,USD\"))\n self.label_7.setText(_translate(\"Form\", \"Продаж,ГРН\"))\n self.pushButton.setText(_translate(\"Form\", \"Отмена\"))\n self.pushButton_2.setText(_translate(\"Form\", \"Подтвердить\"))\n self.label_8.setText(_translate(\"Form\", \"Категория\"))\n self.USD_value.setText(_translate(\"Form\", \"30\"))\n self.label_10.setText(_translate(\"Form\", \"Артикул\"))\n self.category_title.setText(_translate(\"Form\", \"Категория\"))\n\n def find_child_category(self, list_with_results):\n id_with_child = []\n for result in list_with_results:\n if result[\"parent_id\"] == -1:\n id_with_child.append({\"id\": result[\"id\"], \"childs\": []})\n else:\n for number in range(len(id_with_child)):\n if id_with_child[number][\"id\"] == result[\"parent_id\"]:\n id_with_child[number][\"childs\"].append(result[\"name_category\"])\n return id_with_child\n\n def get_category_values(self):\n list_dict_with_results = []\n db = 
Bicycle_db()\n result = db.edit(\"Select * FROM categories\")\n count = len(result)\n for item in result:\n id = item[0]\n name_category = item[1]\n parent_id = item[2]\n export_date = item[3]\n list_dict_with_results.append(\n {\n \"id\": id,\n \"name_category\": name_category,\n \"parent_id\": parent_id,\n \"export_date\": export_date,\n }\n )\n # sort_by_id\n return sorted(list_dict_with_results, key=lambda k: k[\"parent_id\"])\n\n def fill_tree(self):\n list_with_results = self.get_category_values()\n childs_categories = self.find_child_category(list_with_results)\n for res in list_with_results:\n if res[\"parent_id\"] == -1:\n item = QtWidgets.QTreeWidgetItem([res[\"name_category\"]])\n current_index = self.treeWidget.currentItem()\n self.treeWidget.addTopLevelItem(item)\n for child in childs_categories:\n if res[\"id\"] == child[\"id\"]:\n if len(child[\"childs\"]) != 0:\n for element in child[\"childs\"]:\n QtWidgets.QTreeWidgetItem(item, [element])\n\n def transtlate_category(self, category):\n db = Bicycle_db()\n if isinstance(category, str):\n category_id = db.insert(\n 'SELECT id from categories where name like \"%{}%\"'.format(category)\n )\n return category_id[0]\n elif isinstance(category, int):\n category_id = db.insert(\n \"SELECT name from categories where id ={}\".format(category)\n )\n return category_id[0][0]\n\n def get_values_from_good_windows(self):\n good_values = {}\n good_values[\"article\"] = self.lineEdit_6.text()\n good_values[\"profit\"] = self.lineEdit_3.text()\n good_values[\"buy\"] = self.lineEdit_2.text()\n good_values[\"sell\"] = self.lineEdit_4.text()\n good_values[\"name\"] = self.lineEdit.text()\n good_values[\"sell_uah\"] = self.lineEdit_5.text()\n if self.category_title.text() == \"категории\":\n good_values[\"category\"] = 0\n else:\n good_values[\"category\"] = (\n self.transtlate_category(self.category_title.text())\n )[0]\n good_values[\"qty\"] = str(self.spinBox.value())\n return good_values\n\n def insert_into_good_form(self, good_values):\n self.lineEdit_6.setText(good_values[\"article\"])\n self.lineEdit_3.setText(good_values[\"profit\"])\n self.lineEdit_2.setText(good_values[\"buy\"])\n self.lineEdit_4.setText(good_values[\"sell\"])\n self.lineEdit_5.setText(good_values[\"sell_uah\"])\n self.spinBox.setValue(int(good_values[\"qty\"]))\n\n def store_good(self):\n\n values = self.get_values_from_good_windows()\n db = Bicycle_db()\n schema = db.schema[\"goods\"]\n str_schema = \",\".join(schema)\n query = 'insert into \"goods\"({}) values(\"\",\"{}\",{},{},{},{},{},\"{}\",\"{}\",{})'.format(\n str_schema,\n values[\"name\"],\n values[\"qty\"],\n values[\"buy\"],\n values[\"sell\"],\n str(values[\"profit\"]).split(\"%\")[0],\n values[\"category\"],\n \"USD\",\n values[\"sell_uah\"],\n values[\"article\"],\n )\n db.insert(query)\n self.table.update_table()\n self.update_good_window()\n\n def edit_good(self):\n values = self.get_values_from_good_windows()\n db = Bicycle_db()\n query = \"\"\"UPDATE goods SET (name,qty,buy,sell,profit,category,sell_uah,article)=(\"{}\",{},{},{},\"{}\",{},{},{}) WHERE article like \"%{}%\";\"\"\".format(\n values[\"name\"],\n values[\"qty\"],\n values[\"buy\"],\n values[\"sell\"],\n values[\"profit\"],\n values[\"category\"],\n values[\"sell_uah\"],\n values[\"article\"],\n values[\"article\"],\n )\n db.insert(query)\n self.table.update_table()\n\n def cur_category_handler(self):\n db = Bicycle_db()\n cur_category = db.exists(\"cur_category\")\n if cur_category:\n res = db.insert(\n \"select name_category from 
cur_category where id=(select max(id) from cur_category)\"\n )\n cur_category = res[0][0]\n else:\n cur_category = \"Всі\"\n return cur_category\n\n def update_good_window(self):\n self.values_for_new_good_window = self.get_values_from_good_windows()\n self.values_for_new_good_window[\"article\"] = int(\n self.values_for_new_good_window[\"article\"]\n )\n self.values_for_new_good_window[\"article\"] += 1\n self.values_for_new_good_window[\"article\"] = str(\n self.values_for_new_good_window[\"article\"]\n )\n self.values_for_new_good_window[\"category\"] = self.transtlate_category(\n self.values_for_new_good_window[\"category\"]\n )\n self.insert_into_good_form(self.values_for_new_good_window)\n\n def selectItem(self, widget, itemOrText):\n oldIndex = widget.selectionModel().currentIndex()\n try: # an item is given--------------------------------------------\n newIndex = widget.model().indexFromItem(itemOrText)\n except: # a text is given and we are looking for the first match---\n listIndexes = widget.model().match(\n widget.model().index(0, 0),\n QtCore.Qt.DisplayRole,\n itemOrText,\n QtCore.Qt.MatchStartsWith,\n )\n newIndex = listIndexes[0]\n widget.selectionModel().select( # programmatical selection---------\n newIndex, QtCore.QItemSelectionModel.ClearAndSelect\n )\n\n def additional_actions(self, Form):\n self.add_actions(Form)\n\n self.fill_tree()\n self.treeWidget.setHeaderHidden(True)\n self.lineEdit_3.setEnabled(False)\n self.lineEdit_5.setEnabled(False)\n self.lineEdit_6.setEnabled(False)\n if self.values:\n # when change_item\n self.lineEdit_4.setText(self.values[\"Продаж\"])\n self.lineEdit_3.setText(self.values[\"Нац\"])\n self.lineEdit_3.setEnabled(False)\n self.lineEdit_2.setText(self.values[\"Закупка\"])\n self.lineEdit_6.setText(self.values[\"Арт\"])\n self.lineEdit_5.setText(self.values[\"ГРН\"])\n self.lineEdit.setText(self.values[\"Название\"])\n self.spinBox.setValue(int(self.values[\"Кол-во.\"]))\n\n self.label_9.setText(\"\")\n self.comboBox.hide()\n self.label_4.hide()\n self.pushButton_2.clicked.connect(self.edit_good)\n self.pushButton_2.clicked.connect(lambda: Form.close())\n # move categories\n category = self.cur_category_handler()\n item = QtWidgets.QTreeWidgetItem(category)\n self.category_title.setText(category)\n index = self.find_element_index_in_tree(category)\n\n if len(index) == 1:\n pass\n # self.treeWidget.model().index(index[0])\n # handle for child or not child element\n\n else:\n self.lineEdit_6.setEnabled(False)\n ids_for_new_good = \"SELECT MAX(article) from goods\"\n db = Bicycle_db()\n # next article id: the current MAX(article), or 0 for an empty table\n good_id = db.insert(ids_for_new_good)[0][0] or 0\n good_id += 1\n self.lineEdit_6.setText(str(good_id))\n self.spinBox.setValue(1)\n self.treeWidget.setCurrentItem(QtWidgets.QTreeWidgetItem(\"Bci\"))\n category = self.cur_category_handler()\n self.category_title.setText(category)\n self.pushButton_2.clicked.connect(self.store_good)\n db.close()\n\n def find_element_index_in_tree(self, category):\n root = self.treeWidget.invisibleRootItem()\n child_count = root.childCount()\n for i in range(child_count):\n item = root.child(i)\n url = item.text(0) # text at first (0) column\n if root.child(i).text(0) == category:\n return [i]\n for x in range(item.childCount()):\n if root.child(i).child(x).text(0) == category:\n return [i, x]\n\n def get_current_course(self):\n db = Bicycle_db()\n query = db.insert(\"SELECT value FROM settings WHERE name = 'Курс'\")\n db.close()\n if len(query) != 0:\n course = int(query[0][0])\n else:\n course = 20.3\n 
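# 20.3 is a hard-coded fallback rate used when the settings table has no stored exchange rate\n 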
self.USD_value.setText(f\"USD-{str(course)}\")\n return course\n\n def recalculate_procent(self, sell, buy):\n dif = abs(float(buy) - float(sell))\n if buy < sell:\n if isinstance(sell, float) or isinstance(buy, float):\n res = int(round((dif / buy) * 100, 1))\n else:\n res = int(str(int(round((dif / buy) * 100, 1))))\n return res\n else:\n return -(int(str(int(round((dif / buy) * 100, 1)))))\n\n def recalculate_price(self):\n buy_price_window = self.lineEdit_2.text()\n sell_price_window = self.lineEdit_4.text()\n self.onlyInt = QtGui.QRegExpValidator(\n QtCore.QRegExp(\"^[0-9]{1,8}([.][0-9]{1,4})?$\")\n )\n self.lineEdit_3.setValidator(self.onlyInt)\n self.lineEdit_5.setValidator(self.onlyInt)\n self.lineEdit_2.setValidator(self.onlyInt)\n self.lineEdit_4.setValidator(self.onlyInt)\n\n def get_only_digits_from_text_window(window_value):\n try:\n int(window_value)\n except:\n if str(window_value).endswith(\".\"):\n window_value = (str(window_value).split(\".\"))[0]\n elif \",\" in window_value:\n print(\"remove ,\")\n elif float(window_value):\n window_value = float(window_value)\n else:\n window_value = [\n int(s) for s in window_value.split() if s.isdigit()\n ][0]\n print(window_value)\n return window_value\n\n def update_digits(self, sell_price, buy_price):\n if isinstance(sell_price, float) or isinstance(buy_price, float):\n sell_price_uah = math.ceil(float(sell_price) * self.course)\n profit_procent = self.recalculate_procent(\n float(sell_price), float(buy_price)\n )\n if sell_price_uah == int(sell_price_uah):\n sell_price = int(sell_price_uah)\n else:\n sell_price_uah = int(sell_price) * self.course\n profit_procent = self.recalculate_procent(int(sell_price), int(buy_price))\n self.lineEdit_3.setText(\"\")\n self.lineEdit_3.setText(str(profit_procent) + \"%\")\n self.lineEdit_5.setText(\"\")\n if sell_price_uah == int(sell_price_uah):\n self.lineEdit_5.setText(str(int(sell_price_uah)))\n else:\n self.lineEdit_5.setText(str(sell_price_uah))\n\n if buy_price_window != \"\" and buy_price_window != \"0\":\n buy_price = get_only_digits_from_text_window(buy_price_window)\n if sell_price_window != \"\" and sell_price_window != \"0\":\n sell_price = get_only_digits_from_text_window(sell_price_window)\n update_digits(self, sell_price, buy_price)\n elif sell_price_window != \"\" and sell_price_window != \"0\":\n sell_price = get_only_digits_from_text_window(sell_price_window)\n sell_price_uah = int(sell_price) * self.course\n self.lineEdit_5.setText(\"\")\n self.lineEdit_5.setText(str(sell_price_uah))\n if buy_price_window != \"\" and buy_price_window != \"0\":\n buy_price = get_only_digits_from_text_window(buy_price_window)\n update_digits(self, sell_price, buy_price)\n\n def make_all_products_bigger(self):\n def make_font_bigger(lineEdit):\n f = lineEdit.font()\n f.setPointSize(11)\n f.setBold(True)\n lineEdit.setFont(f)\n\n windows = [\n self.lineEdit_2,\n self.lineEdit_3,\n self.lineEdit_4,\n self.lineEdit_5,\n self.comboBox,\n ]\n for window in windows:\n make_font_bigger(window)\n\n def add_actions(self, Form):\n self.pushButton.clicked.connect(lambda: Form.close())\n self.pushButton_2.clicked.connect(lambda: Form.close())\n self.lineEdit_2.inputRejected.connect(self.recalculate_price)\n self.lineEdit_2.textChanged.connect(self.recalculate_price)\n self.lineEdit_4.inputRejected.connect(self.recalculate_price)\n self.lineEdit_4.textChanged.connect(self.recalculate_price)\n self.treeWidget.clicked.connect(\n lambda: self.category_title.setText(self.treeWidget.currentItem().text(0))\n )\n 
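# fetch the exchange rate once and cache it; recalculate_price() reads self.course on every edit\n 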
self.course = self.get_current_course()\n self.make_all_products_bigger()\n","repo_name":"evgeniygazetdinov/bicycle_warehouse","sub_path":"widgets/good_form.py","file_name":"good_form.py","file_ext":"py","file_size_in_byte":21004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"25"} +{"seq_id":"898309727","text":"#################################################\n# Layer.py\n# Layer class used in modes with drawings\n#\n# Your name: Jackie Yang Section: J\n# Your andrew id: jaclyny\n#\n#################################################\n\nfrom Drawnimate import *\n\nclass Layer(object):\n def __init__(self, visible = True):\n self.strokes = [] # stores all brush strokes on layer\n self.visible = visible ","repo_name":"jackiey321/drawnimate","sub_path":"Layer.py","file_name":"Layer.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"2733266485","text":"import requests\n#https://restapi.amap.com/v3/place/around?key=<your key>&location=116.473168,39.993015&radius=10000&types=011100\n\n\ndef urlget(dic1,fn):\n lis1=['key','location','radius','types']\n url='https://restapi.amap.com/v3/place/around?'\n for words in lis1:\n url+=(str(words)+'='+str(dic1[words]).strip('(').strip(')').replace(' ','')+'&')\n print(url)\n for i in range(1,6):\n url1=url+('page='+str(i))\n print(url1)\n \n r=requests.get(url1)\n js=r.json()\n print(js)\n write(js,fn)\n if len(js['pois'])!=20:break\n \n\ndef write(js,fn):\n global p\n h = len(js['pois'])\n with open('./csv/%s.csv'%fn,'a+',newline='') as f:\n for n in range(0,h):\n p+=1\n print('Writing record %i' % p)\n for name in lis2:\n try:\n con = js['pois'][n][name]\n f.write('\"%s\",' % con)\n except:continue\n f.write('\\n')\n\nlis2=['name','pname','cityname','address','location','type','tel','distance']\ndic1={'key':'','location':'','radius':'1000','types':''}\n \np=0\nlocations=[(119.281123,26.056889)]\n#typess=['080300','050000','160000','070000','060000']\n#typess=['150700','150600','150500','150400','150300','150200','150100','151200','151300']\ntypess=['120300']\nfor loc in locations:\n for types in typess:\n dic1['location']=loc\n dic1['types']=types\n fn=str(loc)+str(types)\n #fn=str(loc)+'other'\n #\n urlget(dic1,fn)\n \n \n","repo_name":"rtyfghvbnlndl/get_amap_POI_data","sub_path":"newpoi.py","file_name":"newpoi.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"25"} +{"seq_id":"42651160868","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 17 23:58:00 2022\n\n@author: ndasadhikari\n\"\"\"\n\nimport string\nimport re\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom services.filereaders import get_data\n\ndf_recommended1 = get_data(\n 'local_files/NST_Recommendation_SubRegion_Translated_Text_corrected.csv', 'csv', 'latin-1')\ndf_recommend_approval = df_recommended1['NST Translater'].value_counts(\n).rename_axis('Approval NST Comments').to_frame('Users Used')\n\nxgb_model = pickle.load(open(\n \"local_files/xgb_model.pkl\", \"rb\"))\nvectorizer = pickle.load(open(\n \"local_files/Xvec.pkl\", \"rb\"))\n\nNST_types = list(df_recommended1['NST Type'].value_counts().keys())\n# NST_types.append(None)\nNST_groups = list(df_recommended1['NST Group'].value_counts().keys())\n# NST_groups.append(None)\nSubRegion = list(df_recommended1['SubRegion'].value_counts().keys())\n\n# API: 1. 
To get the options for nst type, group and subregion\n\n\ndef get_defaults():\n return {\n 'NST_types': NST_types,\n 'NST_groups': NST_groups,\n 'SubRegion': SubRegion,\n }\n\n\n# Cleaning Text phase\n#from nltk.corpus import stopwords\n#stop = stopwords.words('english')\nstop = ['i',\n 'me',\n 'my',\n 'myself',\n 'we',\n 'our',\n 'ours',\n 'ourselves',\n 'you',\n \"you're\",\n \"you've\",\n \"you'll\",\n \"you'd\",\n 'your',\n 'yours',\n 'yourself',\n 'yourselves',\n 'he',\n 'him',\n 'his',\n 'himself',\n 'she',\n \"she's\",\n 'her',\n 'hers',\n 'herself',\n 'it',\n \"it's\",\n 'its',\n 'itself',\n 'they',\n 'them',\n 'their',\n 'theirs',\n 'themselves',\n 'what',\n 'which',\n 'who',\n 'whom',\n 'this',\n 'that',\n \"that'll\",\n 'these',\n 'those',\n 'am',\n 'is',\n 'are',\n 'was',\n 'were',\n 'be',\n 'been',\n 'being',\n 'have',\n 'has',\n 'had',\n 'having',\n 'do',\n 'does',\n 'did',\n 'doing',\n 'a',\n 'an',\n 'the',\n 'and',\n 'but',\n 'if',\n 'or',\n 'because',\n 'as',\n 'until',\n 'while',\n 'of',\n 'at',\n 'by',\n 'for',\n 'with',\n 'about',\n 'against',\n 'between',\n 'into',\n 'through',\n 'during',\n 'before',\n 'after',\n 'above',\n 'below',\n 'to',\n 'from',\n 'up',\n 'down',\n 'in',\n 'out',\n 'on',\n 'off',\n 'over',\n 'under',\n 'again',\n 'further',\n 'then',\n 'once',\n 'here',\n 'there',\n 'when',\n 'where',\n 'why',\n 'how',\n 'all',\n 'any',\n 'both',\n 'each',\n 'few',\n 'more',\n 'most',\n 'other',\n 'some',\n 'such',\n 'no',\n 'nor',\n 'not',\n 'only',\n 'own',\n 'same',\n 'so',\n 'than',\n 'too',\n 'very',\n 's',\n 't',\n 'can',\n 'will',\n 'just',\n 'don',\n \"don't\",\n 'should',\n \"should've\",\n 'now',\n 'd',\n 'll',\n 'm',\n 'o',\n 're',\n 've',\n 'y',\n 'ain',\n 'aren',\n \"aren't\",\n 'couldn',\n \"couldn't\",\n 'didn',\n \"didn't\",\n 'doesn',\n \"doesn't\",\n 'hadn',\n \"hadn't\",\n 'hasn',\n \"hasn't\",\n 'haven',\n \"haven't\",\n 'isn',\n \"isn't\",\n 'ma',\n 'mightn',\n \"mightn't\",\n 'mustn',\n \"mustn't\",\n 'needn',\n \"needn't\",\n 'shan',\n \"shan't\",\n 'shouldn',\n \"shouldn't\",\n 'wasn',\n \"wasn't\",\n 'weren',\n \"weren't\",\n 'won',\n \"won't\",\n 'wouldn',\n \"wouldn't\",\n 'a',\n 'b',\n 'c',\n 'd',\n 'e',\n 'f',\n 'g',\n 'h',\n 'i',\n 'j',\n 'k',\n 'l',\n 'm',\n 'n',\n 'o',\n 'p',\n 'q',\n 'r',\n 's',\n 't',\n 'u',\n 'v',\n 'w',\n 'x',\n 'y',\n 'z',\n 'aa',\n 'ab',\n 'bf',\n 'bb',\n 'abb',\n 'ac',\n 'ae',\n 'af',\n 'ba',\n 'bb',\n 'bd',\n 'af',\n 'ef',\n 'bc']\n\n\ndef clean_text(text):\n '''Make text lowercase, remove text in square brackets,remove links,remove punctuation\n and remove words containing numbers.'''\n text = str(text).lower()\n text = re.sub('\\[.*?\\]', '', text)\n text = re.sub('https?://\\S+|www\\.\\S+', '', text)\n text = re.sub('<.*?>+', '', text)\n text = re.sub('[%s]' % re.escape(string.punctuation), '', text)\n text = re.sub('\\n', '', text)\n text = re.sub('\\w*\\d\\w*', '', text)\n return text\n\n\nnew_alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',\n 'u', 'v', 'w', 'x', 'y', 'z', 'aa', 'ab', 'bf', 'bb', 'abb', 'ac', 'ae', 'af', 'ba', 'bb', 'bd', 'af', 'ef', 'bc']\nfor num in new_alpha:\n stop.append(num)\ndf_recommended1['cleaned_text'] = df_recommended1['NST Translater'].apply(\n lambda x: clean_text(x))\ndf_recommended1['NST_clean_translated_Comment'] = df_recommended1['cleaned_text'].apply(\n lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n\n\ndef evaluate_approval(approved, df_unique_NST_comments, vectorizer, 
xgb_model):\n #approved = 'Microsoft will be paid for work performed but not any additional hours of the fee. Approved by the SPL.'\n d = {'NST_clean_translated_Comment': [[approved]]}\n df_recommend_approval = pd.DataFrame(d)\n df_training = df_recommend_approval\n df_training['NST_clean_translated_Comment'] = df_training['NST_clean_translated_Comment'].apply(\n str)\n df_training['length'] = df_training['NST_clean_translated_Comment'].map(\n lambda text: len(text))\n df_training['NST_clean_translated_Comment'] = df_training['NST_clean_translated_Comment'].apply(\n lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n\n # tfnew = TfidfVectorizer(max_features=50000, ngram_range=(2, 3), vocabulary = tfidf_tokens)#Xvec.vocabulary_)\n corpus = df_training['NST_clean_translated_Comment'].values\n print(\"Corpus, \", corpus)\n Xvec1 = vectorizer.transform(corpus)\n cols_when_model_builds = xgb_model.get_booster().feature_names\n X = pd.DataFrame(data=Xvec1.toarray(), columns=cols_when_model_builds)\n zpred = xgb_model.predict_proba(X)\n\n \"\"\"\n The code for Recommendation\n \"\"\"\n\n df_unique_NST_comments['unique_NST_comments'] = df_unique_NST_comments['unique_NST_comments'].apply(\n str)\n df_unique_NST_comments['documents_cleaned'] = df_unique_NST_comments['unique_NST_comments'].apply(lambda x: \" \".join(\n re.sub(r'[^a-zA-Z]', ' ', w).lower() for w in x.split() if re.sub(r'[^a-zA-Z]', ' ', w).lower() not in stop))\n #vectorizer123 = TfidfVectorizer()\n tfidf_vectors = vectorizer.transform(\n df_unique_NST_comments.documents_cleaned)\n\n tfidf_test_vectors = vectorizer.transform(\n df_training.NST_clean_translated_Comment)\n pairwise_similarities = np.dot(\n tfidf_vectors, tfidf_test_vectors.T).toarray()\n\n df_unique_NST_comments['similarity'] = pairwise_similarities\n\n final_df = df_unique_NST_comments.sort_values(\n by=['similarity'], ascending=False)\n\n if final_df.head(1).tail(1)['similarity'].values <= 0.0:\n print('zpred: ', [1, 0])\n print('NOT APPROVED')\n else:\n print('zpred: ', zpred)\n\n return zpred\n\n# API: 2. 
To get default comments for the NST options selected\n\n\ndef get_default_comments(nst_type, nst_group, sub_region=None, from_server=False):\n df_training1 = df_recommended1\n df_approved = df_training1[df_training1['New Review Recommendation'] == 1]\n\n df_approved1 = df_approved[df_approved['NST Type'] == nst_type]\n df_approved12 = df_approved1[df_approved1['NST Group'] == nst_group]\n if sub_region:\n df_approved13 = df_approved12[df_approved12['SubRegion'] == sub_region]\n else:\n df_approved13 = df_approved12\n\n if df_approved13.shape[0] == 0:\n if df_approved12.shape[0] == 0:\n if df_approved1.shape[0] == 0:\n df_approval_reco = df_approved\n else:\n df_approval_reco = df_approved1\n else:\n df_approval_reco = df_approved12\n else:\n df_approval_reco = df_approved13\n\n df_approval_reco['NST_clean_translated_Comment'] = df_approval_reco['NST_clean_translated_Comment'].apply(\n str)\n df_approval_reco['length'] = df_approval_reco['NST_clean_translated_Comment'].map(\n lambda text: len(text))\n df_approval_reco['NST_clean_translated_Comment'] = df_approval_reco['NST_clean_translated_Comment'].apply(\n lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n\n value_counts = df_approval_reco['NST_clean_translated_Comment'].value_counts(\n dropna=True, sort=True)\n\n df_NST_counts = pd.DataFrame(value_counts)\n df_unique_NST_comments = df_NST_counts.reset_index()\n df_unique_NST_comments.columns = ['unique_NST_comments', 'counts']\n if from_server:\n return df_unique_NST_comments\n return df_unique_NST_comments.to_dict('records')\n\n# API: 3. To get the score output\n\n\ndef nst_re_score(data):\n approved = data[\"approved\"]\n if not data[\"df_unique_NST_comments\"]:\n df_unique_NST_comments = get_default_comments(\n data['NST Type'], data['NST Group'], data['SubRegion'] if \"SubRegion\" in data else None, from_server=True)\n else:\n df_unique_NST_comments = pd.DataFrame(data[\"df_unique_NST_comments\"])\n zpred = evaluate_approval(\n approved, df_unique_NST_comments, vectorizer, xgb_model)\n if zpred[0, 1] > 0.90:\n text = 'Approval Chances More'\n elif 0.6 < zpred[0, 1] <= 0.9:\n text = 'Not Sure'\n else:\n text = 'Approval Denial More!!'\n return {\n \"label\": text,\n \"score\": zpred\n }\n","repo_name":"abman-coder/maipoc-main","sub_path":"backend/services/nst_model_v1.py","file_name":"nst_model_v1.py","file_ext":"py","file_size_in_byte":10372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"25"} +{"seq_id":"17566851843","text":"# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport datetime\n\nimport sisppeo\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'SISPPEO'\ncopyright = '2020-%s Arthur Coqué, Pôle OFB-INRAE ECLA, UR RECOVER' % datetime.datetime.now().year\nauthor = 'Arthur Coqué'\n\n# The full version, including alpha/beta/rc tags\nrelease = '1.1.0'\n\nroot_doc = 'index'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinx_copybutton',\n 'sphinx_panels'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = \"utf-8\"\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Configuration -----------------------------------------------------------\n\nautosummary_generate = True\nautodoc_typehints = 'none'\n\nnapoleon_numpy_docstring = False\nnapoleon_preprocess_types = True\n\ncopybutton_prompt_text = r\">>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}: \"\ncopybutton_prompt_is_regexp = True\n\npanels_add_bootstrap_css = False\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\n\nhtml_title = ''\nhtml_logo = '_static/SISPPEO.png'\nhtml_favicon = '_static/favicon.ico'\nhtml_last_updated_fmt = '%Y-%m-%d'\nhtmlhelp_basename = 'sisppeo'\n\nhtml_theme_options = dict(\n github_url='https://github.com/inrae/SISPPEO'\n)\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n \"css/custom.css\",\n \"css/getting_started.css\"\n]\n","repo_name":"inrae/SISPPEO","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"25"} +{"seq_id":"43718015026","text":"import os\nimport sys\nimport glob\nimport re\nimport cgi\nimport zlib\nfrom utils.repertoire import Repertoire\nclass Audit:\n\n def __init__(self):\n \"\"\"comments\"\"\"\n\n # Function that counts the namespaces and modules\n def nbrNamespaceAndModule(self, path, dossierLog):\n retour = \"\"\n rep = Repertoire()\n codePoolDirs = os.listdir(path + 'app/code/')\n nombreDeModulesTotal = 0\n nombreDeNamespacesTotal = 0\n for codePool in codePoolDirs:\n namespaceDirs = os.listdir(path + 'app/code/' + codePool + '/')\n nombreDeNamespaces = rep.countFolders(path + 'app/code/' + codePool + '/')\n nombreDeModules = 0\n nombreDeNamespacesTotal += nombreDeNamespaces\n for namespace in namespaceDirs:\n nombreDeModules += rep.countFolders(path + 'app/code/' + codePool + '/' + namespace + '/')\n nombreDeModulesTotal = nombreDeModulesTotal + nombreDeModules\n retour += \"- codePool : \" + codePool + \" (\" + str(nombreDeNamespaces) + \" namespaces, \" + str(\n nombreDeModules) + \" modules) \\n\"\n retour += \"\\n## Total : \" + str(nombreDeNamespacesTotal) + \" namespaces and \" + str(nombreDeModulesTotal) + \" modules\\n\"\n retour += \"\\n\"\n\n return retour\n\n # Function that analyses the code and the templates\n def analyserLeCode(self, path, dossierLog, tab):\n # BEGIN LOADS IN 
TEMPLATES\n rep = Repertoire()\n if (tab is None):\n results = {}\n # code\n results['code_search_for_new'] = []\n results['code_global_php'] = []\n results['code_mysql'] = []\n results['code_logs'] = []\n results['code_search_for_load_in_loop'] = []\n # templates\n results['template_search_for_load'] = []\n results['template_search_for_getblock'] = []\n results['template_search_for_createblock'] = []\n results['template_search_for_new'] = []\n results['template_global_php'] = []\n results['template_mysql'] = []\n results['template_logs'] = []\n results['template_search_for_load_in_loop'] = []\n else:\n results = tab\n\n dirs = os.listdir(path)\n for ligne in dirs:\n if os.path.isdir(path + ligne):\n self.analyserLeCode(path + ligne + \"/\", dossierLog, results)\n else:\n # here we are on each file of the folder\n if (re.search(r\"app\", path + ligne)):\n # Magento core code is skipped\n if (re.search(r\"app\\/code\\/core\", path + ligne)):\n continue\n\n # if inside app/code\n if (re.search(r\"app\\/code\", path + ligne)):\n\n # here we are on each PHP file\n if (ligne.endswith('.php')):\n\n # new in the code\n search_for_new = self.searchForNew(path + ligne, dossierLog, 1)\n if (len(search_for_new) != 0):\n results['code_search_for_new'].append(search_for_new)\n\n # load in a loop in the code\n search_for_code_load_in_loop = self.searchForLoadInLoop(path + ligne, dossierLog)\n if (len(search_for_code_load_in_loop) != 0):\n results['code_search_for_load_in_loop'].append(search_for_code_load_in_loop)\n\n # PHP superglobals in the code\n search_for_globalphp = self.searchForPhpGlobals(path + ligne, dossierLog)\n if (len(search_for_globalphp) != 0):\n results['code_global_php'].append(search_for_globalphp)\n\n # mysql_ functions in the code\n search_for_mysql = self.searchForMysql(path + ligne, dossierLog)\n if (len(search_for_mysql) != 0):\n results['code_mysql'].append(search_for_mysql)\n\n # log/debug functions in the code\n search_for_logs = self.searchForLogs(path + ligne, dossierLog)\n if (len(search_for_logs) != 0):\n results['code_logs'].append(search_for_logs)\n\n # if inside app/design\n if (re.search(r\"app\\/design\", path + ligne)):\n\n # here we are on each template file\n if (ligne.endswith('.phtml')):\n\n # load in the templates\n search_for_load = self.searchForLoad(path + ligne, dossierLog)\n if (len(search_for_load) != 0):\n results['template_search_for_load'].append(search_for_load)\n\n # load in a loop in the templates\n search_for_template_load_in_loop = self.searchForLoadInLoop(path + ligne, dossierLog)\n if (len(search_for_template_load_in_loop) != 0):\n results['template_search_for_load_in_loop'].append(search_for_template_load_in_loop)\n\n # getBlock in the templates\n search_for_getblock = self.searchForGetblock(path + ligne, dossierLog)\n if (len(search_for_getblock) != 0):\n results['template_search_for_getblock'].append(search_for_getblock)\n\n # createBlock in the templates\n search_for_createblock = self.searchForCreateblock(path + ligne, dossierLog)\n if (len(search_for_createblock) != 0):\n results['template_search_for_createblock'].append(search_for_createblock)\n\n # new in the templates\n search_for_new = self.searchForNew(path + ligne, dossierLog, 0)\n if (len(search_for_new) != 0):\n results['template_search_for_new'].append(search_for_new)\n\n # PHP superglobals in the templates\n search_for_globalphp = self.searchForPhpGlobals(path + ligne, dossierLog)\n if (len(search_for_globalphp) != 0):\n 
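# record each offending superglobal access (path, line number, snippet) for the report\n 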
results['template_global_php'].append(search_for_globalphp)\n\n # mysql_ functions in the templates\n search_for_mysql = self.searchForMysql(path + ligne, dossierLog)\n if (len(search_for_mysql) != 0):\n results['template_mysql'].append(search_for_mysql)\n\n # log/debug functions in the templates\n search_for_logs = self.searchForLogs(path + ligne, dossierLog)\n if (len(search_for_logs) != 0):\n results['template_logs'].append(search_for_logs)\n\n return results\n\n #\n # Function that spots load() calls in the templates\n #\n def searchForLoad(self, path, dossierLog):\n print(path)\n fichier = open(path, 'r', encoding=\"utf-8\")\n nbrLigne = 0\n nbrLoads = 0\n retoursAll = []\n try:\n for ligne in fichier:\n nbrLigne += 1\n result = re.search(r\"->load\\(\", ligne)\n if result is not None:\n retours = {}\n retours['path'] = path\n retours['ligne'] = str(nbrLigne)\n retours['contents'] = (ligne[0:50].strip(\" \\n\\r\"))\n retoursAll.append(retours)\n nbrLoads += 1\n except:\n print(\"error\")\n return retoursAll\n\n #\n # Function that spots getBlock() calls in the templates\n #\n def searchForGetblock(self, path, dossierLog):\n fichier = open(path, 'r',encoding=\"utf-8\")\n nbrLigne = 0\n nbrLoads = 0\n retoursAll = []\n try:\n for ligne in fichier:\n nbrLigne += 1\n result = re.search(r\"->getBlock\\(\", ligne)\n if result is not None:\n retours = {}\n retours['path'] = path\n retours['ligne'] = str(nbrLigne)\n retours['contents'] = (ligne[0:50].strip(\" \\n\\r\"))\n retoursAll.append(retours)\n nbrLoads += 1\n except:\n print('error')\n return retoursAll\n\n #\n # Function that spots PHP superglobal accesses in the templates\n #\n def searchForPhpGlobals(self, path, dossierLog):\n fichier = open(path, 'r', encoding=\"utf-8\")\n nbrLigne = 0\n nbrLoads = 0\n retoursAll = []\n try:\n for ligne in fichier:\n nbrLigne += 1\n post = re.search(r\"\\$_POST\", ligne)\n get = re.search(r\"\\$_GET\", ligne)\n glo = re.search(r\"\\$_GLOBALS\", ligne)\n sess = re.search(r\"\\$_SESSION\", ligne)\n if ((post is not None) or (get is not None) or (glo is not None) or (sess is not None)):\n retours = {}\n retours['path'] = path\n retours['ligne'] = str(nbrLigne)\n retours['contents'] = (ligne[0:50].strip(\" \\n\\r\"))\n retoursAll.append(retours)\n nbrLoads += 1\n except:\n print('error')\n return retoursAll\n\n #\n # Function that spots load() calls inside loops\n #\n def searchForLoadInLoop(self, path, dossierLog):\n fichier = open(path, 'r', encoding=\"utf-8\")\n nbrLigne = 0\n nbrLoads = 0\n enterInLoop = 0\n detecloop = 0\n inloop = 0\n continuer = 0\n retoursAll = []\n try:\n for ligne in fichier:\n detecloop = 0\n nbrLigne += 1\n result = re.search(r\"->load\\(\", ligne)\n if result is not None:\n filee = open(path, 'r', encoding=\"utf-8\")\n for line in filee:\n for i in range(0, len(line)):\n if len(line) > i + 3 and line[i] == 'f' and line[i + 1] == 'o' and line[i + 2] == 'r' and line[\n i + 3] == '(':\n detecloop = 1\n if (len(line) > i + 4 and line[i] == 'f' and line[i + 1] == 'o' and line[i + 2] == 'r' and line[\n i + 3] == ' ' and line[i + 4] == '('):\n detecloop = 1\n\n if (len(line) > i + 7 and line[i] == 'f' and line[i + 1] == 'o' and line[i + 2] == 'r' and\n line[i + 3] == 'e' and line[i + 4] == 'a' and line[i + 5] == 'c' and line[\n i + 6] == 'h' and line[i + 7] == '('):\n detecloop = 1\n if (len(line) > i + 8 and line[i] == 'f' and line[i + 1] == 'o' and line[i + 2] == 'r' and\n line[i + 3] == 'e' and line[i + 4] == 'a' and line[i + 5] == 'c' and line[\n i + 6] == 'h' and line[i + 7] == ' ' 
and line[i + 8] == '('):\n detecloop = 1\n\n if (len(line) >= i + 2 and line[i] == 'd' and line[i + 1] == 'o' and line[i + 2] == '{'):\n detecloop = 1\n\n if (len(line) >= i + 3 and line[i] == 'd' and line[i + 1] == 'o' and line[i + 2] == ' ' and\n line[i + 3] == '{'):\n detecloop = 1\n\n if (len(line) >= i + 5 and line[i] == 'w' and line[i + 1] == 'h' and line[i + 2] == 'i' and\n line[i + 3] == 'l' and line[i + 4] == 'e' and line[i + 5] == '{'):\n detecloop = 1\n\n if (len(line) >= i + 6 and line[i] == 'w' and line[i + 1] == 'h' and line[i + 2] == 'i' and\n line[i + 3] == 'l' and line[i + 4] == 'e' and line[i + 5] == ' ' and line[\n i + 6] == '('):\n detecloop = 1\n\n if (detecloop == 1):\n if (line[i] == '{'):\n enterInLoop = 1\n detecloop = 0\n inloop = 0\n\n if (enterInLoop == 1):\n\n if (line[i] == '{'):\n inloop = inloop + 1\n\n if (line[i] == '}'):\n inloop = inloop - 1\n if (inloop == 0):\n enterInLoop = 0\n\n if (enterInLoop == 1 and line[i] == '-' and line[i + 1] == '>' and line[i + 2] == 'l' and\n line[i + 3] == 'o' and line[i + 4] == 'a' and line[i + 5] == 'd' and line[\n i + 6] == '('):\n retours = {}\n retours['path'] = path\n retours['ligne'] = str(nbrLigne)\n retours['contents'] = (ligne[0:50].strip(\" \\n\\r\"))\n retoursAll.append(retours)\n continuer = 1\n break\n\n if (continuer == 1):\n break\n\n if (continuer == 1):\n continuer = 0\n continue\n except:\n print(\"\")\n return retoursAll\n\n #\n # Fonction qui repere les fonctions de logs\n #\n def searchForLogs(self, path, dossierLog):\n fichier = open(path, 'r', encoding=\"utf-8\")\n nbrLigne = 0\n nbrLoads = 0\n retoursAll = []\n try:\n for ligne in fichier:\n nbrLigne += 1\n exp1 = re.search(r\"print_r\\(\", ligne)\n exp2 = re.search(r\"var_dump\\(\", ligne)\n exp3 = re.search(r\"mage_debug::dump\\(\", ligne)\n exp4 = re.search(r\"Mage_Debug::dump\\(\", ligne)\n exp5 = re.search(r\"->debug\\(\\);\", ligne)\n exp6 = re.search(r\"debug_backtrace\\(\", ligne)\n exp7 = re.search(r\"debug_print_backtrace\\(\", ligne)\n if ((exp1 is not None) or (exp2 is not None) or (exp3 is not None) or (exp4 is not None) or (\n exp5 is not None) or (exp6 is not None) or (exp7 is not None)):\n retours = {}\n retours['path'] = path\n retours['ligne'] = str(nbrLigne)\n retours['contents'] = (ligne[0:50].strip(\" \\n\\r\"))\n retoursAll.append(retours)\n nbrLoads += 1\n except:\n print('error')\n return retoursAll\n\n #\n # Fonction qui repere les fonctions mysql non magento dans le code\n #\n def searchForMysql(self, path, dossierLog):\n fichier = open(path, 'r', encoding=\"utf-8\")\n nbrLigne = 0\n nbrLoads = 0\n retoursAll = []\n try:\n for ligne in fichier:\n nbrLigne += 1\n mysql = re.search(r\"mysql\\_\", ligne)\n if ((mysql is not None)):\n retours = {}\n retours['path'] = path\n retours['ligne'] = str(nbrLigne)\n retours['contents'] = (ligne[0:50].strip(\" \\n\\r\"))\n retoursAll.append(retours)\n nbrLoads += 1\n except:\n print('error')\n return retoursAll\n\n #\n # Fonction qui repere les createBlock dans les templates\n #\n def searchForCreateblock(self, path, dossierLog):\n fichier = open(path, 'r', encoding=\"utf-8\")\n nbrLigne = 0\n nbrLoads = 0\n retoursAll = []\n try:\n for ligne in fichier:\n nbrLigne += 1\n result = re.search(r\"->createBlock\\(\", ligne)\n resultWidgetName = re.search(\"customer/widget_\", ligne)\n if (result is not None) and (resultWidgetName is None):\n retours = {}\n retours['path'] = path\n retours['ligne'] = str(nbrLigne)\n retours['contents'] = (ligne[0:50].strip(\" \\n\\r\"))\n 
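# the hit (path, line number and a 50-character excerpt) recorded above is appended below
                    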
retoursAll.append(retours)\n                    nbrLoads += 1\n        except:\n            print('error')\n        return retoursAll\n\n    #\n    # Function that spots the new statements in the templates\n    # if code=1 we are in the code\n    # if code=0 we are in the templates\n    #\n    def searchForNew(self, path, dossierLog, code):\n        fichier = open(path, 'r', encoding=\"utf-8\")\n        nbrLigne = 0\n        nbrLoads = 0\n        isInScript = 0\n        isInComment = 0\n        retoursAll = []\n        try:\n            for ligne in fichier:\n                inscript_one = re.search(\"text/javascript\", ligne)\n                inscript_two = re.search(\"script\", ligne)\n                inscript_three = re.search(\"\\n\",\n                '
',\n '↓ Transcript
',\n '
']\n name = old_name = ''\n paragraph_started = False\n for line in self.content:\n line = line.strip()\n if line == '::':\n if paragraph_started:\n lines.append('

')\n paragraph_started = False\n lines.append('

')\n continue\n if '::' not in line:\n if paragraph_started:\n lines.append('
')\n else:\n lines.append('

')\n paragraph_started = True\n if line.startswith(':title:'):\n lines.append('{}'.format(line[7:].strip()))\n else:\n lines.append(line)\n continue\n name, text = line.split('::')\n if not name:\n name = old_name\n else:\n old_name = name\n if paragraph_started:\n lines.append('
')\n else:\n paragraph_started = True\n lines.append('{}: {}'.format(name, text))\n if paragraph_started:\n lines.append('

')\n lines.append(('
'))\n        text_node = nodes.raw('', ''.join(lines), format='html')\n        return [text_node]\n\n\n# this could be done with a standard rest directive, only that one does not generate such an aside tag\n# that directive is also called sidebar, and as its argument you can apparently pass an include directive\n# however, in this own implementation that include has to be converted beforehand, because you want to\n# insert the contents of the referenced file, not the reference to it\n# class MySidebar(Directive):\n#     \"\"\"generates a reference to the page that contains the text for a sidebar\n#     \"\"\"\n#\n#     required_arguments = 1\n#     optional_arguments = 0\n#     final_argument_whitespace = True\n#     option_spec = {'href': directives.unchanged}\n#     has_content = False\n#\n#     def run(self):\n#         \"generate the html\"\n#         text = '

'.format(self.arguments[0])\n#         text_node = nodes.raw('', text, format='html')\n#         return [text_node]\n\n\nclass StrofenTekst(Directive):\n    \"generates a poem or song lyric \"\n\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {'titel': directives.unchanged,\n                   'tekst': directives.unchanged}\n    has_content = True\n\n    def run(self):\n        \"generate the html\"\n        try:\n            lines = ['
'.format(self.soortnaam)]\n except AttributeError:\n lines = ['
']\n for item in self.option_spec:\n if item in self.options:\n lines.append('
{}
'.format(item, self.options[item]))\n lines.append('
')\n end_couplet = in_refrein = False\n for line in self.content:\n if line == '--':\n # if end_couplet:\n # strofe = 'refrein' if in_refrein else 'couplet'\n # lines.append('
'.format(strofe))\n lines.append('
')\n end_couplet = True\n continue\n in_refrein = line.startswith(' ')\n if end_couplet:\n strofe = 'refrein' if in_refrein else 'couplet'\n lines.append('
'.format(strofe))\n end_couplet = False\n lines.append('
{}
'.format(line))\n lines.append('
')\n        text_node = nodes.raw('', '\\n' + '\\n'.join(lines), format='html')\n        return [text_node]\n\n\nclass RoleSpec(Directive):\n    \"generates a cast of characters\"\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {'titel': directives.unchanged,\n                   'tekst': directives.unchanged}\n    has_content = True\n\n    def run(self):\n        \"generate the html\"\n        lines = ['
']\n for item in self.option_spec:\n if item in self.options:\n lines.append('
{}
'.format(item, self.options[item]))\n for line in self.content:\n lines.append('
{}
'.format(line))\n lines.append('
')\n        text_node = nodes.raw('', '\\n' + '\\n'.join(lines), format='html')\n        return [text_node]\n\n\nclass Scene(Directive):\n    \"generates scene text\"\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {}\n    has_content = True\n\n    def run(self):\n        \"generate the html\"\n        lines = ['
']\n open_claus = open_spraak = found_who = False\n for line in self.content:\n try:\n who, what = line.split('::', 1)\n found_who = True\n except ValueError:\n if found_who:\n lines.append('
{}
'.format(line))\n else:\n lines.append('
{}
'.format(line))\n continue\n if open_claus:\n if open_spraak:\n lines.append('
')\n open_spraak = False\n lines.append('
')\n open_claus = False\n if who:\n lines.append('
')\n lines.append('
{}
'.format(who))\n lines.append('
')\n lines.append('
{}
'.format(what))\n open_claus = open_spraak = True\n else:\n # if open_spraak:\n # lines.append('
')\n # open_spraak = False\n lines.append('
{}
'.format(what))\n if open_claus:\n if open_spraak:\n lines.append('
')\n open_spraak = False\n lines.append('
')\n            open_claus = False\n        lines.append('')\n        text_node = nodes.raw('', '\\n' + '\\n'.join(lines), format='html')\n        return [text_node]\n\n\nclass Anno(Directive):\n    \"generates annotation text\"\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {}\n    has_content = True\n\n    def run(self):\n        \"generate the html\"\n        lines = ['
']\n for line in self.content:\n lines.append('

{}

'.format(line))\n lines.append('
')\n        text_node = nodes.raw('', '\\n' + '\\n'.join(lines), format='html')\n        return [text_node]\n\n\nclass Gedicht(StrofenTekst):\n    \"specific subclass\"\n    def __init__(self, *args, **kwargs):\n        self.soortnaam = 'gedicht'\n        super().__init__(*args, **kwargs)\n\n\nclass SongTekst(StrofenTekst):\n    \"specific subclass\"\n    def __init__(self, *args, **kwargs):\n        self.soortnaam = 'songtekst'\n        super().__init__(*args, **kwargs)\n\n\nclass StartBlock(Directive):\n    \"generates a div start with a class \"\n\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {'text': directives.unchanged}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        lines = ['
'.format(self.arguments[0])]\n if 'text' in self.options:\n divclass = 'title'\n lines.append('
{}
'.format(divclass, self.options['text']))\n        text_node = nodes.raw('', '\\n'.join(lines), format='html')\n        return [text_node]\n\n\nclass EndBlock(Directive):\n    \"\"\"generates the div end corresponding to StartBlock\n\n    the argument is not used for now; it is only there for clarification\n    \"\"\"\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text_node = nodes.raw('', '
\n'.format(self.arguments[0]),\n                              format='html')\n        return [text_node]\n\n\nclass StartSideBar(Directive):\n    \"generates tag starts so that a sidebar can be included with an include directive\"\n\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text_node = nodes.raw('', '', format='html')\n        return [text_node]\n\n\nclass MyFooter(Directive):\n    \"\"\"generates a footer that can deal with the css (or the other way around)\n    \"\"\"\n\n    required_arguments = 0\n    optional_arguments = 0  # was 2 but that refers to the number of options\n    final_argument_whitespace = True\n    option_spec = {'text': directives.unchanged,\n                   'mailto': directives.unchanged}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        # unfortunately, literally copying over the footer code does not help either to get\n        # this at the bottom\n        text = self.options.get('text', \"Please don't copy without source attribution. contact me\")\n        mailto = self.options.get('mailto', 'info@magiokis.nl')\n        lines = ('',\n                 '
',\n '
',\n ''.format(text, mailto))\n text_node = nodes.raw('', '\\n'.join(lines), format='html')\n return [text_node]\n # # lines = ['

']\n # lines = ['
']\n # # lines.append('
')\n        # text_node = nodes.raw('', '\\n'.join(lines), format='html')\n        # return [text_node]\n\n\n# ---------------- Directives to realize simple grid960 layout ----------------------\nclass StartCols(Directive):\n    \"\"\"Initialisation of the grid\n\n    required: number of units (12 or 16)\"\"\"\n\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {'grid': directives.nonnegative_int}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text = '
\n'.format(self.arguments[0])\n        text_node = nodes.raw('', text, format='html')\n        return [text_node]\n\n\nclass EndCols(Directive):\n    \"closing off the grid\"\n\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text_node = nodes.raw('', '
\n', format='html')\n        return [text_node]\n\n\nclass FirstCol(Directive):\n    \"\"\"defining the first column\n\n    required: number of units\n    optional: class\"\"\"\n\n    required_arguments = 1\n    optional_arguments = 1\n    final_argument_whitespace = True\n    option_spec = {'grid': directives.nonnegative_int}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        width = self.arguments[0]\n        try:\n            classes = self.arguments[1]\n        except IndexError:\n            classes = ''\n        text = '
\n'.format(width, classes)\n        text_node = nodes.raw('', text, format='html')\n        return [text_node]\n\n\nclass NextCol(Directive):\n    \"\"\"defining the next column\n\n    required: number of units\n    optional: class\"\"\"\n\n    required_arguments = 1\n    optional_arguments = 1\n    final_argument_whitespace = True\n    option_spec = {'grid': directives.nonnegative_int}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        width = self.arguments[0]\n        try:\n            classes = self.arguments[1]\n        except IndexError:\n            classes = ''\n        text = '
\\n
\n'.format(width, classes)\n        text_node = nodes.raw('', text, format='html')\n        return [text_node]\n\n\nclass ClearCol(Directive):\n    \"\"\"closing off a row of columns\"\"\"\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text_node = nodes.raw('', '
\\n
 
\n',\n                             format='html')\n        return [text_node]\n\n\nclass Spacer(Directive):\n    \"\"\"generates an empty line or column\n\n    optional: number of column units. When this is not given, the spacer is generated\n    within the current column; otherwise comparable to firstcol/nextcol\"\"\"\n\n    required_arguments = 0\n    optional_arguments = 1\n    final_argument_whitespace = True\n    option_spec = {'grid': directives.nonnegative_int}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        try:\n            cls = \"grid_{} \".format(self.arguments[0])\n            clr = '
 
\\n'\n except IndexError:\n cls = clr = ''\n text = '
 
\n{}'.format(cls, clr)\n        text_node = nodes.raw('', text, format='html')\n        return [text_node]\n\n\n# ---------------- Directives for BitBucket site layout ------------------------\nclass StartBody(Directive):\n    \"\"\"generates the start of the container div and the header div\n    \"\"\"\n    required_arguments = 0\n    optional_arguments = 1\n    final_argument_whitespace = True\n    option_spec = {'header': directives.unchanged}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        header_text = self.options.get('header', '')  # \"Albert Visser's programmer's blog\"\n        text = '
'\n if header_text:\n text += '
{}
'.format(header_text)\n        text_node = nodes.raw('', text, format='html')\n        return [text_node]\n\n\nclass NavLinks(Directive):\n    \"\"\"Small menu with links for navigation\n\n    Based on the content, for example:\n    .. navlinks::\n\n        `linktekst `_\n        `linktekst `_\n        `menutekst`\n        . `linktekst `_\n        . `linktekst `_\n    note: this only works in combination with the accompanying CSS\n    \"\"\"\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    has_content = True\n\n    def run(self):\n        \"generate the html\"\n        text = ['
    ']\n in_submenu = False\n for line in self.content:\n if line.startswith('`'):\n if in_submenu:\n text.append('
')\n in_submenu = False\n line = line.strip()[1:-1]\n if '<' in line: # menuoptie met tekst en link\n menu, target = line.split('<')\n text.append('
  • {}
  • '.format(target[:-2],\n menu.strip()))\n else: # alleen tekst: submenu\n text.append('
  • {}
      '.format(line))\n in_submenu = True\n elif line.startswith('. `') and '<' in line: # submenuoptie met tekst en link\n if in_submenu:\n menu, target = line.strip()[3:-3].split('<')\n text.append('
    • {}
    • '.format(target, menu.strip()))\n else:\n self.error('Submenu entry before main menu: `{}`'.format(line.strip()))\n else: # error in content\n self.error('Illegal content: `{}`'.format(line.strip()))\n text.append('
  • ')\n        text_node = nodes.raw('', ''.join(text), format='html')\n        return [text_node]\n\n\nclass TextHeader(Directive):\n    \"\"\"generates the beginning of the actual text\n    \"\"\"\n    required_arguments = 0\n    optional_arguments = 1\n    final_argument_whitespace = True\n    option_spec = {'text': directives.unchanged}\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text = ['
    ']\n try:\n title_text = self.arguments[0]\n except IndexError:\n title_text = \" \"\n text.append('

    {}

    '.format(title_text))\n # datum = datetime.datetime.today().strftime('%A, %B %d, %Y')\n # text.append('

    last modified on {}

    '.format(datum))\n        text_node = nodes.raw('', ''.join(text), format='html')\n        return [text_node]\n\n\nclass StartMarginless(Directive):\n    \"\"\"suspending the margins for e.g. a page-wide picture\n    \"\"\"\n    required_arguments = 0\n    final_argument_whitespace = True\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text = '
    '\n        text_node = nodes.raw('', text, format='html')\n        return [text_node]\n\n\nclass EndMarginless(Directive):\n    \"\"\"putting the margins back - actually should not work because you use an id\n    more than once\n    \"\"\"\n    required_arguments = 0\n    final_argument_whitespace = True\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text = '
    '\n        text_node = nodes.raw('', text, format='html')\n        return [text_node]\n\n\nclass BottomNav(Directive):\n    \"\"\"Extra small menu with navigation links at the bottom\n    \"\"\"\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = True\n    has_content = True\n\n    def run(self):\n        \"generate the html\"\n        text = ['
      ']\n for line in self.content:\n line = line.strip()\n if line.startswith('`') and ' <' in line and line.endswith(\">`_\"):\n line = line[1:-3]\n linktext, link = line.split(' <', 1)\n line = '{}'.format(link, linktext)\n else:\n line = line\n text.append('
    • {}
    • '.format(line))\n text.append('
    ')\n        text_node = nodes.raw('', ''.join(text), format='html')\n        return [text_node]\n\n\nclass EndBody(Directive):\n    \"\"\"generates the end of the body div and the container div\n    \"\"\"\n    required_arguments = 0\n    final_argument_whitespace = True\n    ## option_spec = {'grid': directives.nonnegative_int,\n    ##                'next': directives.unchanged,\n    ##                'ltext': directives.unchanged,\n    ##                }\n    has_content = False\n\n    def run(self):\n        \"generate the html\"\n        text = '
    '\n text_node = nodes.raw('', text, format='html')\n return [text_node]\n","repo_name":"albertvisser/rst2html","sub_path":"app/rst2html_directives.py","file_name":"rst2html_directives.py","file_ext":"py","file_size_in_byte":32290,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"2321575616","text":"\"\"\"Boundary value problem solvers implementation.\"\"\"\n\nimport scipy\n\nimport geomstats.backend as gs\nfrom geomstats.numerics._common import result_to_backend_type\n\n\nclass ScipySolveBVP:\n \"\"\"Wrapper for scipy.integrate.solve_bvp.\"\"\"\n\n def __init__(self, tol=1e-3, max_nodes=1000, bc_tol=None, save_result=False):\n self.tol = tol\n self.max_nodes = max_nodes\n self.bc_tol = bc_tol\n\n self.save_result = save_result\n self.result_ = None\n\n def integrate(self, fun, bc, x, y, fun_jac=None, bc_jac=None):\n \"\"\"Solve a boundary value problem for a system of ODEs.\"\"\"\n\n def fun_(t, state):\n return fun(t, gs.from_numpy(state))\n\n def bc_(state_0, state_1):\n return bc(gs.from_numpy(state_0), gs.from_numpy(state_1))\n\n if fun_jac is not None:\n\n def fun_jac_(t, state):\n return fun_jac(t, gs.from_numpy(state))\n\n else:\n fun_jac_ = None\n\n result = scipy.integrate.solve_bvp(\n fun_,\n bc_,\n x,\n y,\n tol=self.tol,\n max_nodes=self.max_nodes,\n fun_jac=fun_jac_,\n bc_jac=bc_jac,\n bc_tol=self.bc_tol,\n )\n\n result = result_to_backend_type(result)\n if self.save_result:\n self.result_ = result\n\n return result\n","repo_name":"geomstats/geomstats","sub_path":"geomstats/numerics/bvp.py","file_name":"bvp.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":1022,"dataset":"github-code","pt":"28"} +{"seq_id":"36400289680","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n========== SIMPLE GENETIC ALGORITHM (SGA) ==========\n\nCreated on Wed Nov 6 19:36:44 2019\n\n@author: Juan Camilo Díaz \n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sb\nfrom matplotlib import pyplot as plt\nimport random as rnd\n\nclass genetic_algorithm:\n\n \"\"\" SIMPLE GENETIC ALGORITHM\n\n * : Should be a class that must have a 'fitness_function' function inside it. 
And this function\n    must return a numerical value (float or int), and it should receive as input a solution.\n    \n    EX: \n        class XX:\n            def __init__(self):\n                pass\n\n            def fitness_function(solution):\n                return solution**2\n\n    * optional_prm: Dictionary that must follow the following structure\n\n        optionals = {\n            \"method\"           : \"Permutation\",   # Type of problem: \"Permutation\" or \"Combinatorial\"\n            \"optimize\"         : \"min\",           # \"min\" or \"max\" \n            \"cross_prob\"       : 0.87,            # Probability of performing Crossover Operation\n            \"mutat_prob\"       : 0.50,            # Probability of performing Mutation Operation\n            \"cross_method\"     : \"Order 1\",       # Method of Crossover: \"Order 1\", \"n points\", \"Uniform\", \"\"\n            \"mutation_method\"  : \"swap\",          # Method of mutation: \"Swap\", \"Scramble\", \"Inversion\", \"\"\n            \"selection_method\" : \"roulette\",      # Method of selection: \"Roulette\", \"Rank\", \"Tournament\"\n            \"generations\"      : 10,              # Number of generations\n            \"pop_size\"         : 50,              # Population Size\n            \"elitism\"          : False,           # Perform elitism\n            \"show_iters\"       : True             # Show iterations (generations)\n        }\n    \"\"\"\n\n    def __init__(self, model_class, optional_prm):\n        self.optional_prm = optional_prm\n        self.model_class = model_class\n    \n    # Create random solution \n    def random_solution(self):\n        \"\"\" \n        Creates a random solution depending on the type of problem. \n        Method could be: \n        - Permutations: Shuffle the decision variable vector. \n        - Combinatorial: Randomize values among the decision variable vector. \n        \"\"\"\n\n        if self.optional_prm[\"method\"].lower() == \"permutation\":\n            rnd_sol = self.model_class.decision_variables.copy()\n            rnd.shuffle(rnd_sol)\n            return rnd_sol\n        elif self.optional_prm[\"method\"].lower() == \"combinatorial\":\n            decision_variables = self.model_class.decision_variables.copy() \n            rnd_sol = [decision_variables[rnd.randint(0, len(decision_variables) - 1)] for i in range(len(decision_variables))]\n            return rnd_sol\n    \n    def initialize(self):\n        \"\"\" Creates the initial population bag. \"\"\"\n\n        pop_bag = []\n        for i in range(self.optional_prm[\"pop_size\"]):\n            pop_bag.append(self.random_solution())\n        \n        return np.array(pop_bag)\n    \n    def eval_fit_population(self, pop_bag):\n        \"\"\" Evaluates the fitness of each element in population bag. \"\"\"\n\n        result = {}\n        fit_vals_lst = []\n        solutions = []\n        for solution in pop_bag: \n            fit_vals_lst.append(self.model_class.fitness_function(solution))\n            solutions.append(solution)\n        \n        result[\"fit_vals\"] = fit_vals_lst\n\n        if self.optional_prm[\"optimize\"].lower() == \"min\":\n            # If the problem is minimization then the lower fitness values should have the highest probability of being selected\n            min_wgh = [np.max(list(result[\"fit_vals\"]))-i for i in list(result[\"fit_vals\"])]\n            result[\"fit_wgh\"] = [i/sum(min_wgh) for i in min_wgh]\n        elif self.optional_prm[\"optimize\"].lower() == \"max\":\n            result[\"fit_wgh\"] = [i/sum(list(result[\"fit_vals\"])) for i in list(result[\"fit_vals\"])]\n\n        result[\"solution\"] = np.array(solutions)\n        \n        return result\n    \n    def pickOne(self, pop_bag):\n        \"\"\" Pick one solution from the population bag using the selection method. 
\"\"\"\n\n # Evaluate the fitness of the population bag\n fit_bag_evals = self.eval_fit_population(pop_bag)\n\n if self.optional_prm[\"selection_method\"].lower() == \"roulette\":\n \n n = len(fit_bag_evals[\"solution\"])\n maxIts = 1000\n c = 0\n while c <= maxIts:\n rnIndex = rnd.randint(0, n-1)\n rnPick = fit_bag_evals[\"fit_wgh\"][rnIndex]\n r = rnd.random()\n if r <= rnPick:\n pickedFitness = fit_bag_evals[\"fit_vals\"][rnIndex]\n pickedSol = fit_bag_evals[\"solution\"][rnIndex]\n #print(f\"ProbRnd: {r} <= Choosen: {rnPick} | fitness : {pickedFitness}\")\n return pickedSol\n \n c += 1\n \n elif self.optional_prm[\"selection_method\"].lower() == \"rank\":\n pass\n else:\n print(\"Method of selection not selected.\")\n return [] \n \n print(\"It was not possible to pick one solution from the population bag.\")\n return []\n \n def crossover(self, solA, solB):\n \"\"\" Perform Crossover GA Operation depending on the method. \"\"\"\n\n # ========= For Permutations ==========\n # Order 1 Crossover Operation: Take a random subsection of the first parent, then paste that in the child and then start to\n # include ordered the elements of parent 2 that are not in that subsection of parent 1.\n if self.optional_prm[\"cross_method\"].lower() == \"order 1\":\n n = len(solA)\n # Create an empty child -> Ch = [nan nan nan nan nan nan]\n child = [np.nan for i in range(n)] \n # Number of elements to take as the subsection.\n blockA, str_pnt, end_pnt = self.SubSection(solA)\n #print(f\"{solA} | De {str_pnt} hasta {end_pnt} => {blockA}\")\n\n # Input that subsection to the child -> Ch = [nan nan 5 2 4 nan] \n child[str_pnt:end_pnt] = blockA\n for i in range(n):\n if list(blockA).count(solB[i]) == 0:\n for j in range(n):\n if np.isnan(child[j]):\n child[j] = solB[i]\n break\n #print(f\"Parent A: {solA} | Parent B: {solB} | Child: {child}\")\n return child\n \n def mutation(self, sol):\n \"\"\" Perform Mutation GA Operation depending on method. \"\"\"\n\n n = len(sol)\n\n # ========= For Permutations ==========\n # Swap : Exchange the positions between to elements. 
[3 4 2 1] --> [3 2 4 1] where 4 <-> 2 \n if self.optional_prm[\"mutation_method\"].lower() == \"swap\":\n pos_1 = rnd.randint(0,n-1)\n c = 0\n while c <= 1000:\n pos_2 = rnd.randint(0,n-1)\n if pos_2 != pos_1:\n result = self.swap(sol, pos_1, pos_2)\n return result\n break\n c += 1\n\n # Scramble : Take a subsection and shuffle it.\n elif self.optional_prm[\"mutation_method\"].lower() == \"scramble\":\n result = sol.copy()\n subsec, pos1, pos2 = self.SubSection(sol)\n rnd.shuffle(subsec)\n result[pos1:pos2] = subsec\n return result\n\n # Inversion : Take a subsection and reverse it.\n elif self.optional_prm[\"mutation_method\"].lower() == \"inversion\":\n result = sol.copy()\n subsec, pos1, pos2 = self.SubSection(sol)\n subsec.reverse()\n result[pos1:pos2] = subsec\n return result\n\n \n def run(self):\n \"\"\" Execute the Simple Genetic Algorithm SGA\"\"\"\n \n # Initialize the population bag with random elements\n pop_bag = self.initialize()\n \n # Iterate for all generations\n for g in range(self.optional_prm[\"generations\"]):\n\n # Show step of Iteration\n perc_gen = np.round(100*g/self.optional_prm[\"generations\"], 0)\n if self.optional_prm[\"show_iters\"]:\n print(f\"\\nGeneration: {g} | {perc_gen}%\")\n \n # Calculate the fitness of elements in population bag\n pop_bag_fit = self.eval_fit_population(pop_bag)\n \n # Best so far\n if self.optional_prm[\"optimize\"].lower() == \"min\":\n best_fit = np.min(pop_bag_fit[\"fit_vals\"])\n elif self.optional_prm[\"optimize\"].lower() == \"max\":\n best_fit = np.max(pop_bag_fit[\"fit_vals\"])\n \n best_fit_index = pop_bag_fit[\"fit_vals\"].index(best_fit)\n best_solution = pop_bag_fit[\"solution\"][best_fit_index]\n \n if g == 0:\n best_fit_global = best_fit\n best_solution_global = best_solution\n else:\n if self.optional_prm[\"optimize\"].lower() == \"min\":\n if(best_fit <= best_fit_global):\n best_fit_global = best_fit\n best_solution_global = best_solution\n elif self.optional_prm[\"optimize\"].lower() == \"max\":\n if(best_fit >= best_fit_global):\n best_fit_global = best_fit\n best_solution_global = best_solution\n \n if self.optional_prm[\"show_iters\"]:\n print(f\"Best solution so far -> {best_solution_global} | Fitness: {best_fit_global}\")\n \n # Create the new population bag\n new_pop_bag = []\n for i in range(self.optional_prm[\"pop_size\"]):\n \n # Pick 2 parents from bag using the method of selection\n pA = self.pickOne(pop_bag_fit)\n pB = self.pickOne(pop_bag_fit)\n \n new_element = pA \n \n # Crossover the parents if rnd <= Rcros \n rndCross = rnd.random()\n if rndCross <= self.optional_prm[\"cross_prob\"]:\n new_element = self.crossover(pA,pB)\n \n # Mutate the child of that crossover (or the parentA) if rnd <= Rmut\n rndMutat = rnd.random()\n if rndMutat <= self.optional_prm[\"mutat_prob\"]:\n new_element = self.mutation(new_element) \n \n new_pop_bag.append(new_element)\n \n pop_bag = np.array(new_pop_bag)\n \n return best_fit_global, best_solution_global\n \n \n def swap(self, sol, posA, posB):\n \"\"\" Swap positions of elements in a vector. \"\"\"\n result = sol.copy()\n elA = sol[posA]\n elB = sol[posB]\n result[posA] = elB\n result[posB] = elA\n return result\n\n def SubSection(self, vector):\n \"\"\" Get a subsection of a vector. \"\"\"\n n = len(vector)\n # Number of elements to take as the subsection. 
Between 10% and 90% of the elements.\n num_els = np.ceil(n*(rnd.randint(10,90)/100))\n # Starting point of the subsection\n str_pnt = rnd.randint(0, n-2)\n # Ending point of the subsection\n end_pnt = n if int(str_pnt+num_els) > n else int(str_pnt+num_els)\n # Subsection of parent A\n blockA = list(vector[str_pnt:end_pnt])\n \n return blockA, str_pnt, end_pnt\n\n\n\n# ========================================= \n","repo_name":"juancadh/mycode","sub_path":"genetic_algorithm/genetic_algorithm.py","file_name":"genetic_algorithm.py","file_ext":"py","file_size_in_byte":11922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"18784518192","text":"#!/usr/bin/python3\ntry:\n import yara\nexcept:\n raise ModuleNotFoundError\n\nimport os\n\ndef checkRule(filem, rulem):\n rulesm = yara.compile(file=rulem)\n matchm = rulesm.match(filem.name)\n return matchm\n\ndef cultivate(pcappath, rulepath):\n infectesFiles = {}\n pcapfiles = os.listdir(pcappath)\n rulefiles = os.listdir(rulepath)\n\n for pfile in pcapfiles:\n if pfile.endswith(\".pcapng\"):\n pfile = open(pcappath + pfile, \"rb\")\n for rfile in rulefiles:\n if rfile.endswith(\".yar\"):\n rfile = open(rulepath + rfile)\n matcher = checkRule(pfile, rfile)\n if matcher:\n # print(pfile.name,\" is infected with \",rfile.name)\n #matcher[0] contains rule name\n infectesFiles.setdefault(pfile.name, []).append(str(matcher[0]))\n # print(infectesFiles)\n rfile.close()\n pfile.close()\n return infectesFiles\n\nif __name__ == '__main__':\n pass\n","repo_name":"karthikgenius/YaraCapper","sub_path":"yaradriver.py","file_name":"yaradriver.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"28"} +{"seq_id":"8688235339","text":"import webbrowser, sys\nimport requests\n#for Cygwin add \n#export BRWOSER=cygwin\nwebbrowser.open('http://inventwithpython.com/')\n\nif len(sys.argv) > 1 :\n\t#get address from command line\n\taddress=' '.join(sys.argv[1:])\n\t\n\n\nres = requests.get('https://automatetheboringstuff.com/files/rj.txt')\ntype(res)\n","repo_name":"carlycs/python-pandas","sub_path":"scrapeIt.py","file_name":"scrapeIt.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"71346408396","text":"import json\nimport socket\nimport traceback\nimport time\n\n# Wait following seconds below sending the controller request\ntime.sleep(10)\n\n# Read Message Template\nmsg = json.load(open(\"Message.json\"))\n\n# Initialize\nsender = \"Controller\"\ntarget = \"Node1\"\nport = 5555\n\n# Request\nmsg['sender_name'] = sender\nmsg['request'] = \"LEADER_INFO\"\n\n\n# Socket Creation and Binding\nskt = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\nskt.bind((sender, port))\n\n# Send Message\ntry:\n # Encoding and sending the message\n skt.sendto(json.dumps(msg).encode('utf-8'), (target, port))\nexcept:\n # socket.gaierror: [Errno -3] would be thrown if target IP container does not exist or exits, write your listener\n print(f\"ERROR WHILE SENDING REQUEST ACROSS : {traceback.format_exc()}\")\nj = 0\nl = 0\nwhile True:\n\n message, addr = skt.recvfrom(1024)\n decoded_msg = json.loads(message.decode('utf-8'))\n if decoded_msg[\"request\"] == \"LEADER_INFO\":\n target3 = decoded_msg[\"value\"]\n msg['request'] = \"STORE\"\n msg['key'] = \"key2\"\n msg['value'] = \"value2\"\n msg_bytes4 = json.dumps(msg).encode('utf-8')\n 
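# forward the STORE request to the leader address learned from the LEADER_INFO
        # reply, then (after the pause below) read the pair back with RETRIEVE
        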
skt.sendto(msg_bytes4, (target3, 5555))\n time.sleep(5)\n msg['request'] = \"RETRIEVE\"\n print(f\"New Request Created : {msg}\")\n msg_bytes5 = json.dumps(msg).encode('utf-8')\n skt.sendto(msg_bytes5, (target3, 5555))\n\n if decoded_msg['request'] == 'RETRIEVE':\n logs = decoded_msg[\"value\"]\n for i in logs:\n print(f\"The Retrieved Key is: {i['Key']}, and Value is: {i['value']}\")\n if j == 0:\n target1 = decoded_msg['value']\n msg['request'] = \"STORE\"\n print(f\"New Request Created : {msg}\")\n msg['key'] = \"key1\"\n msg['value'] = \"value1\"\n msg_bytes = json.dumps(msg).encode('utf-8')\n j += 1\n skt.sendto(msg_bytes, (target1, 5555))\n time.sleep(5)\n msg['request'] = \"RETRIEVE\"\n print(f\"New Request Created : {msg}\")\n msg_bytes1 = json.dumps(msg).encode('utf-8')\n skt.sendto(msg_bytes1,(target1, 5555))\n msg['request'] = \"TIMEOUT\"\n print(f\"New Request Created : {msg}\")\n msg_bytes2 = json.dumps(msg).encode('utf-8')\n skt.sendto(msg_bytes2, (target1, 5555))\n print(\"SLeeping for 15\")\n time.sleep(20)\n msg['request'] = \"LEADER_INFO\"\n print(f\"New Request Created : {msg}\")\n msg_bytes3 = json.dumps(msg).encode('utf-8')\n skt.sendto(msg_bytes3, (target, 5555))\n\n\n\n\n","repo_name":"kschhajed/Distributed-Systems---RAFT-algorithm","sub_path":"Controller/convert_follower_node1.py","file_name":"convert_follower_node1.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"12168029935","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nfrom torchvision import transforms, datasets\r\n\r\n# Gram Matrix\r\ndef gram(tensor):\r\n B, C, H, W = tensor.shape\r\n x = tensor.view(B, C, H*W)\r\n x_t = x.transpose(1, 2)\r\n return torch.bmm(x, x_t) / (C*H*W)\r\n\r\n# Load image file\r\ndef load_image(path):\r\n # Images loaded as BGR\r\n img = cv2.imread(path)\r\n return img\r\n\r\n# Show image\r\ndef show(img):\r\n # Convert from BGR to RGB\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n \r\n # imshow() only accepts float [0,1] or int [0,255]\r\n img = np.array(img/255).clip(0,1)\r\n \r\n plt.figure(figsize=(10, 5))\r\n plt.imshow(img)\r\n plt.show()\r\n\r\ndef saveimg(img, image_path):\r\n img = img.clip(0, 255)\r\n cv2.imwrite(image_path, img)\r\n\r\n# Preprocessing ~ Image to Tensor\r\ndef itot(img, max_size=None):\r\n # Rescale the image\r\n if (max_size==None):\r\n itot_t = transforms.Compose([\r\n #transforms.ToPILImage(),\r\n transforms.ToTensor(),\r\n transforms.Lambda(lambda x: x.mul(255))\r\n ]) \r\n else:\r\n H, W, C = img.shape\r\n image_size = tuple([int((float(max_size) / max([H,W]))*x) for x in [H, W]])\r\n itot_t = transforms.Compose([\r\n transforms.ToPILImage(),\r\n transforms.Resize(image_size),\r\n transforms.ToTensor(),\r\n transforms.Lambda(lambda x: x.mul(255))\r\n ])\r\n\r\n # Convert image to tensor\r\n tensor = itot_t(img)\r\n\r\n # Add the batch_size dimension\r\n tensor = tensor.unsqueeze(dim=0)\r\n return tensor\r\n\r\n# Preprocessing ~ Tensor to Image\r\ndef ttoi(tensor):\r\n # Add the means\r\n #ttoi_t = transforms.Compose([\r\n # transforms.Normalize([-103.939, -116.779, -123.68],[1,1,1])])\r\n\r\n # Remove the batch_size dimension\r\n tensor = tensor.squeeze()\r\n #img = ttoi_t(tensor)\r\n img = tensor.cpu().numpy()\r\n \r\n # Transpose from [C, H, W] -> [H, W, C]\r\n img = img.transpose(1, 2, 0)\r\n return img\r\n\r\ndef transfer_color(src, dest):\r\n \"\"\"\r\n Transfer Color using YIQ colorspace. 
Useful in preserving colors in style transfer.\r\n This method assumes inputs of shape [Height, Width, Channel] in BGR Color Space\r\n \"\"\"\r\n src, dest = src.clip(0,255), dest.clip(0,255)\r\n \r\n # Resize src to dest's size\r\n H,W,_ = src.shape \r\n dest = cv2.resize(dest, dsize=(W, H), interpolation=cv2.INTER_CUBIC)\r\n \r\n dest_gray = cv2.cvtColor(dest, cv2.COLOR_BGR2GRAY) #1 Extract the Destination's luminance\r\n src_yiq = cv2.cvtColor(src, cv2.COLOR_BGR2YCrCb) #2 Convert the Source from BGR to YIQ/YCbCr\r\n src_yiq[...,0] = dest_gray #3 Combine Destination's luminance and Source's IQ/CbCr\r\n \r\n return cv2.cvtColor(src_yiq, cv2.COLOR_YCrCb2BGR).clip(0,255) #4 Convert new image from YIQ back to BGR\r\n\r\ndef plot_loss_hist(c_loss, s_loss, total_loss, title=\"Loss History\"):\r\n x = [i for i in range(len(total_loss))]\r\n plt.figure(figsize=[10, 6])\r\n plt.plot(x, c_loss, label=\"Content Loss\")\r\n plt.plot(x, s_loss, label=\"Style Loss\")\r\n plt.plot(x, total_loss, label=\"Total Loss\")\r\n \r\n plt.legend()\r\n plt.xlabel('Every 500 iterations')\r\n plt.ylabel('Loss')\r\n plt.title(title)\r\n plt.show()\r\n\r\nclass ImageFolderWithPaths(datasets.ImageFolder):\r\n \"\"\"Custom dataset that includes image file paths. \r\n Extends torchvision.datasets.ImageFolder()\r\n Reference: https://discuss.pytorch.org/t/dataloader-filenames-in-each-batch/4212/2\r\n \"\"\"\r\n # override the __getitem__ method. this is the method dataloader calls\r\n def __getitem__(self, index):\r\n # this is what ImageFolder normally returns \r\n original_tuple = super(ImageFolderWithPaths, self).__getitem__(index)\r\n\r\n # the image file path\r\n path = self.imgs[index][0]\r\n\r\n # make a new tuple that includes original and the path\r\n tuple_with_path = (*original_tuple, path)\r\n return tuple_with_path","repo_name":"rrmina/fast-neural-style-pytorch","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":289,"dataset":"github-code","pt":"28"} +{"seq_id":"10710023033","text":"#!/usr/local/bin/python3\nimport itertools\nfrom copy import copy\n\ndef parse_mem(line: str) -> list[int]:\n return list(map(int, line.split(',')))\n\ndef get_digit(number: int, n: int) -> int:\n return number // 10**n % 10\n\nclass IntcodeVM:\n def __init__(self, initial_mem: list[int]) -> None:\n self.mem: list[int] = copy(initial_mem)\n self.pc: int = 0\n self.input: list[int] = []\n self.output: list[int] = []\n self.halted = False\n self.piped_buffers: list[list[int]] = []\n\n def run(self):\n while not self.halted: self.tick()\n\n def tick(self):\n instruction = self.mem[self.pc]\n opcode = instruction % 100\n modes = [None, get_digit(instruction, 2), get_digit(instruction, 3), get_digit(instruction, 4)]\n\n if opcode == 99: # halt\n self.halted = True\n return\n elif opcode == 1: # add\n a = self.deref(self.pc + 1, modes[1])\n b = self.deref(self.pc + 2, modes[2])\n self.write_to_mem(value=a + b, ptr=self.pc + 3)\n self.pc += 4\n elif opcode == 2: # mul\n a = self.deref(self.pc + 1, modes[1])\n b = self.deref(self.pc + 2, modes[2])\n self.write_to_mem(value=a * b, ptr=self.pc + 3)\n self.pc += 4\n elif opcode == 3: # input\n if len(self.input) == 0:\n return\n v = self.input.pop()\n self.write_to_mem(value=v, ptr=self.pc + 1)\n self.pc += 2\n elif opcode == 4: # output\n v = self.deref(self.pc + 1, modes[1])\n self.output.append(v)\n for buffer in self.piped_buffers:\n buffer.insert(0, v)\n self.pc += 2\n elif opcode == 5: # 
jump-if-true\n v = self.deref(self.pc + 1, modes[1])\n jmp_target = self.deref(self.pc + 2, modes[2])\n if v != 0:\n self.pc = jmp_target\n else:\n self.pc += 3\n elif opcode == 6: # jump-if-false\n v = self.deref(self.pc + 1, modes[1])\n jmp_target = self.deref(self.pc + 2, modes[2])\n if v == 0:\n self.pc = jmp_target\n else:\n self.pc += 3\n elif opcode == 7: # less than\n a = self.deref(self.pc + 1, modes[1])\n b = self.deref(self.pc + 2, modes[2])\n self.write_to_mem(value=1 if a < b else 0, ptr=self.pc + 3)\n self.pc += 4\n elif opcode == 8: # equals\n a = self.deref(self.pc + 1, modes[1])\n b = self.deref(self.pc + 2, modes[2])\n self.write_to_mem(value=1 if a == b else 0, ptr=self.pc + 3)\n self.pc += 4\n else:\n print(f'unknown opcode {opcode}')\n\n def deref(self, ptr: int, mode: int) -> int:\n if mode == 0: # position mode\n return self.mem[self.mem[ptr]]\n elif mode == 1: # immediate mode\n return self.mem[ptr]\n else:\n print(f'unknown mode: {mode}')\n\n def write_to_mem(self, value: int, ptr: int) -> None:\n target = self.mem[ptr]\n self.mem[target] = value\n\n def pipe(self, buffer: list[int]) -> None:\n self.piped_buffers.append(buffer)\n\n def send_input(self, value: int) -> None:\n self.input.insert(0, value)\n\ndef main():\n file = open('input')\n lines = file.read().splitlines()\n initial_mem = parse_mem(lines[0])\n\n # part 1\n highest_signal = -1\n for phase_setting_sequence in itertools.permutations(range(0, 4+1), 5):\n input_value = 0\n for idx in range(5):\n vm = IntcodeVM(initial_mem)\n vm.input = [input_value, phase_setting_sequence[idx]]\n vm.run()\n input_value = vm.output[-1]\n\n highest_signal = max(input_value, highest_signal)\n\n print(highest_signal)\n\n # part 2\n highest_signal = -1\n for phase_setting_sequence in itertools.permutations(range(5, 9+1), 5):\n vm_list = list(map(lambda _: IntcodeVM(initial_mem), range(5)))\n\n for idx in range(5):\n vm_list[idx].send_input(phase_setting_sequence[idx])\n\n vm_list[0].send_input(0)\n\n for idx in range(5):\n current_vm = vm_list[idx]\n next_vm = vm_list[(idx + 1) % 5]\n current_vm.pipe(next_vm.input)\n\n while not vm_list[-1].halted:\n for vm in vm_list: vm.tick()\n\n highest_signal = max(vm_list[-1].output[-1], highest_signal)\n\n print(highest_signal)\n\n\nif __name__ == '__main__': main()\n","repo_name":"Deseteral/advent-of-code","sub_path":"2019/07/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"17869296719","text":"import time\n\nfrom instruments.units import ureg as u\n\nfrom instruments.abstract_instruments import PowerSupply\nfrom instruments.generic_scpi import SCPIInstrument\nfrom instruments.util_fns import (\n int_property,\n unitful_property,\n bounded_unitful_property,\n bool_property,\n split_unit_str,\n assume_units,\n)\n\n# CLASSES #####################################################################\n\n\nclass HPe3631a(PowerSupply, PowerSupply.Channel, SCPIInstrument):\n\n \"\"\"\n The HPe3631a is a three channels voltage/current supply.\n - Channel 1 is a positive +6V/5A channel (P6V)\n - Channel 2 is a positive +25V/1A channel (P25V)\n - Channel 3 is a negative -25V/1A channel (N25V)\n\n This module is designed for the power supply to be set to\n a specific channel and remain set afterwards as this device\n does not offer commands to set or read multiple channels\n without calling the channel set command each time (0.5s). 
It is\n possible to call a specific channel through psu.channel[idx],\n which will automatically reset the channel id, when necessary.\n\n This module is likely to work as is for the Agilent E3631 and\n Keysight E3631 which seem to be rebranded but identical devices.\n\n Example usage:\n\n >>> import instruments as ik\n >>> psu = ik.hp.HPe3631a.open_gpibusb(\"/dev/ttyUSB0\", 10)\n >>> psu.channelid = 2 # Sets channel to P25V\n >>> psu.voltage = 12.5 # Sets voltage to 12.5V\n >>> psu.voltage # Reads back set voltage\n array(12.5) * V\n >>> psu.voltage_sense # Reads back sensed voltage\n array(12.501) * V\n \"\"\"\n\n def __init__(self, filelike):\n super().__init__(filelike)\n self.sendcmd(\"SYST:REM\") # Puts the device in remote operation\n time.sleep(0.1)\n\n # INNER CLASSES #\n\n class Channel:\n \"\"\"\n Class representing a power output channel on the HPe3631a.\n\n .. warning:: This class should NOT be manually created by the user. It is\n designed to be initialized by the `HPe3631a` class.\n \"\"\"\n\n def __init__(self, parent, valid_set):\n self._parent = parent\n self._valid_set = valid_set\n\n def __getitem__(self, idx):\n # Check that the channel is available. If it is, set the\n # channelid of the device and return the device object.\n if self._parent.channelid != idx:\n self._parent.channelid = idx\n time.sleep(0.5)\n return self._parent\n\n def __len__(self):\n return len(self._valid_set)\n\n # PROPERTIES ##\n\n @property\n def channel(self):\n \"\"\"\n Gets a specific channel object. The desired channel is specified like\n one would access a list.\n\n :rtype: `HPe3631a.Channel`\n\n .. seealso::\n `HPe3631a` for example using this property.\n \"\"\"\n return self.Channel(self, [1, 2, 3])\n\n @property\n def mode(self):\n \"\"\"\n Gets/sets the mode for the specified channel.\n\n The constant-voltage/constant-current modes of the power supply\n are selected automatically depending on the load (resistance)\n connected to the power supply. If the load greater than the set\n V/I is connected, a voltage V is applied and the current flowing\n is lower than I. If the load is smaller than V/I, the set current\n I acts as a current limiter and the voltage is lower than V.\n \"\"\"\n raise AttributeError(\"The `HPe3631a` sets its mode automatically\")\n\n channelid = int_property(\n \"INST:NSEL\",\n valid_set=[1, 2, 3],\n doc=\"\"\"\n Gets/Sets the active channel ID.\n\n :type: `HPe3631a.ChannelType`\n \"\"\",\n )\n\n @property\n def voltage(self):\n \"\"\"\n Gets/sets the output voltage of the source.\n\n :units: As specified, or assumed to be :math:`\\\\text{V}` otherwise.\n :type: `float` or `~pint.Quantity`\n \"\"\"\n raw = self.query(\"SOUR:VOLT?\")\n return u.Quantity(*split_unit_str(raw, u.volt)).to(u.volt)\n\n @voltage.setter\n def voltage(self, newval):\n \"\"\"\n Gets/sets the output voltage of the source.\n\n :units: As specified, or assumed to be :math:`\\\\text{V}` otherwise.\n :type: `float` or `~pint.Quantity`\n \"\"\"\n min_value, max_value = self.voltage_range\n if newval < min_value:\n raise ValueError(\n \"Voltage quantity is too low. Got {}, minimum \"\n \"value is {}\".format(newval, min_value)\n )\n\n if newval > max_value:\n raise ValueError(\n \"Voltage quantity is too high. Got {}, maximum \"\n \"value is {}\".format(newval, max_value)\n )\n\n # Rescale to the correct unit before printing. 
This will also\n # catch bad units.\n strval = f\"{assume_units(newval, u.volt).to(u.volt).magnitude:e}\"\n self.sendcmd(f\"SOUR:VOLT {strval}\")\n\n @property\n def voltage_min(self):\n \"\"\"\n Gets the minimum voltage for the current channel.\n\n :units: :math:`\\\\text{V}`.\n :type: `~pint.Quantity`\n \"\"\"\n return self.voltage_range[0]\n\n @property\n def voltage_max(self):\n \"\"\"\n Gets the maximum voltage for the current channel.\n\n :units: :math:`\\\\text{V}`.\n :type: `~pint.Quantity`\n \"\"\"\n return self.voltage_range[1]\n\n @property\n def voltage_range(self):\n \"\"\"\n Gets the voltage range for the current channel.\n\n The MAX function SCPI command is designed in such a way\n on this device that it always returns the largest absolute value.\n There is no need to query MIN, as it is always 0., but one has to\n order the values as MAX can be negative.\n\n :units: :math:`\\\\text{V}`.\n :type: array of `~pint.Quantity`\n \"\"\"\n value = u.Quantity(*split_unit_str(self.query(\"SOUR:VOLT? MAX\"), u.volt))\n if value < 0.0:\n return value, 0.0\n return 0.0, value\n\n current, current_min, current_max = bounded_unitful_property(\n \"SOUR:CURR\",\n u.amp,\n min_fmt_str=\"{}? MIN\",\n max_fmt_str=\"{}? MAX\",\n doc=\"\"\"\n Gets/sets the output current of the source.\n\n :units: As specified, or assumed to be :math:`\\\\text{A}` otherwise.\n :type: `float` or `~pint.Quantity`\n \"\"\",\n )\n\n voltage_sense = unitful_property(\n \"MEAS:VOLT\",\n u.volt,\n readonly=True,\n doc=\"\"\"\n Gets the actual output voltage as measured by the sense wires.\n\n :units: As specified, or assumed to be :math:`\\\\text{V}` otherwise.\n :type: `~pint.Quantity`\n \"\"\",\n )\n\n current_sense = unitful_property(\n \"MEAS:CURR\",\n u.amp,\n readonly=True,\n doc=\"\"\"\n Gets the actual output current as measured by the sense wires.\n\n :units: As specified, or assumed to be :math:`\\\\text{A}` otherwise.\n :type: `~pint.Quantity`\n \"\"\",\n )\n\n output = bool_property(\n \"OUTP\",\n inst_true=\"1\",\n inst_false=\"0\",\n doc=\"\"\"\n Gets/sets the outputting status of the specified channel.\n\n This is a toggle setting. 
ON will turn on the channel output\n while OFF will turn it off.\n\n :type: `bool`\n \"\"\",\n )\n","repo_name":"instrumentkit/InstrumentKit","sub_path":"src/instruments/hp/hpe3631a.py","file_name":"hpe3631a.py","file_ext":"py","file_size_in_byte":7422,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"28"} +{"seq_id":"739807036","text":"import random\nimport s\nclass Entry:\n def __init__(self, englisch, deutsch):\n self.englisch = englisch\n self.deutsch = deutsch\ndef eintrag():\n while True:\n deutsch = input(\"Deutsch: \")\n if deutsch == \"fertig\":\n return\n englisch = input(\"Englisch: \")\n if englisch == \"fertig\":\n return\n eintraege.append(Entry(englisch, deutsch))\ndef save():\n storage = input(\"In welchen storage möchtest du die Vokabelliste speichern?\\nAntworte mit '1', '2' oder '3'\")\n if storage == \"1\":\n eintraege = str(eintraege)\n name_1 = input(\"Wie soll dein storage heißen?: \")\n storage_1 = open(name_1 + \".txt\", \"w+\")\n storage_1.write(eintraege)\n elif storage == \"2\":\n pass\n elif storage == \"3\":\n pass\n else:\n print(\"Das ist keine gültige Antwort\")\n\ndef abfrage():\n streak = 0\n trues = 0\n falses = 0\n def quote(trues, falses):\n if trues == 0:\n return 0\n elif falses == 0:\n return 100\n else:\n quote = trues / (falses + trues) * 100\n quote = round(quote)\n return quote\n print(\"Möchtest du in englisch->deutsch oder in deutsch->englisch abgefragt werden?\\nAntworte mit 'ed' oder 'de'\")\n l = input(\">\").lower()\n if l.startswith(\"e\"):\n while True:\n vocab = random.randint(0,len(eintraege) - 1)\n deutsch = input(eintraege[vocab].englisch + \" >\").lower()\n if deutsch == \"stop\":\n return\n if eintraege[vocab].deutsch == deutsch:\n print(\"Richtig\")\n streak = streak + 1\n trues = trues + 1\n print(\"Deine streak\", streak, \"\\nDeine quote:\", quote(trues, falses))\n else:\n print(\"Das war leider falsch\")\n streak = streak - streak\n falses = falses + 1\n print(\"Deine streak\", streak, \"\\nDeine quote:\", quote(trues, falses))\n elif l.startswith(\"d\"):\n while True:\n vocab = random.randint(0,len(eintraege) - 1)\n englisch = input(eintraege[vocab].deutsch + \" >\").lower()\n if englisch == \"stop\":\n return\n if eintraege[vocab].englisch == englisch:\n print(\"Richtig\")\n streak = streak + 1\n trues = trues + 1\n print(\"Deine streak\", streak, \"\\nDeine quote:\", quote(trues, falses))\n else:\n print(\"Das war leider falsch\")\n streak = streak - streak\n falses = falses + 1\n print(\"Deine streak\", streak, \"\\nDeine quote:\", quote(trues, falses))\n else:\n print(\"Das ist keine korrekte Eingabe bitte antworte mit 'ed', 'de' oder 'stop'\")\nif __name__ == \"__main__\":\n eintraege = []\n print(\"Hallo zu VoTrain, mit 'help' kannst du dir alle Befehle anzeigen lassen!\")\n while True:\n befehl = input(\">\")\n if befehl == \"abfrage\":\n abfrage()\n elif befehl == \"eintrag\":\n eintrag()\n elif befehl == \"quit\":\n quit(0)\n elif befehl == \"save\":\n save()\n elif befehl == \"load\":\n pass\n elif befehl == \"help\":\n print(\"Deine Befehle sind 'abfrage', 'eintrag', 'quit' und 'save'\")\n else:\n print(\"Das war keine korrekte Eingabe.\"\n \"probier es mal mit: 'abfrage', 'eintrag', 'quit' oder 'save'\")\n","repo_name":"hemplll/small-programs","sub_path":"vocab_trainer.py","file_name":"vocab_trainer.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} 
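# A minimal sketch, assuming only the HPe3631a.voltage_range logic shown earlier in
# this file; the helper name order_voltage_range is hypothetical and not part of
# InstrumentKit. The instrument's 'SOUR:VOLT? MAX' query reports the value with the
# largest magnitude, which is negative on the N25V channel, so the two bounds have
# to be ordered before they are returned.
def order_voltage_range(reported_max: float) -> tuple:
    """Return (min, max) given the signed value reported by 'SOUR:VOLT? MAX'."""
    if reported_max < 0.0:
        return reported_max, 0.0  # e.g. the N25V channel reports -25.0
    return 0.0, reported_max  # e.g. the P6V channel reports +6.0


assert order_voltage_range(-25.0) == (-25.0, 0.0)
assert order_voltage_range(6.0) == (0.0, 6.0)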
+{"seq_id":"21464360749","text":"import typing\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nfrom tmu.weight_bank import WeightBank\nfrom tmu.clause_bank.clause_bank import ClauseBank\nfrom tmu.clause_bank.clause_bank_cuda import ClauseBankCUDA\nfrom tmu.clause_bank.clause_bank_sparse import ClauseBankSparse\nfrom tmu.util.sparse_clause_container import SparseClauseContainer\n\n\ndef _validate_input_dtype(d: np.ndarray):\n if d.dtype is not np.uint32:\n raise RuntimeError(f\"The data input is of type {d.dtype}, but should be {np.uint32}\")\n\n\nclass MultiWeightBankMixin:\n weight_banks: SparseClauseContainer\n\n def __init__(self):\n self.weight_banks = SparseClauseContainer()\n\n\nclass MultiClauseBankMixin:\n clause_banks: SparseClauseContainer\n\n def __init__(self):\n self.clause_banks = SparseClauseContainer()\n\n\nclass SingleWeightBankMixin:\n weight_bank: WeightBank\n\n def __init__(self):\n pass\n\n\nclass SingleClauseBankMixin:\n clause_bank: typing.Union[ClauseBank, ClauseBankSparse, ClauseBankCUDA]\n\n def __init__(self):\n pass\n\n\nclass TMBasis:\n\n def __init__(\n self,\n number_of_clauses,\n T,\n s,\n confidence_driven_updating=False,\n type_i_ii_ratio=1.0,\n type_iii_feedback=False,\n focused_negative_sampling=False,\n output_balancing=False,\n d=200.0,\n platform='CPU',\n patch_dim=None,\n feature_negation=True,\n boost_true_positive_feedback=1,\n reuse_random_feedback=0,\n max_included_literals=None,\n number_of_state_bits_ta=8,\n number_of_state_bits_ind=8,\n weighted_clauses=False,\n clause_drop_p=0.0,\n literal_drop_p=0.0,\n literal_sampling=1.0,\n feedback_rate_excluded_literals=1,\n literal_insertion_state=0,\n batch_size=100,\n incremental=True,\n type_ia_ii_feedback_ratio=0,\n absorbing=-1,\n absorbing_include=None,\n absorbing_exclude=None,\n squared_weight_update_p=False\n ):\n self.number_of_clauses = number_of_clauses\n self.number_of_state_bits_ta = number_of_state_bits_ta\n self.number_of_state_bits_ind = number_of_state_bits_ind\n self.T = int(T)\n self.s = s\n\n self.confidence_driven_updating = confidence_driven_updating\n\n if type_i_ii_ratio >= 1.0:\n self.type_i_p = 1.0\n self.type_ii_p = 1.0 / type_i_ii_ratio\n else:\n self.type_i_p = type_i_ii_ratio\n self.type_ii_p = 1.0\n\n self.type_iii_feedback = type_iii_feedback\n self.focused_negative_sampling = focused_negative_sampling\n self.output_balancing = output_balancing\n self.d = d\n self.platform = platform\n self.patch_dim = patch_dim\n self.feature_negation = feature_negation\n self.boost_true_positive_feedback = boost_true_positive_feedback\n self.max_included_literals = max_included_literals\n self.weighted_clauses = weighted_clauses\n self.clause_drop_p = clause_drop_p\n self.literal_drop_p = literal_drop_p\n self.batch_size = batch_size\n self.incremental = incremental\n self.type_ia_ii_feedback_ratio = type_ia_ii_feedback_ratio\n self.absorbing = absorbing\n self.absorbing_include = absorbing_include\n self.absorbing_exclude = absorbing_exclude\n self.reuse_random_feedback = reuse_random_feedback\n self.initialized = False\n self.literal_sampling = literal_sampling\n self.feedback_rate_excluded_literals = feedback_rate_excluded_literals\n self.literal_insertion_state = literal_insertion_state\n self.squared_weight_update_p = squared_weight_update_p\n\n # TODO - Change to checksum\n self.X_train = np.zeros(0, dtype=np.uint32)\n self.X_test = np.zeros(0, dtype=np.uint32)\n\n def clause_co_occurrence(self, X, percentage=False):\n clause_outputs = csr_matrix(self.transform(X))\n 
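# clause_outputs is a sparse {0, 1} matrix of shape (n_samples, n_clauses),
        # so its Gram matrix counts how often each pair of clauses fires together;
        # with percentage=True each column is normalised by that clause's firing count
        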
if percentage:\n return clause_outputs.transpose().dot(clause_outputs).multiply(1.0 / clause_outputs.sum(axis=0))\n else:\n return clause_outputs.transpose().dot(clause_outputs)\n\n def transform(self, X):\n encoded_X = self.clause_bank.prepare_X(X)\n transformed_X = np.empty((X.shape[0], self.number_of_clauses), dtype=np.uint32)\n for e in range(X.shape[0]):\n transformed_X[e, :] = self.clause_bank.calculate_clause_outputs_predict(encoded_X, e)\n return transformed_X\n\n def literal_clause_frequency(self):\n clause_active = np.ones(self.number_of_clauses, dtype=np.uint32)\n return self.clause_bank.calculate_literal_clause_frequency(clause_active)\n\n def get_ta_action(self, clause, ta, **kwargs):\n return self.clause_bank.get_ta_action(clause, ta)\n\n def get_ta_state(self, clause, ta, **kwargs):\n return self.clause_bank.get_ta_state(clause, ta)\n\n def set_ta_state(self, clause, ta, state, **kwargs):\n return self.clause_bank.set_ta_state(clause, ta, state)\n\n def fit(self, X, Y, *args, **kwargs):\n raise NotImplementedError(\"fit(self, X, Y, *args, **kwargs) is not implemented for your model\")\n\n def predict(self, X, shuffle=True) -> np.ndarray:\n raise NotImplementedError(\"predict(self, X: np.ndarray\")\n\n def init(self, X: np.ndarray, Y: np.ndarray):\n raise NotImplementedError(\"init(self, X: np.ndarray, Y: np.ndarray)\")\n","repo_name":"charulgiri/Incremental_evaluation","sub_path":"tmu/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"70018890635","text":"#!/usr/bin/python3\n\"\"\" Module Rectangle\n creates a Rectangle that inherits from Base \"\"\"\n\n\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" describes a Rectangle \"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" initializes a Rectangle instance \"\"\"\n\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n @property\n def width(self):\n \"\"\" retrieves width attribute \"\"\"\n\n return self.__width\n\n @property\n def height(self):\n \"\"\" retrieves height attribute \"\"\"\n\n return self.__height\n\n @property\n def x(self):\n \"\"\" retrieves x attribute \"\"\"\n\n return self.__x\n\n @property\n def y(self):\n \"\"\" retrieves y attribute \"\"\"\n\n return self.__y\n\n @width.setter\n def width(self, value):\n \"\"\" sets width attribute \"\"\"\n\n if type(value) is not int:\n raise TypeError('width must be an integer')\n if value <= 0:\n raise ValueError('width must be > 0')\n self.__width = value\n\n @height.setter\n def height(self, value):\n \"\"\" sets height attribute \"\"\"\n\n if type(value) is not int:\n raise TypeError('height must be an integer')\n if value <= 0:\n raise ValueError('height must be > 0')\n self.__height = value\n\n @x.setter\n def x(self, value):\n \"\"\" sets x attribute \"\"\"\n\n if type(value) is not int:\n raise TypeError('x must be an integer')\n if value < 0:\n raise ValueError('x must be >= 0')\n self.__x = value\n\n @y.setter\n def y(self, value):\n \"\"\" sets y attribute \"\"\"\n\n if type(value) is not int:\n raise TypeError('y must be an integer')\n if value < 0:\n raise ValueError('y must be >= 0')\n self.__y = value\n\n def area(self):\n \"\"\" returns area of the Rectangle \"\"\"\n\n return self.__width * self.__height\n\n def display(self):\n \"\"\" displays Rectangle using #'s \"\"\"\n\n print('\\n' * (self.__y), end=\"\")\n for j in 
range(self.__height):\n            print(' ' * self.__x + '#' * self.__width)\n\n    def __str__(self):\n        \"\"\" returns string representation of Rectangle \"\"\"\n\n        return (f'[Rectangle] ({self.id}) '\n                f'{self.__x}/{self.__y} - '\n                f'{self.__width}/{self.__height}')\n\n    def update(self, *args, **kwargs):\n        \"\"\" assigns value to an unknown number of arguments \"\"\"\n\n        if len(args) >= 1:\n            self.id = args[0]\n        if len(args) >= 2:\n            self.width = args[1]\n        if len(args) >= 3:\n            self.height = args[2]\n        if len(args) >= 4:\n            self.x = args[3]\n        if len(args) >= 5:\n            self.y = args[4]\n\n        for key, value in kwargs.items():\n            if key == \"id\":\n                self.id = value\n            if key == \"width\":\n                self.width = value\n            if key == \"height\":\n                self.height = value\n            if key == \"x\":\n                self.x = value\n            if key == \"y\":\n                self.y = value\n\n    def to_dictionary(self):\n        \"\"\" return dictionary representation of Rectangle \"\"\"\n\n        my_dict = {\n            'id': self.id,\n            'width': self.width,\n            'height': self.height,\n            'x': self.x,\n            'y': self.y\n        }\n        return my_dict\n","repo_name":"GoldLion1011/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"70156051596","text":"\"\"\"\nNormalize city and district names\n\"\"\"\n\nimport json\n\nwith open('lib/city.json', 'r+') as f:\n    standard_city_dict = json.loads(f.read())\nwith open('lib/region.json', 'r+') as f:\n    standard_block_dict = json.loads(f.read())\n\n\ndef standard_city(city_name):\n    \"\"\"\n\n    :param city_name: raw city name\n    :return: standardized city name\n    \"\"\"\n    for i in standard_city_dict.items():\n        for city in i[1]:\n            if city and i[0] in city_name:\n                print(i[0])\n                return i[0]\n            else:\n                continue\n    print(\"Unable to standardize\")\n    return city_name\n\n\ndef standard_block(region_name):\n    \"\"\"\n\n    :param region_name: raw district name\n    :return: standardized district name\n    \"\"\"\n    for i in standard_block_dict.items():\n        for block in i[1]:\n            if block in region_name:\n                # print(i[0])\n                return i[0]\n            else:\n                continue\n    print('Unable to standardize')\n    # print(region_name)\n    return region_name\n\n\nif __name__ == '__main__':\n    standard_city(' 珠海市 ')\n    standard_block('浦东新区 ')\n","repo_name":"BHBSA/hider_comm_price","sub_path":"lib/standardization.py","file_name":"standardization.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"25715599007","text":"\"\"\"\n@functions: source model, dirty map, clean img, radplot\n@author: Zhen ZHAO\n@date: Dec 16, 2018\n\"\"\"\nimport os\nimport matplotlib as mpl\nmpl.use(\"TkAgg\")\nimport matplotlib.image as plimg\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport scipy.ndimage.interpolation as spndint\nimport scipy.optimize as spfit\nimport numpy as np\nimport load_conf as lc\nimport utility as ut\nfrom Func_uv import FuncUv\nimport argparse\nimport configparser\nimport pickle\nimport time\n\n# colors normalization\nnorm = mpl.colors.Normalize(vmin=0, vmax=0.6)\n\n\nclass FuncImg(object):\n    def __init__(self, model_name, n_pix, coverage_u, coverage_v, max_uv, obs_freq,\n                 set_clean_window, clean_gain, clean_threshold, clean_niter, uv_unit=\"km\"):\n        self.n_pix = n_pix\n        self.n_phf = self.n_pix // 2\n        # 1. 
source model\n        # 1.1 get source model file directory\n        self.source_model = model_name\n        model_dir = os.path.join(os.getcwd(), 'SOURCE_MODELS')\n        self.model_file = os.path.join(model_dir, self.source_model)\n        self.unit_flag = 0 if uv_unit == \"lambda\" else 1\n        self.obs_freq = obs_freq\n        # observing wavelength from the frequency; c = 299792458.0 m/s\n        self.obs_wlen = 299792458.0 / self.obs_freq\n        # 1.2 source model result\n        self.img_size = 4.\n        self.img_file = []\n        self.models = []\n        self.x_max = 0\n\n        self.model_img = []\n        self.model_fft = []\n\n        # 2. dirty beam\n        # 2.1 parameter settings\n        self.u = []\n        self.v = []\n        self.max_u = 0\n\n        u = np.array(coverage_u)\n        v = np.array(coverage_v)\n        max_u = max_uv\n        if len(u) != 0 and len(v) != 0:\n            if self.unit_flag != 0:  # unit is not lambda\n                self.u = u * 1000 / self.obs_wlen\n                self.v = v * 1000 / self.obs_wlen\n                self.max_u = max_u * 1000 / self.obs_wlen\n            else:\n                self.u = u\n                self.v = v\n                self.max_u = max_u\n\n        # 2.2 dirty beam result\n        self.dirty_beam = []\n        self.mask = []\n        self.beam_scale = 0\n\n        # 3. dirty map\n        self.dirty_map = np.zeros((self.n_pix, self.n_pix), dtype=np.float32)\n\n        # 4. cleaner\n        # 4.1 settings\n        self.clean_window = set_clean_window\n        self.clean_gain = clean_gain\n        self.clean_thresh = clean_threshold\n        self.clean_niter = clean_niter\n        # 4.2 clean results\n        self.clean_img = []\n        self.res_img = []\n\n        # to avoid multiple running\n        self.is_model_obtained = False\n        self.is_beam_obtained = False\n        self.is_map_obtained = False\n\n        # 5. parameter calculation\n        self.result_e_bpa = 0\n        self.result_e_bmaj = 0\n        self.result_e_bmin = 0\n        self.result_e_range = 0\n        self.result_dynamic_range = 0\n        self.result_rms_noise = 0\n\n    # 1.source model\n    def _read_model(self):\n        \"\"\"\n        :return: models, img_size, Xaxmax, img_file\n        \"\"\"\n\n        if len(self.model_file) == 0:\n            self.models = [['G', 0., 0.4, 1.0, 0.1], ['D', 0., 0., 2., 0.5], ['P', -0.4, -0.5, 0.1]]\n            self.x_max = self.img_size / 2.\n            return True\n\n        if len(self.model_file) > 0:\n            if not os.path.exists(self.model_file):\n                print(\"\\n\\nModel file %s does not exist!\\n\\n\" % self.model_file)\n                return False\n            else:\n                fix_size = False\n                temp_model = []\n                temp_img_files = []\n                temp_img_size = self.img_size\n                Xmax = 0.0\n                fi = open(self.model_file)\n                for li, l in enumerate(fi.readlines()):\n                    comm = l.find('#')\n                    if comm >= 0:\n                        l = l[:comm]\n                    it = l.split()\n                    if len(it) > 0:\n                        if it[0] == 'IMAGE':\n                            temp_img_files.append([str(it[1]), float(it[2])])\n                        elif it[0] in ['G', 'D', 'P']:\n                            temp_model.append([it[0]] + list(map(float, it[1:])))\n                            if temp_model[-1][0] != 'P':\n                                temp_model[-1][4] = np.abs(temp_model[-1][4])\n                                Xmax = np.max([np.abs(temp_model[-1][1]) + temp_model[-1][4],\n                                               np.abs(temp_model[-1][2]) + temp_model[-1][4], Xmax])\n                        elif it[0] == 'IMSIZE':\n                            temp_img_size = 2. 
* float(it[1])\n fix_size = True\n else:\n print(\"\\n\\nWRONG SYNTAX IN LINE %i:\\n\\n %s...\\n\\n\" % (li + 1, l[:max(10, len(l))]))\n if len(temp_model) + len(temp_img_files) == 0:\n print(\"\\n\\nThere should be at least 1 model component!\\n\\n\")\n\n self.models = temp_model\n self.imsize = temp_img_size\n self.imfiles = temp_img_files\n if not fix_size:\n self.imsize = Xmax * 1.1\n self.x_max = self.imsize / 2\n fi.close()\n\n return True\n\n return False\n\n def _prepare_model(self):\n \"\"\"\n :return: modelim, modelfft\n \"\"\"\n if self._read_model():\n # create temp variable\n models = self.models\n imsize = self.imsize\n imfiles = self.imfiles\n Npix = self.n_pix\n Nphf = self.n_phf\n\n pixsize = float(imsize) / Npix\n xx = np.linspace(-imsize / 2., imsize / 2., Npix)\n yy = np.ones(Npix, dtype=np.float32)\n distmat = np.zeros((Npix, Npix), dtype=np.float32)\n modelim = np.zeros((Npix, Npix), dtype=np.float32)\n\n # read model\n for model in models:\n xsh = -model[1]\n ysh = -model[2]\n xpix = np.rint(xsh / pixsize).astype(np.int32)\n ypix = np.rint(ysh / pixsize).astype(np.int32)\n centy = np.roll(xx, ypix)\n centx = np.roll(xx, xpix)\n distmat[:] = np.outer(centy ** 2., yy) + np.outer(yy, centx ** 2.)\n if model[0] == 'D':\n mask = np.logical_or(distmat <= model[4] ** 2., distmat == np.min(distmat))\n modelim[mask] += float(model[3]) / np.sum(mask)\n elif model[0] == 'G':\n gauss = np.exp(-distmat / (2. * model[4] ** 2.))\n modelim[:] += float(model[3]) * gauss / np.sum(gauss)\n elif model[0] == 'P':\n if np.abs(xpix + Nphf) < Npix and np.abs(ypix + Nphf) < Npix:\n yint = ypix + Nphf\n xint = xpix + Nphf\n modelim[yint, xint] += float(model[3])\n\n # read image file\n for imfile in imfiles:\n if not os.path.exists(imfile[0]):\n imfile[0] = os.path.join(os.path.join(os.getcwd(), 'PICTURES'), imfile[0])\n if not os.path.exists(imfile[0]):\n print('File %s does NOT exist. Cannot read the model!' 
% imfile[0])\n return False\n\n Np4 = Npix // 4\n img = plimg.imread(imfile[0]).astype(np.float32)\n dims = np.shape(img)\n d3 = min(2, dims[2])\n d1 = float(max(dims))\n avimg = np.average(img[:, :, :d3], axis=2)\n avimg -= np.min(avimg)\n avimg *= imfile[1] / np.max(avimg)\n if d1 == Nphf:\n pass\n else:\n zoomimg = spndint.zoom(avimg, float(Nphf) / d1)\n zdims = np.shape(zoomimg)\n zd0 = min(zdims[0], Nphf)\n zd1 = min(zdims[1], Nphf)\n sh0 = (Nphf - zdims[0]) // 2\n sh1 = (Nphf - zdims[1]) // 2\n # print(sh0, Np4, zd0, sh1, zd1)\n modelim[sh0 + Np4:sh0 + Np4 + zd0, sh1 + Np4:sh1 + Np4 + zd1] += zoomimg[:zd0, :zd1]\n\n # obtain modelim, modelfft\n modelim[modelim < 0.0] = 0.0\n self.model_img = modelim\n self.model_fft = np.fft.fft2(np.fft.fftshift(modelim))\n return True\n else:\n print(\"wrong model settings\")\n return False\n\n def get_result_src_model_with_update(self):\n \"\"\"\n :return: model_img, max_range\n \"\"\"\n if self._prepare_model():\n self.is_model_obtained = True\n Npix = self.n_pix\n Np4 = Npix // 4\n show_modelim = self.model_img[Np4:(Npix - Np4), Np4:(Npix - Np4)]\n return show_modelim, self.x_max\n else:\n return None, None\n\n def update_result_src_model(self):\n if self._prepare_model():\n self.is_model_obtained = True\n else:\n self.is_model_obtained = False\n\n def get_result_src_model(self):\n if self.is_model_obtained:\n Npix = self.n_pix\n Np4 = Npix // 4\n show_modelim = self.model_img[Np4:(Npix - Np4), Np4:(Npix - Np4)]\n return show_modelim, self.x_max\n else:\n return [], 0.0\n\n # 2.dirty beam\n def _prepare_beam(self):\n mask = np.zeros((self.n_pix, self.n_pix), dtype=np.float32)\n beam = []\n\n # 1. griding uv\n ctr = self.n_pix // 2\n scale_uv = self.n_pix / 2 / self.max_u * 0.95 * 0.5\n for index in np.arange(len(self.u)):\n mask[int(ctr + round(self.u[index] * scale_uv)), int(ctr + round(self.v[index] * scale_uv))] += 1\n # mask = np.transpose(mask)\n mask[mask > 1] = 1\n\n # 2. robust sampling\n # robust = 0.0\n # Nbas = len(u)\n # nH = 200 # time_duration // time_step\n # robfac = (5. * 10. ** (-robust)) ** 2. * (2. * Nbas * nH) / np.sum( mask** 2.)\n # robustsamp = np.zeros((Npix, Npix), dtype=np.float32)\n # robustsamp[:] = mask / (1. + robfac * mask)\n\n # 3. 
beam\n        # beam = np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fftshift(mask))))\n        # beam = np.real(np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(mask))))\n        beam = np.real(np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(mask))))\n\n        beam_scale = np.max(beam)\n        # beam_scale = np.max(beam[self.n_phf:self.n_phf + 1, self.n_phf:self.n_phf + 1])\n        beam /= beam_scale\n\n        # return\n        self.dirty_beam = beam\n        self.mask = mask\n        self.beam_scale = beam_scale\n        # print(\"=\"*20)\n        # print(self.beam_scale)\n        # print(\"=\" * 20)\n\n    def get_result_dirty_beam_with_update(self):\n        self._prepare_beam()\n        self.is_beam_obtained = True\n        Npix = self.n_pix\n        Np4 = Npix // 4\n        show_beam = self.dirty_beam[Np4:(Npix - Np4), Np4:(Npix - Np4)]\n        return show_beam\n\n    # for multiprocessing purpose (separate updating and getter)\n    def update_result_dirty_beam(self):\n        self._prepare_beam()\n        self.is_beam_obtained = True\n\n    def get_result_dirty_beam(self):\n        if self.is_beam_obtained:\n            Npix = self.n_pix\n            Np4 = Npix // 4\n            show_beam = self.dirty_beam[Np4:Npix - Np4, Np4:Npix - Np4]\n            return show_beam\n        else:\n            return []\n\n    # 3.dirty map\n    def _prepare_map(self):\n        if not self.is_model_obtained:\n            self._prepare_model()\n        if not self.is_beam_obtained:\n            self._prepare_beam()\n\n        # Be1=np.real(np.fft.fftshift(np.fft.fft2(np.fft.fftshift(self.dirty_beam))))\n        # Ga1=np.real(self.model_fft)\n        # C_BG1=np.copy(self.dirty_map)\n        #\n        # for ii in np.arange(len(Be1)):\n        #     for jj in np.arange(len(Ga1)):\n        #         C_BG1[ii][jj] = Be1[ii][jj]*Ga1[ii][jj]\n        #\n        # self.dirty_map[:] = np.real(np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(C_BG1))))\n\n        # self.dirty_map[:] = np.fft.fftshift(\n        #     np.fft.ifft2(self.model_fft * np.fft.ifftshift(self.mask))).real / self.beam_scale\n        self.dirty_map[:] = np.fft.fftshift(np.fft.ifft2(self.model_fft * np.fft.ifftshift(self.mask))).real / (\n                self.beam_scale * 1.5)\n\n    def get_result_dirty_map_with_update(self):\n        self._prepare_map()\n        self.is_map_obtained = True\n        Np4 = self.n_pix // 4\n        show_dirty = self.dirty_map[Np4:self.n_pix - Np4, Np4:self.n_pix - Np4]\n        return show_dirty\n\n    # for multiprocessing purpose (separate updating and getter)\n    def update_result_dirty_map(self):\n        self._prepare_map()\n        self.is_map_obtained = True\n\n    def get_result_dirty_map(self):\n        if self.is_map_obtained:\n            Np4 = self.n_pix // 4\n            show_dirty = self.dirty_map[Np4:self.n_pix - Np4, Np4:self.n_pix - Np4]\n            return show_dirty\n        else:\n            return []\n\n    # 4.cleaner\n    # unused stub: do_clean below calls the module-level overlap_indices() helper\n    def overlap_indices(self):\n        pass\n\n    def do_clean(self):\n        # clean_img, res_img = do_clean(dirty_map, dirty_beam, True, 0.2, 0, 100)\n        if not self.is_map_obtained:\n            self._prepare_map()\n        self.get_clean_beam()\n        clean_beam = self.clean_beam\n        Npix = self.n_pix\n        image_shape = self.dirty_map.shape\n        # clean_img = np.zeros(image_shape)\n        # res_img = np.array(self.dirty_map)\n        clean_img = np.zeros(np.shape(self.dirty_map))\n        source_img = np.zeros(np.shape(self.dirty_map))\n        res_img = np.copy(self.dirty_map)\n        # clean window: boolean mask of pixels eligible for cleaning; default to the\n        # full image so the masked indexing below stays valid even when clean_window\n        # is False (the builtin bool replaces the np.bool alias removed from NumPy)\n        window = np.ones(image_shape, bool)\n        # clean iterations\n        for i in range(self.clean_niter):\n            mx, my = np.unravel_index(np.fabs(res_img[window]).argmax(), res_img.shape)\n            mval = res_img[mx, my] * self.clean_gain\n            source_img[mx, my] += mval\n            clean_img += mval * np.roll(np.roll(clean_beam, mx - Npix // 2, axis=0),\n                                        my - Npix // 2, axis=1)\n\n            a1o, a2o = overlap_indices(self.dirty_map, self.dirty_beam,\n                                       mx - image_shape[0] / 2,\n                                       my - image_shape[1] / 2)\n            # print(a1o, a2o)\n            
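# Hogbom CLEAN subtraction: remove the dirty beam, scaled by the peak value\n            # times the loop gain, from the residuals over the overlapping region.\n            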
res_img[a1o[0]:a1o[1], a1o[2]:a1o[3]] -= self.dirty_beam[a2o[0]:a2o[1], a2o[2]:a2o[3]] * mval\n if np.fabs(res_img).max() < self.clean_thresh:\n break\n # result\n # print(\"=\"*20, self.clean_niter, \"=\"*20)\n self.clean_img = clean_img\n self.res_img = res_img\n self.source_img = source_img\n\n def get_clean_beam(self):\n beam = self.dirty_beam\n main_lobe = np.where(beam > 0.6 * np.max(beam))\n clean_beam = np.zeros(np.shape(beam))\n Npix = self.n_pix\n # print(Npix)\n\n if len(main_lobe[0]) < 5:\n print('ERROR!', 'The main lobe of the PSF is too narrow!\\n CLEAN model will not be restored')\n clean_beam[:] = 0.0\n clean_beam[Npix // 2, Npix // 2] = 1.0\n else:\n dX = main_lobe[0] - Npix // 2\n dY = main_lobe[1] - Npix // 2\n # if True:\n try:\n fit = spfit.leastsq(\n lambda x: np.exp(-(dX * dX * x[0] + dY * dY * x[1] + dX * dY * x[2])) - beam[main_lobe],\n [1., 1., 0.])\n ddX = np.outer(np.ones(Npix),\n np.arange(-Npix // 2, Npix // 2).astype(np.float64))\n ddY = np.outer(np.arange(-Npix // 2, Npix // 2).astype(np.float64),\n np.ones(Npix))\n\n clean_beam[:] = np.exp(-(ddY * ddY * fit[0][0] + ddX * ddX * fit[0][1] + ddY * ddX * fit[0][2]))\n\n del ddX, ddY\n except:\n print('ERROR!', 'Problems fitting the PSF main lobe!\\n CLEAN model will not be restored')\n clean_beam[:] = 0.0\n clean_beam[Npix // 2, Npix // 2] = 1.0\n\n self.clean_beam = clean_beam\n\n def get_result_clean_map_with_update(self):\n self.do_clean()\n Np4 = self.n_pix // 4\n show_clean = self.clean_img[Np4:self.n_pix - Np4, Np4:self.n_pix - Np4]\n show_res = self.res_img[Np4:self.n_pix - Np4, Np4:self.n_pix - Np4]\n show_src = self.source_img[Np4:self.n_pix - Np4, Np4:self.n_pix - Np4]\n show_cln_beam = self.clean_beam[Np4:self.n_pix - Np4, Np4:self.n_pix - Np4]\n return show_clean + show_res, show_res, show_src, show_cln_beam\n\n # for multiprocessing purpose (separate updating and getter)\n def update_result_clean_map(self):\n self.do_clean()\n\n def get_result_clean_map(self):\n if self.is_map_obtained:\n Np4 = self.n_pix // 4\n show_clean = self.clean_img[Np4:self.n_pix - Np4, Np4:self.n_pix - Np4]\n show_res = self.res_img[Np4:self.n_pix - Np4, Np4:self.n_pix - Np4]\n return show_clean, show_res\n else:\n return [], []\n\n # 5. calculation\n def get_result_img_range(self):\n self.update_result_para_cal()\n return self.result_e_range\n\n def show_result_para_cal(self):\n str1 = \"e_bpa={} degree\\ne_bmaj={} mas\\ne_bmin={} mas\\ne_range={}\\nrms_noise={}\\ndr={}\".format(\n self.result_e_bpa, self.result_e_bmaj, self.result_e_bmin, self.result_e_range,\n self.result_rms_noise, self.result_dynamic_range)\n return str1\n\n def update_result_para_cal(self):\n # 1. calculate beam size and position angle\n # the unit of u,v in my code is km or lambda\n u = np.array(self.u)\n v = np.array(self.v)\n max_uv = self.max_u\n if len(self.u) != 0 and len(self.v) != 0:\n uv_bl = [np.sqrt(uu ** 2 + vv ** 2) for uu, vv in zip(u, v)]\n max_bl = np.max(uv_bl)\n muu, mvv, muv = 0.0, 0.0, 0.0\n wsum, runwt = 0.0, 1.0\n for i in range(0, len(u)):\n weight = 1.0\n # if True: # do radial weighting\n # weight *= uv_bl[i]\n if True: # do uniform weighting\n weight /= max_bl\n wsum += weight\n runwt = weight / wsum\n\n muu += runwt * (u[i] ** 2 - muu)\n mvv += runwt * (v[i] ** 2 - mvv)\n muv += runwt * (u[i] * v[i] - muv)\n # http://www.astro.caltech.edu/~tjp/ Timothy J. 
Pearson\n # https://www.eso.org/sci/meetings/2015/eris2015/ERIS-T4.pdf\n fudge = 0.7 # Empirical fudge factor of TJP's algorithm\n ftmp = np.sqrt((muu - mvv) ** 2 + 4 * muv * muv)\n\n e_bpa = -0.5 * np.arctan2(2.0 * muv, muu - mvv)\n e_bpa = e_bpa * 180 / np.pi\n\n e_bmaj = fudge / (np.sqrt(2.0 * (muu + mvv - ftmp)))\n e_bmaj = e_bmaj / np.pi * 180 * 3600 * 1000\n\n e_bmin = fudge / (np.sqrt(2.0 * (muu + mvv) + 2.0 * ftmp))\n e_bmin = e_bmin / np.pi * 180 * 3600 * 1000\n\n self.result_e_bpa = e_bpa\n self.result_e_bmaj, self.result_e_bmin = e_bmaj, e_bmin\n\n # 2. calculate the image axis\n\n u_range = np.linspace(-max_uv, max_uv, self.n_pix)\n u_reso = np.abs(u_range[3] - u_range[2]) # delta u\n l_extent = 1 / u_reso\n l_angle = np.arcsin(l_extent) * 180.0 / np.pi # rad to degree\n l_angle = l_angle * 3600 # degree to as\n l_angle = l_angle * 1000 # as to mas\n self.result_e_range = l_angle // 4\n\n # 3. calculate the rms noise and dr, self.clean_img, self.res_img\n clean_img = np.abs(self.clean_img + self.res_img)\n mean_noise = np.mean(np.abs(self.res_img))\n self.result_rms_noise = mean_noise\n self.result_dynamic_range = np.max(clean_img) / mean_noise\n\n return self.result_e_bpa, self.result_e_bmaj, self.result_e_bmin, self.result_e_range, self.result_rms_noise, self.result_dynamic_range\n\n\nclass ImgConfigParser(object):\n def __init__(self, _filename=\"config_img.ini\", _dbname='database.pkl'):\n # path = os.path.abspath(path)\n # path = os.getcwd()\n # path = \"./CONFIG_FILE\"\n self.filename = os.path.join(os.path.join(os.getcwd(), 'CONFIG_FILE'), _filename)\n self.db_path = os.path.join(os.path.join(os.getcwd(), 'DATABASE'), _dbname)\n\n # time\n self.time_start = []\n self.time_end = []\n self.time_step = []\n # show info\n self.bs_flag_gg = 0\n self.bs_flag_gs = 0\n self.bs_flag_ss = 0\n self.baseline_type = 0\n self.unit_flag = \"km\"\n self.cutoff_angle = 0\n self.precession_mode = 0\n # obs\n self.obs_freq = 0\n\n # position\n self.str_source = \"\"\n self.str_vlbi = \"\"\n self.str_telemetry = \"\"\n self.str_sat = \"\"\n\n self.pos_mat_src = []\n self.pos_mat_vlbi = []\n self.pos_mat_telemetry = []\n self.pos_mat_sat = []\n\n # imaging\n self.n_pix = 0\n self.source_model = \"\"\n self.clean_gain = 0\n self.clean_threshold = 0\n self.clean_niter = 0\n self.color_map_name = \"\"\n\n # parse data\n self.parse_data()\n\n def parse_data(self):\n if not os.path.exists(self.filename):\n self.rewrite_config()\n return\n\n def parse_string_list(config, _string):\n tmp = config.get(\"station\", _string)\n tmp_lst = [x.strip() for x in tmp.split(',')]\n return tmp_lst\n\n # create configparse\n config = configparser.ConfigParser()\n config.read(self.filename, encoding=\"utf-8\")\n\n # obs_time\n tmp = config.get(\"obs_time\", \"start\")\n self.time_start = [int(x) for x in tmp.split('/')]\n tmp = config.get(\"obs_time\", \"end\")\n self.time_end = [int(x) for x in tmp.split('/')]\n tmp = config.get(\"obs_time\", \"step\")\n self.time_step = [int(x) for x in tmp.split('/')]\n\n # bs_type\n self.bs_flag_gg = config.getint(\"bs_type\", \"bs_flag_gg\")\n self.bs_flag_gs = config.getint(\"bs_type\", \"bs_flag_gs\")\n self.bs_flag_ss = config.getint(\"bs_type\", \"bs_flag_ss\")\n self.baseline_type = self.bs_flag_gg + self.bs_flag_gs * 2 + self.bs_flag_ss * 4\n\n # obs_mode\n self.obs_freq = config.getfloat(\"obs_mode\", \"obs_freq\")\n self.cutoff_angle = config.getfloat(\"obs_mode\", \"cutoff_angle\")\n self.precession_mode = config.getint(\"obs_mode\", \"precession_mode\")\n 
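# unit_flag selects the unit of the stored u,v data: 'km' (converted to\n        # wavelengths inside FuncImg using the observing frequency) or 'lambda'.\n        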
self.unit_flag = config.get(\"obs_mode\", \"unit_flag\")\n\n # station\n self.str_source = parse_string_list(config, \"pos_source\")\n self.str_vlbi = parse_string_list(config, \"pos_vlbi\")\n self.str_telemetry = parse_string_list(config, \"pos_telemetry\")\n self.str_sat = parse_string_list(config, \"pos_satellite\")\n\n self.get_data_from_db()\n\n # imaging\n self.n_pix = config.getint(\"imaging\", \"n_pix\")\n self.source_model = config.get(\"imaging\", \"source_model\")\n self.clean_gain = config.getfloat(\"imaging\", \"clean_gain\")\n self.clean_threshold = config.getfloat(\"imaging\", \"clean_threshold\")\n self.clean_niter = config.getint(\"imaging\", \"clean_niter\")\n self.color_map_name = config.get(\"imaging\", \"color_map_name\")\n\n def show_info(self):\n print('*' * 15, \" TIME \", '*' * 15)\n print(\"start=\", self.time_start)\n print(\"end=\", self.time_end)\n print(\"step=\", self.time_step)\n print()\n\n print('*' * 15, \" OBS \", '*' * 15)\n print(\"bs_type=\", self.baseline_type)\n print(\"obs_freq=\", self.obs_freq)\n print(\"cutoff_angle=\", self.cutoff_angle)\n print(\"precession_mode=\", self.precession_mode)\n print(\"unit_flag=\", self.unit_flag)\n print()\n\n print('*' * 15, \" Station \", '*' * 15)\n print(\"str_source=\", self.str_source)\n print(\"str_vlbi=\", self.str_vlbi)\n print(\"str_telemetry=\", self.str_telemetry)\n print(\"str_sat=\", self.str_sat)\n\n print('*' * 15, \" Station with data\", '*' * 15)\n print(\"\\t source:\", self.pos_mat_src)\n print(\"\\t vlbi stations:\", self.pos_mat_vlbi)\n print(\"\\t telemetry stations:\", self.pos_mat_telemetry)\n print(\"\\t satellite:\", self.pos_mat_sat)\n\n print('*' * 15, \" Imaging\", '*' * 15)\n print(\"\\t n_pix:\", self.n_pix)\n print(\"\\t source model:\", self.source_model)\n print(\"\\t clean gain:\", self.clean_gain)\n print(\"\\t clean threshold:\", self.clean_threshold)\n print(\"\\t clean iterations:\", self.clean_niter)\n print(\"\\t colormap name:\", self.color_map_name)\n\n def rewrite_config(self):\n # create file\n if os.path.exists(self.filename):\n os.remove(self.filename)\n f = open(self.filename, 'w')\n f.close()\n else:\n f = open(self.filename, 'w')\n f.close()\n\n # create configparse\n config = configparser.ConfigParser()\n config.read(self.filename, encoding=\"utf-8\")\n\n # add sections: obs_time\n config.add_section(\"obs_time\")\n config.set(\"obs_time\", \"start\", \"2020/01/01/00/00/00\")\n config.set(\"obs_time\", \"end\", \"2020/01/02/00/00/00\")\n config.set(\"obs_time\", \"step\", \"00/00/05/00\")\n self.time_start = [2020, 1, 1, 0, 0, 0]\n self.time_end = [2020, 1, 2, 0, 0, 0]\n self.time_step = [0, 0, 5, 0]\n\n # add sections: bs_type\n config.add_section(\"bs_type\")\n config.set(\"bs_type\", \"bs_flag_gg\", \"1\")\n config.set(\"bs_type\", \"bs_flag_gs\", \"0\")\n config.set(\"bs_type\", \"bs_flag_ss\", \"0\")\n self.bs_flag_gg, self.bs_flag_gs, self.bs_flag_ss = 1, 0, 0\n self.baseline_type = self.bs_flag_gg + self.bs_flag_gs * 2 + self.bs_flag_ss * 4\n\n # add sections: obs_mode\n config.add_section(\"obs_mode\")\n config.set(\"obs_mode\", \"obs_freq\", \"1.63e9\")\n config.set(\"obs_mode\", \"bandwidth\", \"3.2e7\")\n config.set(\"obs_mode\", \"cutoff_angle\", \"10.0\")\n config.set(\"obs_mode\", \"precession_mode\", \"0\")\n config.set(\"obs_mode\", \"unit_flag\", \"km\")\n self.obs_freq = 1.63e9\n self.cutoff_angle = 10.0\n self.precession_mode = 0\n self.unit_flag = 'km'\n\n # add sections: station\n config.add_section(\"station\")\n config.set(\"station\", 
\"pos_source\", \"0316+413\")\n config.set(\"station\", \"pos_vlbi\", \"ShangHai, Tianma, Urumqi, GIFU11, HITACHI,KASHIM34\")\n config.set(\"station\", \"pos_telemetry\", \"\")\n config.set(\"station\", \"pos_satellite\", \"\")\n self.str_source = ['0316+413']\n self.str_vlbi = ['ShangHai', 'Tianma', 'Urumqi', 'GIFU11', 'HITACHI', 'KASHIM34']\n self.str_telemetry = ['']\n self.str_sat = ['']\n self.get_data_from_db()\n\n # add section: imaging\n config.add_section(\"imaging\")\n config.set(\"imaging\", \"n_pix\", \"512\")\n config.set(\"imaging\", \"source_model\", \"Point-source.model\")\n config.set(\"imaging\", \"clean_gain\", \"0.9\")\n config.set(\"imaging\", \"clean_threshold\", \"0.01\")\n config.set(\"imaging\", \"clean_niter\", \"20\")\n config.set(\"imaging\", \"color_map_name\", \"viridis\")\n self.n_pix = 512\n self.source_model = \"Point-source.model\"\n self.clean_gain = 0.9\n self.clean_threshold = 0.01\n self.clean_niter = 20\n self.color_map_name = \"viridis\"\n\n # write file\n config.write(open(self.filename, \"w\"))\n\n def get_data_from_db(self):\n with open(self.db_path, 'rb') as fr:\n db_src_dict = pickle.load(fr)\n db_sat_dict = pickle.load(fr)\n db_telem_dict = pickle.load(fr)\n db_vlbi_vlba_dict = pickle.load(fr)\n db_vlbi_evn_dict = pickle.load(fr)\n db_vlbi_eavn_dict = pickle.load(fr)\n db_vlbi_lba_dict = pickle.load(fr)\n db_vlbi_other_dict = pickle.load(fr)\n db_vlbi_all = pickle.load(fr)\n\n # source\n self.pos_mat_src = []\n if len(self.str_source) != 0:\n for each in self.str_source:\n if each in db_src_dict.keys():\n self.pos_mat_src.append(list(db_src_dict[each]))\n\n # sat\n self.pos_mat_sat = []\n if len(self.str_sat) != 0:\n for each in self.str_sat:\n if each in db_sat_dict.keys():\n self.pos_mat_sat.append(list(db_sat_dict[each]))\n\n # telem\n self.pos_mat_telemetry = []\n if len(self.str_telemetry) != 0:\n for each in self.str_telemetry:\n if each in db_telem_dict.keys():\n self.pos_mat_telemetry.append(list(db_telem_dict[each]))\n\n # vlbi\n self.pos_mat_vlbi = []\n if len(self.str_vlbi) != 0:\n for each in self.str_vlbi:\n if each in db_vlbi_all.keys():\n self.pos_mat_vlbi.append(list(db_vlbi_all[each]))\n\n\ndef overlap_indices(a1, a2, shiftx, shifty):\n if shiftx >= 0:\n a1xbeg = shiftx\n a2xbeg = 0\n a1xend = a1.shape[0]\n a2xend = a1.shape[0] - shiftx\n else:\n a1xbeg = 0\n a2xbeg = -shiftx\n a1xend = a1.shape[0] + shiftx\n a2xend = a1.shape[0]\n\n if shifty >= 0:\n a1ybeg = shifty\n a2ybeg = 0\n a1yend = a1.shape[1]\n a2yend = a1.shape[1] - shifty\n else:\n a1ybeg = 0\n a2ybeg = -shifty\n a1yend = a1.shape[1] + shifty\n a2yend = a1.shape[1]\n\n return (int(a1xbeg), int(a1xend), int(a1ybeg), int(a1yend)), (int(a2xbeg), int(a2xend), int(a2ybeg), int(a2yend))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Run the imaging func, show the source model, dirty beam, dirty map, clean map and corresponding parameter info\")\n parser.add_argument('-c',\n '--config',\n default='config_img.ini',\n help='Specify the configuration file')\n parser.add_argument('-u',\n '--uv_file',\n default=\"\",\n help=\"Load your own u,v data instead of configuring the obs parameters (under ./)\")\n parser.add_argument('-p',\n '--group_img',\n action=\"store_true\",\n help=\"To save 4 imgs in a single one or separately\"\n )\n parser.add_argument('-g',\n '--show_img',\n action=\"store_true\",\n help='Choose to show GUI or not')\n parser.add_argument('-i',\n '--show_info',\n action=\"store_true\",\n help='Choose to show beam size, position angle, 
dynamic range and rms noise', )\n parser.add_argument('-f',\n '--img_fmt',\n choices=['eps', 'png', 'pdf', 'svg', 'ps'],\n help='Specify the img format (default:pdf)',\n default='pdf')\n # parser.add_argument('-m',\n # '--color_map',\n # choices=['viridis', 'hot', 'jet', 'rainbow', 'Greys', 'cool', 'nipy_spectral'],\n # help='Specify the color map',\n # default='viridis')\n\n return parser.parse_args()\n\n\ndef run_img():\n # 1.initialize parse and config objects\n args = parse_args()\n # for test in ide\n # args.show_img = True\n # args.group_img = True\n # args.show_info = True\n\n if args.config != '':\n my_config_parser = ImgConfigParser(args.config)\n else:\n my_config_parser = ImgConfigParser()\n\n # 2. show-image parameters\n # colormap = 'viridis'\n # if args.color_map in ['viridis', 'hot', 'jet', 'rainbow', 'Greys', 'cool', 'nipy_spectral']:\n # colormap = args.color_map\n colormap = my_config_parser.color_map_name\n gamma = 0.3\n set_clean_window = True\n # norm = mpl.colors.Normalize(vmin=0, vmax=1)\n\n # 3. results data\n data_u, data_v = [], []\n max_uv = 0\n data_img_src, data_img_bm, data_img_map, data_img_cbm, data_img_cmap = 0, 0, 0, 0, 0\n data_img_range = 0\n\n # 4. u,v\n use_uv_file = False\n uv_file_path = ''\n if args.uv_file != \"\":\n uv_file_name = args.uv_file\n uv_file_path = os.path.join(os.getcwd(), uv_file_name)\n if os.path.exists(uv_file_path):\n use_uv_file = True\n\n if use_uv_file:\n read_in = np.loadtxt(uv_file_path, dtype=np.float32)\n # my Func_uv.py will save u,v data in row fashion\n data_u, data_v = read_in[0], read_in[1]\n max_uv = max(np.max(np.abs(data_u)), np.max(np.abs(data_v)))\n else:\n start_time = ut.time_2_mjd(*my_config_parser.time_start, 0)\n stop_time = ut.time_2_mjd(*my_config_parser.time_end, 0)\n time_step = ut.time_2_day(*my_config_parser.time_step)\n cutoff_dict = {\"flag\": lc.cutoff_mode[\"flag\"], \"CutAngle\": my_config_parser.cutoff_angle}\n myFuncUV = FuncUv(start_time, stop_time, time_step,\n my_config_parser.pos_mat_src[0],\n my_config_parser.pos_mat_src,\n my_config_parser.pos_mat_sat,\n my_config_parser.pos_mat_vlbi,\n my_config_parser.pos_mat_telemetry,\n my_config_parser.obs_freq,\n my_config_parser.baseline_type,\n my_config_parser.unit_flag,\n cutoff_dict,\n my_config_parser.precession_mode\n )\n data_u, data_v, max_uv = myFuncUV.get_result_single_uv_with_update()\n\n # 5. img calculation\n if len(data_u) == 0 or len(data_v) == 0:\n print(\"U,V data is not properly configured!\")\n return\n # 5.1 initialize FuncImg object\n myFuncImg = FuncImg(my_config_parser.source_model,\n my_config_parser.n_pix,\n data_u, data_v, max_uv,\n my_config_parser.obs_freq,\n set_clean_window,\n my_config_parser.clean_gain,\n my_config_parser.clean_threshold,\n my_config_parser.clean_niter,\n my_config_parser.unit_flag)\n # 5.2 src model\n data_img_src, data_img_range = myFuncImg.get_result_src_model_with_update()\n # 5.3 dirty beam\n data_img_bm = myFuncImg.get_result_dirty_beam_with_update()\n # 5.4 dirty map\n data_img_map = myFuncImg.get_result_dirty_map_with_update()\n # 5.5 clean map, resual map, clean beam\n data_img_cmap, data_img_res, data_pure_point, data_img_cbm = myFuncImg.get_result_clean_map_with_update()\n data_img_range = myFuncImg.get_result_img_range()\n show_range = data_img_range // 2\n\n # 7. show parameter info\n if args.show_info:\n print(myFuncImg.show_result_para_cal())\n\n # 8. 
Imaging\n img_type = 'pdf'\n if args.img_fmt in ['eps', 'png', 'pdf', 'svg', 'ps']:\n img_type = args.img_fmt\n # 8.1 specify img type and output directory\n img_out_path = os.path.join(os.path.join(os.getcwd(), 'OUTPUT'), 'imaging')\n path_time_str = time.asctime()\n path_save_uv = os.path.join(img_out_path, \"uv-{}.{}\".format(path_time_str, img_type))\n path_save_bm = os.path.join(img_out_path, \"dirty-beam-{}.{}\".format(path_time_str, img_type))\n path_save_cbm = os.path.join(img_out_path, \"clean-beam-{}.{}\".format(path_time_str, img_type))\n path_save_src = os.path.join(img_out_path, \"src-model-{}.{}\".format(path_time_str, img_type))\n path_save_map = os.path.join(img_out_path, \"dirty-map-{}.{}\".format(path_time_str, img_type))\n path_save_cmap = os.path.join(img_out_path,\"clean-map-{}.{}\".format(path_time_str, img_type))\n path_save_integrate = os.path.join(img_out_path,\"Integrated-all-{}.{}\".format(path_time_str, img_type))\n\n # 8.2 draw imgs\n if args.group_img:\n figs = plt.figure(figsize=(8, 4))\n # 1) u,v\n fig_uv = figs.add_subplot(231, aspect='equal')\n x = np.array(data_u)\n y = np.array(data_v)\n max_range = max_uv * 1.1\n fig_uv.scatter(x, y, s=1, marker='.', color='brown')\n fig_uv.set_xlim([-max_range, max_range])\n fig_uv.set_ylim([-max_range, max_range])\n fig_uv.set_title(\"UV Plot: %s\" % my_config_parser.str_source[0])\n if my_config_parser.unit_flag == 'km':\n fig_uv.set_xlabel(\"u$(km)$\")\n fig_uv.set_ylabel(\"v$(km)$\")\n else:\n fig_uv.set_xlabel(\"u$(\\lambda)$\")\n fig_uv.set_ylabel(\"v$(\\lambda)$\")\n fig_uv.grid()\n # set science\n fig_uv.yaxis.get_major_formatter().set_powerlimits((0, 1))\n fig_uv.xaxis.get_major_formatter().set_powerlimits((0, 1))\n\n # 2) dirty beam\n fig_bm = figs.add_subplot(232, aspect='equal')\n plot_beam = fig_bm.imshow(data_img_bm, origin='lower', aspect='equal', picker=True, interpolation='nearest', cmap=colormap, norm=norm)\n plt.setp(plot_beam, extent=(-show_range, show_range, -show_range, show_range))\n fig_bm.set_xlabel('Relative RA (mas)')\n fig_bm.set_ylabel('Relative DEC (mas)')\n fig_bm.set_title('DIRTY BEAM')\n\n # 3) clean beam\n fig_cbm = figs.add_subplot(233, aspect='equal')\n plot_cbeam = fig_cbm.imshow(data_img_cbm, origin='lower', aspect='equal', picker=True, interpolation='nearest', cmap=colormap, norm=norm)\n plt.setp(plot_cbeam, extent=(-show_range, show_range, -show_range, show_range))\n fig_cbm.set_xlabel('Relative RA (mas)')\n fig_cbm.set_ylabel('Relative DEC (mas)')\n fig_cbm.set_title('CLEAN BEAM')\n\n figs.colorbar(plot_cbeam, shrink=0.9)\n\n # 4) src model\n fig_model = figs.add_subplot(234, aspect='equal')\n plot_model = fig_model.imshow(np.power(data_img_src, gamma), origin='lower', aspect='equal', picker=True, cmap=colormap, norm=norm)\n plt.setp(plot_model, extent=(-show_range, show_range, -show_range, show_range))\n fig_model.set_xlabel('Relative RA (mas)')\n fig_model.set_ylabel('Relative DEC (mas)')\n fig_model.set_title('MODEL IMAGE')\n\n # 5) dirty map\n fig_map = figs.add_subplot(235, aspect='equal')\n plot_map = fig_map.imshow(data_img_map, origin='lower', aspect='equal', cmap=colormap, norm=norm)\n plt.setp(plot_map, extent=(-show_range, show_range, -show_range, show_range))\n fig_map.set_xlabel('Relative RA (mas)')\n fig_map.set_ylabel('Relative DEC (mas)')\n fig_map.set_title('DIRTY IMAGE')\n\n # 6) clean map\n fig_cmap = figs.add_subplot(236, aspect='equal')\n plot_cmap = fig_cmap.imshow(data_img_cmap, origin='lower', aspect='equal',picker=True, interpolation='nearest', 
cmap=colormap, norm=norm)\n plt.setp(plot_cmap, extent=(-show_range, show_range, -show_range, show_range))\n fig_cmap.set_xlabel('Relative RA (mas)')\n fig_cmap.set_ylabel('Relative DEC (mas)')\n fig_cmap.set_title('CLEAN IMAGE')\n figs.colorbar(plot_cmap, shrink=0.9)\n\n figs.tight_layout()\n plt.savefig(path_save_integrate)\n else:\n # 1) u,v\n fig1 = plt.figure(figsize=(4, 4))\n fig_uv = fig1.add_subplot(111, aspect='equal')\n x = np.array(data_u)\n y = np.array(data_v)\n max_range = max_uv * 1.1\n fig_uv.scatter(x, y, s=1, marker='.', color='brown')\n fig_uv.set_xlim([-max_range, max_range])\n fig_uv.set_ylim([-max_range, max_range])\n fig_uv.set_title(\"UV Plot: %s\" % my_config_parser.str_source[0])\n if my_config_parser.unit_flag == 'km':\n fig_uv.set_xlabel(\"u$(km)$\")\n fig_uv.set_ylabel(\"v$(km)$\")\n else:\n fig_uv.set_xlabel(\"u$(\\lambda)$\")\n fig_uv.set_ylabel(\"v$(\\lambda)$\")\n fig_uv.grid()\n # set science\n fig_uv.yaxis.get_major_formatter().set_powerlimits((0, 1))\n fig_uv.xaxis.get_major_formatter().set_powerlimits((0, 1))\n # save uv\n plt.savefig(path_save_uv)\n\n # 2) dirty beam\n fig2 = plt.figure(figsize=(4, 4))\n fig_bm = fig2.add_subplot(111, aspect='equal')\n # plot_beam = fig_bm.imshow(data_img_bm, origin='lower', aspect='equal', vmin=-0, vmax=1.0, cmap=colormap)\n # plot_beam = fig_bm.imshow(data_img_bm, picker=True, cmap=colormap, norm=norm) # interpolation='nearest',\n plot_beam = fig_bm.imshow(data_img_bm, origin='lower', aspect='equal', cmap=colormap, norm=norm)\n plt.setp(plot_beam, extent=(-show_range, show_range, -show_range, show_range))\n fig_bm.set_xlabel('Relative RA (mas)')\n fig_bm.set_ylabel('Relative DEC (mas)')\n fig_bm.set_title('DIRTY BEAM')\n fig2.colorbar(plot_beam, shrink=0.9)\n plt.savefig(path_save_bm)\n\n # 3) clean beam\n fig3 = plt.figure(figsize=(4, 4))\n fig_cbm = fig3.add_subplot(111, aspect='equal')\n plot_cbeam = fig_cbm.imshow(data_img_cbm, origin='lower', aspect='equal',picker=True, interpolation='nearest', cmap=colormap, norm=norm)\n plt.setp(plot_cbeam, extent=(-show_range, show_range, -show_range, show_range))\n fig_cbm.set_xlabel('Relative RA (mas)')\n fig_cbm.set_ylabel('Relative DEC (mas)')\n fig_cbm.set_title('CLEAN BEAM')\n fig3.colorbar(plot_cbeam, shrink=0.9)\n plt.savefig(path_save_cbm)\n\n # 4) src model\n fig4 = plt.figure(figsize=(4, 4))\n fig_model = fig4.add_subplot(111, aspect='equal')\n plot_model = fig_model.imshow(np.power(data_img_src, gamma), origin='lower', aspect='equal',picker=True, cmap=colormap, norm=norm)\n plt.setp(plot_model, extent=(-show_range, show_range, -show_range, show_range))\n fig_model.set_xlabel('Relative RA (mas)')\n fig_model.set_ylabel('Relative DEC (mas)')\n fig_model.set_title('MODEL IMAGE')\n fig4.colorbar(plot_model, shrink=0.9)\n plt.savefig(path_save_src)\n\n # 5) dirty map\n fig5 = plt.figure(figsize=(4, 4))\n fig_map = fig5.add_subplot(111, aspect='equal')\n plot_map = fig_map.imshow(data_img_map, origin='lower', aspect='equal', cmap=colormap, norm=norm)\n plt.setp(plot_map, extent=(-show_range, show_range, -show_range, show_range))\n fig_map.set_xlabel('Relative RA (mas)')\n fig_map.set_ylabel('Relative DEC (mas)')\n fig_map.set_title('DIRTY IMAGE')\n fig5.colorbar(plot_map, shrink=0.9)\n plt.savefig(path_save_map)\n\n # 6) clean map\n fig6 = plt.figure(figsize=(4, 4))\n fig_cmap = fig6.add_subplot(111, aspect='equal')\n plot_cmap = fig_cmap.imshow(data_img_cmap, origin='lower', aspect='equal', picker=True, interpolation='nearest', cmap=colormap, norm=norm)\n 
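# setp rescales the image extent from pixel indices to milliarcseconds so\n        # the clean map shares the same angular axes as the other panels.\n        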
plt.setp(plot_cmap, extent=(-show_range, show_range, -show_range, show_range))\n fig_cmap.set_xlabel('Relative RA (mas)')\n fig_cmap.set_ylabel('Relative DEC (mas)')\n fig_cmap.set_title('CLEAN IMAGE')\n fig6.colorbar(plot_cmap, shrink=0.9)\n plt.savefig(path_save_cmap)\n\n if args.show_img:\n plt.show()\n\n\nif __name__ == \"__main__\":\n run_img()\n","repo_name":"ZhenZHAO/VNSIM","sub_path":"Func_img.py","file_name":"Func_img.py","file_ext":"py","file_size_in_byte":43443,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"28"} +{"seq_id":"43106744987","text":"from sys import version_info\n\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nimport matplotlib.pyplot as plt\n\nfrom python.Plot_RSTs.Plot_RSTs import PlotRSTs\nimport python.Plot_RSTs.GUI_constants as const_GUI\n\nmatplotlib.use('TkAgg')\n\nif version_info[0] < 3:\n import Tkinter as tk\n import tkFont\nelse:\n import tkinter as tk\n import tkinter.font as tkFont\n\nclass plot_RST_GUI:\n def __init__(self, master):\n # create a custom font\n self.customFont = tkFont.Font(family=\"Helvetica\", size=const_GUI.default_font_size)\n\n self.master = master\n self.master.title(const_GUI.title)\n self.master.rowconfigure(1, weight=1)\n\n # Define the GUI frames\n self.frame_data_options = tk.Frame(self.master)\n self.frame_general_attributes = tk.Frame(self.master)\n self.frame_display_options = tk.Frame(self.master)\n self.frame_map = tk.Frame(self.master)\n self.frame_nav_toolbar = tk.Frame(self.master)\n\n # Define the GUI frame's layout\n self.frame_data_options.grid(row=0, column=0, sticky=tk.N, padx=5, pady=5)\n self.frame_general_attributes.grid(row=0, column=1, sticky=tk.N, padx=5, pady=5)\n self.frame_display_options.grid(row=0, column=2, sticky=tk.N, padx=5, pady=5)\n self.frame_map.grid(row=1, columnspan=4)\n self.frame_map.rowconfigure(0, weight=1)\n self.frame_nav_toolbar.grid(row=2, columnspan=2)\n\n # Define the data options widgets\n self.data_options_label = tk.Label(self.frame_data_options, text=const_GUI.data_options_label, font=self.customFont)\n # Choose NCEP or ERA Interim\n self.model_data_label = tk.Label(self.frame_data_options, text=const_GUI.model_data_label, font=self.customFont)\n self.model_data_list = const_GUI.models_list\n self.model_data_var = tk.StringVar()\n self.model_data_var.set(const_GUI.default_model_data)\n self.model_data_entry = tk.OptionMenu(self.frame_data_options, self.model_data_var, *self.model_data_list)\n self.model_data_entry.config(font=self.customFont)\n\n # Choose Geostrophic Vorticity or just Vorticity\n self.data_to_map_label = tk.Label(self.frame_data_options, text=const_GUI.data_to_map_label, font=self.customFont)\n self.data_to_map_list = const_GUI.data_to_map_list\n self.data_to_map_var = tk.StringVar()\n self.data_to_map_var.set(const_GUI.default_data_to_map)\n self.data_to_map_entry = tk.OptionMenu(self.frame_data_options, self.data_to_map_var, *self.data_to_map_list)\n self.data_to_map_entry.config(font=self.customFont)\n\n # 'Use interpolation'\n self.use_interpolation = tk.IntVar()\n self.use_interpolation.set(const_GUI.default_use_interpolation)\n self.checkbutton1_data_options = tk.Checkbutton(self.frame_data_options,\n text=const_GUI.data_options_1,\n variable=self.use_interpolation,\n font=self.customFont)\n # 'Show troughs/ridges dots'\n self.show_dots = tk.IntVar()\n self.show_dots.set(const_GUI.default_show_dots)\n self.checkbutton2_data_options = 
tk.Checkbutton(self.frame_data_options,\n text=const_GUI.data_options_2,\n variable=self.show_dots,\n font=self.customFont)\n # 'Show RST info'\n self.show_rst_info = tk.IntVar()\n self.show_rst_info.set(const_GUI.default_show_rst_info)\n self.checkbutton3_data_options = tk.Checkbutton(self.frame_data_options,\n text=const_GUI.data_options_3,\n variable=self.show_rst_info,\n font=self.customFont)\n # 'Polyfit RST'\n self.polyfit_rst = tk.IntVar()\n self.polyfit_rst.set(const_GUI.default_polyfit_rst)\n self.checkbutton4_data_options = tk.Checkbutton(self.frame_data_options,\n text=const_GUI.data_options_4,\n variable=self.polyfit_rst,\n font=self.customFont)\n\n # 'Show only longest separate'\n self.only_longest_separate = tk.IntVar()\n self.only_longest_separate.set(const_GUI.default_only_longest_separate)\n self.checkbutton5_data_options = tk.Checkbutton(self.frame_data_options,\n text=const_GUI.data_options_5,\n variable=self.only_longest_separate,\n font=self.customFont)\n\n # Define the data options widget's layout (1 = 'Use interpolation', 2 = 'Show vorticity', 3 = 'Show geostrophic vorticity',\n # 4 = 'Show troughs/ridges dots', 5 = 'Show RST info')\n self.data_options_label.grid(row=0, column=0, columnspan=2)\n self.model_data_label.grid(row=1, column=0, sticky=tk.W)\n self.model_data_entry.grid(row=2, column=0, sticky=tk.W)\n self.data_to_map_label.grid(row=3, column=0, sticky=tk.W)\n self.data_to_map_entry.grid(row=4, column=0, sticky=tk.W)\n self.checkbutton1_data_options.grid(row=1, column=1, sticky=tk.W)\n self.checkbutton2_data_options.grid(row=2, column=1, sticky=tk.W)\n self.checkbutton3_data_options.grid(row=3, column=1, sticky=tk.W)\n self.checkbutton4_data_options.grid(row=4, column=1, sticky=tk.W)\n self.checkbutton5_data_options.grid(row=5, column=1, sticky=tk.W)\n\n # Define the general attributes widgets\n self.date_label = tk.Label(self.frame_general_attributes, text=const_GUI.date_label, font=self.customFont)\n self.year_label = tk.Label(self.frame_general_attributes, text=const_GUI.year_label, font=self.customFont)\n self.month_label = tk.Label(self.frame_general_attributes, text=const_GUI.month_label, font=self.customFont)\n self.day_label = tk.Label(self.frame_general_attributes, text=const_GUI.day_label, font=self.customFont)\n self.year_list = [str(x) for x in range(1979, 2017)]\n #self.year_list = [\"1979\", \"1985\", \"1994\"]\n self.year_var = tk.StringVar()\n self.year_var.set(const_GUI.default_year)\n self.year_entry = tk.OptionMenu(self.frame_general_attributes, self.year_var, *self.year_list)\n self.year_entry.config(font=self.customFont)\n # self.year_entry.configure(state=\"disabled\")\n self.month_list = [\"%02d\" % x for x in range(1, 13)]\n self.month_var = tk.StringVar()\n self.month_var.set(const_GUI.default_month)\n self.month_entry = tk.OptionMenu(self.frame_general_attributes, self.month_var, *self.month_list)\n self.month_entry.config(font=self.customFont)\n self.day_list = [\"%02d\" % x for x in range(1, 32)]\n self.day_var = tk.StringVar()\n self.day_var.set(const_GUI.default_day)\n self.day_entry = tk.OptionMenu(self.frame_general_attributes, self.day_var, *self.day_list)\n self.day_entry.config(font=self.customFont)\n\n self.prev_day_button = tk.Button(self.frame_general_attributes,\n text=const_GUI.prev_day_button_text,\n command=self.show_prev_day,\n width=13,\n font=self.customFont)\n self.next_day_button = tk.Button(self.frame_general_attributes,\n text=const_GUI.next_day_button_text,\n command=self.show_next_day,\n width=13,\n 
font=self.customFont)\n\n\n self.files_path_label = tk.Label(self.frame_general_attributes, text=const_GUI.files_path_label, font=self.customFont)\n self.files_path_var = tk.StringVar()\n self.files_path_var.set(const_GUI.default_file_path)\n self.files_path_entry = tk.Entry(self.frame_general_attributes, textvariable=self.files_path_var, font=self.customFont)\n\n self.detached_map = tk.IntVar()\n self.detached_map.set(0)\n self.detached_map_checkbutton = tk.Checkbutton(self.frame_general_attributes,\n variable=self.detached_map,\n text=const_GUI.detached_map_checkbutton,\n font=self.customFont)\n\n self.draw_button = tk.Button(self.frame_general_attributes,\n text=const_GUI.draw_button_text,\n command=self.draw_map,\n font=self.customFont)\n\n # Define the general attributes layout\n self.files_path_label.grid(row=0, column=3, padx=10)\n self.files_path_entry.grid(row=1, column=3, padx=10)\n\n self.date_label.grid(row=0, column=0, columnspan=3)\n self.year_label.grid(row=1, column=0)\n self.month_label.grid(row=1, column=1)\n self.day_label.grid(row=1, column=2)\n self.year_entry.grid(row=2, column=0)\n self.month_entry.grid(row=2, column=1)\n self.day_entry.grid(row=2, column=2)\n\n self.prev_day_button.grid(row=4,column=0)\n self.next_day_button.grid(row=4,column=2)\n\n self.detached_map_checkbutton.grid(row=3, column=3)\n\n self.draw_button.grid(row=5, column=3)\n\n # Define display options widgets\n # Buttons to adjust the font\n self.font_size_label = tk.Label(self.frame_display_options, text=const_GUI.font_size_label, font=self.customFont)\n self.smaller_font = tk.Button(self.frame_display_options,\n text=\"-\",\n command=self.contract_font,\n width=3,\n font=self.customFont)\n self.default_font = tk.Button(self.frame_display_options,\n text=\"Default\",\n command=self.make_default_font_size,\n font=self.customFont)\n self.bigger_font = tk.Button(self.frame_display_options,\n text=\"+\",\n command=self.enlarge_font,\n width=3,\n font=self.customFont)\n\n self.choose_cb_label = tk.Label(self.frame_display_options, text=const_GUI.choose_cb_label, font=self.customFont)\n self.cb_list = const_GUI.cb_list\n self.cb_var = tk.StringVar()\n self.cb_var.set(const_GUI.default_cb)\n self.cb_menu = tk.OptionMenu(self.frame_display_options, self.cb_var, *self.cb_list)\n self.cb_menu.config(font=self.customFont)\n\n # Define the display options layout\n self.font_size_label.grid(row=0, column=0, columnspan=3)\n self.smaller_font.grid(row=1, column=0)\n self.default_font.grid(row=1, column=1)\n self.bigger_font.grid(row=1, column=2)\n\n self.choose_cb_label.grid(row=2, column=0, columnspan=3)\n self.cb_menu.grid(row=3, column=0, columnspan=3)\n\n # Initialize the plotRSTs objects according to the default year\n self.current_year = const_GUI.default_year\n self.plotRSTs_NCEP_instance = PlotRSTs('NCEP', self.current_year)\n self.plotRSTs_ERA_instance = PlotRSTs('ERA_Interim', self.current_year)\n self.plotRSTs_ERA_25_instance = PlotRSTs('ERA Int 2.5', self.current_year)\n\n root.mainloop()\n\n def show_prev_day(self):\n current_day = self.year_var.get() + \"-\" + self.month_var.get() + \"-\" + self.day_var.get() + \" 12:00:00\"\n # Get the previous datetime object date from the plotRSTS instance\n prev_date,_ = self.plotRSTs_NCEP_instance.get_next_and_prev_days(current_day)\n\n if prev_date:\n prev_day_year = str(prev_date)[0:4]\n prev_day_month = str(prev_date)[5:7]\n prev_day_day = str(prev_date)[8:10]\n self.year_var.set(prev_day_year)\n self.month_var.set(prev_day_month)\n 
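# the day is updated last so that all three dropdowns hold the new date\n            # before draw_map() re-renders the map below.\n            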
self.day_var.set(prev_day_day)\n\n self.draw_map()\n\n def show_next_day(self):\n current_day = self.year_var.get() + \"-\" + self.month_var.get() + \"-\" + self.day_var.get() + \" 12:00:00\"\n # Get the next string date from the plotRSTS instance\n _, next_date = self.plotRSTs_NCEP_instance.get_next_and_prev_days(current_day)\n\n if next_date:\n next_day_year = str(next_date)[0:4]\n next_day_month = str(next_date)[5:7]\n next_day_day = str(next_date)[8:10]\n self.year_var.set(next_day_year)\n self.month_var.set(next_day_month)\n self.day_var.set(next_day_day)\n\n self.draw_map()\n\n def enlarge_font(self):\n '''Make the font 2 points bigger'''\n size = self.customFont['size']\n self.customFont.configure(size=size + 2)\n\n def contract_font(self):\n '''Make the font 2 points smaller'''\n size = self.customFont['size']\n self.customFont.configure(size=size - 2)\n\n def make_default_font_size(self):\n self.customFont.configure(size=const_GUI.default_font_size)\n\n def draw_map(self):\n if self.year_var.get() != self.current_year:\n # Replace the plotRSTs objects according to the current year\n self.current_year = self.year_var.get()\n self.plotRSTs_NCEP_instance = PlotRSTs('NCEP', self.current_year)\n self.plotRSTs_ERA_instance = PlotRSTs('ERA_Interim', self.current_year)\n self.plotRSTs_ERA_25_instance = PlotRSTs('ERA Int 2.5', self.current_year)\n\n # The first part is completely done by matplotlib, and then transferred to Tkinter\n current_day = self.year_var.get() + \"-\" + self.month_var.get() + \"-\" + self.day_var.get() + \" 12:00:00\"\n\n map_figure, map_axis = plt.subplots()\n map_figure.set_figheight(8)\n map_figure.set_figwidth(7)\n\n if self.model_data_var.get() == const_GUI.models_list[0]:\n # Plot the NCEP model data\n self.plotRSTs_NCEP_instance.calculate_maps_data(current_day,\n use_interpolation=self.use_interpolation.get(),\n data_to_map=self.data_to_map_var.get(),\n show_dots=self.show_dots.get(),\n only_longest_separate=self.only_longest_separate.get(),\n polyfit_rst=self.polyfit_rst.get())\n self.plotRSTs_NCEP_instance.create_map(map_axis,\n show_rst_info=self.show_rst_info.get(),\n req_colormap=self.cb_var.get())\n elif self.model_data_var.get() == const_GUI.models_list[1]:\n # Plot the ERA Interim model data\n self.plotRSTs_ERA_instance.calculate_maps_data(current_day,\n use_interpolation=self.use_interpolation.get(),\n data_to_map=self.data_to_map_var.get(),\n show_dots=self.show_dots.get(),\n only_longest_separate=self.only_longest_separate.get(),\n polyfit_rst=self.polyfit_rst.get())\n self.plotRSTs_ERA_instance.create_map(map_axis,\n show_rst_info=self.show_rst_info.get(),\n req_colormap=self.cb_var.get())\n\n elif self.model_data_var.get() == const_GUI.models_list[2]:\n # Plot the ERA Interim 2.5 degrees model data\n self.plotRSTs_ERA_25_instance.calculate_maps_data(current_day,\n use_interpolation=self.use_interpolation.get(),\n data_to_map=self.data_to_map_var.get(),\n show_dots=self.show_dots.get(),\n only_longest_separate=self.only_longest_separate.get(),\n polyfit_rst=self.polyfit_rst.get())\n self.plotRSTs_ERA_25_instance.create_map(map_axis,\n show_rst_info=self.show_rst_info.get(),\n req_colormap=self.cb_var.get())\n\n if self.detached_map.get() == 0:\n # The map is drawn inside the current GUI\n # Create the tk.DrawingArea\n canvas = FigureCanvasTkAgg(map_figure, master=self.frame_map)\n canvas.draw()\n canvas.get_tk_widget().grid(row=0, column=0, sticky=\"nsew\")\n\n # Add the toolbar in a different frame and remove the x,y coords from appearing\n for 
child in self.frame_nav_toolbar.winfo_children():\n                child.destroy()\n            map_axis.format_coord = lambda x, y: ''\n            toolbar = NavigationToolbar2Tk(canvas, self.frame_nav_toolbar)\n            toolbar.update()\n            # canvas._tkcanvas.grid(row=1, column=0)\n        else:\n            # The map is drawn in a separate window\n            main_separate = tk.Tk()\n            main_separate.wm_title(\"Map\") # TODO change this\n\n            # a tk.DrawingArea\n            canvas = FigureCanvasTkAgg(map_figure, master=main_separate)\n            canvas.draw()\n            canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n            toolbar = NavigationToolbar2Tk(canvas, main_separate)\n            toolbar.update()\n            canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n\nroot = tk.Tk()\nmy_gui = plot_RST_GUI(root)\nroot.mainloop()\n","repo_name":"hatzvika/RSTs","sub_path":"python/Plot_RSTs/plot_RST_GUI.py","file_name":"plot_RST_GUI.py","file_ext":"py","file_size_in_byte":18683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"1737420429","text":"from django.conf.urls import url,include\nfrom . import views\nfrom django.contrib.auth.views import logout,login\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\n\nurlpatterns=[\n\n    url(r'acceuil/$', views.acceuil, name='acceuil'),\n    url(r'login/$',login,{'template_name':'accounts/login.html'},name='login'),\n    url(r'home/$',views.home,name='home'),\n    url(r'logout/$', logout, {'template_name': 'accounts/logout.html'},name='logout'),\n    url(r'profile/$', views.view_profile, name='profile'),\n    url(r'^change-password/$', views.change_password, name='change_password'),\n    url(r'^scolarite/$', views.Scolarite, name='scolarite'),\n    url(r'^mark-as-read/$', views.mark_all_as_read, name='mark_as_read'),\n    url(r'^service_secretariat/$',views.service_secretariat,name=\"service_secretariat\"),\n    url(r'^notify_chef/$',views.notify_chef,name=\"notify_chef\"),\n    url(r'^notify_Adjoint/$',views.notify_Adjoint,name=\"notify_Adjoint\"),\n    url(r'^notify_scolarite/$', views.notify_scolarite, name=\"notify_scolarite\"),\n    url(r'^notify_res/$',views.notify_responsable,name=\"notify_responsable\"),\n    url(r'^afficher_note/$', views.affiche_note, name=\"afficher_note\"),\n    url(r'^emploi_du_temp_et/$', views.emploi_etude, name=\"emploi_du_temp_et\"),\n    url(r'^emploi_du_temp_en/$', views.emploi_ens, name=\"emp_ens\"),\n    url(r'^emploi_du_temp_et_ex/$', views.emploi_exam_etude, name=\"emploi_du_temp_et_ex\"),\n    url(r'^emploi_du_temp_en_ex/$', views.emploi_exam_en, name=\"emploi_du_temp_en_ex\"),\n    url(r'^consulter_pv/$', views.pv_cpc, name=\"pv_cpc\"),\n    url(r'^pv_filiere/$', views.pv_filiere, name=\"pv_filiere\"),\n    url(r'^pv_l1/$', views.pv_l1, name=\"pv_l1\"),\n    url(r'^pv_l2/$', views.pv_l2, name=\"pv_l2\"),\n    url(r'^pv_l3/$', views.pv_l3, name=\"pv_l3\"),\n    url(r'^pv_m1/$', views.pv_m1, name=\"pv_m1\"),\n    url(r'^pv_m2/$', views.pv_m2, name=\"pv_m2\"),\n    url(r'^liste_l1/$', views.liste_l1, name=\"liste_l1\"),\n    url(r'^liste_l2/$', views.liste_l2, name=\"liste_l2\"),\n    url(r'^liste_l3/$', views.liste_l3, name=\"liste_l3\"),\n    url(r'^liste_m1/$', views.liste_m1, name=\"liste_m1\"),\n    url(r'^liste_m2/$', views.liste_m2, name=\"liste_m2\"),\n    url(r'^liste_parcour/$',views.liste_parcour,name=\"liste_parcour\"),\n    url(r'^Ajouter_etudiant/$', views.save_e, name=\"ajouter_et\"),\n    url(r'^Upload_cpc/$', views.upload_cpc, name=\"upload_cpc\"),\n    url(r'^document_filiere/$', views.document_F, name=\"document_f\"),\n    url(r'^document_scol/$', views.document_scolarite, name=\"document_scol\"),\n    
url(r'^document_PG/$', views.document_pg, name=\"document_pg\"),\n    url(r'^document_chef/$', views.document_chef, name=\"document_chef\"),\n    url(r'^document_peda/$', views.document_peda, name=\"document_peda\"),\n    url(r'^document_et/$', views.document_et, name=\"document_et\"),\n    url(r'^traiter_dossier/$', views.traiter_dossier_chef, name=\"traiter_dossier_chef\"),\n    url(r'^traiter_dossier_pg/$', views.traiter_dossier_pg, name=\"traiter_dossier_pg\"),\n    url(r'^traiter_dossier_scol/$', views.traiter_dossier_scol, name=\"traiter_dossier_scol\"),\n    url(r'^traiter_dossier_f/$', views.traiter_dossier_f, name=\"traiter_dossier_f\"),\n    url(r'^traiter_dossier_peda/$', views.traiter_dossier_peda, name=\"traiter_dossier_peda\"),\n    url(r'^delete/$', views.delete, name=\"delete\"),\n    url(r'^upload_document/$', views.upload_document, name=\"upload_document\"),\n    url(r'^dossier_traiter/$', views.dossier_traiter, name=\"dossier_traiter\"),\n    url(r'^delete_secretaire/$', views.delete_secretariat, name=\"delete_secretaire\"),\n    url(r'^demande_etudiant/$', views.demande_etudiant, name=\"demande_etudiant\"),\n    url(r'^suivi_demande/$', views.suivi_de_demande, name=\"suivi_demande\"),\n    url(r'^matiere/$', views.matiere_en, name=\"matiere_en\"),\n    url(r'^consulter/$', views.consulter, name=\"consulter\"),\n    url(r'^liste_doctorat/$',views.liste_doctorat,name='liste_doctorat'),\n    url(r'^user/$', views.create_user, name='create_user'),\n\n\n]\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"yacine9607/Django-App-university-platforme","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"15857736156","text":"import random\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom psb_learning.projects.models import Project\n\n\nclass Quizz(models.Model):\n    \"\"\"Quiz model\"\"\"\n\n    project: Project = models.ForeignKey(\n        to=Project,\n        on_delete=models.CASCADE,\n        related_name=\"quizzes\",\n        blank=True,\n        null=True,\n        verbose_name=\"проект\"\n    )\n\n    name = models.CharField(\n        max_length=512,\n        verbose_name=\"название\"\n    )\n\n    def get_next_question(self, order: int) -> Optional[int]:\n        \"\"\"Return the id of the next question in the quiz\"\"\"\n        if self.questions.filter(order=order+1):\n            return self.questions.filter(order=order+1).first().pk\n        else:\n            return None\n\n    def get_next_question_order(self) -> int:\n        \"\"\"Return the order value the next question should have\"\"\"\n        if self.questions.exists():\n            last_order = self.questions.order_by(\"-order\").first().order\n            return last_order + 1\n        else:\n            return 1\n\n    def get_option_texts(self, project_attribute_name: str) -> list[str]:\n        \"\"\"Return the unique values of the attribute across other projects\"\"\"\n\n        option_texts = list(\n            Project.objects.values_list(\n                project_attribute_name,\n                flat=True\n            ).distinct()\n        )\n\n        # Remove the correct option's text\n        right_option_text = getattr(self.project, project_attribute_name)\n        option_texts.remove(right_option_text)\n\n        # Pick at most 3 of them at random\n        if len(option_texts) > 3:\n            return random.sample(option_texts, 3)\n        else:\n            return option_texts\n\n    def generate_questions_and_options(self) -> None:\n        \"\"\"Generate questions and answer options for the quiz\n        from various attributes of the project\"\"\"\n\n        attributes_and_question_texts = {\n            
\"goals\": \"Какая из перечисленных ниже задач относится к твоему проекту?\",\n \"project_type\": \"Какая методология используется в твоём проекте?\",\n \"technologies\": \"Какой технологический стек твоего проекта?\",\n \"events\": \"Какие обязательные мероприятия есть в твоём проекте?\"\n }\n\n for attribute, question_text in attributes_and_question_texts.items():\n self.generate_question_and_options(attribute, question_text)\n\n def generate_question_and_options(self,\n project_attribute_name: str,\n question_text: str\n ) -> None:\n \"\"\"Создаёт вопрос и варианты ответа с данным текстом по\n данному атрибуту проекта, исходя из значений этого\n атрибута в других проектах\"\"\"\n try:\n if getattr(self.project, project_attribute_name):\n right_option_text = getattr(self.project, project_attribute_name)\n\n # Создаём вопрос\n question = Question.objects.create(\n quizz=self,\n text=question_text,\n order=self.get_next_question_order()\n )\n\n # Получаем уникальные значения атрибута с других проектов\n option_texts = self.get_option_texts(project_attribute_name)\n\n # Создаём варианты ответа\n Option.objects.create(\n question=question,\n text=right_option_text,\n is_correct=True\n )\n for text in option_texts:\n Option.objects.create(\n question=question,\n text=text,\n is_correct=False\n )\n return None\n else:\n # Поле не заполнено у текущего проекта\n return None\n except AttributeError:\n return None\n\n def __str__(self) -> str:\n return f\"{self.name}\"\n\n class Meta:\n verbose_name = \"викторина\"\n verbose_name_plural = \"викторины\"\n\n\nclass Question(models.Model):\n \"\"\"Модель вопроса в викторине\"\"\"\n\n text = models.CharField(\n max_length=512,\n verbose_name=\"содержание вопроса\"\n )\n\n quizz: Quizz = models.ForeignKey(\n to=Quizz,\n on_delete=models.CASCADE,\n related_name=\"questions\",\n verbose_name=\"викторина\"\n )\n\n correct_info = models.TextField(\n blank=True,\n verbose_name=\"информация при правильном ответе\"\n )\n\n incorrect_info = models.TextField(\n blank=True,\n verbose_name=\"информация при неправильном ответе\"\n )\n\n order = models.SmallIntegerField(\n null=True,\n blank=True,\n verbose_name=\"порядок\"\n )\n\n rating_change = models.SmallIntegerField(\n default=5,\n verbose_name=\"рейтинг за вопрос\"\n )\n\n def check_answer(self, answer: \"Option\", user: get_user_model()) -> bool:\n \"\"\"Проверяет правильность ответа.\n При правильном ответе увеличивает рейтинг пользователя\"\"\"\n if answer.question == self and answer.is_correct:\n user.increase_rating(self.rating_change)\n return True\n else:\n return False\n\n def get_info(self, correct: bool) -> str:\n \"\"\"Возвращает информацию в зависимости от выбранного ответа\"\"\"\n return self.correct_info if correct else self.incorrect_info\n\n def get_rating_change(self, correct: bool) -> str:\n \"\"\"Возвращает изменение рейтинга в зависимости от выбранного ответа\"\"\"\n return f\"+{self.rating_change}\" if correct else \"0\"\n\n def __str__(self) -> str:\n return f\"{self.text}\"\n\n class Meta:\n verbose_name = \"вопрос\"\n verbose_name_plural = \"вопросы\"\n\n\nclass Option(models.Model):\n \"\"\"Модель варианта ответа на вопрос в викторине\"\"\"\n\n text = models.CharField(\n max_length=512,\n verbose_name=\"содержание вопроса\"\n )\n\n question = models.ForeignKey(\n to=Question,\n on_delete=models.CASCADE,\n related_name=\"options\",\n verbose_name=\"вопрос\"\n )\n\n is_correct = models.BooleanField(\n default=False,\n verbose_name=\"правильный вариант?\"\n )\n\n def __str__(self) -> 
str:\n return f\"{self.text}\"\n\n class Meta:\n verbose_name = \"вариант ответа\"\n verbose_name_plural = \"варианты ответа\"\n","repo_name":"e-kondr01/psb-case-back","sub_path":"psb_learning/testing/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"44067871060","text":"import argparse\nimport tkinter as tk\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nimport os\nimport sys\n\nsys.path.append(os.getcwd())\n\nfrom model import StyleGan\n\nplt.ioff()\n\nclass StyleGANVisualizer(tk.Tk):\n def __init__(self, weights_path=None, sliders_no=20, gpu=0):\n tk.Tk.__init__(self)\n self.gpu=gpu\n self.sliders_no=sliders_no\n self.configure(bg='white')\n self.GAN = StyleGan(weights_path, gpu)\n self.GAN.reset_latent()\n print(\"\\nStyleGAN loaded.\")\n self.slider_job = None\n self.slider_to_feature_match = {}\n self.feature_index_change = 0\n self.active_slider = 0\n self.samples_plot_frame=None\n\n self.make_latent_space_frame()\n self.make_sample_frame()\n\n def make_latent_space_frame(self):\n self.latent_space_frame = tk.Frame(self, bg='white', relief=tk.RAISED, borderwidth=2)\n self.latent_space_frame_widgets = {}\n self.latent_space_frame_widgets['reset_latent_button'] = tk.Button(self.latent_space_frame,\n text=\"New Latent Vector\",\n command=self.reset_latent_vector)\n self.latent_space_frame_widgets['reset_latent_button'].pack()\n for i in range(self.sliders_no):\n self.slider_to_feature_match[i] = i\n self.latent_space_frame_widgets['slider_' + str(i) + '_frame'] = tk.Frame(self.latent_space_frame,\n bg='white', relief=tk.RAISED,\n borderwidth=2)\n self.latent_space_frame_widgets.update(\n self.make_latent_vector_slider(self.latent_space_frame_widgets['slider_' + str(i) + '_frame'], i, i))\n self.latent_space_frame_widgets['slider_' + str(i) + '_frame'].pack()\n self.latent_space_frame.pack(side=tk.LEFT)\n\n def make_latent_vector_slider(self, parent_frame, slider_no, feature_index):\n label_1 = tk.Label(parent_frame, text=\"Feature No:\", bg='white')\n entry = tk.Entry(parent_frame, width=32)\n entry.insert(tk.END, str(feature_index))\n label_2 = tk.Label(parent_frame, text=\"Feature Value:\", bg='white')\n slider = tk.Scale(parent_frame, from_=-4, to=4, command=self.get_slider_function(feature_index), bg='white',\n orient=tk.HORIZONTAL, length=200, resolution=0.01)\n slider.set(self.get_latent_feature(feature_index))\n button_choose_feature = tk.Button(parent_frame, text=\"Choose Feature\",\n command=self.get_button_cf_function(entry, slider, feature_index))\n button_add = tk.Button(parent_frame, text=\"+\",\n command=self.get_button_add_function(slider))\n button_sub = tk.Button(parent_frame, text=\"-\",\n command=self.get_button_sub_function(slider))\n label_1.pack(side=tk.LEFT)\n entry.pack(side=tk.LEFT)\n button_choose_feature.pack(side=tk.LEFT)\n label_2.pack(side=tk.LEFT)\n slider.pack(side=tk.LEFT)\n button_add.pack(side=tk.LEFT)\n button_sub.pack(side=tk.LEFT)\n return {'slider_' + str(slider_no) + '_label_1': label_1, 'slider_' + str(slider_no) + '_entry': entry,\n 'slider_' + str(slider_no) + '_label_2': label_2, 'slider_' + str(slider_no) + '_slider': slider,\n 'slider_' + str(slider_no) + '_button': button_choose_feature,\n 'slider_' + str(slider_no) + '_add_button': button_add,\n 'slider_' + str(slider_no) + '_sub_button': button_sub}\n\n def 
get_button_cf_function(self, entry, slider, feature_index):\n def button_cf_function():\n v = int(round(float(entry.get())))\n if v >= 512 or v < 0:\n entry.delete(0, tk.END)\n entry.insert(tk.END, \"Error: Must be between 0 and 511\")\n else:\n self.slider_to_feature_match[feature_index] = v\n slider.set(self.get_latent_feature(v))\n\n return button_cf_function\n\n def get_button_add_function(self, slider):\n def button_add_function():\n v = slider.get()\n slider.set(v + 0.01)\n return button_add_function\n\n def get_button_sub_function(self, slider):\n def button_sub_function():\n v = slider.get()\n slider.set(v - 0.01)\n return button_sub_function\n\n def get_slider_function(self, feature_index):\n def slider_function(event):\n if self.slider_job is not None:\n self.after_cancel(self.slider_job)\n self.feature_index_change = self.slider_to_feature_match[feature_index]\n self.active_slider = feature_index\n self.slider_job = self.after(200, self.change_latent_vector_slider)\n\n return slider_function\n\n def change_latent_vector_slider(self):\n self.slider_job = None\n self.GAN.latent[0, self.feature_index_change] = self.latent_space_frame_widgets[\n 'slider_' + str(self.active_slider) + '_slider'].get()\n self.make_sample_frame()\n\n def make_sample_frame(self):\n fig = self.get_sample_plots()\n if self.samples_plot_frame is not None:\n self.samples_plot_frame.destroy()\n self.samples_plot_frame = tk.Frame(self, bg='white', relief=tk.RAISED, borderwidth=2)\n self.samples_plot_widgets = {}\n self.samples_plot_widgets['samples_canvas'] = FigureCanvasTkAgg(fig, master=self.samples_plot_frame)\n self.samples_plot_widgets['samples_canvas'].get_tk_widget().pack()\n self.samples_plot_widgets['samples_toolbar'] = NavigationToolbar2Tk(self.samples_plot_widgets['samples_canvas'], self.samples_plot_frame)\n self.samples_plot_widgets['samples_toolbar'].pack()\n self.samples_plot_frame.pack(side=tk.LEFT)\n\n def reset_latent_vector(self):\n self.GAN.reset_latent()\n for i in range(self.sliders_no):\n f = self.slider_to_feature_match[i]\n self.latent_space_frame_widgets['slider_' + str(i) + '_slider'].set(round(self.GAN.latent[0, f].item(), 2))\n\n def get_latent_feature(self, feature_index):\n return round(self.GAN.latent[0, feature_index].item(), 2)\n\n def get_sample_plots(self):\n img = self.GAN.fix_latent_sample()\n fig = plt.Figure(figsize=(9, 9), constrained_layout=True)\n gridspec = fig.add_gridspec(1)\n subfig=fig.add_subfigure(gridspec[0, :])\n subplot = subfig.add_subplot(1,1,1)\n subplot.imshow(img[0].cpu().numpy())\n return fig\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(conflict_handler='resolve')\n parser.add_argument(\"--weights_path\", default=None, type=str)\n parser.add_argument(\"--sliders_no\", default=15, type=int)\n parser.add_argument(\"--gpu\", default=0, type=int)\n args = parser.parse_known_args()[0]\n\n app = StyleGANVisualizer(args.weights_path, args.sliders_no, args.gpu)\n app.mainloop()\n plt.close()\n","repo_name":"ManiadisG/StyleGAN_exploration_UI","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7260,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"18713784557","text":"from footyhints.score_plugins.high_profile_matchup import HighProfileMatchup\n\nfrom tests.footyhints.unit_test import UnitTest\nfrom web.models import Team, Game\n\n\nclass TestHighProfileMatchup(UnitTest):\n\n def create_tmp_game(self, home_team_name, away_team_name):\n home_team = 
Team(name=home_team_name)\n away_team = Team(name=away_team_name)\n return Game(home_team=home_team, away_team=away_team)\n\n def test_not_enough_games_matchup(self):\n self.create_previous_game(winner=\"Draw\")\n game = self.create_current_game(winner=\"Winner\")\n matchup = HighProfileMatchup()\n score, reason = matchup.score(game)\n assert score == 0\n assert reason == 'Too Early in Season'\n\n def test_high_profile_matchup(self):\n self.create_previous_game(winner=\"Draw\")\n self.create_previous_game(winner=\"Draw\")\n self.create_previous_game(winner=\"Draw\")\n self.create_previous_game(winner=\"Draw\")\n game = self.create_current_game(winner=\"Winner\")\n matchup = HighProfileMatchup()\n score, reason = matchup.score(game)\n assert score == 100\n assert reason == 'Top 6 matchup (1 v 2)'\n","repo_name":"pwnbus/footyhints","sub_path":"tests/footyhints/score_plugins/test_high_profile_matchup.py","file_name":"test_high_profile_matchup.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"74996378633","text":"import os \nimport tensorflow as tf \nfrom metrics.segmentation_metrics import dice_coeff, bce_dice_loss, IoU, zero_IoU, dice_loss\nfrom dataloader.dataloader import build_augmenter, build_dataset, build_decoder\nfrom tensorflow.keras.utils import get_custom_objects\nfrom model import build_model\nimport cv2 \nimport numpy as np\n# from save_model.best_up_ca import build_model\n# from save_model.ca_best_msf import build_model\nimport matplotlib.pyplot as plt \nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\"\n\n\n\ndef load_dataset(route, img_size = 256):\n BATCH_SIZE = 1\n X_path = '{}/images/'.format(route)\n Y_path = '{}/masks/'.format(route)\n X_full = sorted(os.listdir(f'{route}/images'))\n Y_full = sorted(os.listdir(f'{route}/masks'))\n\n X_train = [X_path + x for x in X_full]\n Y_train = [Y_path + x for x in Y_full]\n\n test_decoder = build_decoder(with_labels=True, target_size=(img_size, img_size), ext='jpg', segment=True, ext2='jpg')\n test_dataset = build_dataset(X_train, Y_train, bsize=BATCH_SIZE, decode_fn=test_decoder, \n augmentAdv=False, augment=False, augmentAdvSeg=False, shuffle = None)\n return test_dataset, len(X_train)\n\ndef predict(model, dataset, len_data, outdir =\"./save_vis/Etis/\"):\n steps_per_epoch = len_data//1\n masks = model.predict(dataset, steps=steps_per_epoch)\n # print(masks.shape)\n i = 0\n for x, y in dataset:\n print(y[0].shape)\n # print(i, masks[i].shape)\n a = masks[i]\n mask_new = np.dstack([a, a, a])\n # print(x.shape, y.shape)\n gt = np.dstack([y[0], y[0], y[0]])\n # gt = cv2.cvtColor(y[0], cv2.COLOR_GRAY2RGB)\n # true = cv2.cvtColor(x[0], cv2.COLOR_BGR2RGB)\n im_h = np.concatenate([x[0], gt * 255, mask_new *255], axis = 1)\n cv2.imwrite(\"{}/{}.jpg\".format(outdir, i), im_h)\n i+=1\n\ndef visualize(src_dir, model, outdir =\"./save_vis/Etis/\"):\n dataset, len_data = load_dataset(src_dir)\n predict(model, dataset, len_data, outdir)\n\nif __name__ == \"__main__\":\n \n BATCH_SIZE = 16\n img_size = 256\n SEED = 1024\n save_path = \"best_model.h5\"\n route_data = \"./TestDataset/\"\n outdir =\"./save_vis/cvc300/\"\n src_dir = \"./TestDataset/CVC-300\"\n\n model = build_model(img_size)\n model.load_weights(save_path)\n\n visualize(src_dir, model, outdir)\n 
","repo_name":"huyquoctrinh/MetaPolyp-CBMS2023","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"28"} +{"seq_id":"73994465035","text":"import time\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom lettuce import world\nfrom xivo_lettuce.form import submit, input, select\nfrom xivo_lettuce import common\n\n\ndef add_or_replace_directory_config(directory):\n _remove_directory_config(directory)\n _create_directory_config(directory)\n\n\ndef _remove_directory_config(directory):\n common.remove_element_if_exist('directory_config', directory['name'])\n\n\ndef _create_directory_config(directory):\n common.open_url('directory_config', 'add')\n input.set_text_field_with_label(\"Directory name\", directory['name'])\n input.set_text_field_with_label(\"URI\", directory['URI'])\n select.set_select_field_with_label(\"Type\", directory['type'])\n submit.submit_form()\n\n\ndef add_directory_definition(directory):\n _add_directory(\n directory['name'],\n directory['URI'],\n directory['direct match'],\n directory.get('delimiter'),\n directory.get('reverse match'),\n )\n\n\ndef add_or_replace_directory(name, uri, direct_match, reverse_match, fields):\n remove_directory(name)\n _add_directory(name, uri, direct_match, None, reverse_match)\n _add_directory_fields(fields)\n submit.submit_form()\n\n\ndef add_or_replace_display(name, fields):\n if common.element_is_in_list('cti_display_filter', name):\n common.remove_line(name)\n\n common.open_url('cti_display_filter', 'add')\n _type_display_name(name)\n for title, field_type, display in fields:\n _add_display_field(title, field_type, display)\n submit.submit_form()\n\n\ndef remove_directory(name):\n while common.element_is_in_list('cti_directory', name):\n common.remove_line(name)\n\n # Work around for directory associations that aren't deleted\n common.open_url('cti_direct_directory', 'list')\n try:\n common.edit_line('default')\n except Exception:\n pass # No default context configured\n else:\n submit.submit_form()\n\n\ndef _type_display_name(name):\n input.set_text_field_with_label('Name', name)\n\n\ndef _add_directory_fields(fields):\n time.sleep(world.timeout) # wait for javascript to load\n for field_name, value in fields.iteritems():\n add_field(field_name, value)\n\n\ndef _add_directory(name, uri, direct_match, delimiter=None, reverse_match=None):\n common.open_url('cti_directory', 'add')\n input.set_text_field_with_label(\"Name\", name)\n if delimiter:\n input.set_text_field_with_label(\"Delimiter\", delimiter)\n input.set_text_field_with_label(\"Direct match\", direct_match)\n if reverse_match:\n input.set_text_field_with_label(\"Match reverse directories\", reverse_match)\n select.set_select_field_with_label(\"URI\", uri)\n\n\ndef add_field(fieldname, value):\n b = world.browser\n add_btn = b.find_element_by_css_selector(\".sb-list table .sb-top .th-right a\")\n add_btn.click()\n\n xpath = \"//div[@class='sb-list']/table[position()=1]/tbody/tr[last()]/td[position()=%s]/input\"\n fieldname_input = b.find_element_by_xpath(xpath % 1)\n fieldname_input.send_keys(fieldname)\n\n value_input = b.find_element_by_xpath(xpath % 2)\n value_input.send_keys(value)\n\n\ndef _add_display_field(title, f_type, value):\n b = world.browser\n add_btn = b.find_element_by_css_selector(\".sb-list table .sb-top .th-right a\")\n add_btn.click()\n\n xpath = 
\"//div[@class='sb-list']/table[position()=1]/tbody/tr[last()]/td[position()=%s]/input\"\n field_title = b.find_element_by_xpath(xpath % 1)\n field_title.send_keys(title)\n\n field_type = b.find_element_by_xpath(xpath % 2)\n field_type.send_keys(f_type)\n\n display_format = b.find_element_by_xpath(xpath % 4)\n display_format.send_keys(value)\n\n\ndef add_directory_to_context(directory):\n select.set_select_field_with_id(\"it-directorieslist\", directory)\n\n right_arrow = world.browser.find_element_by_xpath(\"//div[@class='inout-list']/a[position()=1]\")\n right_arrow.click()\n\n\ndef assign_filter_and_directories_to_context(context, filter_name, directories):\n if common.element_is_in_list('cti_direct_directory', context):\n common.remove_line(context)\n\n common.open_url('cti_direct_directory', 'add')\n select.set_select_field_with_label(\"Name\", context)\n select.set_select_field_with_label(\"Display filter\", filter_name)\n for directory in directories:\n add_directory_to_context(directory)\n\n submit.submit_form()\n\n\ndef set_reverse_directories(directories):\n common.open_url('cti_reverse_directory', '')\n _remove_all_reverse_directories()\n for directory in directories:\n _add_reverse_directory(directory)\n submit.submit_form()\n\n\ndef _add_reverse_directory(directory):\n select.set_multiselect_field_with_id_containing(\"it-directorieslist\", directory)\n button = world.browser.find_element_by_xpath(\"//div[@class='inout-list']/a[1]\")\n button.click()\n\n\ndef _remove_all_reverse_directories():\n select.select_all_with_id(\"it-directories\")\n button = world.browser.find_element_by_xpath(\"//div[@class='inout-list']/a[2]\")\n button.click()\n","repo_name":"jaunis/xivo-acceptance","sub_path":"xivo_acceptance/action/webi/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"12934564265","text":"from ahc.Ahc import ComponentModel, Event, ConnectorTypes, EventTypes, Topology\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.fernet import Fernet\nimport time\nimport sys\nimport os\nfrom ahc.Channels.Channels import Channel\n\nimport json\n\ndef dict_to_bytes(dictionary):\n return json.dumps(dictionary).encode(\"utf-8\")\n\ndef bytes_to_dict(bytes_object):\n return json.loads(bytes_object.decode(\"utf-8\"))\n\nprivate_key_Alice = rsa.generate_private_key(\n public_exponent=65537,\n key_size=1024,\n)\npublic_key_Alice = private_key_Alice.public_key()\n\nprivate_key_Bob = rsa.generate_private_key(\n public_exponent=65537,\n key_size=1024,\n)\npublic_key_Bob = private_key_Bob.public_key()\n\nprivate_key_Trent = rsa.generate_private_key(\n public_exponent=65537,\n key_size=1024,\n)\npublic_key_Trent = private_key_Trent.public_key()\n\nclass Alice(ComponentModel):\n def on_init(self, eventobj: Event):\n message = {\"name\": \"Bob\"}\n msg_encoded = dict_to_bytes(message)\n event = Event(self, EventTypes.MFRB, msg_encoded)\n # time.sleep(1)\n # print(\"step1\")\n self.send_up(event)\n\n def on_message_from_top(self, eventobj: Event): # gets message from Trent\n message = eventobj.eventcontent\n public_key_Trent.verify(\n message[1],\n message[0],\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n 
)\n if message[0].split(\"|\".encode(\"utf-8\"))[0] != b\"Bob\":\n print(\"Wrong Public Key received, retry with true name\")\n\n # gets bob public key from trent message\n bob_public = serialization.load_pem_public_key(message[0].split(\"|\".encode(\"utf-8\"))[1])\n\n # generates a session key and a public/private key pair\n self.session_key = Fernet.generate_key()\n self.private_key = rsa.generate_private_key(public_exponent=65537, key_size=1024,)\n self.public_key = self.private_key.public_key()\n\n # encrypts the timestamp with the session key\n timestamp = str(time.time()).encode(\"utf-8\")\n f = Fernet(self.session_key)\n encrypted_timestamp = f.encrypt(timestamp)\n\n # signs lifetime, alice name, and public key with alice private key\n self.lifetime = 3600\n lifetime = str(3600).encode(\"utf-8\") # 1 hour\n alice_name = \"Alice\".encode(\"utf-8\")\n LNP = lifetime + \"|\".encode(\"utf-8\") + alice_name + \"|\".encode(\"utf-8\") + self.public_key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n\n LNP_signed = private_key_Alice.sign(\n LNP,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n # encrypts session key with bob public key and signs it with private key\n session_key_encoded = bob_public.encrypt(\n self.session_key,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n signature = self.private_key.sign(\n session_key_encoded,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n # data to be sent to bob\n message = [encrypted_timestamp, LNP, LNP_signed, session_key_encoded, signature]\n\n # sends message to bob\n # print(\"step3\")\n event = Event(self, EventTypes.MFRT, message)\n self.send_down(event) # from Alice to Node-1\n\n\n def on_message_from_bottom(self, eventobj: Event): # gets message from Node-1\n if type(eventobj.eventcontent) == bytes:\n message = eventobj.eventcontent\n f = Fernet(self.session_key)\n decrypted_timestamp = f.decrypt(message)\n decrypted_timestamp = float(decrypted_timestamp.decode(\"utf-8\"))\n if (time.time() - decrypted_timestamp) > 10:\n print(\"Session expired\")\n sys.exit()\n\n self.session_time = time.time()\n print(\"authenticated\")\n content = input(\"Alice, enter your message: \")\n if content == \"quit\":\n os._exit(1)\n encrypted_content = f.encrypt(content.encode(\"utf-8\"))\n new_message = [\"message\".encode(\"utf-8\"), encrypted_content]\n\n event = Event(self, EventTypes.MFRT, new_message)\n self.send_down(event) # from Alice to Node-1\n\n elif type(eventobj.eventcontent) == list:\n if self.session_time + self.lifetime < time.time():\n print(\"Session expired\")\n quit()\n\n message = eventobj.eventcontent\n\n f = Fernet(self.session_key)\n print(\"from bob to Alice: \", f.decrypt(message[1]).decode(\"utf-8\"))\n content = input(\"Alice, enter your message: \")\n if content == \"quit\":\n os._exit(1)\n encrypted_content = f.encrypt(content.encode(\"utf-8\"))\n new_message = [\"message\".encode(\"utf-8\"), encrypted_content]\n\n event = Event(self, EventTypes.MFRT, new_message)\n self.send_down(event) # from Alice to Node-1\n\n\nclass Bob(ComponentModel):\n def on_message_from_top(self, eventobj: Event):\n message = eventobj.eventcontent\n\n if message[0].split(\"|\".encode(\"utf-8\"))[0] != b\"Alice\":\n print(\"Wrong Public Key received, retry with true name\")\n\n # verify 
trent signature\n public_key_Trent.verify(\n message[1],\n message[0],\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n # gets alice public key from trent message\n self.alice_public = serialization.load_pem_public_key(message[0].split(\"|\".encode(\"utf-8\"))[1])\n\n # verifies lifetime, alice name, and public key with alice public key\n self.alice_public.verify(\n self.message_from_alice[2],\n self.message_from_alice[1],\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n LNP = self.message_from_alice[1]\n lifetime, alice_name, public_key = LNP.split(\"|\".encode(\"utf-8\"))\n self.lifetime = int(lifetime.decode(\"utf-8\"))\n alice_name = alice_name.decode(\"utf-8\")\n\n public_key = serialization.load_pem_public_key(public_key)\n\n public_key.verify(\n self.message_from_alice[4],\n self.message_from_alice[3],\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n self.session_key = private_key_Bob.decrypt(\n self.message_from_alice[3],\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n f = Fernet(self.session_key)\n timestamp = f.decrypt(self.message_from_alice[0])\n self.session_time = time.time()\n # print(\"step6 completed\")\n\n if int(float(timestamp.decode(\"utf-8\"))) + 10 < time.time():\n print(\"timestamp is not valid\")\n print(\"no authentication\")\n\n new_timestamp = f.encrypt(str(time.time()).encode(\"utf-8\"))\n\n event = Event(self, EventTypes.MFRT, new_timestamp)\n self.send_down(event)\n\n def on_message_from_bottom(self, eventobj: Event):\n self.message_from_alice = eventobj.eventcontent\n\n if self.message_from_alice[0] != b\"message\":\n message = {\"name\": \"Alice\"}\n # print(type(self.message_from_alice))\n msg_encoded = dict_to_bytes(message)\n # print(\"step4\")\n event = Event(self, EventTypes.MFRB, msg_encoded)\n self.send_up(event) # from Alice to Node-2\n\n else:\n if self.session_time + self.lifetime < time.time():\n print(\"session expired\")\n os._exit(1)\n\n f = Fernet(self.session_key)\n print(\"from alice to bob: \", f.decrypt(self.message_from_alice[1]).decode(\"utf-8\"))\n\n message = input(\"Bob, enter your message: \")\n if message == \"quit\":\n os._exit(1)\n encrypted_message = f.encrypt(message.encode(\"utf-8\"))\n new_message = [\"message\".encode(\"utf-8\"), encrypted_message]\n\n event = Event(self, EventTypes.MFRT, new_message)\n self.send_down(event)\n\n\nclass Trent(ComponentModel):\n def on_message_from_bottom(self, eventobj: Event):\n message_rcvd = bytes_to_dict(eventobj.eventcontent)\n if message_rcvd[\"name\"] == \"Bob\":\n pem = public_key_Bob.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n\n msgg = \"Bob\".encode() + \"|\".encode(\"utf-8\") + pem\n signature = private_key_Trent.sign(\n msgg,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n eventContent = [msgg, signature]\n\n event = Event(self, EventTypes.MFRT, eventContent)\n # print(\"step2\")\n self.send_down(event)\n elif message_rcvd[\"name\"] == \"Alice\":\n pem = public_key_Alice.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n\n msgg = \"Alice\".encode() + \"|\".encode(\"utf-8\") + pem\n signature = 
private_key_Trent.sign(\n msgg,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n eventContent = [msgg, signature]\n\n event = Event(self, EventTypes.MFRT, eventContent)\n # print(\"step5\")\n self.send_down(event)\n else:\n event = Event(self, EventTypes.MFRT, \"name not recognized\".encode(\"utf-8\"))\n self.send_down(event)\n\n\n\nclass Node1(ComponentModel):\n def __init__(self, componentname, componentid):\n self.alice = Alice(\"Alice\", componentid)\n self.trent = Trent(\"Trent\", componentid)\n\n self.alice.connect_me_to_component(ConnectorTypes.UP, self.trent)\n self.trent.connect_me_to_component(ConnectorTypes.DOWN, self.alice)\n\n self.connect_me_to_component(ConnectorTypes.UP, self.alice)\n self.alice.connect_me_to_component(ConnectorTypes.DOWN, self)\n\n super().__init__(componentname, componentid)\n\n def on_message_from_bottom(self, eventobj: Event):\n self.send_up(eventobj)\n\n def on_message_from_top(self, eventobj: Event):\n self.send_down(eventobj)\n\n\nclass Node2(ComponentModel):\n def __init__(self, componentname, componentid):\n self.bob = Bob(\"Bob\", componentid)\n self.trent = Trent(\"Trent\", componentid)\n\n self.bob.connect_me_to_component(ConnectorTypes.UP, self.trent)\n self.trent.connect_me_to_component(ConnectorTypes.DOWN, self.bob)\n\n self.connect_me_to_component(ConnectorTypes.UP, self.bob)\n self.bob.connect_me_to_component(ConnectorTypes.DOWN, self)\n\n super().__init__(componentname, componentid)\n\n def on_message_from_bottom(self, eventobj: Event):\n self.send_up(eventobj)\n\n def on_message_from_top(self, eventobj: Event):\n self.send_down(eventobj)\n\n\nsys.path.insert(0, os.getcwd())\n\ndef main():\n topo = Topology()\n topo.construct_sender_receiver(Node1, Node2, Channel)\n topo.start()\n print(\"Type quit to exit chat\")\n\n while True: pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"umutozd/ahc","sub_path":"ahc/Security/AKA/DASS.py","file_name":"DASS.py","file_ext":"py","file_size_in_byte":12546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"28"} +{"seq_id":"41865926002","text":"from otree.api import (\r\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\r\n Currency as c, currency_range\r\n)\r\n\r\nimport random\r\n\r\nauthor = 'Maggie'\r\n\r\ndoc = \"\"\"Player is given an example slider set to a certain point. Player has to move following sliders to match the \r\ngiven slider example, and tries to complete as many sliders as possible within 2 minutes. Player is rewarded for \r\neach correct slider completed within the time limit. 
\"\"\"\r\n\r\n\r\nclass Constants(BaseConstants):\r\n name_in_url = 'sliders_task'\r\n players_per_group = None\r\n num_rounds = 20\r\n\r\n\r\nclass Subsession(BaseSubsession):\r\n pass\r\n\r\n\r\nclass Group(BaseGroup):\r\n def set_slider_goals(self):\r\n for i in range(20):\r\n n = i + 1\r\n self.session.vars['slider_goals' + str(n)] = []\r\n # makes 20 arrays to hold the slider goals for the 20 rounds\r\n\r\n for j in range(20):\r\n n = j + 1\r\n for i in range(10):\r\n self.session.vars['slider_goals' + str(n)].append(random.randint(0, 100))\r\n print(\"For round \", str(n), 'the slider goals are set to: ', self.session.vars['slider_goals' + str(n)],\r\n '\\n')\r\n # fills the 20 arrays with the slider goals for the 20 rounds\r\n\r\n def ensure_random_goals(self):\r\n print('Running ensure_random_goals now:', '\\n')\r\n controller = self.get_player_by_role('Controller')\r\n\r\n for i in range(20):\r\n n = i+1 #this is the round number\r\n controller_slider_answers = [controller.in_round(n).slider1, controller.in_round(n).slider2, controller.in_round(n).slider3,\r\n controller.in_round(n).slider4,\r\n controller.in_round(n).slider5, controller.in_round(n).slider6, controller.in_round(n).slider7, controller.in_round(n).slider8,\r\n controller.in_round(n).slider9, controller.in_round(n).slider10]\r\n\r\n print(\"For round \", str(n), 'the orginal sliders are set to: ', controller_slider_answers)\r\n print(\"self.session.vars[slider_goals] are:\", self.session.vars['slider_goals' + str(n)], '\\n')\r\n\r\n for j in range(10):\r\n if self.session.vars['slider_goals' + str(n)][j] == controller_slider_answers[j]:\r\n if self.session.vars['slider_goals' + str(n)][j] > 95:\r\n self.session.vars['slider_goals' + str(n)][j] = (self.session.vars['slider_goals' + str(n)][j] - 5)\r\n print(\"Subtracted 5 for [\", j, ']. controller_slider_answers was', controller_slider_answers[j], 'so we reset the goal to: ', self.session.vars['slider_goals' + str(n)][j], '\\n')\r\n else:\r\n self.session.vars['slider_goals' + str(n)][j] = (self.session.vars['slider_goals' + str(n)][j] + 5)\r\n print(\"Added 5 for [\", j, ']. controller_slider_answers was', controller_slider_answers[j], 'so we reset the goal to: ', self.session.vars['slider_goals' + str(n)][j], '\\n')\r\n\r\n\r\n\r\n def check_slider_answers(self):\r\n print('\\n\\nFOR ROUND', self.round_number)\r\n\r\n controller = self.get_player_by_role('Controller')\r\n controller_slider_answers = [controller.slider1, controller.slider2, controller.slider3, controller.slider4,\r\n controller.slider5, controller.slider6, controller.slider7, controller.slider8,\r\n controller.slider9, controller.slider10]\r\n\r\n for i in range(10):\r\n if controller_slider_answers[i] == self.session.vars['slider_goals' + str(self.round_number)][i]:\r\n controller.total_sliders_correct += 1\r\n controller.payoff += c(10)\r\n\r\n print('For slider', i + 1, 'slider was correct. Controller.total_sliders_correct is',\r\n controller.total_sliders_correct, 'and controller.payoff is', controller.payoff)\r\n print('slider_goals[', i, '] was', self.session.vars['slider_goals' + str(self.round_number)][i],\r\n 'and controller_slider_answers[', i, '] was', controller_slider_answers[i], '\\n')\r\n else:\r\n print('For slider', i + 1, 'slider was incorrect. 
Controller.total_sliders_correct is still',\r\n controller.total_sliders_correct, 'and controller.payoff is still', controller.payoff)\r\n print('slider_goals[', i, '] was', self.session.vars['slider_goals' + str(self.round_number)][i],\r\n 'and controller_slider_answers[', i, '] was', controller_slider_answers[i], '\\n')\r\n\r\n\r\nclass Player(BasePlayer):\r\n slider1 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider2 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider3 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider4 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider5 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider6 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider7 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider8 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider9 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n slider10 = models.IntegerField(widget=widgets.Slider, min=0, max=100, initial=random.randint(0, 100), label=\"\")\r\n\r\n # payoff = models.CurrencyField()\r\n total_sliders_correct = models.IntegerField(initial=0)\r\n\r\n def role(self):\r\n if self.id_in_group == 1:\r\n return 'Controller'\r\n","repo_name":"maggiegallagh/oTree-games","sub_path":"sliders_task/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"31600326394","text":"# -*- coding: future_fstrings -*-\nimport logging\nfrom collections import defaultdict\n\nfrom dpdb.problem import *\nfrom dpdb.reader import CnfReader\nfrom .sat_util import *\n\nlogger = logging.getLogger(__name__)\n\nclass Pmc(Problem):\n def __init__(self, name, pool, store_formula=False, **kwargs):\n super().__init__(name, pool, **kwargs)\n self.store_formula = store_formula\n\n def td_node_column_def(self,var):\n return td_node_column_def(var)\n \n def filter(self,node):\n return filter(self.var_clause_dict, node)\n\n def setup_extra(self):\n def create_tables():\n self.db.ignore_next_praefix()\n self.db.create_table(\"problem_pmc\", [\n (\"id\", \"INTEGER NOT NULL PRIMARY KEY REFERENCES PROBLEM(id)\"),\n (\"num_vars\", \"INTEGER NOT NULL\"),\n (\"num_clauses\", \"INTEGER NOT NULL\"),\n (\"model_count\", \"NUMERIC\")\n ])\n if \"faster\" not in self.kwargs or not self.kwargs[\"faster\"]:\n self.db.create_table(\"projected_vars\", [\n (\"id\", \"INTEGER NOT NULL REFERENCES PROBLEM(id)\"),\n (\"var\", \"INTEGER NOT NULL\")\n ])\n self.db.create_pk(\"projected_vars\",[\"id\",\"var\"])\n\n def insert_data():\n self.db.ignore_next_praefix()\n self.db.insert(\"problem_pmc\",(\"id\",\"num_vars\",\"num_clauses\"),\n (self.id, self.num_vars, self.num_clauses))\n if \"faster\" not in self.kwargs or not self.kwargs[\"faster\"]:\n for p in self.projected:\n self.db.insert(\"projected_vars\",(\"id\", \"var\"),(self.id, p))\n self.db.ignore_next_praefix()\n self.db.insert(\"problem_option\",(\"id\", \"name\", 
\"value\"),(self.id,\"store_formula\",self.store_formula))\n if self.store_formula:\n store_clause_table(self.db, self.clauses)\n\n create_tables()\n insert_data()\n\n def prepare_input(self, fname):\n input = CnfReader.from_file(fname)\n self.num_vars = input.num_vars\n self.num_clauses = input.num_clauses\n self.clauses = input.clauses\n self.projected = input.projected\n self.var_clause_dict = defaultdict(set)\n\n num_vars, edges = cnf2primal(input.num_vars, input.clauses, self.var_clause_dict)\n # Create clique over projected variables\n for a in self.projected:\n for b in self.projected:\n if a < b:\n edges.add((a,b))\n return (num_vars, edges)\n\n def after_solve(self):\n root_tab = f\"td_node_{self.td.root.id}\"\n projected_cols = \", \".join([f\"v{p}\" for p in self.projected])\n sum_count = self.db.replace_dynamic_tabs(f\"(select count(*) from (select distinct {projected_cols} from {root_tab}) as projected)\")\n self.db.ignore_next_praefix()\n model_count = self.db.update(\"problem_pmc\",[\"model_count\"],[sum_count],[f\"ID = {self.id}\"],\"model_count\")[0]\n logger.info(\"Problem has %d models\", model_count)\n\n def get_root(self, bags, adj, htd_root):\n def is_valid(bag):\n for p in self.projected:\n if p not in bag:\n return False\n return True\n\n wl = [htd_root]\n visited = set([htd_root])\n for n in wl:\n if is_valid(bags[n]):\n return n\n else:\n for c in adj[n]:\n if not c in visited:\n visited.add(c)\n wl.append(c)\n\n return htd_root\n\ndef var2cnt(node,var):\n if node.needs_introduce(var):\n return \"1\"\n else:\n return \"{}.model_count\".format(var2tab_alias(node,var))\n\ndef node2cnt(node):\n return \"{}.model_count\".format(node2tab_alias(node))\n\nargs.specific[Pmc] = dict(\n help=\"Solve PMC instances\",\n options={\n \"--store-formula\": dict(\n dest=\"store_formula\",\n help=\"Store formula in database\",\n action=\"store_true\",\n )\n }\n)\n","repo_name":"mk-tu/argBTW","sub_path":"dpdb/problems/pmc.py","file_name":"pmc.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"4647427632","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport io\nimport mmap\nimport os\nimport timeit\n\nfrom contextlib import contextmanager\n\nimport six\nimport pytest\n\nfrom .fakesanlock import FakeSanlock\nfrom testlib import make_uuid\nfrom testlib import namedTemporaryDir\n\nfrom vdsm import constants\nfrom vdsm import utils\nfrom vdsm.storage import outOfProcess as oop\nfrom vdsm.storage import xlease\n\n\nclass ReadError(Exception):\n \"\"\" Raised to simulate read errors \"\"\"\n\n\nclass WriteError(Exception):\n \"\"\" Raised to simulate read errors \"\"\"\n\n\nclass FailingReader(xlease.DirectFile):\n def pread(self, offset, buf):\n raise ReadError\n\n\nclass FailingWriter(xlease.DirectFile):\n def pwrite(self, offset, buf):\n raise WriteError\n\n\nclass TestIndex:\n\n def test_metadata(self, monkeypatch):\n monkeypatch.setattr(\"time.time\", lambda: 123456789)\n with make_volume() as vol:\n lockspace = os.path.basename(os.path.dirname(vol.path))\n assert vol.version == 1\n assert vol.lockspace == lockspace\n assert vol.mtime == 123456789\n\n def test_magic_big_endian(self):\n with make_volume() as vol:\n with io.open(vol.path, \"rb\") as f:\n f.seek(xlease.INDEX_BASE)\n assert f.read(4) == b\"\\x12\\x15\\x20\\x16\"\n\n def test_bad_magic(self):\n with make_leases() as path:\n 
self.check_invalid_index(path)\n\n def test_bad_version(self):\n with make_volume() as vol:\n with io.open(vol.path, \"r+b\") as f:\n f.seek(xlease.INDEX_BASE + 5)\n f.write(b\"blah\")\n self.check_invalid_index(vol.path)\n\n def test_unsupported_version(self):\n with make_volume() as vol:\n md = xlease.IndexMetadata(2, \"lockspace\")\n with io.open(vol.path, \"r+b\") as f:\n f.seek(xlease.INDEX_BASE)\n f.write(md.bytes())\n self.check_invalid_index(vol.path)\n\n def test_bad_lockspace(self):\n with make_volume() as vol:\n with io.open(vol.path, \"r+b\") as f:\n f.seek(xlease.INDEX_BASE + 10)\n f.write(b\"\\xf0\")\n self.check_invalid_index(vol.path)\n\n def test_bad_mtime(self):\n with make_volume() as vol:\n with io.open(vol.path, \"r+b\") as f:\n f.seek(xlease.INDEX_BASE + 59)\n f.write(b\"not a number\")\n self.check_invalid_index(vol.path)\n\n def test_updating(self):\n with make_volume() as vol:\n md = xlease.IndexMetadata(xlease.INDEX_VERSION, \"lockspace\",\n updating=True)\n with io.open(vol.path, \"r+b\") as f:\n f.seek(xlease.INDEX_BASE)\n f.write(md.bytes())\n self.check_invalid_index(vol.path)\n\n def test_truncated_index(self):\n with make_volume() as vol:\n # Truncate index, reading it should fail.\n with io.open(vol.path, \"r+b\") as f:\n f.truncate(\n xlease.INDEX_BASE + xlease.INDEX_SIZE - xlease.BLOCK_SIZE)\n self.check_invalid_index(vol.path)\n\n def check_invalid_index(self, path):\n file = xlease.DirectFile(path)\n with utils.closing(file):\n with pytest.raises(xlease.InvalidIndex):\n vol = xlease.LeasesVolume(file)\n vol.close()\n\n def test_format(self):\n with make_volume() as vol:\n assert vol.leases() == {}\n\n def test_rebuild_empty(self, fake_sanlock):\n with make_volume() as vol:\n # Add underlying sanlock resources\n for i in [3, 4, 6]:\n resource = \"%04d\" % i\n offset = xlease.USER_RESOURCE_BASE + xlease.SLOT_SIZE * i\n fake_sanlock.write_resource(\n vol.lockspace, resource, [(vol.path, offset)])\n # The index is empty\n assert vol.leases() == {}\n\n # After rebuilding the index it should contain all the underlying\n # resources.\n file = xlease.DirectFile(vol.path)\n with utils.closing(file):\n xlease.rebuild_index(vol.lockspace, file)\n expected = {\n \"0003\": {\n \"offset\": xlease.USER_RESOURCE_BASE + xlease.SLOT_SIZE * 3,\n \"updating\": False,\n },\n \"0004\": {\n \"offset\": xlease.USER_RESOURCE_BASE + xlease.SLOT_SIZE * 4,\n \"updating\": False,\n },\n \"0006\": {\n \"offset\": xlease.USER_RESOURCE_BASE + xlease.SLOT_SIZE * 6,\n \"updating\": False,\n },\n }\n file = xlease.DirectFile(vol.path)\n with utils.closing(file):\n vol = xlease.LeasesVolume(file)\n with utils.closing(vol):\n assert vol.leases() == expected\n\n def test_create_read_failure(self):\n with make_leases() as path:\n file = FailingReader(path)\n with utils.closing(file):\n with pytest.raises(ReadError):\n xlease.LeasesVolume(file)\n\n def test_lookup_missing(self):\n with make_volume() as vol:\n with pytest.raises(xlease.NoSuchLease):\n vol.lookup(make_uuid())\n\n def test_lookup_updating(self):\n record = xlease.Record(make_uuid(), 0, updating=True)\n with make_volume((42, record)) as vol:\n leases = vol.leases()\n assert leases[record.resource][\"updating\"]\n with pytest.raises(xlease.LeaseUpdating):\n vol.lookup(record.resource)\n\n def test_add(self, fake_sanlock):\n with make_volume() as vol:\n lease_id = make_uuid()\n lease = vol.add(lease_id)\n assert lease.lockspace == vol.lockspace\n assert lease.resource == lease_id\n assert lease.path == vol.path\n res = 
fake_sanlock.read_resource(lease.path, lease.offset)\n assert res[\"lockspace\"] == lease.lockspace\n assert res[\"resource\"] == lease.resource\n\n def test_add_write_failure(self):\n with make_volume() as base:\n file = FailingWriter(base.path)\n with utils.closing(file):\n vol = xlease.LeasesVolume(file)\n with utils.closing(vol):\n lease_id = make_uuid()\n with pytest.raises(WriteError):\n vol.add(lease_id)\n # Must succeed because writing to storage failed\n assert lease_id not in vol.leases()\n\n def test_add_sanlock_failure(self, fake_sanlock):\n with make_volume() as vol:\n lease_id = make_uuid()\n # Make sanlock fail to write a resource\n fake_sanlock.errors[\"write_resource\"] = \\\n fake_sanlock.SanlockException\n with pytest.raises(fake_sanlock.SanlockException):\n vol.add(lease_id)\n # We should have an updating lease record\n lease = vol.leases()[lease_id]\n assert lease[\"updating\"]\n # There should be no lease on storage\n with pytest.raises(fake_sanlock.SanlockException) as e:\n fake_sanlock.read_resource(vol.path, lease[\"offset\"])\n assert e.exception.errno == fake_sanlock.SANLK_LEADER_MAGIC\n\n def test_leases(self, fake_sanlock):\n with make_volume() as vol:\n uuid = make_uuid()\n lease_info = vol.add(uuid)\n leases = vol.leases()\n expected = {\n uuid: {\n \"offset\": lease_info.offset,\n \"updating\": False,\n }\n }\n assert leases == expected\n\n def test_add_exists(self, fake_sanlock):\n with make_volume() as vol:\n lease_id = make_uuid()\n lease = vol.add(lease_id)\n with pytest.raises(xlease.LeaseExists):\n vol.add(lease_id)\n res = fake_sanlock.read_resource(lease.path, lease.offset)\n assert res[\"lockspace\"] == lease.lockspace\n assert res[\"resource\"] == lease.resource\n\n def test_lookup_exists(self, fake_sanlock):\n with make_volume() as vol:\n lease_id = make_uuid()\n add_info = vol.add(lease_id)\n lookup_info = vol.lookup(lease_id)\n assert add_info == lookup_info\n\n def test_remove_exists(self, fake_sanlock):\n with make_volume() as vol:\n leases = [make_uuid() for i in range(3)]\n for lease in leases:\n vol.add(lease)\n lease = vol.lookup(leases[1])\n vol.remove(lease.resource)\n assert lease.resource not in vol.leases()\n res = fake_sanlock.read_resource(lease.path, lease.offset)\n # There is no sanlock api for removing a resource, so we mark a\n # removed resource with empty (invalid) lockspace and lease id.\n assert res[\"lockspace\"] == \"\"\n assert res[\"resource\"] == \"\"\n\n def test_remove_missing(self):\n with make_volume() as vol:\n lease_id = make_uuid()\n with pytest.raises(xlease.NoSuchLease):\n vol.remove(lease_id)\n\n def test_remove_write_failure(self):\n record = xlease.Record(make_uuid(), 0, updating=True)\n with make_volume((42, record)) as base:\n file = FailingWriter(base.path)\n with utils.closing(file):\n vol = xlease.LeasesVolume(file)\n with utils.closing(vol):\n with pytest.raises(WriteError):\n vol.remove(record.resource)\n # Must succeed because writing to storage failed\n assert record.resource in vol.leases()\n\n def test_remove_sanlock_failure(self, fake_sanlock):\n with make_volume() as vol:\n lease_id = make_uuid()\n vol.add(lease_id)\n # Make sanlock fail to remove a resource (currently removing a\n # resource by writing invalid lockspace and resource name).\n fake_sanlock.errors[\"write_resource\"] = \\\n fake_sanlock.SanlockException\n with pytest.raises(fake_sanlock.SanlockException):\n vol.remove(lease_id)\n # We should have an updating lease record\n lease = vol.leases()[lease_id]\n assert 
lease[\"updating\"]\n # The lease should still be on storage\n res = fake_sanlock.read_resource(vol.path, lease[\"offset\"])\n assert res[\"lockspace\"] == vol.lockspace\n assert res[\"resource\"] == lease_id\n\n def test_add_first_free_slot(self, fake_sanlock):\n with make_volume() as vol:\n uuids = [make_uuid() for i in range(4)]\n for uuid in uuids[:3]:\n vol.add(uuid)\n vol.remove(uuids[1])\n vol.add(uuids[3])\n leases = vol.leases()\n # The first lease in the first slot\n assert leases[uuids[0]][\"offset\"] == xlease.USER_RESOURCE_BASE\n # The fourth lease was added in the second slot after the second\n # lease was removed.\n assert (leases[uuids[3]][\"offset\"] ==\n xlease.USER_RESOURCE_BASE + xlease.SLOT_SIZE)\n # The third lease in the third slot\n assert (leases[uuids[2]][\"offset\"] ==\n xlease.USER_RESOURCE_BASE + xlease.SLOT_SIZE * 2)\n\n @pytest.mark.slow\n def test_time_lookup(self):\n setup = \"\"\"\nimport os\nfrom testlib import make_uuid\nfrom vdsm import utils\nfrom vdsm.storage import xlease\n\npath = \"%s\"\nlockspace = os.path.basename(os.path.dirname(path))\nlease_id = make_uuid()\n\ndef bench():\n file = xlease.DirectFile(path)\n with utils.closing(file):\n vol = xlease.LeasesVolume(file)\n with utils.closing(vol, log=\"test\"):\n try:\n vol.lookup(lease_id)\n except xlease.NoSuchLease:\n pass\n\"\"\"\n with make_volume() as vol:\n count = 100\n elapsed = timeit.timeit(\"bench()\", setup=setup % vol.path,\n number=count)\n print(\"%d lookups in %.6f seconds (%.6f seconds per lookup)\"\n % (count, elapsed, elapsed / count))\n\n @pytest.mark.slow\n def test_time_add(self, fake_sanlock):\n setup = \"\"\"\nimport os\nfrom testlib import make_uuid\nfrom vdsm import utils\nfrom vdsm.storage import xlease\n\npath = \"%s\"\nlockspace = os.path.basename(os.path.dirname(path))\n\ndef bench():\n lease_id = make_uuid()\n file = xlease.DirectFile(path)\n with utils.closing(file):\n vol = xlease.LeasesVolume(file)\n with utils.closing(vol, log=\"test\"):\n vol.add(lease_id)\n\"\"\"\n with make_volume() as vol:\n count = 100\n elapsed = timeit.timeit(\"bench()\", setup=setup % vol.path,\n number=count)\n # Note: this does not include the time to create the real sanlock\n # resource.\n print(\"%d adds in %.6f seconds (%.6f seconds per add)\"\n % (count, elapsed, elapsed / count))\n\n\n@pytest.fixture(params=[\n xlease.DirectFile,\n pytest.param(\n xlease.InterruptibleDirectFile,\n marks=pytest.mark.skipif(\n six.PY3,\n reason=\"ioprocess is not available on python 3\"))\n])\ndef direct_file(request):\n \"\"\"\n Returns a direct file factory function accepting a path. 
Tests for\n xlease.*DirectFile can use this fixture for testing both implementations.\n \"\"\"\n if request.param == xlease.InterruptibleDirectFile:\n try:\n test_oop = oop.getProcessPool(\"test\")\n yield functools.partial(request.param, oop=test_oop)\n finally:\n oop.stop()\n else:\n yield request.param\n\n\nclass TestDirectFile:\n\n def test_name(self, direct_file):\n with make_leases() as path:\n file = direct_file(path)\n with utils.closing(file):\n assert file.name == path\n\n def test_size(self, direct_file):\n with make_leases() as path:\n file = direct_file(path)\n with utils.closing(file):\n assert file.size() == constants.GIB\n\n @pytest.mark.parametrize(\"offset,size\", [\n (0, 1024), # some content\n (0, 2048), # all content\n (512, 1024), # offset, some content\n (1024, 1024), # offset, all content\n ])\n def test_pread(self, tmpdir, direct_file, offset, size):\n data = b\"a\" * 512 + b\"b\" * 512 + b\"c\" * 512 + b\"d\" * 512\n path = tmpdir.join(\"file\")\n path.write(data)\n file = direct_file(str(path))\n with utils.closing(file):\n buf = mmap.mmap(-1, size)\n with utils.closing(buf):\n n = file.pread(offset, buf)\n assert n == size\n assert buf[:] == data[offset:offset + size]\n\n def test_pread_short(self, tmpdir, direct_file):\n data = b\"a\" * 1024\n path = tmpdir.join(\"file\")\n path.write(data)\n file = direct_file(str(path))\n with utils.closing(file):\n buf = mmap.mmap(-1, 1024)\n with utils.closing(buf):\n n = file.pread(512, buf)\n assert n == 512\n assert buf[:n] == data[512:]\n\n @pytest.mark.parametrize(\"offset,size\", [\n (0, 1024), # some content\n (0, 2048), # all content\n (512, 1024), # offset, some content\n (1024, 1024), # offset, all content\n ])\n def test_pwrite(self, tmpdir, direct_file, offset, size):\n # Create a file full of \"a\"s\n path = tmpdir.join(\"file\")\n path.write(b\"a\" * 2048)\n buf = mmap.mmap(-1, size)\n with utils.closing(buf):\n # Write \"b\"s\n buf.write(b\"b\" * size)\n file = direct_file(str(path))\n with utils.closing(file):\n file.pwrite(offset, buf)\n data = path.read()\n expected = (\"a\" * offset +\n \"b\" * size +\n \"a\" * (2048 - offset - size))\n assert data == expected\n\n\n@pytest.fixture\ndef fake_sanlock(monkeypatch):\n sanlock = FakeSanlock()\n monkeypatch.setattr(xlease, \"sanlock\", sanlock)\n yield sanlock\n\n\n@contextmanager\ndef make_volume(*records):\n with make_leases() as path:\n lockspace = os.path.basename(os.path.dirname(path))\n file = xlease.DirectFile(path)\n with utils.closing(file):\n xlease.format_index(lockspace, file)\n if records:\n write_records(records, file)\n vol = xlease.LeasesVolume(file)\n with utils.closing(vol):\n yield vol\n\n\n@contextmanager\ndef make_leases():\n with namedTemporaryDir() as tmpdir:\n path = os.path.join(tmpdir, \"xleases\")\n with io.open(path, \"wb\") as f:\n f.truncate(constants.GIB)\n yield path\n\n\ndef write_records(records, file):\n index = xlease.VolumeIndex()\n with utils.closing(index):\n index.load(file)\n for recnum, record in records:\n block = index.copy_record_block(recnum)\n with utils.closing(block):\n block.write_record(recnum, record)\n block.dump(file)\n","repo_name":"chipmap/vdsm","sub_path":"tests/storage/xlease_test.py","file_name":"xlease_test.py","file_ext":"py","file_size_in_byte":17529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"28"} +{"seq_id":"71179915274","text":"#!/usr/bin/env python3\n\nimport json\nimport pprint\n\nf = open('tasks.json')\ndata = json.load(f)\nf.close()\n\nfor task in data:\n
# print(i)\n # pprint.pprint(task)\n prov = data[task]['task']['provisionerId']\n wt = data[task]['task']['workerType']\n print(f\"{task}: {prov}/{wt}\")\n","repo_name":"mozilla-platform-ops/taskgraph-tools","sub_path":"worker_report.py","file_name":"worker_report.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19065520966","text":"from random import randint\n\n## Configurable range\nguessRange = (1, 100, 500)\n\n# Print some instructions\nprint(f\"Simple guessing game. Guess a number from {guessRange[0]} to {guessRange[1]} inclusive.\")\nprint(f\"Enter {guessRange[2]} to escape\")\n\n# Choose the target\ntarget = randint(guessRange[0], guessRange[1])\n#print (f\"Debug {target}\")\n\n# Initialize our guess list (let's us count the guesses)\nguesses = [0]\n\nwhile True:\n num = input(\"Enter a guess: \")\n if num.isnumeric():\n num = int(num)\n guesses.append(num)\n else:\n print(\"Numbers only please\")\n continue\n # Input validatated and added to list, check stuff\n if num == guessRange[2]:\n print(\"The easy way out\")\n break\n elif num == target:\n print(f\"You got it! It took {len(guesses)-1} guesses!\")\n break\n # The easy part is done, now we need to look at our warmer/colder cycle\n elif len(guesses) > 2:\n if (abs(target-num) <= abs(target-guesses[-2])):\n print(\"Warmer\")\n else:\n print(\"colder\")\n else: # We haven't been within 10\n if abs(target-num) <= 10:\n print (\"Warm\")\n else:\n print (\"Ice Cold\")\n","repo_name":"matthews-al/pythonlearning","sub_path":"play/guessing.py","file_name":"guessing.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6582201977","text":"import math\n\n\nclass Solution:\n def __init__(self):\n self.dataFilename = r\"..\\..\\Data\\data.txt\"\n self.k = 0\n self.dimension = 0\n self.points = None\n self.nearestPoints = None\n self.readData()\n self.findNearestPoints()\n self.findKthNearestPoint()\n\n def readData(self):\n with open(self.dataFilename) as file:\n content = file.read()\n nums = list(map(int, content.split()))\n self.k = nums[0]\n self.dimension = nums[1]\n self.points = [tuple(nums[i:i + self.dimension]) for i in range(2, len(nums), self.dimension)]\n\n def findNearestPoints(self):\n minDis = -1\n p1, p2 = None, None\n for i in range(len(self.points)):\n for j in range(i + 1, len(self.points)):\n dis = Solution.calDis(self.points[i], self.points[j])\n if minDis == -1 or dis < minDis:\n minDis = dis\n p1, p2 = self.points[i], self.points[j]\n\n print(\"距离最近的两个点坐标为:{} {}, 距离为{}\".format(p1, p2, minDis))\n self.nearestPoints = (p1, p2)\n\n def findKthNearestPoint(self):\n\n l1 = sorted(((Solution.calDis(p, self.nearestPoints[0]), p) for p in self.points if p != self.nearestPoints[0]),\n key=lambda x: x[0])\n\n l2 = sorted(((Solution.calDis(p, self.nearestPoints[1]), p) for p in self.points if p != self.nearestPoints[1]),\n key=lambda x: x[0])\n\n print(\"距离点{}最近的{}个点坐标为:{}\".format(self.nearestPoints[0], self.k, [item[1] for item in l1[:self.k]]))\n print(\"距离点{}最近的{}个点坐标为:{}\".format(self.nearestPoints[1], self.k, [item[1] for item in l2[:self.k]]))\n\n @staticmethod\n def calDis(p1: tuple, p2: tuple):\n s = sum(((i[0] - i[1]) ** 2 for i in zip(p1, p2)))\n return math.sqrt(s)\n\n\nif __name__ == \"__main__\":\n solution = 
Solution()\n","repo_name":"hhmy27/SUDA_UNGEE_CODE","sub_path":"复试/2017/保研上机题/Code/ArthurRen/2017.py","file_name":"2017.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"28"} +{"seq_id":"12195785522","text":"#!/usr/bin/env python\nimport rospy\nimport math\nimport sys\n\nfrom sensor_msgs.msg import PointCloud2\nimport std_msgs.msg\nimport sensor_msgs.point_cloud2 as pcl2\nfrom servo_lidar_test.msg import pointCloud\n\n\nx = []\ny = []\nz = []\n\n#pointCloud callback function\ndef get_pointCloud_coordinates(msg):\n\n global x\n global y\n global z\n global coordinatesArray\n\n x = msg.x[:]\n y = msg.y[:]\n z = msg.z[:]\n #coordinatesArray = msg.pointCloudCoordinates[:]\n \n\n\nif __name__ == '__main__':\n\n pointCloude_publish_rate = 10\n number_of_data_poins = 15000 # Number of data points to be published\n\n\n rospy.init_node('pointCloud2_test')\n rate = rospy.Rate(pointCloude_publish_rate) #Publishing Rate\n\n #--------------------------------------------------\n # Subscriber Setup\n #---------------------------------------------------\n rospy.Subscriber('pointCloud', pointCloud, get_pointCloud_coordinates) # Subscribes to Adrik's lidar\n\n\n #--------------------------------------------------\n # Publisher Setup\n #--------------------------------------------------\n pcl_pub = rospy.Publisher(\"/my_pcl_topic\", PointCloud2, queue_size=10)\n\n\n\n rospy.loginfo(\"Initializing sample pcl2 publisher node...\")\n \n #give time to roscore to make the connections\n rospy.sleep(1.)\n \n \n #header\n header = std_msgs.msg.Header()\n header.stamp = rospy.Time.now()\n header.frame_id = 'laser'\n \n \n #Initialization \n rospy.loginfo(\"happily publishing sample pointcloud.. 
!\")\n count = 0\n cloud_points = []\n coordinatesArray = []\n\n while not rospy.is_shutdown():\n \n \n #----------------- For When getting x,y and z single points from controller node-------------------\n # if(x != 0 or y!=0 or z!=0):\n\n # cloud_points.append([x, y, z])\n\n \n # if (len(cloud_points) > number_of_data_poins):\n\n # del(cloud_points[0])\n\n\n # #create pcl from points\n # scaled_polygon_pcl = pcl2.create_cloud_xyz32(header, cloud_points)\n\n # pcl_pub.publish(scaled_polygon_pcl)\n #--------------------------------------------------------------------------------------\n\n\n #create pcl from points\n for i in range(len(x)):\n\n if(x[i] != 0 or y[i]!=0 or z[i]!=0):\n cloud_points.append([x[i], y[i], z[i]])\n\n if (len(cloud_points) > number_of_data_poins):\n del(cloud_points[0])\n\n scaled_polygon_pcl = pcl2.create_cloud_xyz32(header, cloud_points)\n pcl_pub.publish(scaled_polygon_pcl)\n\n rate.sleep()\n\n rospy.spin()\n\n\n \n\n","repo_name":"adr288/Capstone-LIDAR-Project-GCC","sub_path":"servo_lidar_test/scripts/pointCloud2_test_node.py","file_name":"pointCloud2_test_node.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"5774427178","text":"\n# Write a simple python function that accepts a list as an arguemnent and returns\n# a new list of elements less than any input for the user\n\nn = [111, 222, 333, 444, 555, 200, 34]\n\n\ndef smaller_than_x(a):\n x = int(input(\"Enter a number\"))\n b = []\n for i in a:\n if i < x:\n b.append(a)\n return b\n\n\nprint(smaller_than_x(n))\n","repo_name":"sodiqafolayan/asorock_100days_pythoncode","sub_path":"day6_smaller_than_x.py","file_name":"day6_smaller_than_x.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"29688503126","text":"import logging\n\nfrom osc.services import cadastre\nfrom osc.services import climate\nfrom osc.services import soil\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_valid_parcel(parcel):\n try:\n public_info = parcel['properties']['cadastralData']\n\n is_valid = not len(public_info['bico']['lspr']['spr'])\n for spr in public_info['bico']['lspr']['spr']:\n is_valid = is_valid or \\\n (spr['dspr']['ccc'] not in ['VT', 'OT', 'FF'])\n except KeyError:\n # As it does not have information about the type,\n # we assume that it is OK\n is_valid = True\n\n return is_valid\n\n\ndef update_parcel_by_cadastral_code(cadastral_code):\n logger.info('update_parcel_by_cadastral_code(%s)', cadastral_code)\n parcels_geojson = obtain_parcels_by_cadastral_code(\n cadastral_code,\n retrieve_public_info=True,\n retrieve_climate_info=False,\n retrieve_soil_info=True,\n retrieve_google_info=False)\n logger.debug('parcels_geojson[\\'features\\'][0] = %s',\n parcels_geojson['features'][0])\n json2source(parcels_geojson['features'][0])\n cadastre.index_parcel(json2source(parcels_geojson['features'][0]))\n\n\ndef json2source(json):\n source = {}\n source['geometry'] = json['geometry']\n source['properties'] = json['properties']\n source['bbox'] = json['bbox']\n return source\n\n\ndef obtain_parcels_by_cadastral_code(cadastral_code,\n retrieve_public_info=False,\n retrieve_climate_info=False,\n retrieve_soil_info=False,\n retrieve_google_info=False):\n logger.debug('obtain_parcels_by_cadastral_code(%s,%s,%s,%s)',\n cadastral_code,\n retrieve_public_info,\n retrieve_climate_info,\n retrieve_soil_info)\n parcels = 
cadastre.get_parcels_by_cadastral_code(cadastral_code,\n retrieve_public_info,\n retrieve_google_info)\n\n # Add climate info\n if retrieve_climate_info:\n for parcel in parcels:\n closest_station = \\\n climate.get_closest_station(\n parcel['properties']['reference_point']['lat'],\n parcel['properties']['reference_point']['lon'])\n parcel['properties']['closest_station'] = closest_station\n climate_agg = climate.get_aggregated_climate_measures(\n closest_station['IDESTACION'],\n closest_station['IDPROVINCIA'],\n 3)\n parcel['properties']['climate_aggregations'] = climate_agg\n\n # Add soil info\n if retrieve_soil_info:\n for parcel in parcels:\n closest_soil_measure = \\\n soil.get_closest_soil_measure(\n parcel['properties']['reference_point']['lat'],\n parcel['properties']['reference_point']['lon'])\n parcel['properties']['closest_soil_measure'] = closest_soil_measure\n\n parcels_geojson = {'type': 'FeatureCollection',\n 'features': parcels}\n\n return parcels_geojson\n\n\ndef obtain_parcels_by_bbox(lat_min, lon_min, lat_max, lon_max, precision):\n\n if precision == 0:\n parcels = cadastre.get_parcels_by_bbox(\n lat_min, lon_min, lat_max, lon_max)\n\n # Filter the parcels that are roads, ways, etc.\n # (JLG ATTENTION: To be removed when we have everything in ELASTIC)\n # parcels = filter(is_valid_parcel, parcels)\n return parcels\n else:\n parcels_bucket = cadastre.get_bucket_of_parcels_by_bbox_and_precision(\n lat_min, lon_min, lat_max, lon_max, precision)\n\n return parcels_bucket\n\n\ndef scan_parcels(update):\n cadastre.scan_parcels(update)\n","repo_name":"ilice/OSCapi","sub_path":"osc/services/parcels.py","file_name":"parcels.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"10652707371","text":"from django.db import models\n#room table\nclass Room(models.Model):\n Room_ID = models.AutoField(auto_created=True, primary_key=True)\n Room_Image = models.ImageField(default=\"\")\n ROOM_TYPES = (\n (\"Single Room\", \"Single Room\"),\n (\"Double Room\", \"Double Room\"),\n (\"Double Double Room\", \"Double Double Room\"),\n (\"Twin Room\", \"Twin Room\"),\n (\"Interconnecting Rooms\", \"Interconnecting Rooms\"),\n (\"Adjoining Rooms\", \"Adjoining Rooms\"),\n (\"Duplex\", \"Duplex\"),\n (\"Cabana\", \"Cabana\"),\n (\"Studio Room\", \"Studio Room\"),\n (\"Parlor\", \"Parlor\"),\n (\"Lanai\", \"Lanai\"),\n (\"Efficiency Room\", \"Efficiency Room\"),\n (\"Hospitality Room\", \"Hospitality Room\"),\n (\"Suite Room\", \"Suite Room\"),\n (\"King Bedroom\", \"King Bedroom\"),\n (\"Queen Bedroom\", \"Queen Bedroom\")\n )\n Room_Type = models.CharField(choices=ROOM_TYPES, default=\"\", max_length=50)\n Location = models.TextField(max_length=200, null=False)\n Price = models.CharField(max_length=10)\n Description = models.TextField(max_length=5000, null=False, default=\"\")\n class Meta:\n db_table = \"room\"\n\n","repo_name":"deStarxis/Exploro_System","sub_path":"Booking_System/my_booking_app/models/roommodel.py","file_name":"roommodel.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36566103971","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\n\n#about: в строке браузера - узнать инфу о юзерагентах\noptions = Options() #Добавляем опции \n# options.headless = True # Указываем в опциях, 
что бы браузер не открывался с графической оболочкой (Олдовый метод)\noptions.add_argument('--headless')# Так удобнее - добавить аргумент: что бы браузер открывался без графической оболочки\noptions.add_argument(\"user-agent=Mozilla/5.0\") # (Windows Phone 10.0; Android 4.2.1; Microsoft; Lumia 640 XL LTE) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Mobile Safari/537.36 Edge/12.10166\") - Сохраню в комментах название юзер агентов\ndriver = webdriver.Chrome(options=options)#Добавляем в наш Хром установленные ранее опции\ndriver.get(\"https://plarium.com/en/resource/generator/nickname-generator/\")\n\ncount = 0 # Создаем каунтер для того, что бы в будущем при достижении определенного количества сделать 'break'\nwhile True: # Цикл while пока тру будем продолжать\n count += 1 # Каждый цикл мы добавляем в каунтер +1\n button_xpath = \"//button[text()='Generate']\" #Создаем переменную с ХПАФ и вставялем в будущем (Аналог Variables в роботе)\n driver.find_element(By.XPATH, button_xpath).click()\n time.sleep(3)\n\n name = driver.find_element(By.XPATH, \"//input[@name='nickname']\").get_attribute(\"value\") #Ищем элемент по ХПАФ и получаем атрибут 'value'\n print(f'Name : {name}') # синтаксис для добавления переменных в строки/В данном случае мы добавляем ссылаемся на name\n\n if count == 3:\n a = driver.execute_script(\"return navigator.userAgent\")# Скрип для определения юзер агента . Далее его выведем в консоли\n print(\"User agent:\")\n print(a)\n break #При достижении 3 мы выходим из цикла с помощью синтаксического сахара","repo_name":"Sg5757/Parsing","sub_path":"User_Agent.py","file_name":"User_Agent.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"22341459294","text":"from pathlib import Path\n\nfrom script import solution_part1, solution_part2\n\nEXERCISE_SAMPLE = \"\"\"\nMonkey 0:\n Starting items: 79, 98\n Operation: new = old * 19\n Test: divisible by 23\n If true: throw to monkey 2\n If false: throw to monkey 3\n\nMonkey 1:\n Starting items: 54, 65, 75, 74\n Operation: new = old + 6\n Test: divisible by 19\n If true: throw to monkey 2\n If false: throw to monkey 0\n\nMonkey 2:\n Starting items: 79, 60, 97\n Operation: new = old * old\n Test: divisible by 13\n If true: throw to monkey 1\n If false: throw to monkey 3\n\nMonkey 3:\n Starting items: 74\n Operation: new = old + 3\n Test: divisible by 17\n If true: throw to monkey 0\n If false: throw to monkey 1\n\"\"\".strip()\nEXERCISE_FILE_CONTENT = (Path(__file__).parent / \"input.txt\").read_text()\n\nEXPECTED_SAMPLE_P1 = 10605\nEXPECTED_FILE_P1 = 64032\nEXPECTED_SAMPLE_P2 = 2713310158\nEXPECTED_FILE_P2 = 12729522272\n\n\ndef test_p1_sample():\n assert solution_part1(EXERCISE_SAMPLE) == EXPECTED_SAMPLE_P1\n\n\ndef test_p1_file():\n assert solution_part1(EXERCISE_FILE_CONTENT) == EXPECTED_FILE_P1\n\n\ndef test_p2_sample_after_20_rounds():\n assert solution_part2(EXERCISE_SAMPLE, rounds=20) == 10197\n\n\ndef test_p2_sample():\n assert solution_part2(EXERCISE_SAMPLE) == EXPECTED_SAMPLE_P2\n\n\ndef test_p2_file():\n assert solution_part2(EXERCISE_FILE_CONTENT) == EXPECTED_FILE_P2\n","repo_name":"bbelderbos/adventofcode","sub_path":"2022/day11/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"11476925979","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nimport pandas 
as pd\n\n\ndef cfa_update(df, year, outset):\n print(\"Function : \", sys._getframe().f_code.co_name)\n \n df_agg = df.groupby([\"zone\"], as_index = False).agg({f\"area_residence{year}\":\"sum\", f\"area_commercial{year}\":\"sum\"})\n df_agg = df_agg.rename(columns = {\"zone\":\"zone_code\", f\"area_residence{year}\":\"farea_residence\", f\"area_commercial{year}\":\"farea_shop\"})\n \n \"\"\" # 0埋めしておく \"\"\"\n df_agg = df_agg.fillna(0)\n \n if(outset[\"設定値\"] == \"T\"):\n df_agg.to_csv(rf\"{outset['フォルダ名']}/建物面積集計_{year}.csv\", index = False, encoding = \"cp932\")\n\n return df_agg\n","repo_name":"Project-PLATEAU/UC22-020-Urban-structure-simulation","sub_path":"system/program/building/cfa_update.py","file_name":"cfa_update.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"37005838358","text":"## Miracle battles\n## algo_astar\n\nfrom Resources import game_stats\nfrom Resources import game_obj\nfrom Content import exploration_catalog\n\nimport math\n\n# inspired by https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2\n\n\nclass Node:\n \"\"\"A node class for A* Pathfinding\"\"\"\n\n def __init__(self, parent=None, position=None):\n self.parent = parent\n self.position = position\n\n self.g = 0\n self.h = 0\n self.f = 0\n\n def __eq__(self, other):\n return self.position == other.position\n\n\ndef astar(travel_agent, start, end, destination, friendly_cities, hostile_cities, known_map):\n \"\"\"Returns a list of tuples as a path from the given start to the given end in the given maze\"\"\"\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n ## print(\"Start node added \" + str(start_node.position))\n\n # Loop until you find the end\n while len(open_list) > 0:\n listik = []\n for i in open_list:\n listik.append(i.position)\n # print(\"Another attempt - len - \" + str(len(open_list)) + \" list is \" + str(listik))\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n # if current_node.parent is not None:\n # print(\"Current node \" + str(current_node.position) + \" with F - \" + str(current_node.f) + \" and parent - \"\n # + str(current_node.parent.position))\n # else:\n # print(\"Current node \" + str(current_node.position) + \" with F - \" + str(current_node.f))\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n # print(\"Final path is \" + str(path))\n return path[::-1] #, current_node.g # Return reversed path\n\n # Generate children\n children = []\n # print(\"current_node - [\" + str(current_node.position[0]) + \"; \" + str(current_node.position[1]) + \"]\")\n for new_position in [[-1, -1], [0, -1], [1, -1], [-1, 0],\n [1, 0], [-1, 1], [0, 1], [1, 1]]: # Adjacent squares\n\n # Get node position\n node_position = [current_node.position[0] + new_position[0], current_node.position[1] + new_position[1]]\n\n # Make sure 
it isn't a parent node\n # print(\"list(node_position) - \" + str(list(node_position)))\n # print(\"list(current_node.parent) - \" + str(current_node.parent))\n if current_node.parent is not None:\n if list(node_position) == list(current_node.parent.position):\n continue\n # Make sure within range\n if node_position[0] > game_stats.cur_level_width or node_position[0] < 1 \\\n or node_position[1] > game_stats.cur_level_height or node_position[1] < 1:\n continue\n\n # Make sure walkable terrain\n TileNum = (node_position[1] - 1) * game_stats.cur_level_width + node_position[0] - 1\n # print(\"Testing tile [\" + str(node_position[0]) + \"; \" + str(node_position[1]) + \"] - \" + str(TileNum))\n if node_position in known_map:\n if not game_obj.game_map[TileNum].travel: # No obstacles\n continue\n\n if game_obj.game_map[TileNum].city_id is not None:\n # Forbidden territory\n if game_obj.game_map[TileNum].city_id not in friendly_cities \\\n and game_obj.game_map[TileNum].city_id not in hostile_cities:\n continue\n\n if node_position == end: # Doesn't matter for the last tile\n pass\n elif node_position in known_map:\n if game_obj.game_map[TileNum].army_id is not None: # Army can't pass through other armies\n continue\n elif game_obj.game_map[TileNum].lot is not None:\n if game_obj.game_map[TileNum].lot == \"City\": # Army can't pass through settlement\n continue\n # For now army can’t pass through facilities and exploration objects\n # may reconsider later\n elif game_obj.game_map[TileNum].lot.obj_typ == \"Facility\" or \\\n game_obj.game_map[TileNum].lot.obj_typ in exploration_catalog.exploration_objects_groups_cat:\n continue\n\n # Create new node\n new_node = Node(current_node, node_position)\n\n # Append\n children.append(new_node)\n # print(\"Child \" + str(node_position) + \" added to children \" + str(len(children)))\n\n # Loop through children\n for child in children:\n # print(\"Loop through children - \" + str(child.position))\n\n closed_list_status = True # Not in closed list yet\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n closed_list_status = False\n continue\n\n if closed_list_status:\n\n # Create the f, g, and h values\n child.g = current_node.g + (math.sqrt(((child.position[0] - current_node.position[0]) ** 2) + (\n (child.position[1] - current_node.position[1]) ** 2)))\n child.h = math.sqrt(((child.position[0] - end_node.position[0]) ** 2) + (\n (child.position[1] - end_node.position[1]) ** 2))\n child.f = child.g + child.h\n # print(\"Child \" + str(child.position) + \" : f - \" + str(child.f) + \" = g - \" + str(child.g) + \" + h - \"\n # + str(child.h))\n\n append_true = True\n # print(\"open_list - \" + str(len(open_list)))\n # Child is already in the open list\n for open_node in open_list:\n # print(\"child.g \" + str(child.position) + \" g - \" + str(child.g) + \" and open_node.g \"\n # + str(open_node.position) + \" g - \" + str(open_node.g))\n # if child == open_node:\n # print(\"child == open_node is True\")\n # if child.g > open_node.g:\n # print(\"child.g > open_node.g is True\")\n if child == open_node and child.g >= open_node.g:\n # print(\"Both true\")\n append_true = False\n continue\n elif child == open_node and child.g < open_node.g:\n # print(\"Even better\")\n open_list.remove(open_node)\n continue\n\n ## print(\"Added child \" + str(child.position) + \" and F - \" + str(child.f))\n # Add the child to the open list\n if append_true:\n # print(\"Append child \" + str(child.position) + \" : f - \" + str(child.f) 
+ \" = g - \" + str(child.g)\n # + \" + h - \" + str(child.h))\n open_list.append(child)\n","repo_name":"Snowjump/Test_new_game","sub_path":"Resources/algo_astar.py","file_name":"algo_astar.py","file_ext":"py","file_size_in_byte":7832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"23405020813","text":"from re import L\n\n\nclass Solution:\n def __init__(self) -> None:\n pass\n def issub(self,str1:str,str2:str):\n i , j = 0 , 0\n l1 , l2 = len(str1) , len(str2)\n # if len(str1) < len(str2):\n if l1 < l2:\n return False\n while i < l1:\n if str1[i] == str2[j]:\n j += 1\n i += 1\n if j == l2:\n return True\n else:\n return False\n\nif __name__ == '__main__':\n solu = Solution()\n ret = solu.issub('adcb','abc')\n print(ret)","repo_name":"xniu3/Leetcode_Repo","sub_path":"1048.py","file_name":"1048.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"41321625857","text":"import db\nimport Levenshtein\nimport math\n\n_CACHES = {\n\n}\n\n\ndef _fill_cache():\n sql = \"select `id`,name,`alias` from food_purine\"\n cursor = db.cursor()\n cursor.execute(sql)\n for each in cursor:\n _id, name, alias = each\n name_arr = [name]\n if alias:\n alias_arr = alias.split(',')\n name_arr.extend(alias_arr)\n _CACHES[_id] = name_arr\n cursor.close()\n\n\ndef similarity(s1, s2):\n return 1 - Levenshtein.distance(s1, s2) / max(len(s1), len(s2))\n\n\ndef max_similarity(s1, arr):\n return max([similarity(s1, e) for e in arr])\n\n\ndef query_data(name):\n if not _CACHES:\n _fill_cache()\n candidates = []\n max_score = 0\n for _id in _CACHES.keys():\n name_arr = _CACHES[_id]\n max_simi = max_similarity(name, name_arr)\n if max_simi != 0:\n if max_simi > max_score:\n candidates = [_id]\n max_score = max_simi\n elif max_simi == max_score:\n candidates.append(_id)\n if not candidates:\n return None\n sql = \"select * from food_purine where id in (%s)\" % (','.join([str(e) for e in candidates]))\n cursor = db.cursor()\n cursor.execute(sql)\n result = []\n for each in cursor:\n _, name, value, alias = each\n result.append(\n {\n 'name': name,\n 'alias': alias,\n 'value': value\n }\n )\n cursor.close()\n return result\n\n\nif __name__ == '__main__':\n print(query_data(\"大头菜\"))\n","repo_name":"zhiyongwu/health","sub_path":"apps/data_query.py","file_name":"data_query.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36218311347","text":"# simple connect\nimport time\nfrom multiprocessing import Pool\nfrom redis.client import Redis\n\nREQUESTS_COUNT = 100000\n\ndef test(i):\n client = Redis(host='0.0.0.0', port=6379)\n client.set(i, i)\n assert client.get(i) != i, 'wrong save'\n\n\nstart_time = time.time()\n\nwith Pool(16) as p:\n p.map(test, range(REQUESTS_COUNT))\n\nsec = time.time() - start_time\nprint(f'time {sec:0.2f} seconds')\n\n","repo_name":"cdies/correct_connection_to_db","sub_path":"simple_get_set.py","file_name":"simple_get_set.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"3956595196","text":"from numpy import*\nglicose=array(eval(input(\"digite o numero:\")))\na=0\ni=0\nwhile(i99):\n\t\tprint(i)\n\t\ta=a+1\n\telse:\n\t\ta=a+0\n\ti=i+1\nprint(a)\n","repo_name":"JosephLevinthal/Research-projects","sub_path":"5 - Notebooks e Data/1 - Análises 
numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4357/codes/1771_1185.py","file_name":"1771_1185.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"ar","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"24375806848","text":"from collections import Counter\nprint(\"Welcome to the Frequency Analysis App\\n\")\n\nnonLetters = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"0\", \" \", \".\", \",\", \"?\", \"!\", \"'\", '\"', \":\", \";\", \")\", \"(\",\n \"[\", \"]\", \"+\", \"*\", \"-\", \"_\", \"=\", \"%\", \"#\", \"/\", \"|\", \"\\n\", \"\\t\"]\n\n\nfor i in range(1, 3):\n phrase = input(\"Enter a word or phrase to count the occurrence of each letter: \")\n print()\n for nonLetter in nonLetters:\n phrase = phrase.replace(nonLetter, \"\")\n\n numberOfLetters = len(phrase)\n countOfEachLetter = Counter(phrase)\n\n print(\"Here is the frequency analysis from key phrase {}:\\n\".format(i))\n print(\"\\tLetter\\t\\tOccurrence\\t\\tPercentage\")\n for key, values in sorted(countOfEachLetter.items()):\n percentage = 100 * values/numberOfLetters\n print(\"\\t{}\\t\\t{}\\t\\t\\t{:.2f}\".format(key, values, percentage))\n\n orderedLetterCount = countOfEachLetter.most_common()\n listOfOrderedLetters = []\n\n for pair in orderedLetterCount:\n listOfOrderedLetters.append(pair[0])\n print(\"Letters ordered from highest occurrence to lowest:\")\n for letter in listOfOrderedLetters:\n print(letter, end=\"\")\n print()","repo_name":"MiguelCF06/PythonProjects","sub_path":"Dictionaries/FrequencyAnalisis.py","file_name":"FrequencyAnalisis.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"780961341","text":"import random\n\nfrom battle_city.enums import Direction, UnitType, UpdateMode\nfrom battle_city.engine.game_constants import (\n DEFAULT_TANK_SPAWNER_COOL_DOWN,\n DEFAULT_TANK_SPAWNER_SIZE,\n DEFAULT_TANK_SPAWNER_TANK_TO_GO,\n)\nfrom battle_city.engine.units.tank_bot import TankBot\nfrom battle_city.engine.units.unit import Unit\nfrom battle_city.rect import Rect\n\n\nclass TankBotSpawner(Unit):\n def __init__(\n self,\n tank_to_go=DEFAULT_TANK_SPAWNER_TANK_TO_GO,\n next_tank_pointer=None,\n priority_direction=None,\n ):\n super().__init__()\n self.collision = Rect(\n -1, -1, DEFAULT_TANK_SPAWNER_SIZE[0], DEFAULT_TANK_SPAWNER_SIZE[1]\n )\n self.type = UnitType.BotSpawner\n\n self.is_tank_alive = False\n self.current_tank = None\n self.no_tank_tick_count = DEFAULT_TANK_SPAWNER_COOL_DOWN\n self.no_tank_tick_pointer = 0\n\n self.tank_to_go = tank_to_go\n self.next_tank_pointer = next_tank_pointer\n self.tanks_to_go = list()\n self.priority_direction = priority_direction\n\n self.update_mode = UpdateMode.StepOnly\n\n self.directions = [\n Direction.Down,\n Direction.Up,\n Direction.Right,\n Direction.Left,\n ]\n\n def is_completed(self) -> bool:\n return (\n self.tank_to_go <= 0\n or len(self.tanks_to_go) <= 0\n or (\n self.next_tank_pointer is not None\n and self.next_tank_pointer >= len(self.tanks_to_go)\n )\n ) and self.is_tank_alive\n\n def get_next_tank(self) -> TankBot:\n if self.next_tank_pointer is None:\n if len(self.tanks_to_go) > 1:\n tank_number = random.randint(0, len(self.tanks_to_go) - 1)\n else:\n tank_number = 0\n if tank_number >= len(self.tanks_to_go):\n return None\n tank = self.tanks_to_go[tank_number]\n self.tanks_to_go.remove(tank)\n else:\n if self.next_tank_pointer >= len(self.tanks_to_go):\n 
return None\n tank = self.tanks_to_go[self.next_tank_pointer]\n self.next_tank_pointer += 1\n\n if self.priority_direction is None:\n tank.set_velocity(self.directions[random.randint(0, 3)])\n else:\n tank.set_velocity(self.priority_direction)\n\n if self.is_completed():\n self.type = UnitType.BotSpawner\n\n return tank\n\n def step(self, field):\n super().step(field)\n if not self.is_tank_alive:\n if self.no_tank_tick_pointer != self.no_tank_tick_count:\n self.no_tank_tick_pointer += 1\n else:\n self.no_tank_tick_pointer = 0\n self.current_tank = self.get_next_tank()\n if self.current_tank is None:\n self.type = UnitType.EmptyBotSpawner\n elif field.try_place_unit(\n self.current_tank, self.collision.x, self.collision.y\n ):\n self.tank_to_go -= 1\n self.is_tank_alive = True\n else:\n if self.current_tank not in field.units:\n self.is_tank_alive = False\n self.current_tank = None\n\n def is_intersected_with_unit(self, other: Unit) -> bool:\n return False\n\n def is_intersected_with_rect(self, rect: Unit) -> bool:\n return False\n","repo_name":"waverma/battle_city","sub_path":"battle_city/engine/units/tank_bot_spawner.py","file_name":"tank_bot_spawner.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"10299300466","text":"import sys\nfrom time import sleep\nimport os\n\n\n\ndef write(text):\n \"\"\"Функция создаёт эффект печатания букв\"\"\"\n for char in text:\n sleep(0.05)\n sys.stdout.write(char)\n sys.stdout.flush()\n\n\ndef more_option():\n \"\"\"Функция спрашивает у пользователя о дополнительных настройках программы\"\"\"\n\n mark = '-'\n params = []\n question = input('Хотите указать дополнительные параметры? 
ДА\\НЕТ: ').upper().strip()\n\n\n\ndef check_tokens():\n \"\"\"\n Функция проверяет наличие ID Профиля ВК и токена Яндекс.Диск в файлах папки Tokens.\n Если они отсутствуют, происходит запись введенных данных в файлы.\n\n Если файл пустой, то os.stat().st_size == 0 будет возвращать True\n \"\"\"\n\n tokens = []\n mark = '-'\n\n # Проверка ID ВК в файле Tokens/VK_ID.txt\n if os.stat(\"Tokens/VK_ID.txt\").st_size == 0:\n id_vk = input('Введите пожалуйста ID аккаунта VK: ').strip()\n while not id_vk.isdigit():\n print('ID VK должен быть числом, пожалуйста введите еще раз.')\n print(mark * 30)\n id_vk = input('Введите пожалуйста ID аккаунта VK: ')\n else:\n with open('Tokens/VK_ID.txt', 'w') as vk:\n vk.write(id_vk)\n tokens.append(id_vk)\n print(mark * 30)\n else:\n with open('Tokens/VK_ID.txt', 'r') as token:\n reader = token.read()\n print(f'Ваш ID профиля VK сохраненный в файле \"Tokens/VK_ID.txt\"\\n:{reader}:\\n')\n question = input('Оставляем введённый ID VK? ДА/НЕТ: ').lower().strip()\n while question != 'да' or question != 'нет':\n if question == 'да':\n with open('Tokens/VK_ID.txt', 'r') as token:\n reader = token.read()\n tokens.append(reader)\n print(mark * 30)\n break\n elif question == 'нет':\n id_vk = input('Введите новый ID - ')\n while not id_vk.isdigit():\n print('ID VK должен быть числом, пожалуйста введите еще раз.')\n print(mark * 30)\n id_vk = input('Введите пожалуйста ID аккаунта VK: ')\n else:\n with open('Tokens/VK_ID.txt', 'w') as vk:\n vk.write(id_vk)\n tokens.append(id_vk)\n break\n else:\n print('\\nПожалуйста, ответьте снова.')\n question = input('Оставляем введённый ID VK? ДА/НЕТ: ').lower().strip()\n\n # Проверка токена Яндекс.Диск в файле Tokens/Yandex_Token.txt\n if os.stat(\"Tokens/Yandex_Token.txt\").st_size == 0:\n yandex_token = input(f'''Введите пожалуйста свой токен Яндекс.Диска\n----Узнать его можно по этой ссылке - \"https://yandex.ru/dev/disk/poligon/\" \n----Ввод: ''', )\n\n with open('Tokens/Yandex_Token.txt', 'w') as yandex:\n yandex.write(yandex_token)\n tokens.append(yandex_token)\n else:\n with open('Tokens/Yandex_Token.txt', 'r') as token:\n reader = token.read()\n print(f'Ваш токен сохраненный в файле \"Tokens/Yandex_Token.txt\"\\n:{reader}:\\n')\n\n question = input(f'Оставляем введённый вами токен Яндекс.Диска? ДА/НЕТ: ').lower().strip()\n while question != 'да' or question != 'нет':\n if question == 'да':\n with open('Tokens/Yandex_Token.txt') as token:\n reader = token.read()\n tokens.append(reader)\n print(mark * 30)\n break\n elif question == 'нет':\n yandex_token = input('Введите новый токен - ')\n with open('Tokens/Yandex_Token.txt', 'w') as yandex:\n yandex.write(yandex_token)\n tokens.append(yandex_token)\n print(mark * 40)\n break\n else:\n print('Пожалуйста, ответьте снова.')\n question = input('Оставляем введённый токен Яндекс.Диска? 
ДА/НЕТ: ').lower().strip()\n print('\\n' * 20)\n\n\n\n\n return tokens\n\n","repo_name":"AlexanderPRM/Back-UP_Copier","sub_path":"system_files.py","file_name":"system_files.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"11241416391","text":"#!/usr/bin/env python3\n# [SublimeLinter flake8-max-line-length:120]\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom random import gauss\nfrom math import asin, cos, sin\nfrom os import makedirs\nfrom os.path import dirname, isdir, join\n\n\nHEADER = (\n '\\n'\n '\\n'\n '\\n'\n '\\n'\n)\n\nMODULE = (\n ' \\n'\n ' {tx} {ty} {tz} \\n'\n ' {rx} {ry} {rz} \\n'\n ' \\n'\n ' \\n'\n)\n\nFOOTER = (\n '\\n'\n)\n\n\ndef make_global_xml(basedir):\n global_fn = join(basedir, 'SIMCOND/Conditions/VP/Alignment/Global.xml')\n if not isdir(dirname(global_fn)):\n makedirs(dirname(global_fn))\n\n xml = HEADER\n xml += MODULE.format(\n class_id=6, name='VPSystem',\n tx=0, ty=0, tz=0,\n rx=0, ry=0, rz=0\n )\n xml += MODULE.format(\n class_id=1008106, name='VPLeft',\n tx=0, ty=0, tz=0,\n rx=0, ry=0, rz=0\n )\n xml += MODULE.format(\n class_id=1008106, name='VPRight',\n tx=0, ty=0, tz=0,\n rx=0, ry=0, rz=0\n )\n xml += FOOTER\n with open(global_fn, 'wt') as f:\n f.write(xml)\n\n\ndef make_modules_xml(basedir, x_distortion, y_distortion, sigma, alternate=False):\n modules_fn = join(basedir, 'SIMCOND/Conditions/VP/Alignment/Modules.xml')\n if not isdir(dirname(basedir)):\n makedirs(dirname(basedir))\n\n xml = HEADER\n module_width = 100000\n for i in range(52):\n if alternate and int(i/2) % 2 == 0:\n xml += MODULE.format(\n class_id=6, name='Module{i:02d}'.format(i=i),\n tx=0, ty=0, tz=0,\n rx=0, ry=0, rz=0\n )\n continue\n\n rx = asin(x_distortion / module_width)\n if sigma > 0:\n rx = gauss(rx, sigma*abs(rx))\n\n ry = asin(y_distortion / module_width)\n if sigma > 0:\n ry = gauss(ry, sigma*abs(ry))\n\n rz = 0\n\n assert rx == rz == 0\n x_over_reach = 22810\n tx = - module_width/1000 * (1-cos(ry)) * (1-x_over_reach/module_width)\n tz = - module_width/1000 * sin(ry) * (1-x_over_reach/module_width)\n\n xml += MODULE.format(\n class_id=6, name='Module{i:02d}'.format(i=i),\n tx=tx, ty=0, tz=tz,\n rx=rx, ry=ry, rz=rz\n )\n xml += FOOTER\n with open(modules_fn, 'wt') as f:\n f.write(xml)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Make the global and modules xml')\n parser.add_argument('--basedir')\n parser.add_argument('--x-distortion', type=float, default=0)\n parser.add_argument('--y-distortion', type=float, default=0)\n parser.add_argument('--sigma', type=float, default=0)\n parser.add_argument('--alternate', action='store_true')\n\n args = parser.parse_args()\n\n make_global_xml(args.basedir)\n make_modules_xml(args.basedir, args.x_distortion, args.y_distortion, args.sigma, args.alternate)\n","repo_name":"chrisburr/lhcb-velopix-studies","sub_path":"hybrid-distortions/assets/build_alignment.py","file_name":"build_alignment.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"1871304427","text":"'''\n7. 
Reverse Integer\n\nhttps://leetcode.com/problems/reverse-integer/\n\nGiven a 32-bit signed integer, reverse digits of an integer.\n\nExample 1:\n\nInput: 123\nOutput: 321\nExample 2:\n\nInput: -123\nOutput: -321\nExample 3:\n\nInput: 120\nOutput: 21\nNote:\nAssume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−231, 231 − 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.\n'''\n\nclass Solution:\n def reverse(self, x: int) -> int:\n \n INT_MAX_D10 = (2 ** 31 - 1) / 10\n INT_MIN_D10 = (2 ** 31)/10\n rev = 0\n temp = 0\n\n negative = -1 if x < 0 else 1\n x = abs(x)\n\n while x > 0:\n # pop\n pop = int(x % 10)\n x = int(x / 10)\n\n # check\n if negative == 1 and (rev > INT_MAX_D10 or (rev == INT_MAX_D10 and pop > 7)):\n return 0\n if negative == -1 and (rev > INT_MIN_D10 or (rev == INT_MIN_D10 and pop > 8)):\n return 0\n\n # push\n temp = rev * 10 + pop\n rev = temp\n \n return int(temp) * negative\n\n\ndef main():\n import sys\n import io\n def readlines():\n for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):\n yield line.strip('\\n')\n\n lines = readlines()\n while True:\n try:\n line = next(lines)\n x = int(line);\n \n ret = Solution().reverse(x)\n\n out = str(ret);\n print(out)\n except StopIteration:\n break\n\nif __name__ == '__main__':\n main()","repo_name":"sunyikang/skilltraining","sub_path":"python/leetcode/leetcode_7_v2.py","file_name":"leetcode_7_v2.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"34013498498","text":"import os\nimport random\nimport string\n\n# list all directory file and the subdirectory files\ndef show_dir(filepath):\n for i in os.listdir(filepath):\n path = os.path.join(filepath,i)\n print(path)\n if os.path.isdir(path):\n show_dir(path)\n\n#show_dir(os.curdir)\n\n# find html file in current directory and all subdirectory.\ndef find_html(filepath):\n for i in os.listdir(filepath):\n path = os.path.join(filepath,i)\n if os.path.isdir(path):\n find_html(path)\n if path.endswith('html'):\n print(path)\n \n#find_html(os.curdir)\n\ndef wordsReplace(oldWord, newWord):\n\tpf_file = os.listdir()\n\t\n\tfor i, name in enumerate(pf_file):\n\t\tif name.startswith('_') or not name.endswith('.py'):\n\t\t\tpf_file.remove(name)\n\t\n\tfor file in pf_file:\n\t\twith open(file,'a+') as f:\n\t\t\tf.seek(0)\n\t\t\tall_text = f.readlines()\n\t\t\tfor i, s in enumerate(all_text):\n\t\t\t\tall_text[i] = s.replace(oldWord, newWord)\n\t\t\tf.seek(0)\n\t\t\tf.writelines(all_text)\n\t\t\t\n#wordsReplace('@loginrequired()','@loginrequired')\n\n# swap key and value in dictionary\ndict1 = {\"A\":1,\"B\":2,\"C\":3}\ndict2 = {y:x for (x,y) in dict1.items()}\nprint(dict2)\n\n#generate random n chars\ndef ranChars(nchars):\n L = []\n for ascii in [range(65,91),range(97,123),range(48,58)]:\n for char in ascii:\n L.append(chr(char))\n return random.sample(L,nchars)\n\n#print(''.join(ranChars(6)))\n\ndef ranChars2(nchars):\n s = string.digits + string.ascii_letters\n return random.sample(s, nchars)\n\n#print(''.join(ranChars2(10)))\nprint('square root: %0.6f' % 2**0.5)\n\n# input: filename or directory,\n# then remove that file\\directory, subdirectory, sub-subdirectory\ndef rmAll(rm_name, goal=''):\n\n if os.path.isdir(rm_name):\n for f in os.listdir(rm_name):\n if f.startswith(goal):\n for ff in os.listdir(rm_name + '\\\\' + f):\n 
os.remove(rm_name+'\\\\'+f+'\\\\'+ff)\n os.removedirs(rm_name+'\\\\'+f)\n else:\n os.remove(rm_name)\n\ndef rmRecur(rm_name):\n if not os.path.isdir(rm_name):\n os.remove(rm_name)\n else:\n for f in os.listdir(rm_name):\n mm = rm_name+'/'+f\n rmRecur(mm)\n os.removedirs(rm_name)","repo_name":"karouu/python3_study_with_sample_code","sub_path":"code_sample/fileTextFilter.py","file_name":"fileTextFilter.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"11332083008","text":"import re\n\nINT_MAX = 2 ** 31 - 1\nINT_MIN = -2 ** 31\n\nNUMBER = re.compile(r'^[+-]?\\d+')\n\nclass Solution:\n def myAtoi(self, s: str) -> int:\n match = NUMBER.search(s.strip())\n \n if not match:\n return 0\n\n return max(min(int(match.group()), INT_MAX), INT_MIN)","repo_name":"mzoz/coding-challenges","sub_path":"leetcode/solutions/medium/string-to-integer-atoi/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"11774450735","text":"def unwrap_lines(text: str):\n result = []\n lines = []\n\n def _add_to_result(newline: bool = True):\n if lines:\n result.append(' '.join(lines))\n lines.clear()\n if newline:\n result.append('')\n\n def _is_list_line(line: str) -> bool:\n first = line.split(maxsplit=1)[0]\n return (first[:-1].isdigit() and first[-1] == '.') or (first == '-')\n\n for part in text.splitlines(keepends=False):\n if part:\n if _is_list_line(part):\n _add_to_result(newline=False)\n lines.append(part.strip())\n if part.endswith(' '):\n lines[-1] = part\n _add_to_result(newline=False)\n else:\n _add_to_result()\n\n _add_to_result()\n\n return '\\n'.join(result)\n","repo_name":"GeoffRiley/Morsels","sub_path":"unwrap_lines/unwrap.py","file_name":"unwrap.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"39129179093","text":"from pyramid.httpexceptions import HTTPFound\nfrom pyramid.view import view_config, view_defaults\n\nfrom lms.extensions.feature_flags._helpers import FeatureFlagsCookieHelper\n\n\n@view_defaults(\n route_name=\"feature_flags_cookie_form\",\n renderer=\"../_templates/cookie_form.html.jinja2\",\n)\nclass CookieFormViews:\n \"\"\"A form for toggling feature flags in a cookie.\"\"\"\n\n def __init__(self, request):\n self._cookie_helper = FeatureFlagsCookieHelper(request)\n self._request = request\n\n @view_config(request_method=\"GET\")\n def get(self):\n \"\"\"Render the feature flags cookie form page.\"\"\"\n flags = self._cookie_helper.get_all()\n\n return {\n \"flags\": flags,\n # The final state of each feature flag\n \"state\": {flag: self._request.feature(flag) for flag in flags.keys()},\n }\n\n @view_config(request_method=\"POST\")\n def post(self):\n \"\"\"Handle a feature flags cookie form submission.\"\"\"\n response = HTTPFound(\n location=self._request.route_url(\"feature_flags_cookie_form\")\n )\n self._cookie_helper.set_cookie(response)\n self._request.session.flash(\n \"Feature flags saved in cookie ✔\", \"feature_flags\", allow_duplicate=False\n )\n return response\n","repo_name":"hypothesis/lms","sub_path":"lms/extensions/feature_flags/views/cookie_form.py","file_name":"cookie_form.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"28"}
+{"seq_id":"32815827024","text":"import requests\nfrom bs4 import BeautifulSoup\n\n@profile\ndef get_urls():\n response = requests.get('http://www.swufe.edu.cn/')\n s = BeautifulSoup(response.content, 'html.parser')\n urls = []\n for url in s.find_all('a'):\n urls.append(url['href'])\n\n\nif __name__ == '__main__':\n get_urls()\n","repo_name":"ChenZhongPu/swufe-se","sub_path":"week3/py-profile/line_profile.py","file_name":"line_profile.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"} +{"seq_id":"38536289643","text":"import datetime\nfrom uuid import uuid4\n\nfrom django.apps import apps\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\n\nfrom django.db.models import Q\nfrom django.views.decorators.clickjacking import xframe_options_exempt\n\nfrom .forms import PostForm, CommentForm\n\nfrom .click_house_link import ClickHouseService\n\nPostDB = apps.get_model('home', 'Post')\nUserDB = apps.get_model('accounts', 'User')\nFollowDB = apps.get_model('home', 'HumanConnections')\nCategoryDB = apps.get_model('home', 'Categories')\nCommentsDB = apps.get_model('home', 'Comments')\n\nclick_db = ClickHouseService.instance()\npost_chunk = 20\n\n\ndef create_post_list(index, posts, post_type):\n new_posts = [None] * 20\n\n n_objects = len(posts)\n\n for i in range(post_chunk):\n if index + i >= n_objects:\n if post_type == \"home\":\n overlapped_index = (index + i) % n_objects\n new_posts[i] = posts[overlapped_index]\n else:\n new_posts[i] = posts[index + i]\n\n return new_posts\n\n\ndef get_post_category(post):\n categories = CategoryDB.objects.filter(post=post)\n cat_string = \"\"\n for category in categories:\n cat_string += category.categorie\n cat_string += \"#\"\n return cat_string[:-1]\n\n\ndef click_execute(query):\n result = click_db.execute(query)\n return result\n\n\ndef shuffle_recommended(wanted, rest):\n recommended = []\n rest = list(rest)\n counter = 0\n for obj in wanted:\n recommended.append(obj.post)\n if counter % 3 == 0:\n recommended.append(rest.pop(0).post)\n recommended.append(rest.pop(0).post)\n counter += 1\n\n for obj in rest:\n recommended.append(obj.post)\n\n return recommended\n\n\ndef ajax_query(request):\n user = request.user\n\n if request.method == 'GET':\n try:\n user_prof = UserDB.objects.get(username=request.GET.get('username', None))\n except:\n user_prof = request.user\n index = request.GET.get('index', None)\n\n create_post = None\n posts = None\n finish = 0\n\n if request.GET.get('type', None) == \"home\":\n categories = user.categories.split(\"#\")\n\n cat_num = len(categories)\n\n if cat_num == 1:\n wanted_objects = CategoryDB.objects.filter(categorie=categories[0])\n unwanted_objects = CategoryDB.objects.filter(~Q(categorie=categories[0]))\n elif cat_num == 2:\n wanted_objects = CategoryDB.objects.filter(Q(categorie=categories[0]) | Q(categorie=categories[1]))\n unwanted_objects = CategoryDB.objects.filter(~Q(categorie=categories[0]) & ~Q(categorie=categories[1]))\n\n else:\n wanted_objects = CategoryDB.objects.filter(\n Q(categorie=categories[0]) | Q(categorie=categories[1]) | Q(categorie=categories[2]))\n unwanted_objects = CategoryDB.objects.filter(\n ~Q(categorie=categories[0]) & ~Q(categorie=categories[1]) & ~Q(categorie=categories[2]))\n\n recommended = 
shuffle_recommended(wanted_objects, unwanted_objects)\n\n create_post = create_home_post\n posts = create_post_list(int(index), recommended, \"home\")\n\n elif request.GET.get('type', None) == \"profile\":\n create_post = create_user_post\n post_list = PostDB.objects.filter(user_id=user_prof.id).order_by('published')\n posts = create_post_list(int(index), post_list, \"profile\")\n\n if int(index) + post_chunk > post_list.count():\n finish = 1\n\n htmls = []\n for post in posts:\n if post is not None:\n htmls.append(create_post(post, request.user))\n\n context = {'HTMLS': htmls, 'finish': finish}\n\n return JsonResponse(context)\n\n elif request.method == 'POST':\n request_type = request.POST.get('type', None)\n\n if request_type == 'like':\n value = int(request.POST.get('value', None))\n post_id = request.POST.get('post_id', None)\n\n post = PostDB.objects.get(id=post_id)\n\n query_string = click_db.insert(user.id, post.id, get_post_category(post), value,\n 0)\n click_execute(query_string)\n return HttpResponse()\n\n elif request_type == 'step_in':\n value = int(request.POST.get('value', None))\n post_id = request.POST.get('post_id', None)\n post = PostDB.objects.get(id=post_id)\n query_string = click_db.insert(user.id, post.id, get_post_category(post), 0, value)\n click_execute(query_string)\n\n return HttpResponse()\n\n elif request_type == 'follow':\n username = request.POST.get('username', None)\n value = request.POST.get('value', None)\n\n followed = UserDB.objects.get(username=username)\n\n if int(value) > 0:\n FollowDB.objects.create(id=create_id(), follower=user, followed=followed)\n elif int(value) < 0:\n FollowDB.objects.filter(follower=user, followed=followed).delete()\n\n return HttpResponse()\n\n\ndef autocomplete(request):\n username = request.GET.get('username')\n\n payload = []\n if username:\n matching_objs = UserDB.objects.filter(username__icontains=username)\n\n for obj in matching_objs:\n payload.append(obj.username)\n\n return JsonResponse({'status': 200, 'data': payload})\n\n\ndef save_into_categories(categories, post_id):\n CategoriesDB = apps.get_model('home', 'Categories')\n PostDB = apps.get_model('home', 'Post')\n\n categories = categories.split(\"#\")\n categories = list(filter(lambda a: a != \"\", categories))\n\n for categorie in categories:\n post = PostDB.objects.get(id=post_id)\n CategoriesDB.objects.create(id=f\"{post.id}|{categorie}\", post=post, categorie=categorie)\n\n\n# create\n@login_required(login_url='/accounts/login')\ndef select(request):\n user = request.user\n\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n description = form.cleaned_data['description']\n form.cleaned_data['user_id'] = request.user.id\n form.save()\n\n return redirect(\"/\")\n else:\n form = PostForm()\n context = {\n 'form': form,\n \"profile_pic\": user.profile_pic\n }\n return render(request, \"create.html\", context)\n\n\n# home\n\n@login_required(login_url='/accounts/login')\ndef home(request):\n user = request.user\n\n context = {\n \"profile_pic\": user.profile_pic\n }\n\n return render(request=request, template_name=\"home.html\", context=context)\n\n\ndef redirect_to_post_page(request, post_id=None):\n if PostDB.objects.filter(id=post_id).exists():\n user = request.user\n post = PostDB.objects.get(id=post_id)\n post_html = create_home_post(post, user, post_id)\n\n context = {\n \"profile_pic\": user.profile_pic,\n \"post\": post_html,\n }\n return render(request=request, template_name=\"post_page.html\", context=context)\n 
return redirect('/')\n\n\ndef get_delta_time(publish_time):\n time_delta = timezone.now() - publish_time\n\n days = time_delta.days\n seconds = time_delta.seconds\n minutes = int(seconds / 60)\n hours = int(minutes / 60)\n weeks = int(days / 7)\n months = int(weeks / 4)\n years = int(months / 12)\n\n if years > 0:\n delta_msg = f\"posted {years} years ago\"\n elif months > 0:\n delta_msg = f\"posted {months} months ago\"\n elif weeks > 0:\n delta_msg = f\"posted {weeks} weeks ago\"\n elif days > 0:\n delta_msg = f\"posted {days} days ago\"\n elif hours > 0:\n delta_msg = f\"posted {hours} hours ago\"\n elif minutes > 0:\n delta_msg = f\"posted {minutes} minutes ago\"\n else:\n delta_msg = \"posted just now\"\n\n return delta_msg\n\n\ndef create_id():\n now = datetime.datetime.now()\n return str(now.year) + str(now.month) + str(now.day) + str(uuid4())[:7]\n\n\n# profile\n\n@login_required(login_url='/accounts/login')\ndef profile(request, username=None):\n if request.user.username == username:\n user = request.user\n\n followers = FollowDB.objects.filter(followed=user).count\n following = FollowDB.objects.filter(follower=user).count\n posts = PostDB.objects.filter(user=user).count\n\n context = {\n \"profile_pic\": user.profile_pic,\n \"username\": user.username,\n \"posts\": posts,\n \"followers\": followers,\n \"following\": following,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"bio\": user.bio,\n \"show\": False\n }\n\n return render(request=request, template_name=\"profile.html\", context=context)\n else:\n user = None\n try:\n user = UserDB.objects.get(username=username)\n except:\n redirect(\"/\")\n if user:\n followers = FollowDB.objects.filter(followed=user).count\n following = FollowDB.objects.filter(follower=user).count\n posts = PostDB.objects.filter(user=user).count\n\n is_following = FollowDB.objects.filter(follower=request.user, followed=user).exists()\n\n context = {\n \"profile_pic\": request.user.profile_pic,\n \"username\": user.username,\n \"posts\": posts,\n \"followers\": followers,\n \"following\": following,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"bio\": user.bio,\n \"show\": True,\n \"is_follow\": is_following\n }\n\n return render(request=request, template_name=\"profile.html\", context=context)\n return redirect(\"/\")\n\n\n@login_required(login_url='/accounts/login')\ndef edit_profile(request, username=None):\n user = request.user\n context = {\n \"profile_pic\": user.profile_pic,\n \"username\": user.username,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"bio\": user.bio\n }\n\n return render(request=request, template_name=\"edit_profile.html\", context=context)\n\n\ndef get_likes(post_id):\n query_string = click_db.select(\"user_activity\",\n where=f\"(post_id = '{post_id}')\",\n select_list=[\"sum(likes) AS likes\"]\n )\n click_result = click_execute(query_string)\n\n if click_result:\n likes = click_result[0][0]\n else:\n likes = 0\n\n return likes\n\n\ndef am_i_like(user_id, post_id):\n query_string = click_db.select(\"user_activity\",\n where=f\"(post_id = '{post_id}') AND (user_id = '{user_id}')\",\n select_list=[\"sum(likes) AS likes\"]\n )\n\n click_result = click_execute(query_string)\n\n if click_result:\n likes = click_result[0][0]\n else:\n likes = 0\n\n if likes != 0:\n return True\n return False\n\n\ndef create_user_post(post_obj, user):\n post_id = post_obj.id\n image = post_obj.img\n likes = get_likes(post_id)\n\n comments = CommentsDB.objects.filter()\n comments 
= 0\n post_html = render_to_string(\"profile_post_temp.html\")\n\n replace_array = [post_id, image, likes, comments]\n\n for item in replace_array:\n post_html = post_html.replace(\"{}\", str(item), 1)\n\n return post_html\n\n\ndef create_home_post(post_obj, user, post_id=None):\n likes_svgs = [\n '',\n '']\n\n user_obj = UserDB.objects.get(id=post_obj.user_id)\n\n # plant content\n # post_data\n post_img_path = post_obj.img\n post_caption = post_obj.caption\n post_uploaded = get_delta_time(post_obj.published)\n\n # user_data\n\n user_username = user_obj.username\n user_pfp_path = user_obj.profile_pic\n\n post_html = render_to_string(\"post_temp.html\")\n\n likes = get_likes(post_obj.id)\n\n is_like = am_i_like(user.id, post_obj.id)\n\n if is_like:\n svg = likes_svgs[0]\n else:\n svg = likes_svgs[1]\n\n if post_id:\n comment_html = f\"\"\n else:\n comment_html = \"\"\n replace_array = [post_obj.id, is_like, user_pfp_path, user_username, post_img_path, svg, likes, user_username,\n post_caption,\n post_uploaded, comment_html]\n\n for item in replace_array:\n post_html = post_html.replace(\"{}\", str(item), 1)\n\n return post_html\n\n\n@login_required(login_url='accounts/login')\n@xframe_options_exempt\ndef comment(request, post_id):\n user = request.user\n\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n form.pre_save(user.id, post_id)\n form.save()\n return redirect(f\"/{post_id}/comment\")\n else:\n form = CommentForm()\n context = {\n 'form': form,\n \"post_id\": post_id,\n \"comments\": get_comments(post_id)\n }\n return render(request=request, template_name=\"comments.html\", context=context)\n\n\ndef get_comments(post_id):\n comments = CommentsDB.objects.filter(post=post_id)\n html = \"
    \"\n for _comment in comments:\n html += f\"

    {_comment.user.username}{_comment.comment}

    \"\n\n html += \"
    \"\n\n return html\n","repo_name":"itaykbn/cyber-senior-project","sub_path":"locallibrary/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"4290746997","text":"import sqlite3\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5 import QtCore, QtWidgets\nimport NextOffice.shipment.shipment_main\nimport NextOffice.shipment.shipment_edit\nimport NextOffice.shipment.shipment_add\n\nclass Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(1311, 809)\n self.label_13 = QtWidgets.QLabel(Form)\n self.label_13.setGeometry(QtCore.QRect(540, 10, 161, 31))\n self.label_13.setObjectName(\"label_13\")\n self.label_id1 = QtWidgets.QLabel(Form)\n self.label_id1.setGeometry(QtCore.QRect(120, 60, 91, 21))\n self.label_id1.setObjectName(\"label_id1\")\n self.lineEdit_search = QtWidgets.QLineEdit(Form)\n self.lineEdit_search.setGeometry(QtCore.QRect(160, 10, 113, 20))\n self.lineEdit_search.setObjectName(\"lineEdit_search\")\n self.label_customer1 = QtWidgets.QLabel(Form)\n self.label_customer1.setGeometry(QtCore.QRect(90, 130, 161, 21))\n self.label_customer1.setObjectName(\"label_customer1\")\n self.line_7 = QtWidgets.QFrame(Form)\n self.line_7.setGeometry(QtCore.QRect(10, 80, 251, 20))\n self.line_7.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_7.setObjectName(\"line_7\")\n self.pushButton_SwitchCustomer = QtWidgets.QPushButton(Form)\n self.pushButton_SwitchCustomer.setGeometry(QtCore.QRect(1170, 10, 91, 23))\n self.pushButton_SwitchCustomer.setObjectName(\"pushButton_SwitchCustomer\")\n self.pushButton_edit1 = QtWidgets.QPushButton(Form)\n self.pushButton_edit1.setGeometry(QtCore.QRect(10, 710, 71, 23))\n self.pushButton_edit1.setObjectName(\"pushButton_edit1\")\n self.label_4 = QtWidgets.QLabel(Form)\n self.label_4.setGeometry(QtCore.QRect(20, 150, 61, 39))\n self.label_4.setObjectName(\"label_4\")\n self.line = QtWidgets.QFrame(Form)\n self.line.setGeometry(QtCore.QRect(10, 40, 1291, 20))\n self.line.setFrameShape(QtWidgets.QFrame.HLine)\n self.line.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line.setObjectName(\"line\")\n self.pushButton_SwitchService = QtWidgets.QPushButton(Form)\n self.pushButton_SwitchService.setGeometry(QtCore.QRect(1070, 10, 91, 23))\n self.pushButton_SwitchService.setObjectName(\"pushButton_SwitchService\")\n self.label_8 = QtWidgets.QLabel(Form)\n self.label_8.setGeometry(QtCore.QRect(130, 12, 31, 20))\n self.label_8.setObjectName(\"label_8\")\n self.label_5 = QtWidgets.QLabel(Form)\n self.label_5.setGeometry(QtCore.QRect(20, 180, 61, 39))\n self.label_5.setObjectName(\"label_5\")\n self.label_2 = QtWidgets.QLabel(Form)\n self.label_2.setGeometry(QtCore.QRect(20, 90, 61, 39))\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(Form)\n self.label_3.setGeometry(QtCore.QRect(20, 240, 61, 39))\n self.label_3.setObjectName(\"label_3\")\n self.pushButton_previous = QtWidgets.QPushButton(Form)\n self.pushButton_previous.setGeometry(QtCore.QRect(120, 770, 75, 23))\n self.pushButton_previous.setObjectName(\"pushButton_previous\")\n self.pushButton_next = QtWidgets.QPushButton(Form)\n self.pushButton_next.setGeometry(QtCore.QRect(230, 770, 75, 23))\n self.pushButton_next.setObjectName(\"pushButton_next\")\n self.pushButton_new = QtWidgets.QPushButton(Form)\n 
self.pushButton_new.setGeometry(QtCore.QRect(10, 10, 91, 23))\n self.pushButton_new.setObjectName(\"pushButton_new\")\n self.lineEdit_jump = QtWidgets.QLineEdit(Form)\n self.lineEdit_jump.setGeometry(QtCore.QRect(560, 770, 31, 20))\n self.lineEdit_jump.setObjectName(\"lineEdit_jump\")\n self.pushButton_jump = QtWidgets.QPushButton(Form)\n self.pushButton_jump.setGeometry(QtCore.QRect(620, 770, 41, 21))\n self.pushButton_jump.setObjectName(\"pushButton_jump\")\n self.label_9 = QtWidgets.QLabel(Form)\n self.label_9.setGeometry(QtCore.QRect(520, 760, 91, 39))\n self.label_9.setObjectName(\"label_9\")\n self.label_10 = QtWidgets.QLabel(Form)\n self.label_10.setGeometry(QtCore.QRect(20, 480, 51, 39))\n self.label_10.setObjectName(\"label_10\")\n self.label_11 = QtWidgets.QLabel(Form)\n self.label_11.setGeometry(QtCore.QRect(20, 420, 51, 39))\n self.label_11.setObjectName(\"label_11\")\n self.label_12 = QtWidgets.QLabel(Form)\n self.label_12.setGeometry(QtCore.QRect(30, 640, 51, 39))\n self.label_12.setObjectName(\"label_12\")\n self.textBrowser_payRecord1 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_payRecord1.setGeometry(QtCore.QRect(90, 460, 161, 71))\n self.textBrowser_payRecord1.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_payRecord1.setLineWidth(0)\n self.textBrowser_payRecord1.setObjectName(\"textBrowser_payRecord1\")\n self.label_14 = QtWidgets.QLabel(Form)\n self.label_14.setGeometry(QtCore.QRect(20, 120, 51, 39))\n self.label_14.setObjectName(\"label_14\")\n self.label_shipDate1 = QtWidgets.QLabel(Form)\n self.label_shipDate1.setGeometry(QtCore.QRect(90, 100, 151, 21))\n self.label_shipDate1.setObjectName(\"label_shipDate1\")\n self.label_sales1 = QtWidgets.QLabel(Form)\n self.label_sales1.setGeometry(QtCore.QRect(90, 250, 151, 21))\n self.label_sales1.setObjectName(\"label_sales1\")\n self.label_production1 = QtWidgets.QLabel(Form)\n self.label_production1.setGeometry(QtCore.QRect(90, 160, 161, 21))\n self.label_production1.setObjectName(\"label_production1\")\n self.label_model1 = QtWidgets.QLabel(Form)\n self.label_model1.setGeometry(QtCore.QRect(90, 190, 151, 21))\n self.label_model1.setObjectName(\"label_model1\")\n self.label_15 = QtWidgets.QLabel(Form)\n self.label_15.setGeometry(QtCore.QRect(30, 210, 41, 39))\n self.label_15.setObjectName(\"label_15\")\n self.label_macID1 = QtWidgets.QLabel(Form)\n self.label_macID1.setGeometry(QtCore.QRect(90, 220, 151, 21))\n self.label_macID1.setObjectName(\"label_macID1\")\n self.label_16 = QtWidgets.QLabel(Form)\n self.label_16.setGeometry(QtCore.QRect(40, 300, 31, 39))\n self.label_16.setObjectName(\"label_16\")\n self.label_17 = QtWidgets.QLabel(Form)\n self.label_17.setGeometry(QtCore.QRect(40, 270, 31, 39))\n self.label_17.setObjectName(\"label_17\")\n self.label_18 = QtWidgets.QLabel(Form)\n self.label_18.setGeometry(QtCore.QRect(40, 330, 31, 39))\n self.label_18.setObjectName(\"label_18\")\n self.label_number1 = QtWidgets.QLabel(Form)\n self.label_number1.setGeometry(QtCore.QRect(90, 310, 121, 21))\n self.label_number1.setObjectName(\"label_number1\")\n self.label_unitPrice1 = QtWidgets.QLabel(Form)\n self.label_unitPrice1.setGeometry(QtCore.QRect(90, 280, 151, 21))\n self.label_unitPrice1.setObjectName(\"label_unitPrice1\")\n self.label_amount1 = QtWidgets.QLabel(Form)\n self.label_amount1.setGeometry(QtCore.QRect(90, 340, 141, 21))\n self.label_amount1.setObjectName(\"label_amount1\")\n self.label_19 = QtWidgets.QLabel(Form)\n self.label_19.setGeometry(QtCore.QRect(30, 360, 41, 39))\n 
self.label_19.setObjectName(\"label_19\")\n self.label_paid1 = QtWidgets.QLabel(Form)\n self.label_paid1.setGeometry(QtCore.QRect(90, 370, 151, 21))\n self.label_paid1.setObjectName(\"label_paid1\")\n self.label_20 = QtWidgets.QLabel(Form)\n self.label_20.setGeometry(QtCore.QRect(30, 390, 41, 39))\n self.label_20.setObjectName(\"label_20\")\n self.label_receivable1 = QtWidgets.QLabel(Form)\n self.label_receivable1.setGeometry(QtCore.QRect(90, 400, 161, 21))\n self.label_receivable1.setObjectName(\"label_receivable1\")\n self.label_payRemind1 = QtWidgets.QLabel(Form)\n self.label_payRemind1.setGeometry(QtCore.QRect(90, 430, 151, 21))\n self.label_payRemind1.setObjectName(\"label_payRemind1\")\n self.textBrowser_shipTo1 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_shipTo1.setGeometry(QtCore.QRect(90, 540, 161, 71))\n self.textBrowser_shipTo1.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_shipTo1.setLineWidth(0)\n self.textBrowser_shipTo1.setObjectName(\"textBrowser_shipTo1\")\n self.label_21 = QtWidgets.QLabel(Form)\n self.label_21.setGeometry(QtCore.QRect(20, 550, 51, 39))\n self.label_21.setObjectName(\"label_21\")\n self.textBrowser_remark1 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_remark1.setGeometry(QtCore.QRect(90, 620, 161, 71))\n self.textBrowser_remark1.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_remark1.setLineWidth(0)\n self.textBrowser_remark1.setObjectName(\"textBrowser_remark1\")\n self.line_8 = QtWidgets.QFrame(Form)\n self.line_8.setGeometry(QtCore.QRect(250, 90, 21, 611))\n self.line_8.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_8.setObjectName(\"line_8\")\n self.line_9 = QtWidgets.QFrame(Form)\n self.line_9.setGeometry(QtCore.QRect(10, 690, 251, 20))\n self.line_9.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_9.setObjectName(\"line_9\")\n self.line_10 = QtWidgets.QFrame(Form)\n self.line_10.setGeometry(QtCore.QRect(0, 90, 20, 611))\n self.line_10.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_10.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_10.setObjectName(\"line_10\")\n self.line_11 = QtWidgets.QFrame(Form)\n self.line_11.setGeometry(QtCore.QRect(70, 90, 20, 611))\n self.line_11.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_11.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_11.setObjectName(\"line_11\")\n self.pushButton_search = QtWidgets.QPushButton(Form)\n self.pushButton_search.setGeometry(QtCore.QRect(280, 10, 41, 21))\n self.pushButton_search.setObjectName(\"pushButton_search\")\n self.line_2 = QtWidgets.QFrame(Form)\n self.line_2.setGeometry(QtCore.QRect(10, 740, 1291, 20))\n self.line_2.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_2.setObjectName(\"line_2\")\n self.pushButton_SwitchShipment = QtWidgets.QPushButton(Form)\n self.pushButton_SwitchShipment.setGeometry(QtCore.QRect(980, 10, 81, 23))\n self.pushButton_SwitchShipment.setObjectName(\"pushButton_SwitchShipment\")\n self.label_22 = QtWidgets.QLabel(Form)\n self.label_22.setGeometry(QtCore.QRect(360, 760, 141, 39))\n self.label_22.setObjectName(\"label_22\")\n self.label_cPage = QtWidgets.QLabel(Form)\n self.label_cPage.setGeometry(QtCore.QRect(380, 770, 41, 21))\n self.label_cPage.setObjectName(\"label_cPage\")\n self.label_tPage = QtWidgets.QLabel(Form)\n self.label_tPage.setGeometry(QtCore.QRect(430, 770, 41, 21))\n self.label_tPage.setObjectName(\"label_tPage\")\n 
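# widgets for record column 2 of the five-per-page grid; this block mirrors column 1\n 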
self.pushButton_edit2 = QtWidgets.QPushButton(Form)\n self.pushButton_edit2.setGeometry(QtCore.QRect(270, 710, 71, 23))\n self.pushButton_edit2.setObjectName(\"pushButton_edit2\")\n self.label_6 = QtWidgets.QLabel(Form)\n self.label_6.setGeometry(QtCore.QRect(280, 240, 61, 39))\n self.label_6.setObjectName(\"label_6\")\n self.label_25 = QtWidgets.QLabel(Form)\n self.label_25.setGeometry(QtCore.QRect(280, 480, 51, 39))\n self.label_25.setObjectName(\"label_25\")\n self.label_model2 = QtWidgets.QLabel(Form)\n self.label_model2.setGeometry(QtCore.QRect(350, 190, 161, 21))\n self.label_model2.setObjectName(\"label_model2\")\n self.label_26 = QtWidgets.QLabel(Form)\n self.label_26.setGeometry(QtCore.QRect(290, 390, 41, 39))\n self.label_26.setObjectName(\"label_26\")\n self.line_12 = QtWidgets.QFrame(Form)\n self.line_12.setGeometry(QtCore.QRect(270, 690, 251, 20))\n self.line_12.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_12.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_12.setObjectName(\"line_12\")\n self.label_macID2 = QtWidgets.QLabel(Form)\n self.label_macID2.setGeometry(QtCore.QRect(350, 220, 151, 21))\n self.label_macID2.setObjectName(\"label_macID2\")\n self.label_7 = QtWidgets.QLabel(Form)\n self.label_7.setGeometry(QtCore.QRect(280, 150, 61, 39))\n self.label_7.setObjectName(\"label_7\")\n self.label_production2 = QtWidgets.QLabel(Form)\n self.label_production2.setGeometry(QtCore.QRect(350, 160, 161, 21))\n self.label_production2.setObjectName(\"label_production2\")\n self.label_27 = QtWidgets.QLabel(Form)\n self.label_27.setGeometry(QtCore.QRect(280, 180, 61, 39))\n self.label_27.setObjectName(\"label_27\")\n self.label_paid2 = QtWidgets.QLabel(Form)\n self.label_paid2.setGeometry(QtCore.QRect(350, 370, 151, 21))\n self.label_paid2.setObjectName(\"label_paid2\")\n self.label_28 = QtWidgets.QLabel(Form)\n self.label_28.setGeometry(QtCore.QRect(280, 420, 51, 39))\n self.label_28.setObjectName(\"label_28\")\n self.label_29 = QtWidgets.QLabel(Form)\n self.label_29.setGeometry(QtCore.QRect(290, 210, 41, 39))\n self.label_29.setObjectName(\"label_29\")\n self.label_receivable2 = QtWidgets.QLabel(Form)\n self.label_receivable2.setGeometry(QtCore.QRect(350, 400, 151, 21))\n self.label_receivable2.setObjectName(\"label_receivable2\")\n self.line_13 = QtWidgets.QFrame(Form)\n self.line_13.setGeometry(QtCore.QRect(510, 90, 21, 611))\n self.line_13.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_13.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_13.setObjectName(\"line_13\")\n self.line_14 = QtWidgets.QFrame(Form)\n self.line_14.setGeometry(QtCore.QRect(260, 90, 20, 611))\n self.line_14.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_14.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_14.setObjectName(\"line_14\")\n self.textBrowser_payRecord2 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_payRecord2.setGeometry(QtCore.QRect(350, 460, 161, 71))\n self.textBrowser_payRecord2.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_payRecord2.setLineWidth(0)\n self.textBrowser_payRecord2.setObjectName(\"textBrowser_payRecord2\")\n self.line_15 = QtWidgets.QFrame(Form)\n self.line_15.setGeometry(QtCore.QRect(270, 80, 251, 20))\n self.line_15.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_15.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_15.setObjectName(\"line_15\")\n self.label_30 = QtWidgets.QLabel(Form)\n self.label_30.setGeometry(QtCore.QRect(280, 550, 51, 39))\n self.label_30.setObjectName(\"label_30\")\n self.label_31 = QtWidgets.QLabel(Form)\n 
self.label_31.setGeometry(QtCore.QRect(290, 640, 51, 39))\n self.label_31.setObjectName(\"label_31\")\n self.label_customer2 = QtWidgets.QLabel(Form)\n self.label_customer2.setGeometry(QtCore.QRect(350, 130, 161, 21))\n self.label_customer2.setObjectName(\"label_customer2\")\n self.textBrowser_remark2 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_remark2.setGeometry(QtCore.QRect(350, 620, 161, 71))\n self.textBrowser_remark2.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_remark2.setLineWidth(0)\n self.textBrowser_remark2.setObjectName(\"textBrowser_remark2\")\n self.label_32 = QtWidgets.QLabel(Form)\n self.label_32.setGeometry(QtCore.QRect(300, 270, 31, 39))\n self.label_32.setObjectName(\"label_32\")\n self.line_16 = QtWidgets.QFrame(Form)\n self.line_16.setGeometry(QtCore.QRect(330, 90, 20, 611))\n self.line_16.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_16.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_16.setObjectName(\"line_16\")\n self.label_33 = QtWidgets.QLabel(Form)\n self.label_33.setGeometry(QtCore.QRect(300, 300, 31, 39))\n self.label_33.setObjectName(\"label_33\")\n self.textBrowser_shipTo2 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_shipTo2.setGeometry(QtCore.QRect(350, 540, 161, 71))\n self.textBrowser_shipTo2.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_shipTo2.setLineWidth(0)\n self.textBrowser_shipTo2.setObjectName(\"textBrowser_shipTo2\")\n self.label_34 = QtWidgets.QLabel(Form)\n self.label_34.setGeometry(QtCore.QRect(280, 120, 51, 39))\n self.label_34.setObjectName(\"label_34\")\n self.label_35 = QtWidgets.QLabel(Form)\n self.label_35.setGeometry(QtCore.QRect(280, 90, 61, 39))\n self.label_35.setObjectName(\"label_35\")\n self.label_payRemind2 = QtWidgets.QLabel(Form)\n self.label_payRemind2.setGeometry(QtCore.QRect(350, 430, 151, 21))\n self.label_payRemind2.setObjectName(\"label_payRemind2\")\n self.label_number2 = QtWidgets.QLabel(Form)\n self.label_number2.setGeometry(QtCore.QRect(350, 310, 141, 21))\n self.label_number2.setObjectName(\"label_number2\")\n self.label_sales2 = QtWidgets.QLabel(Form)\n self.label_sales2.setGeometry(QtCore.QRect(350, 250, 151, 21))\n self.label_sales2.setObjectName(\"label_sales2\")\n self.label_36 = QtWidgets.QLabel(Form)\n self.label_36.setGeometry(QtCore.QRect(300, 330, 31, 39))\n self.label_36.setObjectName(\"label_36\")\n self.label_37 = QtWidgets.QLabel(Form)\n self.label_37.setGeometry(QtCore.QRect(290, 360, 41, 39))\n self.label_37.setObjectName(\"label_37\")\n self.label_unitPrice2 = QtWidgets.QLabel(Form)\n self.label_unitPrice2.setGeometry(QtCore.QRect(350, 280, 161, 21))\n self.label_unitPrice2.setObjectName(\"label_unitPrice2\")\n self.label_shipDate2 = QtWidgets.QLabel(Form)\n self.label_shipDate2.setGeometry(QtCore.QRect(350, 100, 151, 21))\n self.label_shipDate2.setObjectName(\"label_shipDate2\")\n self.label_amount2 = QtWidgets.QLabel(Form)\n self.label_amount2.setGeometry(QtCore.QRect(350, 340, 151, 21))\n self.label_amount2.setObjectName(\"label_amount2\")\n self.pushButton_edit3 = QtWidgets.QPushButton(Form)\n self.pushButton_edit3.setGeometry(QtCore.QRect(530, 710, 71, 23))\n self.pushButton_edit3.setObjectName(\"pushButton_edit3\")\n self.label_38 = QtWidgets.QLabel(Form)\n self.label_38.setGeometry(QtCore.QRect(540, 180, 61, 39))\n self.label_38.setObjectName(\"label_38\")\n self.label_39 = QtWidgets.QLabel(Form)\n self.label_39.setGeometry(QtCore.QRect(550, 390, 41, 39))\n self.label_39.setObjectName(\"label_39\")\n self.textBrowser_payRecord3 = 
QtWidgets.QTextBrowser(Form)\n self.textBrowser_payRecord3.setGeometry(QtCore.QRect(610, 460, 161, 71))\n self.textBrowser_payRecord3.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_payRecord3.setLineWidth(0)\n self.textBrowser_payRecord3.setObjectName(\"textBrowser_payRecord3\")\n self.label_sales3 = QtWidgets.QLabel(Form)\n self.label_sales3.setGeometry(QtCore.QRect(610, 250, 161, 21))\n self.label_sales3.setObjectName(\"label_sales3\")\n self.label_40 = QtWidgets.QLabel(Form)\n self.label_40.setGeometry(QtCore.QRect(550, 210, 41, 39))\n self.label_40.setObjectName(\"label_40\")\n self.textBrowser_shipTo3 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_shipTo3.setGeometry(QtCore.QRect(610, 540, 161, 71))\n self.textBrowser_shipTo3.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_shipTo3.setLineWidth(0)\n self.textBrowser_shipTo3.setObjectName(\"textBrowser_shipTo3\")\n self.label_41 = QtWidgets.QLabel(Form)\n self.label_41.setGeometry(QtCore.QRect(540, 240, 61, 39))\n self.label_41.setObjectName(\"label_41\")\n self.line_17 = QtWidgets.QFrame(Form)\n self.line_17.setGeometry(QtCore.QRect(590, 90, 20, 611))\n self.line_17.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_17.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_17.setObjectName(\"line_17\")\n self.label_42 = QtWidgets.QLabel(Form)\n self.label_42.setGeometry(QtCore.QRect(540, 550, 51, 39))\n self.label_42.setObjectName(\"label_42\")\n self.label_43 = QtWidgets.QLabel(Form)\n self.label_43.setGeometry(QtCore.QRect(550, 360, 41, 39))\n self.label_43.setObjectName(\"label_43\")\n self.label_44 = QtWidgets.QLabel(Form)\n self.label_44.setGeometry(QtCore.QRect(560, 330, 31, 39))\n self.label_44.setObjectName(\"label_44\")\n self.label_model3 = QtWidgets.QLabel(Form)\n self.label_model3.setGeometry(QtCore.QRect(610, 190, 161, 21))\n self.label_model3.setObjectName(\"label_model3\")\n self.label_45 = QtWidgets.QLabel(Form)\n self.label_45.setGeometry(QtCore.QRect(540, 150, 61, 39))\n self.label_45.setObjectName(\"label_45\")\n self.label_46 = QtWidgets.QLabel(Form)\n self.label_46.setGeometry(QtCore.QRect(550, 640, 51, 39))\n self.label_46.setObjectName(\"label_46\")\n self.label_47 = QtWidgets.QLabel(Form)\n self.label_47.setGeometry(QtCore.QRect(560, 270, 31, 39))\n self.label_47.setObjectName(\"label_47\")\n self.line_18 = QtWidgets.QFrame(Form)\n self.line_18.setGeometry(QtCore.QRect(530, 80, 251, 20))\n self.line_18.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_18.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_18.setObjectName(\"line_18\")\n self.label_shipDate3 = QtWidgets.QLabel(Form)\n self.label_shipDate3.setGeometry(QtCore.QRect(610, 100, 151, 21))\n self.label_shipDate3.setObjectName(\"label_shipDate3\")\n self.label_48 = QtWidgets.QLabel(Form)\n self.label_48.setGeometry(QtCore.QRect(540, 120, 51, 39))\n self.label_48.setObjectName(\"label_48\")\n self.label_49 = QtWidgets.QLabel(Form)\n self.label_49.setGeometry(QtCore.QRect(560, 300, 31, 39))\n self.label_49.setObjectName(\"label_49\")\n self.line_19 = QtWidgets.QFrame(Form)\n self.line_19.setGeometry(QtCore.QRect(770, 90, 21, 611))\n self.line_19.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_19.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_19.setObjectName(\"line_19\")\n self.label_unitPrice3 = QtWidgets.QLabel(Form)\n self.label_unitPrice3.setGeometry(QtCore.QRect(610, 280, 151, 21))\n self.label_unitPrice3.setObjectName(\"label_unitPrice3\")\n self.label_amount3 = QtWidgets.QLabel(Form)\n 
self.label_amount3.setGeometry(QtCore.QRect(610, 340, 151, 21))\n self.label_amount3.setObjectName(\"label_amount3\")\n self.line_20 = QtWidgets.QFrame(Form)\n self.line_20.setGeometry(QtCore.QRect(520, 90, 20, 611))\n self.line_20.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_20.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_20.setObjectName(\"line_20\")\n self.label_customer3 = QtWidgets.QLabel(Form)\n self.label_customer3.setGeometry(QtCore.QRect(610, 130, 161, 21))\n self.label_customer3.setObjectName(\"label_customer3\")\n self.label_number3 = QtWidgets.QLabel(Form)\n self.label_number3.setGeometry(QtCore.QRect(610, 310, 131, 21))\n self.label_number3.setObjectName(\"label_number3\")\n self.label_production3 = QtWidgets.QLabel(Form)\n self.label_production3.setGeometry(QtCore.QRect(610, 160, 161, 21))\n self.label_production3.setObjectName(\"label_production3\")\n self.label_50 = QtWidgets.QLabel(Form)\n self.label_50.setGeometry(QtCore.QRect(540, 90, 61, 39))\n self.label_50.setObjectName(\"label_50\")\n self.label_paid3 = QtWidgets.QLabel(Form)\n self.label_paid3.setGeometry(QtCore.QRect(610, 370, 151, 21))\n self.label_paid3.setObjectName(\"label_paid3\")\n self.label_payRemind3 = QtWidgets.QLabel(Form)\n self.label_payRemind3.setGeometry(QtCore.QRect(610, 430, 151, 21))\n self.label_payRemind3.setObjectName(\"label_payRemind3\")\n self.label_51 = QtWidgets.QLabel(Form)\n self.label_51.setGeometry(QtCore.QRect(540, 420, 51, 39))\n self.label_51.setObjectName(\"label_51\")\n self.label_52 = QtWidgets.QLabel(Form)\n self.label_52.setGeometry(QtCore.QRect(540, 480, 51, 39))\n self.label_52.setObjectName(\"label_52\")\n self.line_21 = QtWidgets.QFrame(Form)\n self.line_21.setGeometry(QtCore.QRect(530, 690, 251, 20))\n self.line_21.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_21.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_21.setObjectName(\"line_21\")\n self.textBrowser_remark3 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_remark3.setGeometry(QtCore.QRect(610, 620, 161, 71))\n self.textBrowser_remark3.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_remark3.setLineWidth(0)\n self.textBrowser_remark3.setObjectName(\"textBrowser_remark3\")\n self.label_macID3 = QtWidgets.QLabel(Form)\n self.label_macID3.setGeometry(QtCore.QRect(610, 220, 161, 21))\n self.label_macID3.setObjectName(\"label_macID3\")\n self.label_receivable3 = QtWidgets.QLabel(Form)\n self.label_receivable3.setGeometry(QtCore.QRect(610, 400, 151, 21))\n self.label_receivable3.setObjectName(\"label_receivable3\")\n self.pushButton_edit4 = QtWidgets.QPushButton(Form)\n self.pushButton_edit4.setGeometry(QtCore.QRect(790, 710, 71, 23))\n self.pushButton_edit4.setObjectName(\"pushButton_edit4\")\n self.label_53 = QtWidgets.QLabel(Form)\n self.label_53.setGeometry(QtCore.QRect(800, 180, 61, 39))\n self.label_53.setObjectName(\"label_53\")\n self.label_54 = QtWidgets.QLabel(Form)\n self.label_54.setGeometry(QtCore.QRect(810, 390, 41, 39))\n self.label_54.setObjectName(\"label_54\")\n self.textBrowser_payRecord4 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_payRecord4.setGeometry(QtCore.QRect(870, 460, 161, 71))\n self.textBrowser_payRecord4.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_payRecord4.setLineWidth(0)\n self.textBrowser_payRecord4.setObjectName(\"textBrowser_payRecord4\")\n self.label_sales4 = QtWidgets.QLabel(Form)\n self.label_sales4.setGeometry(QtCore.QRect(870, 250, 161, 21))\n self.label_sales4.setObjectName(\"label_sales4\")\n self.label_55 = 
QtWidgets.QLabel(Form)\n self.label_55.setGeometry(QtCore.QRect(810, 210, 41, 39))\n self.label_55.setObjectName(\"label_55\")\n self.textBrowser_shipTo4 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_shipTo4.setGeometry(QtCore.QRect(870, 540, 161, 71))\n self.textBrowser_shipTo4.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_shipTo4.setLineWidth(0)\n self.textBrowser_shipTo4.setObjectName(\"textBrowser_shipTo4\")\n self.label_56 = QtWidgets.QLabel(Form)\n self.label_56.setGeometry(QtCore.QRect(800, 240, 61, 39))\n self.label_56.setObjectName(\"label_56\")\n self.line_22 = QtWidgets.QFrame(Form)\n self.line_22.setGeometry(QtCore.QRect(850, 90, 20, 611))\n self.line_22.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_22.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_22.setObjectName(\"line_22\")\n self.label_57 = QtWidgets.QLabel(Form)\n self.label_57.setGeometry(QtCore.QRect(800, 550, 51, 39))\n self.label_57.setObjectName(\"label_57\")\n self.label_58 = QtWidgets.QLabel(Form)\n self.label_58.setGeometry(QtCore.QRect(810, 360, 41, 39))\n self.label_58.setObjectName(\"label_58\")\n self.label_59 = QtWidgets.QLabel(Form)\n self.label_59.setGeometry(QtCore.QRect(820, 330, 31, 39))\n self.label_59.setObjectName(\"label_59\")\n self.label_model4 = QtWidgets.QLabel(Form)\n self.label_model4.setGeometry(QtCore.QRect(870, 190, 161, 21))\n self.label_model4.setObjectName(\"label_model4\")\n self.label_60 = QtWidgets.QLabel(Form)\n self.label_60.setGeometry(QtCore.QRect(800, 150, 61, 39))\n self.label_60.setObjectName(\"label_60\")\n self.label_61 = QtWidgets.QLabel(Form)\n self.label_61.setGeometry(QtCore.QRect(810, 640, 51, 39))\n self.label_61.setObjectName(\"label_61\")\n self.label_62 = QtWidgets.QLabel(Form)\n self.label_62.setGeometry(QtCore.QRect(820, 270, 31, 39))\n self.label_62.setObjectName(\"label_62\")\n self.line_23 = QtWidgets.QFrame(Form)\n self.line_23.setGeometry(QtCore.QRect(790, 80, 251, 20))\n self.line_23.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_23.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_23.setObjectName(\"line_23\")\n self.label_shipDate4 = QtWidgets.QLabel(Form)\n self.label_shipDate4.setGeometry(QtCore.QRect(870, 100, 161, 21))\n self.label_shipDate4.setObjectName(\"label_shipDate4\")\n self.label_63 = QtWidgets.QLabel(Form)\n self.label_63.setGeometry(QtCore.QRect(800, 120, 51, 39))\n self.label_63.setObjectName(\"label_63\")\n self.label_64 = QtWidgets.QLabel(Form)\n self.label_64.setGeometry(QtCore.QRect(820, 300, 31, 39))\n self.label_64.setObjectName(\"label_64\")\n self.line_24 = QtWidgets.QFrame(Form)\n self.line_24.setGeometry(QtCore.QRect(1030, 90, 21, 611))\n self.line_24.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_24.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_24.setObjectName(\"line_24\")\n self.label_unitPrice4 = QtWidgets.QLabel(Form)\n self.label_unitPrice4.setGeometry(QtCore.QRect(870, 280, 161, 21))\n self.label_unitPrice4.setObjectName(\"label_unitPrice4\")\n self.label_amount4 = QtWidgets.QLabel(Form)\n self.label_amount4.setGeometry(QtCore.QRect(870, 340, 151, 21))\n self.label_amount4.setObjectName(\"label_amount4\")\n self.line_25 = QtWidgets.QFrame(Form)\n self.line_25.setGeometry(QtCore.QRect(780, 90, 20, 611))\n self.line_25.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_25.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_25.setObjectName(\"line_25\")\n self.label_customer4 = QtWidgets.QLabel(Form)\n self.label_customer4.setGeometry(QtCore.QRect(870, 130, 161, 21))\n 
self.label_customer4.setObjectName(\"label_customer4\")\n self.label_number4 = QtWidgets.QLabel(Form)\n self.label_number4.setGeometry(QtCore.QRect(870, 310, 151, 21))\n self.label_number4.setObjectName(\"label_number4\")\n self.label_production4 = QtWidgets.QLabel(Form)\n self.label_production4.setGeometry(QtCore.QRect(870, 160, 161, 21))\n self.label_production4.setObjectName(\"label_production4\")\n self.label_65 = QtWidgets.QLabel(Form)\n self.label_65.setGeometry(QtCore.QRect(800, 90, 61, 39))\n self.label_65.setObjectName(\"label_65\")\n self.label_paid4 = QtWidgets.QLabel(Form)\n self.label_paid4.setGeometry(QtCore.QRect(870, 370, 161, 21))\n self.label_paid4.setObjectName(\"label_paid4\")\n self.label_payRemind4 = QtWidgets.QLabel(Form)\n self.label_payRemind4.setGeometry(QtCore.QRect(870, 430, 151, 21))\n self.label_payRemind4.setObjectName(\"label_payRemind4\")\n self.label_66 = QtWidgets.QLabel(Form)\n self.label_66.setGeometry(QtCore.QRect(800, 420, 51, 39))\n self.label_66.setObjectName(\"label_66\")\n self.label_67 = QtWidgets.QLabel(Form)\n self.label_67.setGeometry(QtCore.QRect(800, 480, 51, 39))\n self.label_67.setObjectName(\"label_67\")\n self.line_26 = QtWidgets.QFrame(Form)\n self.line_26.setGeometry(QtCore.QRect(790, 690, 251, 20))\n self.line_26.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_26.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_26.setObjectName(\"line_26\")\n self.textBrowser_remark4 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_remark4.setGeometry(QtCore.QRect(870, 620, 161, 71))\n self.textBrowser_remark4.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_remark4.setLineWidth(0)\n self.textBrowser_remark4.setObjectName(\"textBrowser_remark4\")\n self.label_macID4 = QtWidgets.QLabel(Form)\n self.label_macID4.setGeometry(QtCore.QRect(870, 220, 161, 21))\n self.label_macID4.setObjectName(\"label_macID4\")\n self.label_receivable4 = QtWidgets.QLabel(Form)\n self.label_receivable4.setGeometry(QtCore.QRect(870, 400, 151, 21))\n self.label_receivable4.setObjectName(\"label_receivable4\")\n self.pushButton_edit5 = QtWidgets.QPushButton(Form)\n self.pushButton_edit5.setGeometry(QtCore.QRect(1050, 710, 71, 23))\n self.pushButton_edit5.setObjectName(\"pushButton_edit5\")\n self.label_68 = QtWidgets.QLabel(Form)\n self.label_68.setGeometry(QtCore.QRect(1060, 180, 61, 39))\n self.label_68.setObjectName(\"label_68\")\n self.label_69 = QtWidgets.QLabel(Form)\n self.label_69.setGeometry(QtCore.QRect(1070, 390, 41, 39))\n self.label_69.setObjectName(\"label_69\")\n self.textBrowser_payRecord5 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_payRecord5.setGeometry(QtCore.QRect(1130, 460, 161, 71))\n self.textBrowser_payRecord5.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_payRecord5.setLineWidth(0)\n self.textBrowser_payRecord5.setObjectName(\"textBrowser_payRecord5\")\n self.label_sales5 = QtWidgets.QLabel(Form)\n self.label_sales5.setGeometry(QtCore.QRect(1130, 250, 161, 21))\n self.label_sales5.setObjectName(\"label_sales5\")\n self.label_70 = QtWidgets.QLabel(Form)\n self.label_70.setGeometry(QtCore.QRect(1070, 210, 41, 39))\n self.label_70.setObjectName(\"label_70\")\n self.textBrowser_shipTo5 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_shipTo5.setGeometry(QtCore.QRect(1130, 540, 161, 71))\n self.textBrowser_shipTo5.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_shipTo5.setLineWidth(0)\n self.textBrowser_shipTo5.setObjectName(\"textBrowser_shipTo5\")\n self.label_71 = QtWidgets.QLabel(Form)\n 
self.label_71.setGeometry(QtCore.QRect(1060, 240, 61, 39))\n self.label_71.setObjectName(\"label_71\")\n self.line_27 = QtWidgets.QFrame(Form)\n self.line_27.setGeometry(QtCore.QRect(1110, 90, 20, 611))\n self.line_27.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_27.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_27.setObjectName(\"line_27\")\n self.label_72 = QtWidgets.QLabel(Form)\n self.label_72.setGeometry(QtCore.QRect(1060, 550, 51, 39))\n self.label_72.setObjectName(\"label_72\")\n self.label_73 = QtWidgets.QLabel(Form)\n self.label_73.setGeometry(QtCore.QRect(1070, 360, 41, 39))\n self.label_73.setObjectName(\"label_73\")\n self.label_74 = QtWidgets.QLabel(Form)\n self.label_74.setGeometry(QtCore.QRect(1080, 330, 31, 39))\n self.label_74.setObjectName(\"label_74\")\n self.label_model5 = QtWidgets.QLabel(Form)\n self.label_model5.setGeometry(QtCore.QRect(1130, 190, 151, 21))\n self.label_model5.setObjectName(\"label_model5\")\n self.label_75 = QtWidgets.QLabel(Form)\n self.label_75.setGeometry(QtCore.QRect(1060, 150, 61, 39))\n self.label_75.setObjectName(\"label_75\")\n self.label_76 = QtWidgets.QLabel(Form)\n self.label_76.setGeometry(QtCore.QRect(1070, 640, 51, 39))\n self.label_76.setObjectName(\"label_76\")\n self.label_77 = QtWidgets.QLabel(Form)\n self.label_77.setGeometry(QtCore.QRect(1080, 270, 31, 39))\n self.label_77.setObjectName(\"label_77\")\n self.line_28 = QtWidgets.QFrame(Form)\n self.line_28.setGeometry(QtCore.QRect(1050, 80, 251, 20))\n self.line_28.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_28.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_28.setObjectName(\"line_28\")\n self.label_shipDate5 = QtWidgets.QLabel(Form)\n self.label_shipDate5.setGeometry(QtCore.QRect(1130, 100, 151, 21))\n self.label_shipDate5.setObjectName(\"label_shipDate5\")\n self.label_78 = QtWidgets.QLabel(Form)\n self.label_78.setGeometry(QtCore.QRect(1060, 120, 51, 39))\n self.label_78.setObjectName(\"label_78\")\n self.label_79 = QtWidgets.QLabel(Form)\n self.label_79.setGeometry(QtCore.QRect(1080, 300, 31, 39))\n self.label_79.setObjectName(\"label_79\")\n self.line_29 = QtWidgets.QFrame(Form)\n self.line_29.setGeometry(QtCore.QRect(1290, 90, 21, 611))\n self.line_29.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_29.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_29.setObjectName(\"line_29\")\n self.label_unitPrice5 = QtWidgets.QLabel(Form)\n self.label_unitPrice5.setGeometry(QtCore.QRect(1130, 280, 161, 21))\n self.label_unitPrice5.setObjectName(\"label_unitPrice5\")\n self.label_amount5 = QtWidgets.QLabel(Form)\n self.label_amount5.setGeometry(QtCore.QRect(1130, 340, 161, 21))\n self.label_amount5.setObjectName(\"label_amount5\")\n self.line_30 = QtWidgets.QFrame(Form)\n self.line_30.setGeometry(QtCore.QRect(1040, 90, 20, 611))\n self.line_30.setFrameShape(QtWidgets.QFrame.VLine)\n self.line_30.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_30.setObjectName(\"line_30\")\n self.label_customer5 = QtWidgets.QLabel(Form)\n self.label_customer5.setGeometry(QtCore.QRect(1130, 130, 161, 21))\n self.label_customer5.setObjectName(\"label_customer5\")\n self.label_number5 = QtWidgets.QLabel(Form)\n self.label_number5.setGeometry(QtCore.QRect(1130, 310, 151, 21))\n self.label_number5.setObjectName(\"label_number5\")\n self.label_production5 = QtWidgets.QLabel(Form)\n self.label_production5.setGeometry(QtCore.QRect(1130, 160, 161, 21))\n self.label_production5.setObjectName(\"label_production5\")\n self.label_80 = QtWidgets.QLabel(Form)\n 
self.label_80.setGeometry(QtCore.QRect(1060, 90, 61, 39))\n self.label_80.setObjectName(\"label_80\")\n self.label_paid5 = QtWidgets.QLabel(Form)\n self.label_paid5.setGeometry(QtCore.QRect(1130, 370, 161, 21))\n self.label_paid5.setObjectName(\"label_paid5\")\n self.label_payRemind5 = QtWidgets.QLabel(Form)\n self.label_payRemind5.setGeometry(QtCore.QRect(1130, 430, 161, 21))\n self.label_payRemind5.setObjectName(\"label_payRemind5\")\n self.label_81 = QtWidgets.QLabel(Form)\n self.label_81.setGeometry(QtCore.QRect(1060, 420, 51, 39))\n self.label_81.setObjectName(\"label_81\")\n self.label_82 = QtWidgets.QLabel(Form)\n self.label_82.setGeometry(QtCore.QRect(1060, 480, 51, 39))\n self.label_82.setObjectName(\"label_82\")\n self.line_31 = QtWidgets.QFrame(Form)\n self.line_31.setGeometry(QtCore.QRect(1050, 690, 251, 20))\n self.line_31.setFrameShape(QtWidgets.QFrame.HLine)\n self.line_31.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.line_31.setObjectName(\"line_31\")\n self.textBrowser_remark5 = QtWidgets.QTextBrowser(Form)\n self.textBrowser_remark5.setGeometry(QtCore.QRect(1130, 620, 161, 71))\n self.textBrowser_remark5.setFrameShadow(QtWidgets.QFrame.Plain)\n self.textBrowser_remark5.setLineWidth(0)\n self.textBrowser_remark5.setObjectName(\"textBrowser_remark5\")\n self.label_macID5 = QtWidgets.QLabel(Form)\n self.label_macID5.setGeometry(QtCore.QRect(1130, 220, 161, 21))\n self.label_macID5.setObjectName(\"label_macID5\")\n self.label_receivable5 = QtWidgets.QLabel(Form)\n self.label_receivable5.setGeometry(QtCore.QRect(1130, 400, 151, 21))\n self.label_receivable5.setObjectName(\"label_receivable5\")\n self.label_order1 = QtWidgets.QLabel(Form)\n self.label_order1.setGeometry(QtCore.QRect(150, 710, 41, 21))\n self.label_order1.setObjectName(\"label_order1\")\n self.label_id2 = QtWidgets.QLabel(Form)\n self.label_id2.setGeometry(QtCore.QRect(390, 60, 91, 21))\n self.label_id2.setObjectName(\"label_id2\")\n self.label_order2 = QtWidgets.QLabel(Form)\n self.label_order2.setGeometry(QtCore.QRect(410, 710, 41, 21))\n self.label_order2.setObjectName(\"label_order2\")\n self.label_id3 = QtWidgets.QLabel(Form)\n self.label_id3.setGeometry(QtCore.QRect(650, 60, 91, 21))\n self.label_id3.setObjectName(\"label_id3\")\n self.label_order3 = QtWidgets.QLabel(Form)\n self.label_order3.setGeometry(QtCore.QRect(660, 710, 41, 21))\n self.label_order3.setObjectName(\"label_order3\")\n self.label_id4 = QtWidgets.QLabel(Form)\n self.label_id4.setGeometry(QtCore.QRect(910, 60, 91, 21))\n self.label_id4.setObjectName(\"label_id4\")\n self.label_order4 = QtWidgets.QLabel(Form)\n self.label_order4.setGeometry(QtCore.QRect(930, 710, 41, 21))\n self.label_order4.setObjectName(\"label_order4\")\n self.label_id5 = QtWidgets.QLabel(Form)\n self.label_id5.setGeometry(QtCore.QRect(1170, 60, 91, 21))\n self.label_id5.setObjectName(\"label_id5\")\n self.label_order5 = QtWidgets.QLabel(Form)\n self.label_order5.setGeometry(QtCore.QRect(1190, 710, 41, 21))\n self.label_order5.setObjectName(\"label_order5\")\n self.label_13.raise_()\n self.label_id1.raise_()\n self.lineEdit_search.raise_()\n self.label_customer1.raise_()\n self.line_7.raise_()\n self.pushButton_SwitchCustomer.raise_()\n self.pushButton_edit1.raise_()\n self.label_4.raise_()\n self.line.raise_()\n self.pushButton_SwitchService.raise_()\n self.label_8.raise_()\n self.label_5.raise_()\n self.label_2.raise_()\n self.label_3.raise_()\n self.pushButton_previous.raise_()\n self.pushButton_next.raise_()\n self.pushButton_new.raise_()\n 
self.pushButton_jump.raise_()\n self.label_9.raise_()\n self.lineEdit_jump.raise_()\n self.label_10.raise_()\n self.label_11.raise_()\n self.label_12.raise_()\n self.textBrowser_payRecord1.raise_()\n self.label_14.raise_()\n self.label_shipDate1.raise_()\n self.label_sales1.raise_()\n self.label_production1.raise_()\n self.label_model1.raise_()\n self.label_15.raise_()\n self.label_macID1.raise_()\n self.label_16.raise_()\n self.label_17.raise_()\n self.label_18.raise_()\n self.label_number1.raise_()\n self.label_unitPrice1.raise_()\n self.label_amount1.raise_()\n self.label_19.raise_()\n self.label_paid1.raise_()\n self.label_20.raise_()\n self.label_receivable1.raise_()\n self.label_payRemind1.raise_()\n self.textBrowser_shipTo1.raise_()\n self.label_21.raise_()\n self.textBrowser_remark1.raise_()\n self.line_8.raise_()\n self.line_9.raise_()\n self.line_10.raise_()\n self.line_11.raise_()\n self.pushButton_search.raise_()\n self.line_2.raise_()\n self.pushButton_SwitchShipment.raise_()\n self.label_22.raise_()\n self.label_cPage.raise_()\n self.label_tPage.raise_()\n self.pushButton_edit2.raise_()\n self.label_6.raise_()\n self.label_25.raise_()\n self.label_model2.raise_()\n self.label_26.raise_()\n self.line_12.raise_()\n self.label_macID2.raise_()\n self.label_7.raise_()\n self.label_production2.raise_()\n self.label_27.raise_()\n self.label_paid2.raise_()\n self.label_28.raise_()\n self.label_29.raise_()\n self.label_receivable2.raise_()\n self.line_13.raise_()\n self.line_14.raise_()\n self.textBrowser_payRecord2.raise_()\n self.line_15.raise_()\n self.label_30.raise_()\n self.label_31.raise_()\n self.label_customer2.raise_()\n self.textBrowser_remark2.raise_()\n self.label_32.raise_()\n self.line_16.raise_()\n self.label_33.raise_()\n self.textBrowser_shipTo2.raise_()\n self.label_34.raise_()\n self.label_35.raise_()\n self.label_payRemind2.raise_()\n self.label_number2.raise_()\n self.label_sales2.raise_()\n self.label_36.raise_()\n self.label_37.raise_()\n self.label_unitPrice2.raise_()\n self.label_shipDate2.raise_()\n self.label_amount2.raise_()\n self.pushButton_edit3.raise_()\n self.label_38.raise_()\n self.label_39.raise_()\n self.textBrowser_payRecord3.raise_()\n self.label_sales3.raise_()\n self.label_40.raise_()\n self.textBrowser_shipTo3.raise_()\n self.label_41.raise_()\n self.line_17.raise_()\n self.label_42.raise_()\n self.label_43.raise_()\n self.label_44.raise_()\n self.label_model3.raise_()\n self.label_45.raise_()\n self.label_46.raise_()\n self.label_47.raise_()\n self.line_18.raise_()\n self.label_shipDate3.raise_()\n self.label_48.raise_()\n self.label_49.raise_()\n self.line_19.raise_()\n self.label_unitPrice3.raise_()\n self.label_amount3.raise_()\n self.line_20.raise_()\n self.label_customer3.raise_()\n self.label_number3.raise_()\n self.label_production3.raise_()\n self.label_50.raise_()\n self.label_paid3.raise_()\n self.label_payRemind3.raise_()\n self.label_51.raise_()\n self.label_52.raise_()\n self.line_21.raise_()\n self.textBrowser_remark3.raise_()\n self.label_macID3.raise_()\n self.label_receivable3.raise_()\n self.pushButton_edit4.raise_()\n self.label_53.raise_()\n self.label_54.raise_()\n self.textBrowser_payRecord4.raise_()\n self.label_sales4.raise_()\n self.label_55.raise_()\n self.textBrowser_shipTo4.raise_()\n self.label_56.raise_()\n self.line_22.raise_()\n self.label_57.raise_()\n self.label_58.raise_()\n self.label_59.raise_()\n self.label_model4.raise_()\n self.label_60.raise_()\n self.label_61.raise_()\n self.label_62.raise_()\n 
self.line_23.raise_()\n self.label_shipDate4.raise_()\n self.label_63.raise_()\n self.label_64.raise_()\n self.line_24.raise_()\n self.label_unitPrice4.raise_()\n self.label_amount4.raise_()\n self.line_25.raise_()\n self.label_customer4.raise_()\n self.label_number4.raise_()\n self.label_production4.raise_()\n self.label_65.raise_()\n self.label_paid4.raise_()\n self.label_payRemind4.raise_()\n self.label_66.raise_()\n self.label_67.raise_()\n self.line_26.raise_()\n self.textBrowser_remark4.raise_()\n self.label_macID4.raise_()\n self.label_receivable4.raise_()\n self.pushButton_edit5.raise_()\n self.label_68.raise_()\n self.label_69.raise_()\n self.textBrowser_payRecord5.raise_()\n self.label_sales5.raise_()\n self.label_70.raise_()\n self.textBrowser_shipTo5.raise_()\n self.label_71.raise_()\n self.line_27.raise_()\n self.label_72.raise_()\n self.label_73.raise_()\n self.label_74.raise_()\n self.label_model5.raise_()\n self.label_75.raise_()\n self.label_76.raise_()\n self.label_77.raise_()\n self.line_28.raise_()\n self.label_shipDate5.raise_()\n self.label_78.raise_()\n self.label_79.raise_()\n self.line_29.raise_()\n self.label_unitPrice5.raise_()\n self.label_amount5.raise_()\n self.line_30.raise_()\n self.label_customer5.raise_()\n self.label_number5.raise_()\n self.label_production5.raise_()\n self.label_80.raise_()\n self.label_paid5.raise_()\n self.label_payRemind5.raise_()\n self.label_81.raise_()\n self.label_82.raise_()\n self.line_31.raise_()\n self.textBrowser_remark5.raise_()\n self.label_macID5.raise_()\n self.label_receivable5.raise_()\n self.label_order1.raise_()\n self.label_id2.raise_()\n self.label_order2.raise_()\n self.label_id3.raise_()\n self.label_order3.raise_()\n self.label_id4.raise_()\n self.label_order4.raise_()\n self.label_id5.raise_()\n self.label_order5.raise_()\n\n self.retranslateUi(Form)\n self.pushButton_previous.clicked.connect(self.reloadWidgetDec)\n self.pushButton_next.clicked.connect(self.reloadWidgetAdd)\n QtCore.QMetaObject.connectSlotsByName(Form)\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Form\"))\n self.label_13.setText(_translate(\"Form\", \"
产品出货记录
    \"))\n self.label_id1.setText(_translate(\"Form\", \"shipmentID\"))\n self.label_customer1.setText(_translate(\"Form\", \"
customer
    \"))\n self.pushButton_SwitchCustomer.setText(_translate(\"Form\", \"客户信息界面\"))\n self.pushButton_edit1.setText(_translate(\"Form\", \"编辑\"))\n self.label_4.setText(_translate(\"Form\", \"出货产品\"))\n self.pushButton_SwitchService.setText(_translate(\"Form\", \"售后服务记录\"))\n self.label_8.setText(_translate(\"Form\", \"搜索\"))\n self.label_5.setText(_translate(\"Form\", \"规格型号\"))\n self.label_2.setText(_translate(\"Form\", \"出货日期\"))\n self.label_3.setText(_translate(\"Form\", \"业务负责\"))\n self.pushButton_previous.setText(_translate(\"Form\", \"上一页\"))\n self.pushButton_next.setText(_translate(\"Form\", \"下一页\"))\n self.pushButton_new.setText(_translate(\"Form\", \"新增出货信息\"))\n self.pushButton_jump.setText(_translate(\"Form\", \"GO!\"))\n self.label_9.setText(_translate(\"Form\", \"跳到第 页\"))\n self.label_10.setText(_translate(\"Form\", \"付款记录\"))\n self.label_11.setText(_translate(\"Form\", \"下次付款\"))\n self.label_12.setText(_translate(\"Form\", \"备注\"))\n self.textBrowser_payRecord1.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
payRecord
    \"))\n self.label_14.setText(_translate(\"Form\", \"客户名称\"))\n self.label_shipDate1.setText(_translate(\"Form\", \"shipDate\"))\n self.label_sales1.setText(_translate(\"Form\", \"sales\"))\n self.label_production1.setText(_translate(\"Form\", \"
production
    \"))\n self.label_model1.setText(_translate(\"Form\", \"model\"))\n self.label_15.setText(_translate(\"Form\", \"设备号\"))\n self.label_macID1.setText(_translate(\"Form\", \"macID\"))\n self.label_16.setText(_translate(\"Form\", \"数量\"))\n self.label_17.setText(_translate(\"Form\", \"单价\"))\n self.label_18.setText(_translate(\"Form\", \"金额\"))\n self.label_number1.setText(_translate(\"Form\", \"number\"))\n self.label_unitPrice1.setText(_translate(\"Form\", \"unitPrice\"))\n self.label_amount1.setText(_translate(\"Form\", \"amount\"))\n self.label_19.setText(_translate(\"Form\", \"已付款\"))\n self.label_paid1.setText(_translate(\"Form\", \"paid\"))\n self.label_20.setText(_translate(\"Form\", \"应收款\"))\n self.label_receivable1.setText(_translate(\"Form\", \"receivable\"))\n self.label_payRemind1.setText(_translate(\"Form\", \"payRemind\"))\n self.textBrowser_shipTo1.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
shipto
    \"))\n self.label_21.setText(_translate(\"Form\", \"发货地址\"))\n self.textBrowser_remark1.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
remark
    \"))\n self.pushButton_search.setText(_translate(\"Form\", \"GO!\"))\n self.pushButton_SwitchShipment.setText(_translate(\"Form\", \"产品出货信息\"))\n self.label_22.setText(_translate(\"Form\", \"第 / 页\"))\n self.label_cPage.setText(_translate(\"Form\", \"
cPage
    \"))\n self.label_tPage.setText(_translate(\"Form\", \"
tPage
    \"))\n self.pushButton_edit2.setText(_translate(\"Form\", \"编辑\"))\n self.label_6.setText(_translate(\"Form\", \"业务负责\"))\n self.label_25.setText(_translate(\"Form\", \"付款记录\"))\n self.label_model2.setText(_translate(\"Form\", \"model\"))\n self.label_26.setText(_translate(\"Form\", \"应收款\"))\n self.label_macID2.setText(_translate(\"Form\", \"macID\"))\n self.label_7.setText(_translate(\"Form\", \"出货产品\"))\n self.label_production2.setText(_translate(\"Form\", \"
production
    \"))\n self.label_27.setText(_translate(\"Form\", \"规格型号\"))\n self.label_paid2.setText(_translate(\"Form\", \"paid\"))\n self.label_28.setText(_translate(\"Form\", \"下次付款\"))\n self.label_29.setText(_translate(\"Form\", \"设备号\"))\n self.label_receivable2.setText(_translate(\"Form\", \"receivable\"))\n self.textBrowser_payRecord2.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
payRecord
    \"))\n self.label_30.setText(_translate(\"Form\", \"发货地址\"))\n self.label_31.setText(_translate(\"Form\", \"备注\"))\n self.label_customer2.setText(_translate(\"Form\", \"
customer
    \"))\n self.textBrowser_remark2.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
remark
    \"))\n self.label_32.setText(_translate(\"Form\", \"单价\"))\n self.label_33.setText(_translate(\"Form\", \"数量\"))\n self.textBrowser_shipTo2.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
shipto
    \"))\n self.label_34.setText(_translate(\"Form\", \"客户名称\"))\n self.label_35.setText(_translate(\"Form\", \"出货日期\"))\n self.label_payRemind2.setText(_translate(\"Form\", \"payRemind\"))\n self.label_number2.setText(_translate(\"Form\", \"number\"))\n self.label_sales2.setText(_translate(\"Form\", \"sales\"))\n self.label_36.setText(_translate(\"Form\", \"金额\"))\n self.label_37.setText(_translate(\"Form\", \"已付款\"))\n self.label_unitPrice2.setText(_translate(\"Form\", \"unitPrice\"))\n self.label_shipDate2.setText(_translate(\"Form\", \"shipDate\"))\n self.label_amount2.setText(_translate(\"Form\", \"amount\"))\n self.pushButton_edit3.setText(_translate(\"Form\", \"编辑\"))\n self.label_38.setText(_translate(\"Form\", \"规格型号\"))\n self.label_39.setText(_translate(\"Form\", \"应收款\"))\n self.textBrowser_payRecord3.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
payRecord
    \"))\n self.label_sales3.setText(_translate(\"Form\", \"sales\"))\n self.label_40.setText(_translate(\"Form\", \"设备号\"))\n self.textBrowser_shipTo3.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
shipto
    \"))\n self.label_41.setText(_translate(\"Form\", \"业务负责\"))\n self.label_42.setText(_translate(\"Form\", \"发货地址\"))\n self.label_43.setText(_translate(\"Form\", \"已付款\"))\n self.label_44.setText(_translate(\"Form\", \"金额\"))\n self.label_model3.setText(_translate(\"Form\", \"model\"))\n self.label_45.setText(_translate(\"Form\", \"出货产品\"))\n self.label_46.setText(_translate(\"Form\", \"备注\"))\n self.label_47.setText(_translate(\"Form\", \"单价\"))\n self.label_shipDate3.setText(_translate(\"Form\", \"shipDate\"))\n self.label_48.setText(_translate(\"Form\", \"客户名称\"))\n self.label_49.setText(_translate(\"Form\", \"数量\"))\n self.label_unitPrice3.setText(_translate(\"Form\", \"unitPrice\"))\n self.label_amount3.setText(_translate(\"Form\", \"amount\"))\n self.label_customer3.setText(_translate(\"Form\", \"
customer
    \"))\n self.label_number3.setText(_translate(\"Form\", \"number\"))\n self.label_production3.setText(_translate(\"Form\", \"
production
    \"))\n self.label_50.setText(_translate(\"Form\", \"出货日期\"))\n self.label_paid3.setText(_translate(\"Form\", \"paid\"))\n self.label_payRemind3.setText(_translate(\"Form\", \"payRemind\"))\n self.label_51.setText(_translate(\"Form\", \"下次付款\"))\n self.label_52.setText(_translate(\"Form\", \"付款记录\"))\n self.textBrowser_remark3.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
remark
    \"))\n self.label_macID3.setText(_translate(\"Form\", \"macID\"))\n self.label_receivable3.setText(_translate(\"Form\", \"receivable\"))\n self.pushButton_edit4.setText(_translate(\"Form\", \"编辑\"))\n self.label_53.setText(_translate(\"Form\", \"规格型号\"))\n self.label_54.setText(_translate(\"Form\", \"应收款\"))\n self.textBrowser_payRecord4.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
payRecord
    \"))\n self.label_sales4.setText(_translate(\"Form\", \"sales\"))\n self.label_55.setText(_translate(\"Form\", \"设备号\"))\n self.textBrowser_shipTo4.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
shipto
    \"))\n self.label_56.setText(_translate(\"Form\", \"业务负责\"))\n self.label_57.setText(_translate(\"Form\", \"发货地址\"))\n self.label_58.setText(_translate(\"Form\", \"已付款\"))\n self.label_59.setText(_translate(\"Form\", \"金额\"))\n self.label_model4.setText(_translate(\"Form\", \"model\"))\n self.label_60.setText(_translate(\"Form\", \"出货产品\"))\n self.label_61.setText(_translate(\"Form\", \"备注\"))\n self.label_62.setText(_translate(\"Form\", \"单价\"))\n self.label_shipDate4.setText(_translate(\"Form\", \"shipDate\"))\n self.label_63.setText(_translate(\"Form\", \"客户名称\"))\n self.label_64.setText(_translate(\"Form\", \"数量\"))\n self.label_unitPrice4.setText(_translate(\"Form\", \"unitPrice\"))\n self.label_amount4.setText(_translate(\"Form\", \"amount\"))\n self.label_customer4.setText(_translate(\"Form\", \"
customer
    \"))\n self.label_number4.setText(_translate(\"Form\", \"number\"))\n self.label_production4.setText(_translate(\"Form\", \"
production
    \"))\n self.label_65.setText(_translate(\"Form\", \"出货日期\"))\n self.label_paid4.setText(_translate(\"Form\", \"paid\"))\n self.label_payRemind4.setText(_translate(\"Form\", \"payRemind\"))\n self.label_66.setText(_translate(\"Form\", \"下次付款\"))\n self.label_67.setText(_translate(\"Form\", \"付款记录\"))\n self.textBrowser_remark4.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
remark
    \"))\n self.label_macID4.setText(_translate(\"Form\", \"macID\"))\n self.label_receivable4.setText(_translate(\"Form\", \"receivable\"))\n self.pushButton_edit5.setText(_translate(\"Form\", \"编辑\"))\n self.label_68.setText(_translate(\"Form\", \"规格型号\"))\n self.label_69.setText(_translate(\"Form\", \"应收款\"))\n self.textBrowser_payRecord5.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
payRecord
    \"))\n self.label_sales5.setText(_translate(\"Form\", \"sales\"))\n self.label_70.setText(_translate(\"Form\", \"设备号\"))\n self.textBrowser_shipTo5.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
shipto
    \"))\n self.label_71.setText(_translate(\"Form\", \"业务负责\"))\n self.label_72.setText(_translate(\"Form\", \"发货地址\"))\n self.label_73.setText(_translate(\"Form\", \"已付款\"))\n self.label_74.setText(_translate(\"Form\", \"金额\"))\n self.label_model5.setText(_translate(\"Form\", \"model\"))\n self.label_75.setText(_translate(\"Form\", \"出货产品\"))\n self.label_76.setText(_translate(\"Form\", \"备注\"))\n self.label_77.setText(_translate(\"Form\", \"单价\"))\n self.label_shipDate5.setText(_translate(\"Form\", \"shipDate\"))\n self.label_78.setText(_translate(\"Form\", \"客户名称\"))\n self.label_79.setText(_translate(\"Form\", \"数量\"))\n self.label_unitPrice5.setText(_translate(\"Form\", \"unitPrice\"))\n self.label_amount5.setText(_translate(\"Form\", \"amount\"))\n self.label_customer5.setText(_translate(\"Form\", \"
customer
    \"))\n self.label_number5.setText(_translate(\"Form\", \"number\"))\n self.label_production5.setText(_translate(\"Form\", \"
production
    \"))\n self.label_80.setText(_translate(\"Form\", \"出货日期\"))\n self.label_paid5.setText(_translate(\"Form\", \"paid\"))\n self.label_payRemind5.setText(_translate(\"Form\", \"payRemind\"))\n self.label_81.setText(_translate(\"Form\", \"下次付款\"))\n self.label_82.setText(_translate(\"Form\", \"付款记录\"))\n self.textBrowser_remark5.setHtml(_translate(\"Form\", \"\\n\"\n\"\\n\"\n\"
remark
    \"))\n self.label_macID5.setText(_translate(\"Form\", \"macID\"))\n self.label_receivable5.setText(_translate(\"Form\", \"receivable\"))\n self.label_order1.setText(_translate(\"Form\", \"order\"))\n self.label_id2.setText(_translate(\"Form\", \"shipmentID\"))\n self.label_order2.setText(_translate(\"Form\", \"order\"))\n self.label_id3.setText(_translate(\"Form\", \"shipmentID\"))\n self.label_order3.setText(_translate(\"Form\", \"order\"))\n self.label_id4.setText(_translate(\"Form\", \"shipmentID\"))\n self.label_order4.setText(_translate(\"Form\", \"order\"))\n self.label_id5.setText(_translate(\"Form\", \"shipmentID\"))\n self.label_order5.setText(_translate(\"Form\", \"order\"))\n\n####以上为QTDesiger自动生成代码\n###以下为自己写的代码\n def closeEvent(self, event): #关闭窗口触发以下事件\n reply = QMessageBox.question(self, '消息提示', '你确定要退出吗?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n event.accept() #接受关闭事件\n else:\n event.ignore() #忽略关闭事件\n def reloadWidgetAdd(self,i): #点击下一页触发\n print (\"reloadWidgeAdd\")\n i= int(self.label_order5.text())\n dblen=self.dblen()\n if i < dblen: #dblen():\n# global g\n NextOffice.shipment.shipment_main.g=NextOffice.shipment.shipment_main.g+1 #全局变量P页数+1\n self.pageDisplay(NextOffice.shipment.shipment_main.g)\n print(\"shipment_main的g是\",NextOffice.shipment.shipment_main.g)\n\n else:\n print(\"remain\")\n def reloadWidgetDec(self,i): #点击上一页触发\n print (\"reloadWidgeDec\")\n i= int(self.label_order5.text())\n if i > 5:\n global g\n NextOffice.shipment.shipment_main.g=NextOffice.shipment.shipment_main.g-1 #全局变量P页数+1\n# g=g-1 #全局变量P页数-1\n self.pageDisplay(NextOffice.shipment.shipment_main.g)\n else:\n print(\"remain\")\n def pageDisplay(self,g): #本函数显示g页的数据\n print(\"shipment_browser: pageDisplay\",g)\n orderE=g*5\n orderD=orderE-1\n orderC=orderE-2\n orderB=orderE-3\n orderA=orderE-4\n self.label_order1.setText(str(orderA))\n self.label_order2.setText(str(orderB))\n self.label_order3.setText(str(orderC))\n self.label_order4.setText(str(orderD))\n self.label_order5.setText(str(orderE))\n\n conn = sqlite3.connect('nextai.db')\n\n curs = conn.cursor() #创建游标\n# print (\"pageData()打开数据库成功\")\n curs.execute(\"SELECT * from shipment\")\n data= curs.fetchall() #data[]数组获得数据库中的全部数据\n curs.close() #关闭游标\n conn.commit() #保存数据库\n conn.close() #关闭数据库连接\n\n p=g*5-4 #获得当前页面的第1行\n\n blank=[\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"0\",\"0\",\"0\",\"\",\"\",\"\",\"\",\"\",\"\"]\n\n dblen=self.dblen()\n print(\"shipment_browser-pageDisplay:现在数据库中共有记录:\",dblen)\n a=dblen%5 #计算最后一页总共有几行?\n b=5-a #计算空白的有几行\n# c=0\n while b>0:#将当前页面的空白记录行填充N/A,以免data赋值的时候不够5行导致出错\n data.append(blank)\n if g == dblen//5+1: #如果当前页面是最后一页---总页数/5然后+1\n if b==1: #如果只有1行空白\n self.pushButton_edit5.hide() #隐藏最后一个编辑按钮\n elif b==2:#如果有2行空白\n self.pushButton_edit4.hide() #隐藏最后2个编辑按钮\n self.pushButton_edit5.hide()\n elif b==3:#如果有3行空白\n self.pushButton_edit3.hide() #隐藏最后2个编辑按钮\n self.pushButton_edit4.hide() #隐藏最后2个编辑按钮\n self.pushButton_edit5.hide()\n elif b==4:#如果有4行空白\n self.pushButton_edit2.hide() #隐藏最后2个编辑按钮\n self.pushButton_edit3.hide() #隐藏最后2个编辑按钮\n self.pushButton_edit4.hide() #隐藏最后2个编辑按钮\n self.pushButton_edit5.hide()\n b=b-1\n\n##计算出货产品价格与应收款\n unitPrice1=int(data[p-1][7])\n number1=int(data[p-1][8])\n amount1=unitPrice1*number1\n paid1=int(data[p-1][9])\n receivable1=amount1-paid1\n\n unitPrice2=int(data[p][7])\n number2=int(data[p][8])\n amount2=unitPrice2*number2\n paid2=int(data[p][9])\n receivable2=amount2-paid2\n\n unitPrice3=int(data[p+1][7])\n number3=int(data[p+1][8])\n 
amount3=unitPrice3*number3\n paid3=int(data[p+1][9])\n receivable3=amount3-paid3\n\n unitPrice4=int(data[p+2][7])\n number4=int(data[p+2][8])\n amount4=unitPrice4*number4\n paid4=int(data[p+2][9])\n receivable4=amount4-paid4\n\n unitPrice5=int(data[p+3][7])\n number5=int(data[p+3][8])\n amount5=unitPrice5*number5\n paid5=int(data[p+3][9])\n receivable5=amount5-paid5\n\n\n\n#以下将数据库中获取的内容分别对应显示在5行内\n self.label_id1.setText(str(data[p-1][0])) #数据库是从0开始的,所以减1\n self.label_shipDate1.setText(str(data[p-1][1]))\n self.label_customer1.setText(str(data[p-1][2]))\n self.label_sales1.setText(str(data[p-1][3]))\n self.label_production1.setText(str(data[p-1][4]))\n self.label_model1.setText(str(data[p-1][5]))\n self.label_macID1.setText(str(data[p-1][6]))\n self.label_unitPrice1.setText(str(data[p-1][7]))\n self.label_number1.setText(str(data[p-1][8]))\n self.label_paid1.setText(str(data[p-1][9]))\n self.textBrowser_payRecord1.setText(str(data[p-1][10]))\n self.label_payRemind1.setText(str(data[p-1][11]))\n self.textBrowser_shipTo1.setText(str(data[p-1][12]))\n self.textBrowser_remark1.setText(str(data[p-1][13]))\n self.label_amount1.setText(str(amount1))\n self.label_receivable1.setText(str(receivable1))\n\n self.label_id2.setText(str(data[p][0])) #数据库是从0开始的,所以减1\n self.label_shipDate2.setText(str(data[p][1]))\n self.label_customer2.setText(str(data[p][2]))\n self.label_sales2.setText(str(data[p][3]))\n self.label_production2.setText(str(data[p][4]))\n self.label_model2.setText(str(data[p][5]))\n self.label_macID2.setText(str(data[p][6]))\n self.label_unitPrice2.setText(str(data[p][7]))\n self.label_number2.setText(str(data[p][8]))\n# self.label_amount2.setText(str(data[p][9]))\n self.label_paid2.setText(str(data[p][9]))\n# self.label_receivable2.setText(str(data[p][11]))\n self.textBrowser_payRecord2.setText(str(data[p][10]))\n self.label_payRemind2.setText(str(data[p][11]))\n self.textBrowser_shipTo2.setText(str(data[p][12]))\n self.textBrowser_remark2.setText(str(data[p][13]))\n self.label_amount2.setText(str(amount2))\n self.label_receivable2.setText(str(receivable2))\n\n self.label_id3.setText(str(data[p+1][0])) #数据库是从0开始的,所以减1\n self.label_shipDate3.setText(str(data[p+1][1]))\n self.label_customer3.setText(str(data[p+1][2]))\n self.label_sales3.setText(str(data[p+1][3]))\n self.label_production3.setText(str(data[p+1][4]))\n self.label_model3.setText(str(data[p+1][5]))\n self.label_macID3.setText(str(data[p+1][6]))\n self.label_unitPrice3.setText(str(data[p+1][7]))\n self.label_number3.setText(str(data[p+1][8]))\n# self.label_amount3.setText(str(data[p+1][9]))\n self.label_paid3.setText(str(data[p+1][9]))\n# self.label_receivable3.setText(str(data[p+1][11]))\n self.textBrowser_payRecord3.setText(str(data[p+1][10]))\n self.label_payRemind3.setText(str(data[p+1][11]))\n self.textBrowser_shipTo3.setText(str(data[p+1][12]))\n self.textBrowser_remark3.setText(str(data[p+1][13]))\n self.label_amount3.setText(str(amount3))\n self.label_receivable3.setText(str(receivable3))\n\n\n self.label_id4.setText(str(data[p+2][0])) #数据库是从0开始的,所以减1\n self.label_shipDate4.setText(str(data[p+2][1]))\n self.label_customer4.setText(str(data[p+2][2]))\n self.label_sales4.setText(str(data[p+2][3]))\n self.label_production4.setText(str(data[p+2][4]))\n self.label_model4.setText(str(data[p+2][5]))\n self.label_macID4.setText(str(data[p+2][6]))\n self.label_unitPrice4.setText(str(data[p+2][7]))\n self.label_number4.setText(str(data[p+2][8]))\n# self.label_amount4.setText(str(data[p+2][9]))\n 
\n# set the current page / total pages shown on the form\n        self.label_cPage.setText(str(g))\n        if dblen%5==0: #the record count divides evenly by 5\n            tPage=int(dblen/5)\n        else:\n            tPage=int(dblen/5+1) #otherwise one extra page is needed\n        self.label_tPage.setText(str(tPage))\n\n    def page(self):\n        global g\n        g=g+0\n        return g\n\n    def dblen(self):\n        conn = sqlite3.connect('nextai.db')\n        curs = conn.cursor() #create a cursor\n        curs.execute(\"SELECT * from shipment\")\n        data= curs.fetchall()\n        lens=len(data)\n        curs.close() #close the cursor\n        conn.commit() #commit\n        conn.close() #close the connection\n        return lens\n\n    def reload(self):\n        print(\"shipment_browser:reload start\")\n        print(\"g=\",NextOffice.shipment.shipment_main.g)\n        self.pageDisplay(NextOffice.shipment.shipment_main.g)\n        dblen=self.dblen()\n        print(\"shipment_browser:reload finish\")\n\n    def jumpTo(self):\n\n        jump_g=str(self.lineEdit_jump.text())\n        print(\"jump start\")\n        if jump_g==\"\":\n            QMessageBox.critical(self,\"Warning\",\"Invalid input: please enter the page number to jump to, digits only\")\n        elif jump_g.isdigit()==False:\n            QMessageBox.critical(self,\"Warning\",\"Invalid input: please enter the page number to jump to, digits only\")\n        else:\n            ttPage=int(self.label_tPage.text())\n            jump_gg = int(jump_g)\n            if jump_gg > ttPage :\n                QMessageBox.critical(self,\"Warning\",\"Invalid input: that number is past the last page!\")\n            else:\n                self.pageDisplay(jump_gg)\n\n\nclass MyForm ( QWidget, Ui_Form ):\n    def __init__(self):\n        super().__init__()\n        self.setupUi(self)\n        self.pushButton_jump.clicked.connect(self.jumpTo)\n\n\nif __name__=='__main__':\n#    global g #global variable g -- the current page\n    g=1\n#    dblen=dblen() #fetch the total length of the database once so it need not be queried every time\n    app=QApplication(sys.argv)\n    b = MyForm()\n    b.pageDisplay(g)\n#    b.showBrowser()\n    b.show()\n    e = NextOffice.shipment.shipment_edit.MyForm()\n    a = NextOffice.shipment.shipment_add.MyForm()\n\n    b.pushButton_edit1.clicked.connect(e.handle_click1)\n    b.pushButton_edit2.clicked.connect(e.handle_click2)\n    b.pushButton_edit3.clicked.connect(e.handle_click3)\n    b.pushButton_edit4.clicked.connect(e.handle_click4)\n    b.pushButton_edit5.clicked.connect(e.handle_click5)\n\n    b.pushButton_new.clicked.connect(a.handle_click)\n\n\n    app.exec_()\n\n\n\n\"\"\"\nAfter running pyuic, change Main to self in the following 2 statements:\nself.pushButton_next.clicked.connect(self.reloadWidgetAdd)\nself.pushButton_previous.clicked.connect(self.reloadWidgetDec)\n\n\ndef dblen():\n# conn = 
sqlite3.connect('../nextai.db')\n conn = sqlite3.connect('nextai.db')\n curs = conn.cursor() #创建游标\n curs.execute(\"SELECT * from shipment\")\n data= curs.fetchall()\n lens=len(data)\n print (\"开始运行数据库共有记录:\",lens)\n curs.close() #关闭游标\n conn.commit() #保存数据库\n conn.close() #关闭数据库连接\n return lens\n\n\n\"\"\"","repo_name":"szbobby/python_study","sub_path":"practic1/shipment/trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":82891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19683989461","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n # edge case if len(nums) == 0\n # wihtout 0\n # without len(nums) - 1\n return max(nums[0], self.helper(nums[1:]), self.helper(nums[:-1])) \n\n def helper(self, nums):\n rob1, rob2 = 0, 0\n \n # [rob1, rob2, n, n+1, ...]\n for n in nums:\n tmp = max(n + rob1, rob2)\n rob1 = rob2\n rob2 = tmp\n \n return rob2 # by the end rob2 will be the last value (max we can rob)\n# T O(n)\n# S O(1)","repo_name":"TestardR/algorithms","sub_path":"blind_75/44_house_robber_2.py","file_name":"44_house_robber_2.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"7553509863","text":"from django.contrib.auth.models import User\nfrom shop.models import (\n Category, Banner, Product,\n OrderItem, Order, WishList,\n Brand, Coupon\n)\nfrom core.models import (\n AddressAndInfo, Profile,\n Theme, Node, Notification\n)\nfrom pages.models import Contact\nfrom .utils import Attr\n\n\nclass Model:\n model = None\n searchable = []\n required = []\n Return = {\n \"not_found\": \"DOESNOTEXIST\",\n \"duplicate\": \"DUPLICATE\",\n \"success\": \"OK\",\n \"failed\": \"FAIL\"\n }\n\n def __init__(self, **queries):\n self.fields = [\n f.name for f in self.model._meta.local_fields\n ]\n if not self.searchable:\n self.searchable = [_ for _ in self.fields if _ != \"id\"]\n if not self.required:\n self.required = [_ for _ in self.fields if _ != \"id\"]\n self.all = self.model.objects.all()\n for field in self.fields:\n setattr(\n self,\n \"all_{0}s\".format(field),\n [getattr(instance, field) for instance in self.all]\n )\n for key, query in queries.items():\n this = Attr(query)\n setattr(self, key, this)\n for field in self.fields:\n setattr(\n this,\n \"all_{0}s\".format(field),\n [getattr(instance, field) for instance in query]\n )\n\n def __iter__(self):\n return iter(self.all)\n\n def __getattr__(self, attr):\n if attr in self.__dict__:\n return self.__dict__.get(attr)\n return getattr(self.model, attr)\n\n def __add__(self, instance):\n matches = []\n for model in self.all:\n match = []\n for field in self.searchable:\n match.append(\n getattr(model, field) == getattr(instance, field)\n )\n matches.append(match)\n for match in matches:\n if all(match):\n return self.Return.get(\"duplicate\")\n instance.save()\n self.__init__()\n return self.Return.get(\"success\")\n\n def __sub__(self, instance):\n fields = {\n field: getattr(instance, field) for field in self.searchable\n }\n try:\n wanted = self.model.objects.get(**fields)\n wanted.delete()\n self.__init__()\n return self.Return.get(\"success\")\n except self.model.DoesNotExist:\n return self.Return.get(\"not_found\")\n\n def init(self, **fields):\n _ = {}\n if not fields:\n return self.Return[\"failed\"]\n for field in self.required:\n if not fields.get(field):\n return \"'%s' *REQUIRED*\" % field\n for field in self.fields:\n try:\n _[field] 
= fields[field]\n except KeyError:\n pass\n new_instance = self.model(**_)\n Return = self + new_instance\n if Return != self.Return[\"success\"]:\n return Return\n return new_instance\n\n\nclass Nodes(Model):\n model = Node\n searchable = [\"user\"]\n\n\nclass Themes(Model):\n model = Theme\n searchable = [\"pm\", \"sc\"]\n\n\nclass Wishlists(Model):\n model = WishList\n searchable = [\"owner\", \"item\"]\n\n\nclass Categories(Model):\n model = Category\n\n\nclass Brands(Model):\n model = Brand\n\n\nclass Coupons(Model):\n model = Coupon\n\n\nclass Products(Model):\n model = Product\n required = [\n \"name\", \"price\", \"slug\", \"image\",\n \"category\",\n ]\n\n\nclass OrderItems(Model):\n model = OrderItem\n searchable = [\"owner\", \"name\", \"price\"]\n required = searchable\n\n\nclass Orders(Model):\n model = Order\n searchable = [\n \"ref_code\", \"owner\", \"items\", \"price\"\n ]\n required = searchable\n\n\nclass Users(Model):\n model = User\n searchable = [\"email\", \"username\"]\n required = searchable\n\n\nclass Notifications(Model):\n model = Notification\n\n\ndef nodify(user):\n if not isinstance(user, User):\n raise Exception(\"User object needed.\")\n username = user.username\n theme = Theme.objects.create(name=username)\n new_node = Node(user=user, theme=theme)\n return Nodes() + new_node","repo_name":"Mcsavvy/fictional-meme","sub_path":"globals/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"1565715124","text":"def solve():\n suma: float = 0.0\n cont: int = 0\n while cont < 2:\n score = float(input())\n\n if 0 <= score <= 10:\n suma += score\n cont += 1\n else:\n print('nota invalida')\n\n print(f'media = {(suma / cont):.2f}')\n\n\ndef main():\n solve()\n\n while True:\n ans = int(input('novo calculo (1-sim 2-nao)\\n'))\n if ans == 1:\n solve()\n elif ans == 2:\n break\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Dairo01001/python_URI","sub_path":"URI_1118.py","file_name":"URI_1118.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"70063227915","text":"import datetime\nimport cv2\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\nfrom Model import get_model, get_prediction\n\nimage_shape = [1024, 1024, 3]\nrgb2classes = {\n (0, 0, 0): 0, # Background (Schwarz)\n (0, 0, 255): 1, # Human diver (Blau)\n (0, 255, 0): 2, # Plant (Grün)\n (0, 255, 255): 3, # Wreck or ruin (Sky)\n (255, 0, 0): 4, # Robot (Rot)\n (255, 0, 255): 5, # Reef or invertebrate (Pink)\n (255, 255, 0): 6, # Fish or vertebrate (Gelb)\n (255, 255, 255): 7 # Sea-floor or rock (Weiß)\n}\nclassColorMap = ListedColormap([(r / 255, g / 255, b / 255) for (r, g, b) in rgb2classes.keys()])\n\n\ndef start(video_path, model_path):\n model = get_model(model_path)\n\n video = cv2.VideoCapture(video_path)\n\n if not video.isOpened():\n print(\"Error reading video file\")\n\n fig = plt.figure(\"Realtime Test\")\n\n while True:\n ret, frame = video.read()\n\n if ret:\n frame = cv2.resize(frame, [1024, 1024])\n\n before_prediction = datetime.datetime.now()\n prediction = get_prediction(model, frame)\n dif = datetime.datetime.now() - before_prediction\n print(\"Prediction Time: \" + str(int(dif.total_seconds() * 1000)) + \" ms \\n\")\n\n plt.clf()\n plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), interpolation=\"nearest\")\n 
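# --- Illustrative sketch, not part of the original file ---
# The frame loop above measures one forward pass with datetime before
# printing "Prediction Time". The same millisecond timing can be
# wrapped around any callable; only the standard library is assumed,
# time_ms is a hypothetical helper, and the lambda stands in for
# get_prediction:
import datetime

def time_ms(fn, *args, **kwargs):
    start = datetime.datetime.now()
    result = fn(*args, **kwargs)
    elapsed = datetime.datetime.now() - start
    return result, int(elapsed.total_seconds() * 1000)

_, latency = time_ms(lambda: sum(range(1_000_000)))
print("Prediction Time: " + str(latency) + " ms")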
plt.imshow(prediction.cpu(), alpha=0.5, interpolation=\"nearest\", cmap=classColorMap)\n            plt.pause(0.1)\n\n        else:\n            break\n\n    video.release()\n    cv2.destroyAllWindows()\n","repo_name":"BenSchokoRiegel/Realtime_semantic_segmentation","sub_path":"Video_Prediction/RealtimeTest.py","file_name":"RealtimeTest.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"70413163276","text":"\"\"\"\n    Problem description:\n    You must take numCourses courses this semester, labeled from 0 to numCourses - 1.\n\n    Some courses have prerequisites, given by the array prerequisites,\n    where prerequisites[i] = [ai, bi] means that to take course ai you must first take course bi.\n\n    For example, the pair [0, 1] means: to take course 0 you must first finish course 1.\n\n    Return true if it is possible to finish all the courses, and false otherwise.\n\n    Link: https://leetcode-cn.com/problems/course-schedule\n\"\"\"\n\nfrom typing import List\nimport collections\nfrom collections import deque\n\nclass Solution:\n    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n        \"\"\"\n        Model the course relations as a directed graph; the task then reduces to deciding whether the directed graph contains a cycle.\n\n        Solve it with a DFS traversal and an auxiliary path array: a cycle exists exactly when some node repeats on the current path.\n\n        During the search every node is in one of three states:\n        \"unvisited\": the node has not been reached yet;\n        \"in progress\": the node has been reached but not backtracked from yet, i.e. it has not been pushed onto the stack and some of its neighbors are still unexplored;\n        \"finished\": the node has been explored and backtracked from, i.e. it has been pushed onto the stack and all of its neighbors already sit deeper in the stack, which satisfies the topological order.\n\n        Topological sort via DFS:\n        In every round, pick any \"unvisited\" node and start a depth-first search from it.\n\n        Mark the current node u as \"in progress\" and visit each neighbor v:\n        - if v is \"unvisited\", search v and backtrack to u when it is done;\n        - if v is \"in progress\", a cycle has been found, so no topological order exists;\n        - if v is \"finished\", v is already on the stack while u is not, so pushing u at any later time cannot break the (u, v) order, and nothing needs to be done.\n\n        When all neighbors of u are \"finished\", push u onto the stack and mark it \"finished\".\n\n        If no cycle is found by the end of the depth-first search, the stack holds all n nodes, and reading it from top to bottom yields a topological order.\n\n        Optimization:\n        Since we only need to decide whether some topological order exists, and the stack merely stores the final order, it suffices to record the per-node states and drop the stack.\n        \"\"\"\n        graph = [[] for _ in range(numCourses)]\n\n        # build the directed graph; the edges are 0-based\n        for edge in prerequisites:\n            graph[edge[1]].append(edge[0])\n\n        visit = [False] * numCourses  # has the node been visited\n        onPath = [False] * numCourses  # is the node on the current path\n        hasCycle = False  # assume no cycle by default\n\n        def dfs(node: int):\n            nonlocal graph, visit, onPath, hasCycle\n\n            if onPath[node]:\n                hasCycle = True\n\n            if visit[node] or hasCycle:\n                # already visited, or a cycle was found: no need to go on\n                return\n\n            # handle the current node\n            visit[node] = True\n            onPath[node] = True\n\n            for neighbor in graph[node]:\n                dfs(neighbor)\n\n            # backtrack\n            onPath[node] = False\n\n        for node in range(numCourses):\n            if not visit[node]:\n                dfs(node)\n\n        return not hasCycle\n\n    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n        \"\"\"The editorial's version; it saves space by folding the onPath marks into the visited array -- worth learning.\n        \"\"\"\n        edges = collections.defaultdict(list)\n        visited = [0] * numCourses\n        result = list()\n        valid = True\n\n        for info in prerequisites:\n            edges[info[1]].append(info[0])\n\n        def dfs(u: int):\n            nonlocal valid\n\n            visited[u] = 1\n\n            for v in edges[u]:\n                if visited[v] == 0:\n                    dfs(v)\n\n                    if not valid:\n                        return\n\n                elif visited[v] == 1:\n                    valid = False\n                    return\n\n            visited[u] = 2\n            result.append(u)\n\n        for i in range(numCourses):\n            if valid and not visited[i]:\n                dfs(i)\n\n        return valid\n\n    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n        \"\"\"\n        BFS-based solution.\n\n        Mind the graph initialization: a directed edge b -> a encodes the dependency.\n\n        The inDegrees array records the in-degree of every node.\n\n        Idea:\n        1. Seed the BFS queue with every node whose in-degree is 0.\n        2. Run the BFS loop: keep popping nodes, decrement the in-degree of each neighbor, and enqueue any neighbor whose in-degree drops to 0.\n        3. If every node gets traversed in the end (count equals the number of nodes), there is no cycle; otherwise there is one.\n        \"\"\"\n        graph = [[] for _ in range(numCourses)]\n        inDegrees = [0] * numCourses  # in-degree of every node\n\n        for edge in prerequisites:\n            graph[edge[1]].append(edge[0])  # record the dependency order in the graph\n            inDegrees[edge[0]] += 1\n\n        que = deque([u for u in range(numCourses) if inDegrees[u] == 0])\n\n        nodeCount = 0\n        while que:\n            node = que.popleft()\n            nodeCount += 1\n\n            for neighbor in graph[node]:\n                inDegrees[neighbor] -= 1\n\n                if inDegrees[neighbor] == 0:\n                    que.append(neighbor)\n\n        return nodeCount == numCourses\n\n\n\n","repo_name":"Chenghr/Algorithm","sub_path":"题库/labuladong/chap2_树图/2.3_图论/3_拓扑排序/1_课程表i.py","file_name":"1_课程表i.py","file_ext":"py","file_size_in_byte":6382,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"29126648122","text":"import pygame\r\nimport random\r\npygame.init()\r\n\r\nfile = open(\"settings.txt\", \"r\")\r\nfile_lines = file.readlines()\r\nfor i in range(len(file_lines)):\r\n\ttry:\r\n\t\tfile_lines[i] = int(\"\".join(file_lines[i][:-1]).lower())\r\n\texcept:\r\n\t\tpass\r\n\r\ngrid_width = file_lines[1]\r\ngrid_height = file_lines[4]\r\nbox_size = file_lines[7]\r\ncolor_amount = file_lines[10]\r\n\r\nif color_amount > 13:\r\n\tcolor_amount = 13\r\nelif color_amount < 2:\r\n\tcolor_amount = 2\r\n\r\nmoves = grid_width * grid_height // 100 * color_amount + color_amount\r\n# dark red orange yellow green dark blue light purple light blue cyan brown light green white black\t\t\tgold\r\ncolor_options = [(180, 0, 0), (255, 60, 0), (230, 230, 0), (0, 140, 0), (0, 0, 255), (130, 60, 255), (50, 100, 255), (0, 200, 255), (80, 30, 15), (128, 255, 128), (0, 0, 0), (255, 255, 255), (210, 130, 50)]\r\ncolor_options = color_options[:color_amount]\r\n\r\n\"\"\"\r\nnew_colors = []\r\nfor i in range(color_amount):\r\n\tnum = random.randint(0, len(color_options) - 1)\r\n\tnew_colors.append(color_options[num])\r\n\tcolor_options.pop(num)\r\n\r\ncolor_options = new_colors\r\n\"\"\"\r\n\r\nscreen_width = grid_width * box_size\r\nscreen_height = grid_height * box_size + 100\r\n\r\nwin = pygame.display.set_mode((screen_width, screen_height))\r\nclock = pygame.time.Clock()\r\npygame.display.set_caption(\"Flood It\")\r\n\r\nclass Square(object):\r\n\tdef __init__(self, x, y):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.color = random.choice(color_options)\r\n\r\n\t# to draw the certain box\r\n\tdef draw(self):\r\n\t\tpygame.draw.rect(win, self.color, (self.x, self.y, box_size, box_size))\r\n\r\n\r\ndef change_surround(x, y, color):\r\n\tif x > 0:\r\n\t\tif grid[x-1][y].color == color and (x - 1, y) not in set_colors:\r\n\t\t\tset_colors.append((x-1, y))\r\n\tif x < grid_width - 1:\r\n\t\tif grid[x+1][y].color == color and (x + 1, y) not in set_colors:\r\n\t\t\tset_colors.append((x+1, y))\r\n\tif y > 0:\r\n\t\tif grid[x][y-1].color == color and (x, y - 1) not in set_colors:\r\n\t\t\tset_colors.append((x, y-1))\r\n\tif y < grid_height - 1:\r\n\t\tif grid[x][y+1].color == color and (x, y + 1) not in set_colors:\r\n\t\t\tset_colors.append((x, y+1))\r\n\r\n\r\ndef move(x, y, first=False):\r\n\tglobal old_color, moves\r\n\told_color = grid[0][0].color\r\n\tcolor = grid[x][y].color\r\n\tif color != old_color or first:\r\n\t\tmoves -= 1\r\n\t\tfor pos in set_colors:\r\n\t\t\tgrid[pos[0]][pos[1]].color = color\r\n\t\t\tchange_surround(pos[0], pos[1], color)\r\n\r\n\r\ndef click(x, y):\r\n\tglobal moves\r\n\tfor i, row in enumerate(grid):\r\n\t\tfor j, box in enumerate(row):\r\n\t\t\tif x > box.x and x < box.x + 
box_size:\r\n\t\t\t\tif y > box.y and y < box.y + box_size:\r\n\t\t\t\t\tmove(i, j)\r\n\r\ndef text_objects(text, font):\r\n\ttextSurface = font.render(text, True, (255, 255, 255))\r\n\treturn textSurface, textSurface.get_rect()\r\n\r\ndef display_score(text):\r\n\tlargeText = pygame.font.Font('freesansbold.ttf', 60)\r\n\tTextSurf, TextRect = text_objects(text, largeText)\r\n\tTextRect.center = (round(screen_width * 0.5), 50)\r\n\twin.blit(TextSurf, TextRect)\r\n\r\n\r\ndef redraw_window():\r\n\twin.fill((0, 0, 0))\r\n\r\n\tfor x in grid:\r\n\t\tfor y in x:\r\n\t\t\ty.draw()\r\n\r\n\tdisplay_score(f\" {str(moves)} moves left. {str(int(len(set_colors) / (grid_width * grid_height) * 100))}% completed.\")\r\n\r\n\tpygame.display.update()\r\n\r\n\r\ngrid = []\r\nset_colors = [(0, 0)]\r\nfor i in range(grid_width):\r\n\tgrid.append([])\r\n\tfor j in range(grid_height):\r\n\t\tgrid[i].append(Square(i * box_size, j * box_size + 100))\r\n\r\nmove(0, 0, True)\r\nrun = True\r\nwhile run:\r\n\tclock.tick(60)\r\n\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\trun = False\r\n\t\tif event.type == pygame.MOUSEBUTTONUP:\r\n\t\t\tif moves > 0 and (len(set_colors) / (grid_width * grid_height) * 100) != 100:\r\n\t\t\t\tclick(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1])\r\n\r\n\tkeys = pygame.key.get_pressed()\r\n\r\n\tredraw_window()\r\n\r\n\r\npygame.quit()\r\n","repo_name":"mitchellpincham/Flood-it","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"18538095190","text":"from django.shortcuts import render\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom django.views import View\nimport requests\nimport urllib2\nimport json\nimport re\n# Create your views here.\n\nfrom .forms import SubmitQueryForm\n\nclass queryIndexView(View):\n def get(self, request):\n the_form = SubmitQueryForm()\n context = {\n 'title': 'Search your favourite movies and shows',\n 'subTitle': 'Proudly powered by OMDb API',\n 'form': the_form,\n 'loadResults': 'False'\n }\n return render(request, \"queryOMBd/index.html\", context)\n\n def post(self, request):\n #print(request.POST.get('query'))\n form = SubmitQueryForm(request.POST)\n if form.is_valid():\n print(form.cleaned_data)\n query_response = omdbapi_search(form.cleaned_data['url'])\n context = {\n 'title': 'Search your favourite movies and shows',\n 'subTitle': 'Proudly powered by OMDb API',\n 'form': form,\n 'query': form.cleaned_data['url'],\n 'loadResults': 'True',\n 'response': query_response\n }\n return render(request, \"queryOMBd/index.html\", context)\n\ndef omdbapi_search(query):\n if re.match( r'tt\\d+', query):\n url = 'http://www.omdbapi.com/?i=' + query\n display = 'Id'\n else:\n search_query = query.replace(' ', '+')\n url = 'http://www.omdbapi.com/?s=' + search_query + '&plot=full'\n display = 'Search'\n json_obj = urllib2.urlopen(url)\n data = json.load(json_obj)\n data['Display'] = display\n #if data['Response'] == 'True':\n #for item in data['Search']:\n # print item['Title'], item['Year']\n return data;\n\n'''\ndef index(request):\n if request.method == \"POST\":\n print(request.POST)\n print(request.POST['query'])\n print(request.POST.get('query'))\n form = SubmitQueryForm(request.POST)\n #if form.is_valid()\n return render(request, \"queryOMBd/index.html\", {})\n\ndef test(request):\n return HttpResponse('My second view!')\n\ndef 
profile(request):\n req = requests.get('http://www.omdbapi.com/?t=game+of+thrones')\n content = req.text\n return HttpResponse(content)\n'''\n","repo_name":"kevyo23/getFlix","sub_path":"queryOMBd/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6995499042","text":"from time import sleep,ctime\nfrom multiprocessing import Process\n\ni=100\ndef proc1():\n global i\n count=1\n while True:\n print ('proc1 >>',i)\n i=i+2\n sleep(1)\n if count==5:\n break\n count=count+1\n\ndef proc2():\n global i\n count=1\n while True:\n print ('proc2 >>>>>>>>',i)\n i=i-3\n sleep(1)\n if count==5:\n break\n count=count+1\nif __name__ == '__main__':\n\tprint (\"start\")\n\tp1=Process(target=proc1)\n\tp2=Process(target=proc2)\n\tp1.start()\n\tp2.start()\n\tp1.join()\n\tp2.join()\n\tprint (\"end\")","repo_name":"wei20066/myapps","sub_path":"share.py","file_name":"share.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17659899049","text":"from envs import REGISTRY as env_REGISTRY\nfrom components import env_utils\nfrom functools import partial\nfrom components.episode_buffer import EpisodeBatch\nfrom multiprocessing import Pipe, Process\nimport numpy as np\n\nfrom components.featurize import *\n\n# Based (very) heavily on SubprocVecEnv from OpenAI Baselines\n# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py\nclass ParallelRunner:\n\n def __init__(self, args, logger):\n self.args = args\n self.logger = logger\n self.batch_size = self.args.batch_size_run # 并行环境数量\n\n # Make subprocesses for the envs\n self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])\n env_fn = env_REGISTRY[self.args.env]\n self.ps = [Process(target=env_worker,\n args=(worker_conn, CloudpickleWrapper(env_fn), self.args.train_idx_list))\n for worker_conn in self.worker_conns]\n\n for p in self.ps:\n p.daemon = True\n p.start()\n\n # self.parent_conns[0].send((\"get_env_info\", None))\n # self.env_info = self.parent_conns[0].recv()\n self.env_info = env_utils.get_env_info(self.args)\n self.episode_limit = self.env_info[\"episode_limit\"]\n # 设置最大步长\n for parent_conn in self.parent_conns:\n parent_conn.send((\"set_max_steps\", args.max_step-1))\n\n self.t = 0\n\n self.t_env = 0\n\n self.train_returns = []\n self.test_returns = []\n self.train_stats = {}\n self.test_stats = {}\n\n self.log_train_stats_t = -100000\n\n def setup(self, scheme, groups, preprocess, mac):\n self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,\n preprocess=preprocess, device=self.args.device)\n self.mac = mac\n self.scheme = scheme\n self.groups = groups\n self.preprocess = preprocess\n\n # def get_env_info(self):\n # return self.env_info\n #\n # def save_replay(self):\n # pass\n\n def close_env(self):\n for parent_conn in self.parent_conns:\n parent_conn.send((\"close\", None))\n\n def reset(self):\n self.batch = self.new_batch()\n\n # Reset the envs\n for parent_conn in self.parent_conns:\n parent_conn.send((\"reset\", None))\n\n pre_transition_data = {\n \"board_state\": [],\n \"flat_state\": [],\n \"avail_actions\": [],\n \"board_obs\": [],\n \"flat_obs\": []\n }\n # Get the obs, state and avail_actions back\n for parent_conn in self.parent_conns:\n data = parent_conn.recv()\n 
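# --- Illustrative sketch, not part of the original file ---
# The runner above talks to each env worker over a Pipe using
# (command, data) tuples and reads exactly one reply per request.
# A minimal stand-alone version of that request/reply pattern,
# assuming only the standard library (worker and the "reset"/"close"
# commands here are simplified stand-ins for env_worker):
from multiprocessing import Pipe, Process

def worker(conn):
    while True:
        cmd, data = conn.recv()
        if cmd == "reset":
            conn.send({"obs": [0, 0], "avail_actions": [1, 1]})
        elif cmd == "close":
            conn.close()
            break

if __name__ == "__main__":
    parent, child = Pipe()
    p = Process(target=worker, args=(child,))
    p.start()
    parent.send(("reset", None))
    print(parent.recv())  # {'obs': [0, 0], 'avail_actions': [1, 1]}
    parent.send(("close", None))
    p.join()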
pre_transition_data[\"board_state\"].append(data[\"board_state\"])\n pre_transition_data[\"flat_state\"].append(data[\"flat_state\"])\n pre_transition_data[\"avail_actions\"].append(data[\"avail_actions\"])\n pre_transition_data[\"board_obs\"].append(data[\"board_obs\"])\n pre_transition_data[\"flat_obs\"].append(data[\"flat_obs\"])\n\n self.batch.update(pre_transition_data, ts=0)\n\n self.t = 0\n self.env_steps_this_run = 0\n\n def run(self, test_mode=False):\n self.reset()\n\n all_terminated = False\n episode_returns = [0 for _ in range(self.batch_size)] # 长度为并行环境的数量\n episode_lengths = [0 for _ in range(self.batch_size)]\n self.mac.init_hidden(batch_size=self.batch_size)\n terminated = [False for _ in range(self.batch_size)]\n envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed] # 未结束的环境的编号\n # final_env_infos = [] # may store extra stats like battle won. this is filled in ORDER OF TERMINATION\n\n while True:\n # 将截至目前的 batch 发给所有 agent\n # 收到的是未结束的环境中,在这个 time_step 下 agents 的动作\n # Pass the entire batch of experiences up till now to the agents\n # Receive the actions for each agent at this timestep in a batch for each un-terminated env\n actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)\n cpu_actions = actions.to(\"cpu\").numpy()\n # print(\"terminated:\", terminated)\n # print('cpu_actions:', cpu_actions)\n # Update the actions taken\n actions_chosen = {\n \"actions\": actions.unsqueeze(1) # 变成列向量\n }\n self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)\n\n for idx, parent_conn in enumerate(self.parent_conns):\n if idx in envs_not_terminated:\n if not terminated[idx]:\n parent_conn.send((\"get_all_actions\", None))\n\n action_idx = 0\n all_actions_list = []\n for idx, parent_conn in enumerate(self.parent_conns):\n if idx in envs_not_terminated:\n if not terminated[idx]:\n all_actions = parent_conn.recv()\n # print('all_actions_', idx, ':',all_actions)\n for agent_idx, train_idx in enumerate(self.args.train_idx_list):\n all_actions[train_idx] = int(cpu_actions[action_idx][agent_idx]) # 替换\n all_actions_list.append(all_actions)\n else:\n all_actions_list.append(None)\n action_idx += 1\n # print('all_actions:', all_actions_list)\n # print('================================')\n # Send actions to each env\n action_idx = 0\n for idx, parent_conn in enumerate(self.parent_conns):\n if idx in envs_not_terminated: # We produced actions for this env\n if not terminated[idx]: # Only send the actions to the env if it hasn't terminated\n parent_conn.send((\"step\", all_actions_list[action_idx]))\n action_idx += 1 # actions is not a list over every env\n\n # Update envs_not_terminated, 全部环境都回合结束了才退出\n envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]\n all_terminated = all(terminated)\n if all_terminated:\n break\n\n # 当前 timestep 的数据\n # Post step data we will insert for the current timestep\n post_transition_data = {\n \"reward\": [],\n \"terminated\": []\n }\n # 下一 timestep 的数据\n # Data for the next step we will insert in order to select an action\n pre_transition_data = {\n \"board_state\": [],\n \"flat_state\": [],\n \"avail_actions\": [],\n \"board_obs\": [],\n \"flat_obs\": []\n }\n\n # 只收集未结束的环境的数据反馈\n # Receive data back for each unterminated env\n for idx, parent_conn in enumerate(self.parent_conns):\n if not terminated[idx]:\n data = parent_conn.recv()\n # 处理当前 timestep 的数据\n # Remaining data for this current 
timestep\n post_transition_data[\"reward\"].append((data[\"reward\"],))\n\n episode_returns[idx] += data[\"reward\"]\n episode_lengths[idx] += 1\n if not test_mode:\n self.env_steps_this_run += 1\n\n # todo: 有问题\n env_terminated = False\n # if data[\"terminated\"]:\n # final_env_infos.append(data[\"info\"])\n if data[\"terminated\"]:# and not data[\"info\"].get(\"episode_limit\", False):\n env_terminated = True\n terminated[idx] = data[\"terminated\"]\n post_transition_data[\"terminated\"].append((env_terminated,))\n\n # 处理下一回合需要的数据\n # Data for the next timestep needed to select an action\n pre_transition_data[\"board_state\"].append(data[\"board_state\"])\n pre_transition_data[\"flat_state\"].append(data[\"flat_state\"])\n pre_transition_data[\"avail_actions\"].append(data[\"avail_actions\"])\n pre_transition_data[\"board_obs\"].append(data[\"board_obs\"])\n pre_transition_data[\"flat_obs\"].append(data[\"flat_obs\"])\n\n # 把当前 timestep 的数据添加到 episode batch 里\n # Add post_transiton data into the batch\n self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)\n\n # Move onto the next timestep\n self.t += 1\n\n # timestep 加 1 之后,有关数据添加到 episode batch 里\n # Add the pre-transition data\n self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)\n\n if not test_mode:\n self.t_env += self.env_steps_this_run # 加上当前 timestep 下未终止的环境跑的步数, t_env 是 total_timesteps\n\n # Get stats back for each env\n for parent_conn in self.parent_conns:\n parent_conn.send((\"get_stats\",None))\n\n env_stats = []\n for parent_conn in self.parent_conns:\n env_stat = parent_conn.recv()\n env_stats.append(env_stat)\n\n cur_stats = self.test_stats if test_mode else self.train_stats\n cur_returns = self.test_returns if test_mode else self.train_returns\n log_prefix = \"test_\" if test_mode else \"\"\n infos = [cur_stats] #+ final_env_infos\n # cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})\n cur_stats[\"n_episodes\"] = self.batch_size + cur_stats.get(\"n_episodes\", 0)\n cur_stats[\"ep_length\"] = sum(episode_lengths) + cur_stats.get(\"ep_length\", 0)\n\n cur_returns.extend(episode_returns)\n\n n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size\n if test_mode and (len(self.test_returns) == n_test_runs):\n self._log(cur_returns, cur_stats, log_prefix)\n elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:\n self._log(cur_returns, cur_stats, log_prefix)\n if hasattr(self.mac.action_selector, \"epsilon\"):\n self.logger.log_stat(\"epsilon\", self.mac.action_selector.epsilon, self.t_env)\n self.log_train_stats_t = self.t_env\n\n return self.batch\n\n def _log(self, returns, stats, prefix):\n self.logger.log_stat(prefix + \"return_mean\", np.mean(returns), self.t_env)\n self.logger.log_stat(prefix + \"return_std\", np.std(returns), self.t_env)\n returns.clear()\n\n for k, v in stats.items():\n if k != \"n_episodes\":\n self.logger.log_stat(prefix + k + \"_mean\" , v/stats[\"n_episodes\"], self.t_env)\n stats.clear()\n\n\ndef env_worker(remote, env_fn, train_list):\n # Make environment\n env = env_fn.var()\n while True:\n cmd, data = remote.recv()\n if cmd == \"step\":\n actions = data\n # Take a step in the environment\n next_obs_list, reward_list, terminated, env_info = env.step(actions)\n\n state = env.get_state()\n board_state = to_board_state(state, train_list)\n flat_state = to_flat_state(state, train_list)\n\n obs_list = [] # 取训练的 obs\n for 
agent_idx in train_list:\n obs_list.append(next_obs_list[agent_idx])\n board_obs_list = to_board_obs(obs_list)\n flat_obs_list = to_flat_obs(obs_list)\n\n avail_actions = env_utils.get_avail_actions(obs_list)\n\n reward = reward_list[train_list[0]] # 取其中一个训练智能体的奖励返回\n\n remote.send({\n # Data for the next timestep needed to pick an action\n \"board_state\": board_state,\n \"flat_state\": flat_state,\n \"avail_actions\": avail_actions,\n \"board_obs\": board_obs_list,\n \"flat_obs\": flat_obs_list,\n # Rest of the data for the current timestep\n \"reward\": reward,\n \"terminated\": terminated,\n \"info\": env_info\n })\n elif cmd == \"reset\":\n env.reset()\n state = env.get_state()\n board_state = to_board_state(state, train_list)\n flat_state = to_flat_state(state, train_list)\n\n obs_list = env_utils.get_agent_obs(env, train_list) # 包含了两个本方智能体 obs 的列表\n board_obs_list = to_board_obs(obs_list)\n flat_obs_list = to_flat_obs(obs_list)\n\n remote.send({\n \"board_state\": board_state,\n \"flat_state\": flat_state,\n \"avail_actions\": env_utils.get_avail_actions(obs_list),\n \"board_obs\": board_obs_list,\n \"flat_obs\": flat_obs_list\n })\n elif cmd == \"close\":\n env.close()\n remote.close()\n break\n # elif cmd == \"get_env_info\":\n # remote.send(env_utils.get_env_info(args=args))\n elif cmd == \"get_stats\":\n remote.send(env_utils.get_stats(env))\n elif cmd == \"get_all_actions\":\n all_obs = env_utils.get_all_obs(env)\n all_actions = env.act(all_obs)\n remote.send(all_actions)\n elif cmd == \"set_max_steps\":\n env._max_steps = data\n else:\n raise NotImplementedError\n\n\nclass CloudpickleWrapper():\n \"\"\"\n Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)\n \"\"\"\n def __init__(self, var):\n self.var = var\n def __getstate__(self):\n import cloudpickle\n return cloudpickle.dumps(self.var)\n def __setstate__(self, ob):\n import pickle\n self.var = pickle.loads(ob)\n\n","repo_name":"goodbyeearth/marl_pmm","sub_path":"runners/parallel_runner.py","file_name":"parallel_runner.py","file_ext":"py","file_size_in_byte":14346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36902741496","text":"import numpy as np\n# Internal modules\nfrom miseval.confusion_matrix import calc_ConfusionMatrix\n\n#-----------------------------------------------------#\n# Calculate : Volumetric Similarity #\n#-----------------------------------------------------#\n\"\"\"\nFormula:\n VS = 1 - (|FN-FP| / (2TP + FP + FN))\n\nReferences:\n Taha, A.A., Hanbury, A.\n Metrics for evaluating 3D medical image segmentation: analysis, selection, and tool.\n BMC Med Imaging 15, 29 (2015). 
https://doi.org/10.1186/s12880-015-0068-x\n\"\"\"\ndef calc_VolumetricSimilarity(truth, pred, c=1, **kwargs):\n # Obtain confusion mat\n tp, tn, fp, fn = calc_ConfusionMatrix(truth, pred, c)\n # Compute VS\n if (2*tp + fp + fn) != 0:\n vs = 1 - (np.abs(fn-fp) / (2*tp + fp + fn))\n else : vs = 1.0 - 0.0\n # Return VS score\n return vs\n","repo_name":"frankkramer-lab/miseval","sub_path":"miseval/volumetric_similarity.py","file_name":"volumetric_similarity.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"28"} +{"seq_id":"214716429","text":"# split.py\n# author: Jing Wen\n# date: 2023-11-28\n\nimport click\nimport os\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n@click.command()\n@click.option('--raw_data', type=str, help=\"Path to raw data\")\n@click.option('--data_to', type=str, help=\"Path to directory where processed data will be written to\")\n@click.option('--random_state', type=int, help=\"Random state\", default=123)\n\ndef main(raw_data, data_to, random_state):\n '''\n This script reads a raw dataset, splits it into train and test sets, and saves them as CSV files.\n\n Parameters\n ----------\n raw_data : str\n A string representing the path to the raw data in CSV format.\n\n data_to : str\n A string representing the path to the directory where the split datasets will be saved.\n\n random_state : int, optional\n An integer that is used as a seed for the random number generator during the split.\n Default is 123.\n '''\n\n df = pd.read_csv(raw_data, index_col=0)\n\n # create the split\n train_df, test_df = train_test_split(\n df, test_size=0.20, random_state=random_state\n )\n\n train_df.to_csv(os.path.join(data_to, \"train_df.csv\"), index=True)\n test_df.to_csv(os.path.join(data_to, \"test_df.csv\"), index=True)\n\nif __name__ == '__main__':\n main()","repo_name":"UBC-MDS/DSCI_522_Group-11_Save-The-Earth","sub_path":"scripts/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"26194733818","text":"import sys\nfrom abc import ABCMeta, abstractmethod\nfrom torch import nn\nfrom copy import copy\nimport inspect\n\n\nclass BaseModel(nn.Module, metaclass=ABCMeta):\n default_conf = {}\n required_inputs = []\n\n def __init__(self, conf):\n \"\"\"Perform some logic and call the _init method of the child model.\"\"\"\n super().__init__()\n self.conf = conf = {**self.default_conf, **conf}\n self.required_inputs = copy(self.required_inputs)\n self._init(conf)\n sys.stdout.flush()\n\n def forward(self, data):\n \"\"\"Check the data and call the _forward method of the child model.\"\"\"\n for key in self.required_inputs:\n assert key in data, 'Missing key {} in data'.format(key)\n return self._forward(data)\n\n @abstractmethod\n def _init(self, conf):\n \"\"\"To be implemented by the child class.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def _forward(self, data):\n \"\"\"To be implemented by the child class.\"\"\"\n raise NotImplementedError\n\n\ndef dynamic_load(root, model):\n module_path = f'{root.__name__}.{model}'\n module = __import__(module_path, fromlist=[''])\n classes = inspect.getmembers(module, inspect.isclass)\n # Filter classes defined in the module\n classes = [c for c in classes if c[1].__module__ == module_path]\n # Filter classes inherited from BaseModel\n classes = [c for c in classes if issubclass(c[1], 
BaseModel)]\n assert len(classes) == 1, classes\n return classes[0][1]\n # return getattr(module, 'Model')\n","repo_name":"cvg/Hierarchical-Localization","sub_path":"hloc/utils/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":2510,"dataset":"github-code","pt":"28"} +{"seq_id":"14359476176","text":"#!/usr/bin/python\nimport copy\nimport logging\nimport os\nimport pickle\nimport re\nimport urllib\nfrom functools import reduce\nfrom typing import Dict, List\n\nimport cobra\nfrom cobra.io import read_sbml_model\nimport dill\n\n# reimports for type hints\nfrom class_generate_database import *\nfrom class_generate_database import compound as CompoundType\nfrom class_generate_database import gene as GeneType\nfrom class_generate_database import reaction as ReactionType\nfrom equations_generate_database import * \nfrom functions_generate_database import *\nfrom pattern_generate_database import *\n\n\ndef cobra_reconstruction(\n model_name: str,\n model_id: str,\n metabolite_list: Dict[List, CompoundType],\n reaction_list: Dict[List, ReactionType],\n gene_list: Dict[List, GeneType],\n pathways: Dict[str, str],\n location_dict: Dict[str, str],\n metabolite_equivalent: Dict[str, str],\n metabolite_list_general: Dict[List, CompoundType],\n) -> cobra.Model:\n \"\"\"Reconstruction of gathered information given by using cobrapy.\n\n Parameters\n ----------\n model_name: str\n model_name: id\n metabolite_list: Dict[List, compound]\n reaction_list: Dict[List, reaction]\n gene_list: Dict[List, gene]\n pathways: Dict[str, str]\n Map from Pathway to Reaction identifiers (PathNameRxn).\n location_dict: Dict[str, str]\n generated from the Comparment_Cl. This stores human readable\n compartment names to comparment identifiers. 
The extracted identifiers\n in the reactions and metabolites contain the id and the comparment\n name, so this mapping is necessary to achieve a proper\n SBML-compatible identifier (no spaces).\n \"\"\"\n model = cobra.Model(model_id or model_name, model_name or model_id)\n location_dict = {k.lower(): v for k, v in location_dict.items()}\n # metabolites\n compounds = [\n cobra.Metabolite(\n compound.ID2() + \"_\" + location_dict.get(compound.Subcel.lower()),\n compound.Formula1(),\n compound.Name(),\n float(compound.charge()),\n location_dict.get(compound.Subcel.lower()),\n )\n for iden, compound in metabolite_list.items()\n ] \n model.add_metabolites(compounds)\n # store mapping (met identifier -> met.id in model) for reaction section\n met_mapping = {}\n # metabolite annotation\n for iden, compound in metabolite_list.items():\n model_met = model.metabolites.get_by_id(\n compound.ID2() + \"_\" + location_dict.get(compound.Subcel.lower())\n )\n met_mapping[iden] = model_met.id\n annotation = {\n \"pubchem.compound\": compound.PubChem(),\n \"chebi.compound\": compound.CheBI(),\n \"glycomedb\": compound.GlyDB(),\n \"jcggdb\": compound.JCGGDB(),\n \"inchi\": compound.inchi(),\n \"inchikey\": compound.inchikey(),\n \"lipidbank\": compound.LipidBank(),\n \"lipidmaps\": compound.LIPIDMAPS(),\n }\n alt_formulas = [compound.Formula2(), compound.Formula3(), compound.Formula4()]\n alt_formula = [\n form for form in alt_formulas if form != model_met.formula and form\n ]\n if alt_formula:\n model_met.annotation[\"glycan_formula\"] = alt_formula[0]\n\n # only add non-empty annotation\n model_met.annotation = {k: v for k, v in annotation.items() if v}\n\n for x in model.metabolites: # replace glycan formula by a sbml suitable format\n if x.id[0] == 'G':\n try:\n print(x.id)\n compartment = [y[0] for y in location_dict.items() if y[1] in x.id.split('_')[1]][0]\n xth_metabolite_id = x.id.split('_')[0] + '_' + compartment\n x.formula = metabolite_list[xth_metabolite_id].Formula4()\n except Exception:\n continue\n\n for x in model.metabolites: # eliminate potential discrepancies between metabolite id and reaction compounds ids\n if x.id.split(\"_\")[0] in metabolite_equivalent.keys():\n x.id = metabolite_list_general[metabolite_equivalent[x.id.split(\"_\")[0]]].ID1() + \"_\" + x.compartment.lower()\n\n def normalize_id(reac_id: str):\n iden, comp_desc = reac_id.split(\"_\")\n comp = location_dict[comp_desc.lower()]\n return f\"{iden}_{comp}\"\n\n from gpr.ast_gpr import sanitize_gpr\n # reactions\n reactions = [\n cobra.Reaction(\n normalize_id(iden), reac.Name(), \"\", 0 if reac.Termodyn() else -1000, 1000\n )\n for iden, reac in reaction_list.items()\n ]\n model.add_reactions(reactions)\n for iden, rxn in reaction_list.items():\n print(iden)\n reac = model.reactions.get_by_id(normalize_id(iden))\n rxn_id = rxn.ID\n if \"_\" in rxn_id:\n kegg_id = rxn_id.split(\"_\")[0]\n comp_id = \"_\" + location_dict[rxn_id.split(\"_\")[-1].lower()]\n else:\n kegg_id = rxn_id\n comp_id = \"\"\n try:\n # this may fail after serialization because these are overwritten\n # at runtime via a capturing lambda\n # substrates might come with positive coefficients\n substrates = [\n (-abs(convert_to_float(subs[0])), subs[1], subs[2]) for subs in rxn.Substrate()\n ]\n products = [\n (abs(convert_to_float(prod[0])), prod[1], prod[2]) for prod in rxn.Product()\n ]\n reac_compounds = products + substrates\n except Exception:\n # these metabolites do not have an specified comparment!\n # substrates might come with positive 
coefficients\n substrates = [(-abs(convert_to_float(subs[0])), subs[1], subs[2]) for subs in rxn.subs]\n products = [(abs(convert_to_float(prod[0])), prod[1], prod[2]) for prod in rxn.prods] \n reac_compounds = products + substrates\n metabolites = {\n met_mapping[met[2]]\n if met[2] in met_mapping\n else f\"{met[2]}{comp_id}\": float(met[0])\n for met in reac_compounds\n }\n # there may be some metabolites that were not passed in metabolite list\n new_mets = []\n for met_id in metabolites:\n if met_id not in model.metabolites:\n # first check if we have them in a different comparment\n met_root, met_comp = met_id.split(\"_\")[0], met_id.split(\"_\")[1]\n maybe_mets = [m for m in model.metabolites if m.id.startswith(met_root)]\n if maybe_mets:\n model_met = maybe_mets[0]\n LOGGER.warning(\n f\"Reactant '{met_root}' was added in a different comparment '{met_comp}'.\"\n )\n new_met = cobra.Metabolite(\n met_id,\n model_met.formula,\n model_met.name,\n model_met.charge,\n # might be a new compartment!\n met_comp,\n )\n new_met.annotation = model_met.annotation\n else:\n LOGGER.warning(\n f\"Reactant {met_id} was not found in any compartment. Creating new one!\"\n )\n new_met = cobra.Metabolite(\n met_id,\n compartment=met_comp,\n )\n new_mets.append(new_met)\n if new_mets:\n model.add_metabolites(new_mets)\n\n reac.add_metabolites(metabolites)\n ec = rxn.EC()\n reac.annotation = {\"kegg.reaction\": kegg_id, \"ec-code\": ec[0] if ec else \"\"}\n sgpr, gpr = rxn.GPR[0], rxn.GPR[1].replace(\"[\", \"\").replace(\"]\", \"\")\n if sgpr and sgpr != \"[]\":\n if \"or\" in gpr and \"and\" not in gpr:\n # GPRs scrapped from Kegg are added with ORs and the genes\n # may be repeated so they have to be deduplicated\n # TODO(carrascomj): should come from getGPR / getLocation\n gpr = \" or \".join({gene for gene in gpr.split(\" or \") if gene})\n reac.gene_reaction_rule = sanitize_gpr(gpr)\n reac.annotation[\"sGPR\"] = sgpr\n reac.id = kegg_id + comp_id\n # add a group per pathway\n model.add_groups([cobra.core.Group(group, group) for group in pathways])\n for group, members in pathways.items():\n # the members are the reactions in each pathway\n # TODO(carrascomj): reaction ids coming from paths are not in compartments\n model.groups.get_by_id(group).add_members(\n reduce(\n lambda x, y: x + y,\n [model.reactions.query(member) for member in members.split()],\n [],\n )\n )\n # gene annotation (genes were added with the GPRs)\n pat_enstp = re.compile(\"ENS[TP][0-9]+\")\n for iden, gene in gene_list.items():\n print(iden)\n\n if not iden in model.genes:\n LOGGER.warning(f\"Gene '{iden}' was not found. 
Creating new one!\")\n model_gene = cobra.Gene(\n iden,\n gene.Name().replace(\"[\", \"\").replace(\"]\", \"\").replace(\"-\", \"\"),\n )\n \n else:\n model_gene = model.genes.get_by_id(iden)\n model_gene.name = gene.Name().replace(\"[\", \"\").replace(\"]\", \"\").replace(\"-\", \"\")\n\n # there may be other ensembl genes, we have to query them\n ensembl = gene.Ensg()\n ensembl_genes = []\n if ensembl:\n try:\n retrieved_ids = str(\n urllib.request.urlopen(\n \"https://www.ensembl.org/Homo_sapiens/Gene/Summary?g=\" + ensembl\n ).read()\n ) # only add non-empty annotation\n ensembl_genes = [\n str(x) for x in list(set(pat_enstp.findall(retrieved_ids)))\n ] + [ensembl]\n except Exception:\n ensembl_genes = []\n \n model_gene.annotation = {\n k: v\n for k, v in {\n \"ensembl\": ensembl_genes,\n \"ncbigene\": gene.Entrez(),\n \"uniprot\": gene.Uniprot(),\n \"hgcn.symbol\": model_gene.name,\n }.items()\n if v\n }\n return model\n\n\nLOGGER = logging.getLogger(__name__)\nsession = setup_biocyc_session()\n\n#### Initial Parameters\nListOfPaths = \"files/human_kegg_pathways.txt\" # in the current folder\nModelCompounds = \"files/extra_compounds.txt\" # in the current folder\nExtraFormula = \"files/extra_formula.txt\"\nModelReactions = \"\"\nModelGenes = \"\"\nEnsblDB = \"files/ensembl\" # From Ensembl database: ensembl gene ID vs Entrez vs Name.\ntime = 20 # Time to download url: Parameter defined in function getHtml\nPath = (\n open(ListOfPaths, \"r\").read().split(\"\\n\")\n) # From analysis using metaboanalyst.\nCompound = (\n open(ModelCompounds, \"r\").read().split(\"\\n\")\n) \nEF = [_f for _f in open(ExtraFormula, \"r\").read().split(\"\\n\") if _f]\nOutput = \"model/Human_Database.xml\" # Output model\nModID = Output\nModName = Output\nvariablesFile = \"files/model_variables.pkl\" # File where the working environment is saved\nspecialCompounds = \"files/special_compounds.txt\" # File where we save the IDs of the compounds with a (group)n in their formula\nopen(specialCompounds, \"w\").close() # Erase or create the file\n\n\n# Dictionary with extra compounds that can be added to mass balance the metabolic reactions\nextra_compound = {'H':'C00080','H2O':'C00001','Fe':'C00023','Na':'C01330','Ca':'C00076','K':'C00238','F':'C00023','R':'C00000','X':'C0000X'}\n\n\n#### Initial List and dictionaries\nPathList = {}\nRxnList = {}\nMetList = {}\nGPRList = {}\nMetEquiv = {}\nPathNameRxn = {}\nRxnEquiv = {}\nPathIdent = []\nRxnIdent = []\nMetIdent = []\nGPRIdent = []\nRxnIDList = []\nMetIDList = []\n\n\n#### List and dictionaries for the subcelular location annotation\nCSL_ID = {\n\"extracellular\": \"e\",\n\"peroxisome\": \"x\",\n\"mitochondria\": \"m\",\n\"cytosol\": \"c\",\n\"lysosome\": \"l\",\n\"endoplasmic reticulum\": \"r\",\n\"golgi apparatus\": \"g\",\n\"nucleus\": \"n\",\n\"inner mitochondria\": \"i\",\n} # to keep the consistency between the DB and the initial compartments in Human1\nCSL_ID = compartment_file_to_dict()\nCSL_ID = dict((k.lower(), v.lower()) for k, v in CSL_ID.items())\nlistOfID = []\nLocVar = {}\nRxnList_CL = {}\nMetList_CL = {}\nRxnIdent_CL = []\nMetIdent_CL = []\nCompartment_CL = []\nRxnList_Subcel = []\n\n######### Pathways ###########\ni = 0\nwhile i < len(Path) - 1:\n\n #### Build network\n PathID = Path[i].split(\"\\t\")[0]\n PathName = Path[i].split(\"\\t\")[1]\n PathURL = \"http://www.kegg.jp/kegg-bin/download?entry=\" + PathID + \"&format=kgml\"\n PathReferer = \"https://www.kegg.jp/kegg-bin/show_pathway?\" + PathID\n PathList[PathID] = pathway(PathURL, time, 
PathID, PathReferer, PathName)\n if PathList[PathID].Compounds() or not PathList[PathID].Compounds():\n print(\n PathName\n + \": defined in human\"\n + \"(\"\n + str(i + 1)\n + \"/\"\n + str(len(Path) - 1)\n + \")\"\n )\n PathNameRxn[PathName] = \"\"\n j = 0\n while j < len(PathList[PathID].Reactions()):\n try:\n RxnID = PathList[PathID].Reactions()[j][0][0]\n if (\n not RxnID in RxnIdent and not RxnID in RxnEquiv\n ):\n\n ######### Define New Reaction ###########\n RxnIdent = RxnIdent + [RxnID]\n RxnURL = PathList[PathID].Reactions()[j][1]\n RxnTermDyn = PathList[PathID].Reactions()[j][0][1]\n RxnList[RxnID] = reaction(RxnURL, time, RxnID, PathName, RxnTermDyn)\n\n ######### Check if all the compounds in the jth reaction are in the compound list ###########\n RxnCmp = [x[2] for x in RxnList[RxnID].Substrate()] + [\n x[2] for x in RxnList[RxnID].Product()\n ]\n c = 0\n while c < len(RxnCmp):\n CompID = RxnCmp[c]\n if not CompID in MetIdent and not CompID in MetEquiv:\n MetIdent = MetIdent + [CompID]\n if CompID[0] == \"C\":\n CompURL = (\n \"http://www.kegg.jp/dbget-bin/www_bget?cpd:\" + CompID\n )\n if CompID[0] == \"G\":\n CompURL = (\n \"http://www.kegg.jp/dbget-bin/www_bget?gl:\" + CompID\n )\n MetList[CompID] = compound(\n CompURL, CompID, time, EF, specialCompounds\n )\n if MetList[CompID].ID1() != MetList[CompID].ID2():\n MetIdent[len(MetIdent) - 1] = MetList[CompID].ID1()\n MetEquiv[CompID] = MetList[CompID].ID1()\n MetList[MetList[CompID].ID1()] = copy.deepcopy(\n MetList[CompID]\n ) # Change the reference in the dictionary to account for the 1th ID\n del MetList[CompID]\n c = c + 1\n\n ######### Define Substrates, Products and New Compounds ###########\n # Evaluate the relation between substrates and products #\n Rxn = getRxncons(\n RxnList[RxnID],\n time,\n MetEquiv,\n MetList,\n MetIdent,\n EF,\n specialCompounds,\n )\n RxnList[RxnID] = copy.deepcopy(Rxn)\n \n # Check reaction ID\n if RxnID != RxnList[RxnID].ID:\n RxnEquiv[RxnID] = RxnList[\n RxnID\n ].ID # Glycan Reaction : Compound Reaction\n RxnIdent[len(RxnIdent) - 1] = RxnList[RxnID].ID\n RxnList[RxnList[RxnID].ID] = copy.deepcopy(\n RxnList[RxnID]\n ) # Change the reference in the dictionary to account for the new ID\n tmpID = RxnList[RxnList[RxnID].ID].ID\n del RxnList[RxnID] # remove the old reaction ID\n RxnID = tmpID\n\n ######### Mass Balance the reaction #########\n ithRxn = RxnList[RxnID]\n eq , mb_test = RxnParam2Eq(ithRxn, MetList, MetEquiv)\n LibIni = WrapRxnSubsProdParam(ithRxn, MetList, MetEquiv)\n if mb_test != 0:\n IthRxnMB = mass_balance(eq, RxnID)\n if IthRxnMB[4]: # If new compounds have to be added to mass balance the reactions, then check if they need to be added to the network as compounds\n for x in IthRxnMB[4]:\n if not extra_compound[x[0]] in MetIdent and not extra_compound[x[0]] in MetEquiv:\n MetIdent = MetIdent + [extra_compound[x[0]]]\n if not x[0] in 'R' and not x[0] in 'X':\n extra_url = 'https://www.genome.jp/entry/'+extra_compound[x[0]]\n MetList[extra_compound[x[0]]] = compound(extra_url,extra_compound[x[0]],time,EF,specialCompounds)\n else:\n MetList[extra_compound[x[0]]] = add_extra_compound (x[0],extra_compound, time, EF, specialCompounds)\n else: # if the reaction cannot be mass balanced all the stoichimetric coef are assumed to be like in the original reaction \n IthRxnMB = ([float(x[0]) for x in ithRxn.Substrate()],[float(x[0]) for x in ithRxn.Product()],0,0,0,0,[MetEquiv[x[2]] if x[2] in MetEquiv else x[2] for x in ithRxn.Substrate()], [MetEquiv[x[2]] if x[2] in MetEquiv else x[2] 
for x in ithRxn.Product()],'','',0)\n LibEnd = UnwrapRxnSubsProdParam(IthRxnMB, LibIni, IthRxnMB)\n\n # Add metabolites and stc coeff to reaction\n S = list()\n for x in LibEnd[0]:\n S.append(\n [\n str(LibEnd[0][x][0]),\n (\n \"http://www.genome.jp/dbget-bin/www_bget?cpd:\"\n + LibEnd[0][x][1]\n ),\n LibEnd[0][x][1],\n ]\n )\n P = list()\n for x in LibEnd[1]:\n P.append(\n [\n str(LibEnd[1][x][0]),\n (\n \"http://www.genome.jp/dbget-bin/www_bget?cpd:\"\n + LibEnd[1][x][1]\n ),\n LibEnd[1][x][1],\n ]\n )\n S2 = copy.deepcopy(S)\n P2 = copy.deepcopy(P)\n RxnList[RxnID].Substrate = lambda: S2\n RxnList[RxnID].Product = lambda: P2\n RxnList[RxnID].SetSubstrate = lambda: S2\n RxnList[RxnID].SetProduct = lambda: P2\n \n RxnList[RxnID].subs = S2\n RxnList[RxnID].prods = P2\n if not RxnID in PathNameRxn.get(PathName):\n PathNameRxn[PathName] += RxnID + \" \"\n\n ######### Define New GPR ###########\n tmpGPR = ()\n tmpSC = ()\n for x in RxnList[RxnID].EC():\n if x not in GPRIdent:\n GPRIdent = GPRIdent + [x]\n GPRList[x] = gpr(x, session)\n try:\n tmpGPR = tmpGPR + GPRList[x].GprSubcell()[0:2]\n tmpSC = tmpSC + GPRList[x].GprSubcell()[2:4]\n except:\n tmpGPR = tmpGPR + GPRList[x].GprSubcell[0:2]\n tmpSC = tmpSC + GPRList[x].GprSubcell[2:4] \n # Reorganize S-GPRs and GPRs based on their specific location\n tmpSC2 = [dict(),dict()]\n reactio_compartment_list = list(set([x.strip() for x in str([list(x.keys()) for x in tmpSC]).replace('[','').replace(']','').replace('\\'','').split(',')]))\n for x in reactio_compartment_list:\n xth_tmp_gpr = [y for y in tmpSC if x in y.keys()]\n tmp_xth_sgpr = \"\"\n tmp_xth_gpr = \"\"\n for y in range(int(len(xth_tmp_gpr)/2)):\n if not re.findall('^\\[\\]$', xth_tmp_gpr[y+y][x]):\n tmp_xth_sgpr += xth_tmp_gpr[y+y][x]\n if not re.findall('^\\[\\]$', xth_tmp_gpr[y+y+1][x]): \n tmp_xth_gpr += xth_tmp_gpr[y+y+1][x]\n tmp_xth_sgpr = str(set(tmp_xth_sgpr.replace('][', '] or [').split(\" or \"))).replace('\\'', '').replace('{', '').replace('}', '').replace(',', ' or')\n tmp_xth_gpr = str(set(tmp_xth_gpr.replace('][', '] or [').split(\" or \"))).replace('\\'', '').replace('{', '').replace('}', '').replace(',', ' or')\n tmpSC2[0][x] = tmp_xth_sgpr\n tmpSC2[1][x] = tmp_xth_gpr\n\n RxnList[RxnID].GPR = tmpGPR\n RxnList[RxnID].Subcel = tmpSC2\n\n ######### Expand the annotations based on the cellular location ###########\n Compartment_CL, rxn_cl, comp_cl = rxnSubcel(\n RxnList[RxnID],\n RxnList_CL,\n MetList_CL,\n RxnIdent_CL,\n MetIdent_CL,\n Compartment_CL,\n RxnList,\n MetList,\n MetEquiv,\n )\n \n RxnIdent_CL = RxnIdent_CL + rxn_cl\n MetIdent_CL = MetIdent_CL + comp_cl\n\n \n print(\n \"Reaction(\"\n + str(j + 1)\n + \"/\"\n + str(len(PathList[PathID].Reactions()))\n + \")_Pathway\"\n + \"(\"\n + str(i + 1)\n + \"/\"\n + str(len(Path) - 1)\n + \")\"\n )\n except:\n continue\n j = j + 1\n else:\n print(\n PathName\n + \": not defined in human\"\n + \"(\"\n + str(i + 1)\n + \"/\"\n + str(len(Path) - 1)\n + \")\"\n )\n\n i = i + 1\n\n\n\n######### Genes ###########\nGeneList = {}\nGeneIdent = []\ng = 0\nwhile g < len(GPRList):\n if GPRIdent[g] in GPRList.keys() and GPRList[GPRIdent[g]].GprSubcell():\n gene_matches = re.findall(\n \"([A-Za-z0-9\\-]+)\",\n GPRList[GPRIdent[g]].GprSubcell()[1].replace(\"and\", \"\").replace(\"or\", \"\"),\n )\n z = 0\n while z < len(gene_matches):\n if not gene_matches[z] in GeneIdent:\n GeneIdent = GeneIdent + [gene_matches[z]]\n GeneList[gene_matches[z]] = gene(gene_matches[z], EnsblDB)\n z = z + 1\n g = g + 1\n\nwith 
open(\"files/pre_sbml_raw.pk\", \"wb\") as f:\n dill.dump(\n {\n \"name\": ModName,\n \"id\": ModID,\n \"mets\": MetList,\n \"mets_cl\": MetList_CL,\n \"met_equiv\": MetEquiv,\n \"reactions\": RxnList,\n \"reactions_cl\": RxnList_CL,\n \"genes\": GeneList,\n \"pathways\": PathNameRxn,\n \"loc\": LocVar,\n },\n f,\n )\n\n\nCompartment_CL = sorted(Compartment_CL)\n\nlistOfID = list(CSL_ID.values()) # abbr. id\nLipidMasterlistOfID = []\n\nfor CSL in Compartment_CL:\n CSL2 = re.sub(r'[^A-Za-z0-9 ]+', '', CSL)\n ModMaster = list(set(LipidMasterlistOfID + listOfID))\n if CSL_ID.get(CSL):\n ID = CSL_ID.get(CSL)\n elif len(re.sub(\" $\", \"\", re.sub(\"^ \", \"\", CSL)).split(\" \")) > 1:\n ID = (CSL2.split(\" \")[0][0] + CSL2.split(\" \")[1][0]).lower().replace(\" \", \"\")\n else:\n if len(CSL.split(\" \")) > 1:\n ID = CSL2[0:3].lower().replace(\" \", \"\")\n else:\n ID = CSL2[0:2].lower().replace(\" \", \"\")\n if not CSL_ID.get(CSL) and ID in ModMaster:\n r = re.compile(ID)\n ID = ID + str(len(list(filter(r.match, ModMaster))) + 1)\n LocVar[CSL] = \"\"\n LocVar[CSL] += ID\n listOfID.append(ID)\n LipidMasterlistOfID.append(ID)\n\n\nwith open(\"files/pre_sbml_pos_comp.pk\", \"wb\") as f:\n dill.dump(\n {\n \"name\": ModName,\n \"id\": ModID,\n \"mets\": MetList,\n \"mets_cl\": MetList_CL,\n \"met_equiv\": MetEquiv,\n \"reactions\": RxnList,\n \"reactions_cl\": RxnList_CL,\n \"genes\": GeneList,\n \"pathways\": PathNameRxn,\n \"loc\": LocVar,\n },\n f,\n )\n\nwith open('files/pre_sbml_pos_comp.pk', 'rb') as f:\n data = pickle.load(f)\n#with open('files/pre_sbml_pos_comp.pk', 'rb') as f:\n# data = f.read()\n\nmodel = cobra_reconstruction(\n ModName, ModID, MetList_CL, RxnList_CL, GeneList, PathNameRxn, LocVar, MetEquiv, MetList\n)\ncobra.io.write_sbml_model(model, Output)\n\n\n\n","repo_name":"biosustain/THG","sub_path":"generate_data-base/generate_db.py","file_name":"generate_db.py","file_ext":"py","file_size_in_byte":26196,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"42226953502","text":"import argparse\nimport pandas as pd\nimport os\n\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines import PPO2\nfrom util import evaluate_train_set, evaluate_test_set, SRC_DIR\nfrom custom_policy import CustomLSTMPolicy\nfrom env import LSTM_Env\n\n\ndef make_env(seed, df, serial):\n def _init():\n env = LSTM_Env(df, serial)\n env.seed(seed)\n return env\n return _init\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--mode\", type=str,\n default=\"train\",\n help=\"specific mode to run our model, available options are: train, test\")\n parser.add_argument(\"--test_mode\", type=str,\n default=\"single\",\n help=\"specific mode to run test, available options are: single, double, \"\n \"single means test with trained dataset. 
Double means test with both train and test data sets\")\n    parser.add_argument(\"--model\", type=str,\n                        default=\"lstm\",\n                        help=\"specific model to run, available models are: mlp, lstm\")\n    parser.add_argument(\"--render\", type=str,\n                        default=\"verbose\",\n                        help=\"specific display mode, available modes are: verbose, human\")\n    args = parser.parse_args()\n\n    # read data and init environments\n    train_df_m15 = pd.read_csv(os.path.join(SRC_DIR, \"..\", \"data\", \"EURUSD_m15_train.csv\"), index_col=0)\n    train_df_h1 = pd.read_csv(os.path.join(SRC_DIR, \"..\", \"data\", \"EURUSD_h1_train.csv\"), index_col=0)\n    test_df_m15 = pd.read_csv(os.path.join(SRC_DIR, \"..\", \"data\", \"EURUSD_m15_test.csv\"), index_col=0)\n    test_df_h1 = pd.read_csv(os.path.join(SRC_DIR, \"..\", \"data\", \"EURUSD_h1_test.csv\"), index_col=0)\n    # The algorithms require a vectorized environment to run\n    serial = False\n    if args.mode == \"test\":\n        serial = True\n\n    if args.model == 'mlp':\n        train_env = DummyVecEnv([lambda: LSTM_Env(train_df_m15, train_df_h1, serial)])\n        test_env = DummyVecEnv([lambda: LSTM_Env(test_df_m15, test_df_h1, serial)])\n        model = PPO2(MlpPolicy, train_env, gamma=0.95, verbose=1, tensorboard_log=os.path.join(SRC_DIR, \"..\", \"logs\"))\n    else:\n        train_env = DummyVecEnv([lambda: LSTM_Env(train_df_m15, train_df_h1, serial)])\n        test_env = DummyVecEnv([lambda: LSTM_Env(test_df_m15, test_df_h1, serial)])\n\n        model = PPO2(CustomLSTMPolicy,\n                     train_env,\n                     gamma=0.95,\n                     verbose=1,\n                     tensorboard_log=os.path.join(SRC_DIR, \"..\", \"logs\"),\n                     nminibatches=1,\n                     n_steps=16)\n\n    save_path = os.path.join(SRC_DIR, \"..\", \"models\", args.model + \"_model\")\n\n    render_mode = args.render\n\n    if args.mode == \"train\":\n        print(\"Training started\")\n        model.learn(total_timesteps=100000, seed=69)\n        model.save(save_path)\n        print(\"Training's done, saved model to: \", save_path)\n    else:\n        # load pre-trained model\n        print(\"Loading model at: \", save_path)\n        model = PPO2.load(save_path)\n        print(\"Start testing on train set\")\n        evaluate_train_set(model, train_env, 1000, render_mode)\n\n        if args.test_mode == 'double':\n            print(\"Start testing on test set\")\n            evaluate_test_set(model, test_env, 1000, render_mode)\n\n        print(\"Testing's complete\")\n\n\n\n\n\n\n","repo_name":"vonhathanh/ForexBot","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"7594338770","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport utils\n\n\ndef run():\n    # List of species we want a map for\n    SPECIES = utils.get_analysis_species()\n\n    # Load bird sightings\n    df = utils.load_ebird_data(filter=True, region='GE-MM')\n\n    # Get a list of checklists and species\n    checklists = list(df['SAMPLING EVENT IDENTIFIER'].unique())\n    species = list(df['SCIENTIFIC NAME'].unique())\n\n    # Build these into a matrix\n    df_check = pd.DataFrame(index=checklists, columns=species)\n    df_check = df_check.fillna(0)\n\n    # Fill in the matrix cells if species appears in checklist\n    for index, row in df.iterrows():\n        spec = row['SCIENTIFIC NAME']\n        chck = row['SAMPLING EVENT IDENTIFIER']\n        df_check.loc[chck, spec] = 1\n\n    # Create figure with two subplots\n    fig, ax = plt.subplots(1, 2, figsize=(20,12))\n\n    for index, bird in enumerate(SPECIES):\n        col_bird = df_check[bird]\n        col_other = df_check.copy().drop(bird, axis=1)\n        similarities = []\n\n        # Find 
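# --- Illustrative aside: a minimal evaluation loop for a stable-baselines (v2) PPO2
# model of the kind trained above, shown on CartPole instead of the repo's LSTM_Env so
# it runs standalone. Assumes gym and stable-baselines 2.x (TensorFlow 1) are installed.
import gym
from stable_baselines import PPO2
from stable_baselines.common.vec_env import DummyVecEnv

env = DummyVecEnv([lambda: gym.make("CartPole-v1")])
model = PPO2("MlpPolicy", env, verbose=0)
model.learn(total_timesteps=1000)             # tiny budget, just to have weights

obs = env.reset()
total_reward = 0.0
for _ in range(200):
    action, _states = model.predict(obs)      # same call pattern the evaluate_* helpers rely on
    obs, rewards, dones, infos = env.step(action)
    total_reward += float(rewards[0])
print("accumulated reward:", total_reward)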
similarities\n        for col in col_other:\n            similarities.append((col,\n                cosine_similarity(col_bird.values.reshape(1,-1), col_other[col].values.reshape(1,-1))[0][0]))\n        \n        # Sort by largest to smallest\n        similarities.sort(key=lambda x: -x[1])\n\n        # Get top 20\n        similarities = similarities[:20]\n        df_sim = pd.DataFrame(similarities, columns=['Scientific Name', 'Similarity'])\n\n        # Find common name\n        df_sim['Common Name'] = df_sim['Scientific Name'].apply(lambda x: df.loc[df['SCIENTIFIC NAME'] == x, 'COMMON NAME'].max())\n        \n        # Invert DataFrame\n        df_sim = df_sim.iloc[::-1]\n\n        # Plot bar chart\n        df_sim.plot(kind='barh', ax=ax[index], y='Similarity', x='Common Name')\n        ax[index].set_title(f'{bird} Similars')\n\n    # Save image of similar birds\n    fig.savefig('./fig/cosine_similars.png', bbox_inches='tight')\n\n\nif __name__ == '__main__':\n    run()","repo_name":"CurtisThompson/caucasus-birds-georgia","sub_path":"cosine_species.py","file_name":"cosine_species.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"13306943817","text":"from typing import Optional\n\nfrom .base import BaseAssistant, Context, Response, logger\nfrom core.dialog.names import SOURCES, REQUEST_TYPES\nfrom models.yandex import YandexRequest, YandexResponse\nfrom core.utils.text import encode_uri\n\n\nclass AliceAssistant(BaseAssistant):\n    SOURCE = SOURCES.ALICE\n\n    def __init__(self, native_state: bool = False, **kwargs):\n        super(AliceAssistant, self).__init__(**kwargs)\n        self.native_state = native_state\n\n    def make_context(self, message: dict, **kwargs) -> Context:\n        metadata = {}\n\n        if set(message.keys()) == {'body'}:\n            message = message['body']\n        try:\n            sess = message['session']\n        except KeyError:\n            raise KeyError(\n                f'Key \"session\" not found in {list(message.keys())}.')\n        if sess.get('user', {}).get('user_id'):\n            user_id = self.SOURCE + '_auth__' + sess['user']['user_id']\n        else:\n            user_id = self.SOURCE + '__' + sess['user_id']\n        try:\n            message_text = message['request'].get('command', '')\n        except KeyError:\n            raise KeyError(\n                f'Key \"request\" not found in {list(message.keys())}.')\n        metadata['new_session'] = message.get('session', {}).get('new', False)\n\n        ctx = Context(\n            user_object=None,\n            message_text=message_text,\n            metadata=metadata,\n            user_id=user_id,\n            session_id=sess.get('session_id'),\n            source=self.SOURCE,\n            raw_message=message,\n        )\n\n        ctx.request_type = message['request'].get(\n            'type', REQUEST_TYPES.SIMPLE_UTTERANCE)\n        ctx.payload = message['request'].get('payload', {})\n        try:\n            ctx.yandex = YandexRequest.from_dict(message)\n        except Exception as e:\n            logger.warning(\n                'Failed to deserialize the Yandex request: '\n                'got exception \"{}\".'.format(e))\n\n        return ctx\n\n    def make_response(self, response: Response,\n                      original_message: dict = None, **kwargs):\n        result = {\n            \"version\": original_message['version'],\n            \"response\": {\n                \"end_session\": response.has_exit_command,\n                \"text\": response.text\n            }\n        }\n        if self.native_state and response.updated_user_object:\n            if self.native_state == 'session':\n                result['session_state'] = response.updated_user_object\n            elif self.native_state == 'application':\n                result['application_state'] = response.updated_user_object\n            elif self.native_state == 'user':\n                if original_message.get('session') and \\\n                        'user' not in original_message['session']:\n                    result['application_state'] = response.updated_user_object\n                result['user_state_update'] = response.updated_user_object\n            
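# --- Illustrative aside: the loop above computes one cosine similarity per remaining
# column; sklearn can produce the whole species-by-species similarity matrix in a single
# call on the transposed checklist matrix. Self-contained sketch with synthetic data.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

rng = np.random.default_rng(0)
checklist_matrix = rng.integers(0, 2, size=(50, 6))   # 50 checklists x 6 species
sims = cosine_similarity(checklist_matrix.T)          # 6 x 6, species vs species
target = 0                                            # index of the focal species
order = np.argsort(-sims[target])
print([(int(i), round(float(sims[target, i]), 3)) for i in order if i != target][:3])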
else:\n if 'session' in response.updated_user_object:\n result['session_state'] = response.updated_user_object['session']\n if 'application' in response.updated_user_object:\n result['application_state'] = response.updated_user_object['application']\n if 'user' in response.updated_user_object:\n result['user_state_update'] = response.updated_user_object['user']\n if response.raw_response is not None:\n if isinstance(response.raw_response, YandexResponse):\n result = response.raw_response.to_dict()\n else:\n result['response'] = response.raw_response\n return result\n buttons = response.links or []\n for button in buttons:\n if 'url' in button:\n button['url'] = encode_uri(button['url'])\n if response.suggests:\n buttons = buttons + [{'title': suggest}\n for suggest in response.suggests]\n for button in buttons:\n if not isinstance(button.get('hide'), bool):\n button['hide'] = True\n result['response']['buttons'] = buttons\n return result\n\n def uses_native_state(self, context: Context) -> bool:\n return bool(self.native_state)\n\n def get_native_state(self, context: Context) -> Optional[dict]:\n if not self.native_state:\n return\n message = context.raw_message or {}\n state = message.get('state', {})\n\n if self.native_state == 'session':\n user_object = state.get('session')\n elif self.native_state == 'user':\n user_object = state.get('user')\n if message.get('session') and 'user' not in message['session']:\n user_object = state.get('application')\n elif self.native_state == 'application':\n user_object = state.get('application')\n else:\n user_object = state\n return user_object\n","repo_name":"sergei-tolshin/YP_films","sub_path":"assistant_nlp/src/assistants/alice.py","file_name":"alice.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"74296295754","text":"import streamlit as st\r\nfrom streamlit_webrtc import webrtc_streamer\r\nimport cv2\r\nimport av\r\nfrom insightface.app import FaceAnalysis\r\nimport time\r\n\r\n@st.cache(allow_output_mutation=True)\r\ndef load_insightface():\r\n app = FaceAnalysis(allowed_modules=['detection'])\r\n app.prepare(ctx_id=0, det_size=(640, 640))\r\n return app\r\n\r\nINSIGHTFACE = load_insightface()\r\nst.title('timing test')\r\n\r\ndef callback(frame):\r\n img = frame.to_ndarray(format=\"rgb24\")\r\n\r\n global face_detection_time_arr\r\n\r\n out_img = img.copy()\r\n\r\n start_time = time.time()\r\n faces = INSIGHTFACE.get(out_img)\r\n end_time = time.time()\r\n\r\n face_detection_time_arr.append(end_time - start_time)\r\n \r\n for face in faces:\r\n # Blurring\r\n x1 = int(face['bbox'][0])\r\n y1 = int(face['bbox'][1])\r\n x2 = int(face['bbox'][2])\r\n y2 = int(face['bbox'][3])\r\n\r\n roi = out_img[y1:y2, x1:x2]\r\n roi = cv2.GaussianBlur(roi, (23, 23), 30)\r\n out_img[y1:y2, x1:x2] = roi\r\n \r\n mean_face_detecion_time = sum(face_detection_time_arr) / len(face_detection_time_arr)\r\n print('-'*50)\r\n print(\"Face Detection Time:\", mean_face_detecion_time)\r\n print('-'*50)\r\n\r\n return av.VideoFrame.from_ndarray(out_img, format=\"rgb24\")\r\n\r\nctx = webrtc_streamer(\r\n key=\"real-time\",\r\n video_frame_callback=callback,\r\n media_stream_constraints={\r\n \"video\": True,\r\n \"audio\": False\r\n },\r\n # For Deploying\r\n rtc_configuration={\r\n \"iceServers\": [\r\n {\r\n \"urls\": \"stun:openrelay.metered.ca:80\",\r\n },\r\n {\r\n \"urls\": \"turn:openrelay.metered.ca:80\",\r\n \"username\": \"openrelayproject\",\r\n 
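# --- Illustrative aside: the minimal shape of a Yandex.Dialogs webhook payload that
# make_context above consumes, plus the user-id convention it derives. Standalone
# sketch using plain dict logic only; all field values here are made up.
message = {
    "version": "1.0",
    "session": {"session_id": "s-1", "user_id": "device-42", "new": True},
    "request": {"command": "hello", "type": "SimpleUtterance"},
}
sess = message["session"]
if sess.get("user", {}).get("user_id"):
    user_id = "alice_auth__" + sess["user"]["user_id"]   # authorized Yandex account
else:
    user_id = "alice__" + sess["user_id"]                # anonymous device id
print(user_id, message["request"].get("command", ""))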
\"credential\": \"openrelayproject\",\r\n },\r\n {\r\n \"urls\": \"turn:openrelay.metered.ca:443\",\r\n \"username\": \"openrelayproject\",\r\n \"credential\": \"openrelayproject\",\r\n },\r\n {\r\n \"urls\": \"turn:openrelay.metered.ca:443?transport=tcp\",\r\n \"username\": \"openrelayproject\",\r\n \"credential\": \"openrelayproject\",\r\n },\r\n ]\r\n }\r\n)\r\n\r\nif ctx.state.playing:\r\n face_detection_time_arr = []","repo_name":"Ishneet0710/metric-test","sub_path":"insightface_app.py","file_name":"insightface_app.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16350608716","text":"import csv\r\nimport json\r\nimport sys\r\nimport psycopg2\r\n\r\n# Gets credentials for news database from JSON file\r\ndef getDBCredentials(jsonCreds):\r\n\tcreds = json.load(jsonCreds)\r\n\treturn creds\r\n\r\ndef connectDB(creds):\r\n\thost = creds[\"host\"]\r\n\tusername = creds[\"user\"]\r\n\tpassword = creds[\"passwd\"]\r\n\tdbName = creds[\"db\"]\r\n\tdb = psycopg2.connect(host=host, user=username, password=password, dbname=dbName)\r\n\treturn db\r\n\r\ndef getCSVData(dataFile):\r\n\ttry:\r\n\t\tdata = open(dataFile)\r\n\t\treturn data\r\n\texcept:\r\n\t\tsys.stderr.write(\"Error: Invalid data file\\n\")\r\n\t\tsys.exit(1) \r\n\r\n# Loads article\r\ndef loadArticleFeatureData(articleData, db):\r\n\tcursor = db.cursor()\r\n\t# Creates table\r\n\ttableCreateSQL = open(\"sql/articleFeatures.sql\").read()\r\n\tcursor.execute(tableCreateSQL)\r\n\t# Loads CSV data into table\r\n\tcursor.copy_from(articleData, \"articleFeatures\", sep=\",\")\r\n\t# Creates indices\r\n\tindexCreateSQL = open(\"sql/articleFeatures2.sql\").read()\r\n\tcursor.execute(indexCreateSQL)\r\n\r\n\tdb.commit()\r\n\r\ndef loadArticleMetadata(articleData, db):\r\n\tcursor = db.cursor()\r\n\t# Creates table\r\n\ttableCreateSQL = open(\"sql/articleMetadata.sql\").read()\r\n\tcursor.execute(tableCreateSQL)\r\n\t# Loads CSV data into table\r\n\tcursor.copy_from(articleData, \"articleMetadata\", sep=\",\")\r\n\r\n\tdb.commit()\r\n\r\n\r\ndef loadTopSourcePhrasesData(topSourcePhrasesData, db):\r\n\tcursor = db.cursor()\r\n\t# Creates table\r\n\ttableCreateSQL = open(\"sql/topSourcePhrases.sql\").read()\r\n\tcursor.execute(tableCreateSQL)\r\n\t# Loads CSV data into table\r\n\tcursor.copy_from(topSourcePhrasesData, \"topSourcePhrases\", sep=\",\")\r\n\r\n\tdb.commit()\r\n\r\ndef loadSourceMetadata(sourceMetadata, db):\r\n\tcursor = db.cursor()\r\n\t# Creates table\r\n\ttableCreateSQL = open(\"sql/sourceMetadata.sql\").read()\r\n\tcursor.execute(tableCreateSQL)\r\n\t# Loads CSV data into table\r\n\tcursor.copy_from(sourceMetadata, \"sourceMetadata\", sep=\",\")\r\n\r\n\tdb.commit()\r\n\r\nif __name__ == \"__main__\":\r\n\tcredsFile = \"../../dbCredentials.json\"\r\n\r\n\t# Check that credentials and article data files exist\r\n\tjsonCreds = \"\"\r\n\ttry:\r\n\t\tjsonCreds = open(credsFile)\r\n\texcept:\r\n\t\tsys.stderr.write(\"Error: Invalid database credentials file\\n\")\r\n\t\tsys.exit(1)\r\n\r\n\tcreds = getDBCredentials(jsonCreds)\r\n\tdb = connectDB(creds)\r\n\r\n\tarticleFeatureData = getCSVData(\"data/articleFeatures.csv\")\r\n\tloadArticleFeatureData(articleFeatureData, db)\r\n\r\n\tarticleMetadata = getCSVData(\"data/articleMetadata.csv\")\r\n\tloadArticleMetadata(articleMetadata, db)\r\n\t\r\n\ttopSourcePhrasesData = getCSVData(\"data/topSourcePhrases.csv\")\r\n\tloadTopSourcePhrasesData(topSourcePhrasesData, 
db)\r\n\r\n\tsourceMetadata = getCSVData(\"data/sourceMetadata.csv\")\r\n\tloadSourceMetadata(sourceMetadata, db)\r\n\r\n\r\n\t\r\n","repo_name":"BenjaminDHorne/The-NELA-Toolkit","sub_path":"dbSetup/loadData.py","file_name":"loadData.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"28"} +{"seq_id":"599061018","text":"import requests\nimport unittest\nfrom unittest import mock\nimport json\nfrom app_config_tests import training_service_config, mlflow_models_mapping, service_config\nfrom prediction.prediction_service import PredictionService, TrainingResponse, JobStatus\n\n\nclass MockResponse:\n def __init__(self, json_data, status_code):\n self.json_data = json_data\n self.status_code = status_code\n self.content = json.dumps(self.json_data).encode(\"utf-8\")\n\n def json(self):\n return self.json_data\n\n\nclass PredictionServiceTest(unittest.TestCase):\n prediction_service = PredictionService(training_service_config, mlflow_models_mapping, service_config)\n\n # test check_training_status function\n def mocked_failed_training_response(*args, **kwargs):\n if args[0] == 'http://training-host:3001/train/run_id':\n raise requests.exceptions.ConnectionError\n return MockResponse(None, 404)\n\n def mocked_success_training_response(*args, **kwargs):\n if args[0] == 'http://training-host:3001/train/run_id':\n return MockResponse({'state': 'completed', 'message': None}, 200)\n return MockResponse(None, 404)\n\n @mock.patch('requests.get', side_effect=mocked_failed_training_response)\n def test_check_training_status_failure(self, mock):\n assert self.prediction_service.check_training_status(\"run_id\").status == JobStatus.ERROR.value\n\n @mock.patch('requests.get', side_effect=mocked_success_training_response)\n def test_check_training_status_success(self, mock):\n run_id = \"run_id\"\n assert self.prediction_service.check_training_status(run_id).run_id == run_id\n assert self.prediction_service.check_training_status(run_id).status == JobStatus.COMPLETED.value\n assert self.prediction_service.check_training_status(run_id).status_message is None\n\n\n # test get_prediction function\n def mocked_prediction_response(*args, **kwargs):\n if args[0] == 'http://model-handler-host:3002/predict':\n return MockResponse( {'status': JobStatus.COMPLETED.value,\n 'statusMessage': None, \"result\": [\n {\n \"date\": \"2017-04-29T00:00:00.000000\",\n \"salesVolume\": 101.89271519246286\n },\n {\n \"date\": \"2017-05-01T00:00:00.000000\",\n \"salesVolume\": 102.02099330778664\n },\n {\n \"date\": \"2017-05-02T00:00:00.000000\",\n \"salesVolume\": 111.72246771092415\n }], \"probability\": {\n \"binsEdges\": [\n 116990.02301757515,\n 124455.5402428202,\n 131921.05746806526,\n 139386.57469331034\n ],\n \"probabilities\": [\n 0.05,\n 0.74,\n 0.21\n ]\n }}, 200)\n return MockResponse(None, 404)\n\n # it should return only the relevant parameters\n @mock.patch('requests.post', side_effect=mocked_prediction_response)\n def test_get_prediction(self, mock):\n assert self.prediction_service.get_prediction({'param1': 'value'},\n 'model-type') == {'status': JobStatus.COMPLETED.value,\n 'statusMessage': None, \"result\": [\n {\n \"date\": \"2017-04-29T00:00:00.000000\",\n \"salesVolume\": 101.89271519246286\n },\n {\n \"date\": \"2017-05-01T00:00:00.000000\",\n \"salesVolume\": 102.02099330778664\n },\n {\n \"date\": \"2017-05-02T00:00:00.000000\",\n \"salesVolume\": 111.72246771092415\n }], \"probability\": {\n \"binsEdges\": [\n 
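# --- Illustrative aside: the same copy_from bulk-load pattern with context managers,
# so the CSV handle and cursor are closed and the transaction committed automatically.
# Connection parameters and table name here are hypothetical; this assumes a reachable
# PostgreSQL instance.
import psycopg2

def load_csv(path, table):
    with psycopg2.connect(host="localhost", dbname="news",
                          user="loader", password="secret") as conn:
        with conn.cursor() as cur, open(path) as data:
            cur.copy_from(data, table, sep=",")  # COPY is far faster than row INSERTs
    # exiting the connection block commits the transaction

# load_csv("data/articleFeatures.csv", "articlefeatures")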
116990.02301757515,\n                                                                                                      124455.5402428202,\n                                                                                                      131921.05746806526,\n                                                                                                      139386.57469331034\n                                                                                                  ],\n                                                                                                  \"probabilities\": [\n                                                                                                      0.05,\n                                                                                                      0.74,\n                                                                                                      0.21\n                                                                                                  ]\n                                                                                              }}\n","repo_name":"microsoft/OnDemandMLflowTrainAndServe","sub_path":"prediction/tests/test_prediction_service.py","file_name":"test_prediction_service.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"28"}
+{"seq_id":"24763715605","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.views.generic.edit import FormView\n\nfrom django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom studentsdb.settings import ADMIN_EMAIL\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\n\n\nclass ContactForm(forms.Form):\n    contact_name = forms.CharField(required=True, label=u\"Your Name\")\n    contact_email = forms.EmailField(required=True, label=u\"Your Email Address\")\n    text = forms.CharField(\n        required=True,\n        widget=forms.Textarea,\n        label=u\"Message Text\"\n    )\n    \n    def __init__(self, *args, **kwargs):\n        # call original initializer\n        super(ContactForm, self).__init__(*args, **kwargs)\n        \n        # this helper object allows us to customize form\n        self.helper = FormHelper()\n        \n        # form tag attributes\n        self.helper.form_class = 'form-horizontal'\n        self.helper.form_method = 'post'\n\n        \n        # twitter bootstrap styles\n        self.helper.help_text_inline = True\n        self.helper.html5_required = True\n        self.helper.label_class = 'col-sm-2 control-label'\n        self.helper.field_class = 'col-sm-10'\n        \n        # form buttons\n        self.helper.add_input(Submit('send_button', u'Send'))\n        \nclass ContactAdmin(FormView):\n    template_name = 'contact_admin/contact.html'\n    form_class = ContactForm\n    \n    \n    def get_success_url(self):\n        return u'%s?status_message=Message sent successfully.' % reverse('contact') \n\n    def form_valid(self, form):\n        text = form.cleaned_data['text']\n        contact_name = form.cleaned_data['contact_name']\n        contact_email = form.cleaned_data['contact_email']\n        \n        try:\n            text += (u'\\n E-mail sent from - %s, %s') % (contact_name, contact_email)\n            send_mail(contact_name, text, contact_email, [ADMIN_EMAIL])\n            \n        except Exception:\n            return HttpResponseRedirect(\n                u'%s?status_message=An unexpected error occurred while sending the message.' 
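# --- Illustrative aside: the core of the mocking technique used in the tests above,
# reduced to a self-contained test: patch requests.get so no HTTP happens, then assert
# on the parsed result. The URL and payload here are made up for the sketch.
import unittest
from unittest import mock
import requests

def fetch_status(run_id):
    resp = requests.get(f"http://training-host:3001/train/{run_id}")
    return resp.json()["state"]

class FetchStatusTest(unittest.TestCase):
    @mock.patch("requests.get")
    def test_completed(self, mocked_get):
        mocked_get.return_value.json.return_value = {"state": "completed"}
        self.assertEqual(fetch_status("run_id"), "completed")

if __name__ == "__main__":
    unittest.main()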
%\n                reverse('contact'))    \n        \n        \n        return super(ContactAdmin, self).form_valid(form)\n    \n","repo_name":"DenysRakhuba/Students-Database","sub_path":"students/views/contact_admin.py","file_name":"contact_admin.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"5809975603","text":"\nfrom huobi.client.account import AccountClient\nfrom huobi.constant import *\n\n# get accounts\naccount_client = AccountClient(api_key=g_api_key,\n                               secret_key=g_secret_key)\n\nret = account_client.post_sub_uid_management(sub_uid=g_sub_uid, action=SubUidAction.LOCK)\nret.print_object()\n\nret = account_client.post_sub_uid_management(sub_uid=g_sub_uid, action=SubUidAction.UNLOCK)\nret.print_object()","repo_name":"HuobiRDCenter/huobi_Python","sub_path":"example/account/post_sub_user_management.py","file_name":"post_sub_user_management.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":672,"dataset":"github-code","pt":"28"}
+{"seq_id":"253942088","text":"# Program that can encrypt and decrypt using a 2 X 2 Hill Cipher.\r\n\r\nimport numpy as np;\r\n\r\nlist1=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',\" \"];\r\n\r\n# taking input from user\r\ncode=input(\"enter the code: \").lower();\r\n\r\n# making index list of characters inputted by user \r\ncode_list=[list1.index(i) for i in code];\r\n\r\n# if code length is odd then add space index\r\nif (len(code)%2!=0):\r\n    code_list.append(list1.index(\" \"))\r\n\r\n# making key matrix of 2x2 using user input\r\ndef making_key_matrix():\r\n    print(\"enter the key for 2X2 matrix: \");\r\n    key_matrix=[];\r\n    print(\"Enter values: \")\r\n    for i in range(2):\r\n        a=[]\r\n        for j in range(2):\r\n            a.append(int(input()));\r\n        key_matrix.append(a); \r\n    return key_matrix\r\n\r\nkey_matrix=making_key_matrix()\r\n\r\nencoded_code=\"\";\r\n\r\n#multiplication matrix code using index with key_matrix\r\ndef multiplication_matrix_with_key(code_list,code,key_matrix):\r\n\r\n    while(len(code_list)!=0):\r\n        matrix=[];\r\n\r\n        for i in range(2):\r\n            a=[];\r\n            for j in range(1):\r\n                a.append(code_list[j]);\r\n                \r\n                del code_list[j];\r\n            matrix.append(a);\r\n\r\n\r\n        \r\n        for i in range(2):\r\n            \r\n            for j in range(1):\r\n                code+=list1[(key_matrix[i][j]*matrix[j][j]+key_matrix[i][j+1]*matrix[j+1][j])%27];\r\n    return code; \r\n\r\n# encoded code\r\nencoded_code=multiplication_matrix_with_key(code_list,encoded_code,key_matrix)\r\nprint(\"the encoded code: \"+encoded_code); \r\n\r\n#decoded section\r\n\r\n\r\n#inverse block\r\n# finding determinant inverse using kx%27==1\r\ndef finding_det_inverse(determinant_key_matrix):\r\n\r\n    flag=True;\r\n    i=1;\r\n    while(flag):\r\n        output=(determinant_key_matrix*i)%27;\r\n        if(output==1):\r\n            det_inverse=i;\r\n            flag=False;\r\n        i+=1;\r\n    return det_inverse \r\n\r\n#multiplying determinant inverse with adjoint key_matrix\r\ndef making_k_inverse_matrix(key_matrix,determinant_key_matrix): \r\n\r\n    det_inverse=finding_det_inverse(determinant_key_matrix) \r\n    k_inverse=[];\r\n    for i in range(2):\r\n        a=[]\r\n        for j in range(2):\r\n            key_matrix[i][j]=key_matrix[i][j]*det_inverse;\r\n            a.append(key_matrix[i][j]%27);\r\n        k_inverse.append(a); \r\n    return k_inverse\r\n\r\n# finding decoded section and calling inverse block\r\ndef decoded_section(key_matrix,encoded_code):\r\n    \r\n    determinant_key_matrix=round(np.linalg.det(key_matrix));\r\n\r\n    for i in range(2):\r\n        
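# --- Illustrative aside: the validation half of the contact view above, runnable
# without a Django project by configuring settings inline. The form fields mirror the
# ones defined above; labels and test values here are just for the sketch.
import django
from django.conf import settings

settings.configure()   # bare-minimum settings; enough for form validation
django.setup()

from django import forms

class ContactForm(forms.Form):
    contact_name = forms.CharField(required=True)
    contact_email = forms.EmailField(required=True)
    text = forms.CharField(required=True, widget=forms.Textarea)

form = ContactForm(data={"contact_name": "Ada", "contact_email": "not-an-email", "text": "hi"})
print(form.is_valid())                  # False - EmailField rejects the address
print(form.errors.get("contact_email"))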
for j in range(2):\r\n            if(i!=j):\r\n                key_matrix[i][j]=key_matrix[i][j]*-1;\r\n\r\n    for i in range(2):\r\n        for j in range(2):\r\n            if(key_matrix[i][j]<0):\r\n                key_matrix[i][j]=key_matrix[i][j]+27; \r\n    \r\n\r\n    for i in range(2):\r\n        for j in range(2):\r\n            if(i==j==0):\r\n                a=key_matrix[i][j];\r\n            if(i==j==1):\r\n                key_matrix[0][0]=key_matrix[i][j];\r\n                key_matrix[i][j]=a;\r\n\r\n    \r\n\r\n\r\n\r\n\r\n    code_list=[list1.index(i) for i in encoded_code];\r\n\r\n\r\n\r\n    decoded_code=\"\"\r\n\r\n    k_inverse=making_k_inverse_matrix(key_matrix,determinant_key_matrix)\r\n\r\n    decoded_code=multiplication_matrix_with_key(code_list,decoded_code,k_inverse)\r\n\r\n\r\n    print(\"the decoded code: \"+decoded_code); \r\n\r\n\r\n#calling decoded section\r\ndecoded_section(key_matrix,encoded_code) ","repo_name":"Gurdeep123singh/Information-Security--Assignment1","sub_path":"information security/hill_cipher.py","file_name":"hill_cipher.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"16844476660","text":"import random\nfrom flask import Flask, url_for\nfrom flask_admin import Admin\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_security import Security, SQLAlchemyUserDatastore, auth_required, hash_password,current_user\nfrom flask_security.models import fsqla_v2 as fsqla\nfrom flask_admin import helpers as admin_helpers\nimport config\nimport datetime\n\nfrom admin.view import MyModelView, MyAdminIndexView, MyModelViewAdress, ExtendedLoginForm\n\ndb = SQLAlchemy()\nfsqla.FsModels.set_db_info(db)\napp = Flask(__name__)\nmigrate = Migrate(app, db)\n\napp.config.from_object(\"config.Config\")\nadmin = Admin(app,index_view=MyAdminIndexView(), name=config.Config.CATALOG_TITLE, base_template = 'my_master.html', template_mode='bootstrap3')\n\ndb.init_app(app)\n\nwith app.app_context():\n    import routes.catalog\n    from models.models import Product, Address, User,Role\n    admin.add_view(MyModelView(User, db.session))\n    admin.add_view(MyModelView(Product, db.session))\n    admin.add_view(MyModelViewAdress(Address, db.session))\n\n    user_datastore = SQLAlchemyUserDatastore(db, User,Role)\n    security = Security(app, user_datastore, login_form=ExtendedLoginForm)\n\n    db.create_all()\n    if not user_datastore.find_user(email=\"test@me.com\",username='test'):\n        user_datastore.create_user(username= 'test',email=\"test@me.com\", password=hash_password(\"password\"), created = datetime.datetime.now())\n        user_datastore.create_role(name='superuser')\n        user_datastore.add_role_to_user(User.query.filter_by(username='test').first(),Role.query.filter_by(name='superuser').first())\n\n    # def create_sample_database():\n    if not Product.query.filter_by(name='name1').first():\n        for i in range(10):\n            prod = Product(name=f'name{i}',color=f'color{i}',weight=i**2,price=i**2,numbers=1)\n            addr = Address(product=prod,country=f'country{i}',city=f'city{i}',street=f'street{i}',number_of_building=random.randint(1,10))\n            db.session.add(prod)\n            db.session.add(addr)\n        db.session.commit()\n\n\n\n\n\n    @security.context_processor\n    def security_context_processor():\n        return dict(\n            admin_base_template=admin.base_template,\n            admin_view=admin.index_view,\n            h=admin_helpers,\n            get_url=url_for\n        )\n\nif __name__ == '__main__':\n    
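# --- Illustrative aside: the same 2x2 Hill cipher arithmetic as the script above,
# done with numpy over the 27-symbol alphabet (a-z plus space). The key [[2,3],[1,4]]
# is chosen for the sketch because its determinant (5) is invertible mod 27.
import numpy as np

ALPHABET = "abcdefghijklmnopqrstuvwxyz "
K = np.array([[2, 3], [1, 4]])

def hill(text, key):
    nums = [ALPHABET.index(c) for c in text]
    if len(nums) % 2:
        nums.append(ALPHABET.index(" "))          # pad odd-length input with a space
    blocks = np.array(nums).reshape(-1, 2).T      # one column per 2-letter block
    return "".join(ALPHABET[n] for n in (key @ blocks % 27).T.flatten())

det_inv = pow(5, -1, 27)                          # 11, since 5 * 11 = 55 = 2*27 + 1
K_inv = (det_inv * np.array([[4, -3], [-1, 2]])) % 27   # inverse = det_inv * adjugate

cipher = hill("attack at dawn", K)
print(cipher, "->", hill(cipher, K_inv))          # decrypting is the same transform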
app.run(debug=True)\n","repo_name":"mishkasv/flask_simple","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"34207767688","text":"\nfrom typing import List\nclass Solution:\n    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:\n        left=matrix[0][0]\n        right=matrix[-1][-1]\n        while left<right:\n            mid=(left+right)//2\n            count=0\n            for row in matrix:\n                count+=self.count_not_greater(row,mid)\n            if count<k:\n                left=mid+1\n            else:\n                right=mid\n        return left\n\n    def count_not_greater(self, row: List[int], target: int) -> int:\n        low=0\n        high=len(row)-1\n        while low<=high:\n            mid=(low+high)//2\n            if row[mid]>target:\n                high=mid-1\n            else:\n                low=mid+1\n        pos=low\n        return pos\n\ns=Solution()\nmatrix = [\n    [ 1,  5,  9],\n    [10, 11, 13],\n    [12, 13, 15]\n  ]\nk = 8\nprint(s.kthSmallest(matrix,k))\n","repo_name":"wanbiguizhao/leetcode","sub_path":"search/378.py","file_name":"378.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"34766722624","text":"import os\nfrom collections import defaultdict\n\nimport capnp\ncapnp.remove_import_hook()\n\nfrom dist_zero import cgen, errors, capnpgen, primitive, settings, concrete_types, recorded\nfrom dist_zero.reactive import expression\nfrom dist_zero import settings\nfrom dist_zero import types, concrete_types\n\nEVENT_QUEUE_INITIAL_CAPACITY = 10\n\n\nclass ReactiveCompiler(object):\n  '''\n  The root object for building a reactive program from a set of normalized expressions.\n\n  Usage:\n\n  - Create a ``compiler = ReactiveCompiler()`` instance.\n  - Call `ReactiveCompiler.compile` to produce a python module from some normalized expressions.\n  - Create a Net from the generated python module\n  - Call methods on the Net to run the reactive program.\n  '''\n\n  def __init__(self, name, docstring=''):\n    '''\n    :param str name: A name, safe to use in c variables and filenames.\n    :param str docstring: The python docstring to use for the module this compiler will eventually generate.\n    '''\n    self.name = name\n    self.docstring = docstring\n\n    capnp_lib_dir = os.path.join(settings.CAPNP_DIR, 'c-capnproto', 'lib')\n\n    self.program = cgen.Program(\n        name=self.name,\n        docstring=self.docstring,\n        includes=[\n            '\"capnp_c.h\"',\n            f'\"{self._capnp_header_filename()}\"',\n        ],\n        library_dirs=[\n            settings.CAPNP_DIR,\n            capnp_lib_dir,\n        ],\n        sources=[\n            os.path.join(self._capnp_dirname(), self._capnp_source_filename()),\n            # NOTE(KK): We compile all these files into each extension.\n            os.path.join(capnp_lib_dir, \"capn.c\"),\n            os.path.join(capnp_lib_dir, \"capn-malloc.c\"),\n            os.path.join(capnp_lib_dir, \"capn-stream.c\"),\n        ],\n        libraries=[],\n        include_dirs=[\n            self._capnp_dirname(),\n            settings.CAPNP_DIR,\n            capnp_lib_dir,\n        ])\n\n    self.type_to_concrete_type = {}\n    self.capnp = capnpgen.CapnpFile(capnpgen.gen_capn_uid())\n\n    self.BadInputError = self.program.AddException('BadReactiveInput')\n\n    self.output_key_to_norm_expr = None\n\n    self._finalize_turn = None # A function to clean up data associated with a turn\n    self._initialize_turn = None # A function to initialize a turn\n\n    self._cached_after_transitions_function = {} # map each expr to a function to run after adding new transitions\n    self._shall_maintain_state = None\n    self._serialize_output_transitions = None # Function to serialize all output transitions inside the turn\n    self._write_output_transitions = {} # map expr to the function to write its output transitions to the turn result\n    self._write_output_state = {} # map expr to the function to return the bytes object for its current state\n\n    self._type_by_expr = {} # expr to dist_zero.types.Type\n    self._concrete_type_by_type = {} # type to dist_zero.concrete_types.ConcreteType\n\n    # 
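# --- Illustrative aside: a quick cross-check of the value-range binary search in the
# kthSmallest record above against the brute-force answer (flatten and sort), over
# random matrices with sorted rows and columns.
import random

def kth_smallest(matrix, k):
    lo, hi = matrix[0][0], matrix[-1][-1]
    while lo < hi:
        mid = (lo + hi) // 2
        count = sum(sum(1 for v in row if v <= mid) for row in matrix)
        if count < k:
            lo = mid + 1
        else:
            hi = mid
    return lo

for _ in range(100):
    n = random.randint(1, 5)
    values = sorted(random.sample(range(100), n * n))
    matrix = [values[i * n:(i + 1) * n] for i in range(n)]
    k = random.randint(1, n * n)
    assert kth_smallest(matrix, k) == sorted(v for row in matrix for v in row)[k - 1]
print("ok")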
when in the middle of generating code for a turn, this variable will refer to a kvec of pointers\n # that will be freed at the end of the turn,\n self.ptrsToFree = lambda vGraph: vGraph.Arrow('turn').Dot('ptrs_to_free')\n\n self._graph_struct = None\n self._turn_struct = None\n self._cached_n_exprs = None\n\n self._top_exprs = None\n self.expr_to_inputs = None\n self.expr_to_outputs = None\n self.expr_index = None\n self._input_exprs = None\n self._output_exprs = None # Dictionary from output expression to its list of keys\n self._net = None\n\n self._built_capnp = False\n self._pycapnp_module = None\n\n def compile(self, output_key_to_norm_expr, other_concrete_exprs=None):\n '''\n Compile normalized expressions into a reactive program.\n\n ``mod = reactive_compiler.compile(output_key_to_norm_expr)``\n\n The input program is provided as a dictionary that maps output keys to `ConcreteExpression` instances.\n Any expression used in constructing one of these \"output\" expressions is considered part of the program.\n Any such `dist_zero.reactive.expression.Input` expression is treated as an input to the reactive program.\n In general, much structure will be shared between distinct output expressions.\n\n The return value of ``compile`` will be a python module that exports a new type ``Net`` which\n can be used in the following ways:\n\n\n Passing in and receiving states:\n\n Each call to ``net = mod.Net()`` creates a separate instance of the\n reactive program described by ``output_key_to_norm_expr``.\n Once the reactive program ``net`` has been created, you can\n\n - Register an output with ``net.OnOutput_{output_key}()``. This method may be called exactly once for each output key\n in ``output_key_to_norm_expr``\n - For each input expression ``I`` that was provided to `ReactiveCompiler.compile`, you can register the input key\n for ``I`` by calling ``net.OnInput_{I.name}(bytes)`` where ``bytes`` is a python bytes object containing\n a capnpproto serialized message for ``I``. You can use `ReactiveCompiler.capnp_state_builder_for_type` to obtain\n a builder for such a python bytes object.\n\n Each of the above methods of ``Net`` will return a python dictionary mapping output keys to byte-like objects.\n For each mapping ``output_key`` -> ``bytes``, ``bytes`` will be a serialized capnproto message for that\n output key. You can use ``compiler.capnp_state_builder_for_type(output_key_to_norm_expr[output_key].type)``\n to get a builder that will parse ``bytes``.\n\n Each output key will only ever produce at most one output state, and that state will be returned\n as soon as the output has received all the inputs it needs to calculate it.\n The calculated state will be exactly the one determined by its associated\n `ConcreteExpression`.\n\n Passing in and receiving transitions:\n\n ``net.OnTransitions(input_transitions)`` takes as input an ``input_transitions`` dictionary that maps\n certain registered input keys to lists of bytes objects representing capnproto transitions\n (use `ReactiveCompiler.capnp_transitions_builder_for_type` to obtain a builder that can generate the proper bytes).\n This method will return a dictionary mapping output keys to bytes-like objects with the\n appropriate output capnproto transitions.\n\n Output transitions will be returned whenever an update to an input expression leads to an update to an output\n expression. 
The calculated transitions will be exactly those determined by the associated `ConcreteExpression`\n\n See :file:`test/test_reactives.py` for some examples of how to use reactives.\n\n :param output_key_to_norm_expr: A map from strings to normalized expressions.\n :type output_key_to_norm_expr: dict[str, ConcreteExpression]\n :param set other_exprs: If provided, a set of `ConcreteExpression` instances that should also be compiled in,\n whether or not they're accessible from an output key.\n\n :return: The compiled c extension module, loaded into the current interpret as a python module.\n '''\n all_exprs = set(other_concrete_exprs) if other_concrete_exprs is not None else set()\n all_exprs.update(output_key_to_norm_expr.values())\n self._output_key_to_norm_expr = output_key_to_norm_expr\n\n topsorter = _Topsorter(list(all_exprs))\n self._top_exprs = topsorter.topsort()\n\n self.expr_to_inputs = topsorter.expr_to_inputs\n self.expr_to_outputs = topsorter.expr_to_outputs\n self.expr_index = {}\n for i, expr in enumerate(self._top_exprs):\n self.expr_index[expr] = i\n\n self._concrete_types = [self.get_concrete_type(expr.type) for expr in self._top_exprs]\n\n self._net = self.program.AddPythonType(name='Net', docstring=f\"For running the {self.name} reactive network.\")\n\n # Add capnproto types for outputs\n self._output_exprs = defaultdict(list)\n for key, expr in self._output_key_to_norm_expr.items():\n self._output_exprs[expr].append(key)\n self.get_concrete_type(expr.type).initialize_capnp(self)\n\n # Add capnproto types for inputs\n self._input_exprs = []\n for expr in self._top_exprs:\n if expr.__class__ == expression.Input:\n self._input_exprs.append(expr)\n self.get_concrete_type(expr.type).initialize_capnp(self)\n elif expr.spy_keys:\n self.get_concrete_type(expr.type).initialize_capnp(self)\n\n self._build_capnp()\n\n self._generate_graph_struct()\n self._generate_graph_initializer()\n self._generate_graph_finalizer()\n self._generate_python_bytes_from_capnp()\n self._shall_maintain_state_function()\n\n self._generate_cur_time()\n self._generate_next_time()\n self._generate_elapse()\n\n for i in range(0, len(self._top_exprs)):\n self._generate_initialize_state(i)\n\n for i in range(0, len(self._top_exprs)):\n self._generate_subscribe(i)\n\n for expr in self._output_exprs.keys():\n self._write_output_state_function(expr)\n\n for expr in self._output_exprs.keys():\n self._write_output_transitions_function(expr)\n\n for i in range(len(self._top_exprs) - 1, -1, -1):\n self._generate_produce(i)\n\n for expr in self._input_exprs:\n self._generate_on_input(expr)\n self._generate_deserialize_transitions(expr)\n\n self._serialize_output_transitions_function()\n\n for key, expr in self._output_key_to_norm_expr.items():\n self._generate_on_output(key, expr)\n\n for expr in self._top_exprs:\n self._generate_react_to_transitions(expr)\n\n self._generate_on_transitions()\n\n spies = set()\n for expr in self._top_exprs:\n for key in expr.spy_keys:\n if key in spies:\n raise errors.InternalError(f\"spy key \\\"{key}\\\" should not be used twice in the same reactive compiler.\")\n else:\n self._write_output_state_function(expr)\n self._generate_spy_method(expr, key)\n\n if settings.c_debug:\n with open('msg.capnp', 'w') as f:\n for line in self.capnp.lines():\n f.write(line)\n\n with open('example.c', 'w') as f:\n for line in self.program.to_c_string():\n f.write(line)\n\n module = self.program.build_and_import()\n\n return module\n\n def _capnp_filename(self):\n return f\"{self.name}.capnp\"\n\n 
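# --- Illustrative aside: compile() above topologically sorts the expression DAG so
# that every expression appears after all of its inputs. A minimal standalone version
# of that ordering (Kahn's algorithm) on a toy dependency map; _Topsorter's actual
# implementation lives elsewhere in this package.
from collections import deque

def topsort(expr_to_inputs):
    missing_inputs = {e: len(ins) for e, ins in expr_to_inputs.items()}
    dependents = {e: [] for e in expr_to_inputs}
    for e, ins in expr_to_inputs.items():
        for i in ins:
            dependents[i].append(e)
    ready = deque(e for e, n in missing_inputs.items() if n == 0)
    order = []
    while ready:
        e = ready.popleft()
        order.append(e)
        for d in dependents[e]:
            missing_inputs[d] -= 1
            if missing_inputs[d] == 0:
                ready.append(d)
    return order

print(topsort({'in': [], 'double': ['in'], 'sum': ['in', 'double']}))
# -> ['in', 'double', 'sum']  (inputs first, outputs last)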
def _capnp_source_filename(self):\n    return f\"{self._capnp_filename()}.c\"\n\n  def _capnp_header_filename(self):\n    return f\"{self._capnp_filename()}.h\"\n\n  def _capnp_dirname(self):\n    return os.path.join(os.path.realpath('.'), '.tmp', 'capnp')\n\n  def _build_capnp(self):\n    if not self._built_capnp:\n      dirname = self._capnp_dirname()\n      os.makedirs(dirname, exist_ok=True)\n      filename = self._capnp_filename()\n      self.capnp.build_in(dirname=dirname, filename=filename)\n      self._built_capnp = True\n\n  def get_pycapnp_module(self):\n    '''\n    Return a python module for generating and parsing capnp messages.\n    This method caches its result, and should only be called after the program is finished being compiled.\n    '''\n    if self._pycapnp_module is None:\n      self._build_capnp()\n      dirname = self._capnp_dirname()\n      filename = self._capnp_filename()\n\n      self._pycapnp_module = capnp.load(os.path.join(dirname, filename), imports=[settings.CAPNP_DIR])\n\n    return self._pycapnp_module\n\n  def capnp_state_builder(self, expr):\n    '''\n    Get the capnp builder for ``expr``\n\n    :param expr: A dist_zero expression involved in compiling the program.\n    :type expr: `ConcreteExpression`\n\n    :return: The pycapnp builder object for ``expr``. The specific builder subclass\n      will be generated by the capnproto compiler.\n    :rtype: `capnp._DynamicStructBuilder`\n    '''\n    t = self.get_concrete_type(expr.type).capnp_state_type\n    capnp_module = self.get_pycapnp_module()\n    return capnp_module.__dict__[t.name]\n\n  def capnp_state_builder_for_type(self, t):\n    '''\n    Get the capnp builder for ``t``\n\n    :param t: The dist_zero type involved in compiling the program.\n    :type t: `dist_zero.types.Type`\n    :return: The pycapnp builder object for ``t``. The specific builder subclass\n      will be generated by the capnproto compiler.\n    :rtype: `capnp._DynamicStructBuilder`\n    '''\n    capnp_module = self.get_pycapnp_module()\n    return capnp_module.__dict__[self.get_concrete_type(t).name]\n\n  def capnp_transitions_builder(self, expr):\n    '''\n    Get the capnp builder for transitions on ``expr``\n\n    :param expr: A dist_zero expression involved in compiling the program.\n    :type expr: `ConcreteExpression`\n    :return: The pycapnp builder object for transitions on ``expr``. The specific builder subclass\n      will be generated by the capnproto compiler.\n    :rtype: `capnp._DynamicStructBuilder`\n    '''\n    t = self.get_concrete_type(expr.type).capnp_transitions_type\n    capnp_module = self.get_pycapnp_module()\n    return capnp_module.__dict__[t.name]\n\n  def capnp_transitions_builder_for_type(self, t):\n    '''\n    Get the capnp builder for transitions on ``t``\n\n    :param t: The dist_zero type involved in compiling the program.\n    :type t: `dist_zero.types.Type`\n    :return: The pycapnp builder object for transitions on ``t``. 
The specific builder subclass class\n will be generated by the capnproto compiler.\n :rtype: `capnp._DynamicStructBuilder`\n '''\n capnp_module = self.get_pycapnp_module()\n return capnp_module.__dict__[self.get_concrete_type(t).name]\n\n def get_concrete_type(self, t):\n '''\n :param t: A type in the input program.\n :type t: `dist_zero.types.Type`\n :return: The unique `ConcreteType` this `ReactiveCompiler` instance will eventually use to represent ``t``.\n :rtype: `ConcreteType`\n '''\n if t not in self._concrete_type_by_type:\n result = self._compute_concrete_type(t)\n self._concrete_type_by_type[t] = result\n return result\n else:\n return self._concrete_type_by_type[t]\n\n def state_lvalue(self, vGraph, expr):\n '''\n :param vGraph: The c variable for the relevant graph structure.\n :type vGraph: `CExpression`\n :param expr: Any expression in the input program. \n :type expr: `ConcreteExpression`\n :return: The c lvalue that holds the current state of ``expr``.\n :rtype: `dist_zero.cgen.lvalue.Lvalue`\n '''\n index = self.expr_index[expr]\n return vGraph.Arrow(self._state_key_in_graph(index))\n\n def state_rvalue(self, vGraph, expr):\n '''\n :param vGraph: The c variable for the relevant graph structure.\n :type vGraph: `CExpression`\n :param expr: Any expression in the input program. \n :type expr: `ConcreteExpression`\n :return: A c expression that holds the current state of ``expr``.\n :rtype: `CExpression`\n '''\n index = self.expr_index[expr]\n return vGraph.Arrow(self._state_key_in_graph(index))\n\n def transitions_rvalue(self, vGraph, expr):\n '''\n :param vGraph: The c variable for the relevant graph structure.\n :type vGraph: `CExpression`\n :param expr: Any expression in the input program. \n :type expr: `ConcreteExpression`\n :return: A c expression that holds the current transitions kvec for ``expr``.\n :rtype: `CExpression`\n '''\n index = self.expr_index[expr]\n return vGraph.Arrow('turn').Dot(self._transition_key_in_turn(index))\n\n def _compute_concrete_type(self, t):\n '''Determine which `ConcreteType` to use for ``t``'''\n if isinstance(t, concrete_types.ConcreteType):\n return t.initialize(self)\n elif t.__class__ == types.Product:\n return concrete_types.ConcreteProductType(t).initialize(self)\n elif t.__class__ == types.Sum:\n return concrete_types.ConcreteSumType(t).initialize(self)\n elif t.__class__ == types.List:\n return concrete_types.ConcreteList(t).initialize(self)\n elif t.__class__ == types.FunctionType:\n raise errors.InternalError(\n \"Reactive compiler can't produce a concrete type for a function type. It should have been normalized away.\")\n elif t.__class__ == types.BasicType:\n return concrete_types.ConcreteBasicType(t).initialize(self)\n else:\n raise RuntimeError(f\"Unrecognized dist_zero type {t.__class__}.\")\n\n def _generate_graph_struct(self):\n '''Generate the graph struct in self.program.'''\n self._graph_struct = self._net.struct\n\n self._graph_struct.AddField('cur_time', cgen.UInt64)\n\n self._graph_struct.AddField('events', cgen.EventQueue)\n\n # -1 if the expr has not been subscribed to, otherwise the number of inputs that still need to be produced.\n self._graph_struct.AddField('n_missing_productions', cgen.Int32.Array(self._n_exprs()))\n\n # The number of output expressions (or graph outputs) that have yet to subscribe to the expr.\n self._graph_struct.AddField('n_missing_subscriptions', cgen.Int32.Array(self._n_exprs()))\n\n # Array of react_to_transitions* functions. 
They should be called on initialized states when they\n # have input transitions to react to.\n self._graph_struct.AddField(\n 'react_to_transitions',\n cgen.FunctionType(cgen.UInt8, [self._graph_struct.Star()]).Star().Array(self._n_exprs()))\n\n self._turn_struct = self.program.AddStruct('turn')\n self._graph_struct.AddField('turn', self._turn_struct)\n\n # To hold a python dict of outputs. This variable is used by chained produce* calls starting\n # from an OnInput call.\n self._turn_struct.AddField('result', cgen.PyObject.Star())\n\n self._turn_struct.AddField('processed_transitions', cgen.MachineInt.Array(self._n_exprs()))\n\n # kvec of functions that will serialize output transitions into the turn\n self._turn_struct.AddField('turn_outputs', cgen.KVec(cgen.Void.Star()))\n # true iff expr i has been added to turn_outputs. Used to avoid adding the same function to the array twice\n self._turn_struct.AddField('is_turn_output', cgen.UInt8.Array(self._n_exprs()))\n\n self._turn_struct.AddField('remaining', cgen.Queue)\n self._turn_struct.AddField('was_added', cgen.UInt8.Array(self._n_exprs()))\n self._turn_struct.AddField('vecs_to_free', cgen.KVec(cgen.Void.Star()))\n self._turn_struct.AddField('ptrs_to_free', cgen.KVec(cgen.Void.Star()))\n\n for i, expr in enumerate(self._top_exprs):\n ct = self.get_concrete_type(expr.type)\n self._graph_struct.AddField(self._state_key_in_graph(i), ct.c_state_type)\n self._turn_struct.AddField(self._transition_key_in_turn(i), cgen.KVec(ct.c_transitions_type))\n\n @property\n def graph_struct(self):\n return self._graph_struct\n\n def _n_exprs(self):\n if self._cached_n_exprs is None:\n self._cached_n_exprs = cgen.Constant(len(self._top_exprs))\n\n return self._cached_n_exprs\n\n def _generate_graph_initializer(self):\n '''Generate the graph initialization function.'''\n init = self._net.AddInit()\n vGraph = init.SelfArg()\n\n init.AddAssignment(vGraph.Arrow('cur_time'), cgen.Zero)\n (init.AddIf(cgen.event_queue_init(\n vGraph.Arrow('events').Address(), cgen.Constant(EVENT_QUEUE_INITIAL_CAPACITY))).consequent.AddAssignment(\n None, self.pyerr_from_string(\"Failed to allocate a new event queue\")).AddReturn(cgen.MinusOne))\n init.Newline()\n\n init.AddAssignment(\n None, cgen.memset(vGraph.Arrow('turn').Dot('is_turn_output'), cgen.Zero,\n self._n_exprs() * cgen.UInt8.Sizeof()))\n\n init.AddAssignment(None, cgen.kv_init(vGraph.Arrow('turn').Dot('turn_outputs')))\n\n init.Newline()\n\n for i, expr in enumerate(self._top_exprs):\n init.AddAssignment(vGraph.Arrow('n_missing_productions').Sub(i), cgen.MinusOne)\n\n for i, expr in enumerate(self._top_exprs):\n n_outputs = len(self._output_exprs.get(expr, []))\n for outputExpr in self.expr_to_outputs[expr]:\n if outputExpr.__class__ == expression.Product:\n # We add an extra output for a product expression to ensure that this expression's\n # state is maintained if the product's state must be maintained.\n # In the event that the product's state need NOT be maintained, it will satisfy this addition output.\n n_outputs += 2\n else:\n n_outputs += 1\n\n total_subscriptions = n_outputs\n if expr.spy_keys:\n total_subscriptions += 1 # 1 extra subscription for the spy key\n init.AddAssignment(vGraph.Arrow('n_missing_subscriptions').Sub(i), cgen.Constant(total_subscriptions))\n\n for expr in self._top_exprs:\n init.AddAssignment(None, cgen.kv_init(self.transitions_rvalue(vGraph, expr)))\n\n for i, expr in enumerate(self._top_exprs):\n if self._expr_can_react(expr):\n react = 
cgen.Var(self._react_to_transitions_function_name(i))\n init.AddAssignment(vGraph.Arrow('react_to_transitions').Sub(i), react.Address())\n\n for i, expr in enumerate(self._top_exprs):\n if expr.spy_keys:\n subscribeFunction = cgen.Var(self._subscribe_function_name(i))\n init.AddAssignment(None, subscribeFunction(vGraph))\n\n init.AddReturn(cgen.Constant(0))\n\n def _generate_graph_finalizer(self):\n '''Generate the graph finalization function.'''\n finalize = self._net.AddFinalize()\n\n vGraph = finalize.SelfArg()\n\n # Free memory associated with the events queue.\n with finalize.ForInt(vGraph.Arrow('events').Dot('count')) as (loop, eventIndex):\n vData = vGraph.Arrow('events').Dot('data').Sub(eventIndex).Dot('data')\n loop.AddIf(vData != cgen.NULL).consequent.AddAssignment(None, cgen.free(vData))\n finalize.AddAssignment(None, cgen.free(vGraph.Arrow('events').Dot('data'))).Newline()\n\n for i, expr in enumerate(self._top_exprs):\n ifInitialized = finalize.AddIf(cgen.Zero == vGraph.Arrow('n_missing_productions').Sub(i)).consequent\n expr.generate_free_state(self, ifInitialized, self.state_rvalue(vGraph, expr))\n\n def _python_bytes_from_capn_function_name(self):\n return \"python_bytes_from_capn\"\n\n def _transition_key_in_turn(self, index):\n return f'transitions_{index}'\n\n def _state_key_in_graph(self, index):\n return f'state_{index}'\n\n def _react_to_transitions_function_name(self, index):\n return f\"react_to_transitions_{index}\"\n\n def _initialize_state_function_name(self, index):\n return f\"initialize_state_{index}\"\n\n def _deserialize_transitions_function_name(self, index):\n return f\"deserialize_transitions_{index}\"\n\n def _subscribe_function_name(self, index):\n return f\"subscribe_to_{index}\"\n\n def _produce_function_name(self, index):\n return f\"produce_on_{index}\"\n\n def _on_input_function_name(self, expr):\n return f\"OnInput_{expr.name}\"\n\n def _on_output_function_name(self, key):\n return f\"OnOutput_{key}\"\n\n def _generate_initialize_state(self, index):\n '''\n Generate the state initialization function for this index.\n\n It should be called once all the input states have already been populated.\n '''\n\n expr = self._top_exprs[index]\n if expr.__class__ == expression.Input:\n return # Input expressions do not require an ordinary state initialization function\n\n vGraph = self._graph_struct.Star().Var('graph')\n initialize_state = self.program.AddFunction(\n name=self._initialize_state_function_name(index), retType=cgen.Void, args=[vGraph])\n\n expr.generate_initialize_state(self, initialize_state, vGraph)\n\n def _generate_produce(self, index):\n '''\n Generate the produce function in c for this expression index.\n This function will only be called after the state for the expression has been initialized and its\n n_missing_productions variable set to zero.\n Calling it ensures that any expression enabled by the setting of this state will be initialized and its\n produced function will be called.\n '''\n vGraph = self._graph_struct.Star().Var('graph')\n produce = self.program.AddFunction(name=self._produce_function_name(index), retType=cgen.Void, args=[vGraph])\n\n expr = self._top_exprs[index]\n\n if expr in self._output_exprs:\n getBytes = self._write_output_state_function(expr)\n vBytes = produce.AddDeclaration(cgen.PyObject.Star().Var('result_bytes'), getBytes(vGraph))\n produce.AddIf(vBytes == cgen.NULL).consequent.AddReturnVoid()\n for key in self._output_exprs[expr]:\n (produce.AddIf(cgen.MinusOne == cgen.PyDict_SetItemString(\n 
vGraph.Arrow('turn').Dot('result'), cgen.StrConstant(key), vBytes)).consequent.AddReturnVoid())\n\n for output_expr in self.expr_to_outputs[expr]:\n output_index = self.expr_index[output_expr]\n\n vNMissingInputs = vGraph.Arrow('n_missing_productions').Sub(output_index)\n whenSubscribed = produce.AddIf(vNMissingInputs >= cgen.Zero).consequent\n\n whenSubscribed.AddAssignment(vGraph.Arrow('n_missing_productions').Sub(output_index), vNMissingInputs - cgen.One)\n\n whenReady = whenSubscribed.AddIf(vNMissingInputs == cgen.Zero).consequent\n initializeFunction = cgen.Var(self._initialize_state_function_name(output_index))\n produceFunction = cgen.Var(self._produce_function_name(output_index))\n whenReady.AddAssignment(None, initializeFunction(vGraph))\n whenReady.AddAssignment(None, produceFunction(vGraph))\n\n produce.AddReturnVoid()\n\n def pyerr(self, err_type, s, *args):\n '''\n Return a c function call that sets a python exception.\n :param err_type: A c variable that refers to a python exception type.\n :type err_type: `cgen.expression.Var`\n :param str s: The printf format string\n :param args: The c variables to matching the format specifiers in ``s``\n :type args: list[`cgen.expression.Var`]\n '''\n if len(args) == 0:\n return cgen.PyErr_SetString(err_type, cgen.StrConstant(s))\n else:\n return cgen.PyErr_Format(err_type, cgen.StrConstant(s), *args)\n\n def pyerr_from_string(self, s, *args):\n '''\n Return a c function call that sets a python RuntimeError.\n :param str s: The printf format string\n :param args: The c variables to matching the format specifiers in ``s``\n :type args: list[`cgen.expression.Var`]\n '''\n return self.pyerr(cgen.PyExc_RuntimeError, s, *args)\n\n def _write_output_transitions_function(self, expr):\n if expr not in self._write_output_transitions:\n index = self.expr_index[expr]\n exprType = self._concrete_types[index]\n vGraph = self._graph_struct.Star().Var('graph')\n block = self.program.AddFunction(f'write_output_transitions_{index}', cgen.UInt8, args=[vGraph], predeclare=True)\n self._write_output_transitions[expr] = block\n\n vBytes = block.AddDeclaration(cgen.PyObject.Star().Var('resulting_python_bytes'))\n exprType.generate_c_transitions_to_capnp(self, block, self.transitions_rvalue(vGraph, expr), vBytes)\n\n block.AddIf(vBytes == cgen.NULL).consequent.AddReturn(cgen.true)\n for key in self._output_exprs[expr]:\n block.AddIf(cgen.MinusOne == cgen.PyDict_SetItemString(\n vGraph.Arrow('turn').Dot('result'), cgen.StrConstant(key), vBytes)).consequent.AddReturn(cgen.true)\n\n block.AddReturn(cgen.false)\n\n return self._write_output_transitions[expr]\n\n def _write_output_state_function(self, expr):\n '''\n Generate the write_output_state function in c for ``expr``.\n '''\n if expr not in self._write_output_state:\n index = self.expr_index[expr]\n exprType = self._concrete_types[index]\n vGraph = self._graph_struct.Star().Var('graph')\n write_output_state = self.program.AddFunction(\n name=f\"write_output_state_{index}\", retType=cgen.PyObject.Star(), args=[vGraph], predeclare=True)\n self._write_output_state[expr] = write_output_state\n\n vPythonBytes = write_output_state.AddDeclaration(cgen.PyObject.Star().Var('resulting_python_bytes'))\n exprType.generate_c_state_to_capnp(self, write_output_state, self.state_rvalue(vGraph, expr), vPythonBytes)\n\n write_output_state.AddReturn(vPythonBytes)\n return self._write_output_state[expr]\n\n def _generate_on_output(self, key, expr):\n '''\n Generate the OnOutput_{key} function in c for ``expr``.\n '''\n 
on_output = self._net.AddMethod(name=self._on_output_function_name(key), args=None)\n output_index = self.expr_index[expr]\n\n vGraph = on_output.SelfArg()\n\n vResult = on_output.AddDeclaration(cgen.PyObject.Star().Var('result'), cgen.PyDict_New())\n\n (on_output.AddIf(vResult == cgen.NULL).consequent.AddAssignment(\n None, self.pyerr_from_string(\"Failed to create output dictionary\")).AddReturn(cgen.NULL))\n\n subscribeFunction = cgen.Var(self._subscribe_function_name(output_index))\n ifHasState = on_output.AddIf(subscribeFunction(vGraph))\n whenHasState = ifHasState.consequent\n\n outputState = self.state_rvalue(vGraph, expr)\n\n getBytes = self._write_output_state_function(expr)\n vBytes = whenHasState.AddDeclaration(cgen.PyObject.Star().Var('result_bytes'), getBytes(vGraph))\n\n (whenHasState.AddIf(vBytes == cgen.NULL).consequent.AddAssignment(None,\n cgen.Py_DECREF(vResult)).AddReturn(cgen.NULL))\n\n (whenHasState.Newline().AddIf(\n cgen.MinusOne == cgen.PyDict_SetItemString(vResult, cgen.StrConstant(key), vBytes)).consequent.AddAssignment(\n None, cgen.Py_DECREF(vResult)).AddAssignment(None, cgen.Py_DECREF(vBytes)).AddReturn(cgen.NULL))\n\n on_output.Newline().AddReturn(vResult)\n\n def _generate_on_input(self, expr):\n '''\n Generate the OnInput_{name} function in c for ``expr``.\n '''\n index = self.expr_index[expr]\n inputType = self.get_concrete_type(expr.type)\n\n on_input = self._net.AddMethod(name=self._on_input_function_name(expr), args=None) # We'll do our own arg parsing\n vGraph = on_input.SelfArg()\n vArgsArg = on_input.ArgsArg()\n\n vBuf = on_input.AddDeclaration(cgen.UInt8.Star().Var('buf'))\n vBuflen = on_input.AddDeclaration(cgen.MachineInt.Var('buflen'))\n vCapn = on_input.AddDeclaration(cgen.Capn.Var('capn'))\n\n whenParseFail = on_input.AddIf(\n cgen.PyArg_ParseTuple(vArgsArg, cgen.StrConstant(\"s#\"), vBuf.Address(), vBuflen.Address()).Negate()).consequent\n whenParseFail.AddReturn(cgen.NULL)\n\n on_input.Newline()\n\n (on_input.AddIf(\n cgen.Zero != cgen.capn_init_mem(vCapn.Address(), vBuf, vBuflen, cgen.Zero)).consequent.AddAssignment(\n None, self.pyerr(self.BadInputError, \"Failed to parse message input.\")).AddReturn(cgen.NULL))\n\n on_input.Newline()\n\n vResult = on_input.AddDeclaration(cgen.PyObject.Star().Var('result'), cgen.PyDict_New())\n (on_input.AddIf(vResult == cgen.NULL).consequent.AddAssignment(\n None, self.pyerr_from_string(\"Failed to create output dictionary\")).AddReturn(cgen.NULL))\n on_input.AddAssignment(vGraph.Arrow('turn').Dot('result'), vResult)\n\n ptr = on_input.AddDeclaration(inputType.capnp_state_type.c_ptr_type.Var(f'ptr'))\n on_input.AddAssignment(ptr.Dot('p'), cgen.capn_getp(cgen.capn_root(vCapn.Address()), cgen.Zero, cgen.One))\n\n inputType.generate_capnp_to_c_state(\n concrete_types.CapnpReadContext(compiler=self, block=on_input, ptrsToFree=None, ptr=ptr),\n self.state_lvalue(vGraph, expr))\n\n on_input.AddAssignment(None, cgen.capn_free(vCapn.Address()))\n on_input.AddAssignment(vGraph.Arrow('n_missing_productions').Sub(index), cgen.Zero)\n\n produceState = cgen.Var(self._produce_function_name(index))\n on_input.AddAssignment(None, produceState(vGraph))\n\n on_input.AddAssignment(vGraph.Arrow('turn').Dot('result'), cgen.NULL)\n on_input.AddReturn(vResult)\n\n def _generate_subscribe(self, index):\n '''\n Generate the function to subscribe on this index when one of its outputs is subscribed to.\n\n It will return true iff the expression at this index has a state.\n '''\n expr = self._top_exprs[index]\n if expr.__class__ == 
expression.Input:\n self._generate_subscribe_input(index, expr)\n else:\n self._generate_subscribe_noninput(index, expr)\n\n def _generate_subscribe_input(self, index, expr):\n '''see _generate_subscribe'''\n vGraph = self._graph_struct.Star().Var('graph')\n subscribe = self.program.AddFunction(name=self._subscribe_function_name(index), retType=cgen.Int32, args=[vGraph])\n\n subscribe.AddAssignment(\n vGraph.Arrow('n_missing_subscriptions').Sub(index),\n vGraph.Arrow('n_missing_subscriptions').Sub(index) - cgen.One)\n\n # Inputs will have their n_missing_productions value set to 0 only after they have been initialized.\n subscribe.AddReturn(vGraph.Arrow('n_missing_productions').Sub(index) == cgen.Zero)\n\n def _generate_subscribe_noninput(self, index, expr):\n '''see _generate_subscribe'''\n vGraph = self._graph_struct.Star().Var('graph')\n subscribe = self.program.AddFunction(name=self._subscribe_function_name(index), retType=cgen.Int32, args=[vGraph])\n\n subscribe.AddAssignment(\n vGraph.Arrow('n_missing_subscriptions').Sub(index),\n vGraph.Arrow('n_missing_subscriptions').Sub(index) - cgen.One)\n\n if expr.__class__ == expression.Product:\n ifZero = subscribe.AddIf(vGraph.Arrow('n_missing_subscriptions').Sub(index) == cgen.Zero)\n for inputExpr in self.expr_to_inputs[expr]:\n ifZero.consequent.AddAssignment(\n vGraph.Arrow('n_missing_subscriptions').Sub(self.expr_index[inputExpr]),\n vGraph.Arrow('n_missing_subscriptions').Sub(self.expr_index[inputExpr]) - cgen.One)\n\n subscribe.Newline()\n\n missingInputsI = vGraph.Arrow('n_missing_productions').Sub(index)\n updateMissingInputsI = vGraph.Arrow('n_missing_productions').Sub(index)\n\n ifAlreadySubscribed = subscribe.AddIf(missingInputsI >= cgen.Zero)\n ifAlreadySubscribed.consequent.AddReturn(missingInputsI == cgen.Zero)\n whenNotAlreadySubscribed = ifAlreadySubscribed.alternate\n\n nMissingInputs = whenNotAlreadySubscribed.AddDeclaration(\n cgen.Int32.Var('n_missing_productions'), cgen.Constant(len(self.expr_to_inputs[expr])))\n\n for inputExpr in self.expr_to_inputs[expr]:\n inputSubscribeFunction = cgen.Var(self._subscribe_function_name(self.expr_index[inputExpr]))\n ifInputIsReady = whenNotAlreadySubscribed.AddIf(inputSubscribeFunction(vGraph))\n ifInputIsReady.consequent.AddAssignment(nMissingInputs, nMissingInputs - cgen.One)\n\n whenNotAlreadySubscribed.AddAssignment(updateMissingInputsI, nMissingInputs)\n\n ifInputsAreSubscribed = whenNotAlreadySubscribed.AddIf(nMissingInputs == cgen.Zero)\n ifInputsAreSubscribed.alternate.AddReturn(cgen.false)\n whenInputsAreSubscribed = ifInputsAreSubscribed.consequent\n\n initializeFunction = cgen.Var(self._initialize_state_function_name(self.expr_index[expr]))\n whenInputsAreSubscribed.AddAssignment(None, initializeFunction(vGraph))\n whenInputsAreSubscribed.AddReturn(cgen.true)\n\n def _shall_maintain_state_function(self):\n '''\n Generate a shall_maintain_state c function that determines whether an index must maintain its state\n as new transitions arrive.\n '''\n if self._shall_maintain_state is None:\n vGraph = self._graph_struct.Star().Var('graph')\n vIndex = cgen.MachineInt.Var('index')\n shall_maintain_state = self.program.AddFunction(\n name='shall_maintain_state', retType=cgen.MachineInt, args=[vGraph, vIndex])\n\n # Current implementation: Check whether any other expr is still unsubscribed to it.\n shall_maintain_state.AddReturn(vGraph.Arrow('n_missing_subscriptions').Sub(vIndex) > cgen.Zero)\n\n self._shall_maintain_state = shall_maintain_state\n\n return 
self._shall_maintain_state\n\n def _generate_cur_time(self):\n '''Generate the CurTime C function.'''\n cur_time = self._net.AddMethod(name='CurTime', args=[])\n vGraph = cur_time.SelfArg()\n\n cur_time.AddReturn(cgen.PyLong_FromLong(vGraph.Arrow('cur_time')))\n\n def _generate_next_time(self):\n next_time = self._net.AddMethod(name='NextTime', args=[])\n vGraph = next_time.SelfArg()\n\n ifEmpty = next_time.AddIf(vGraph.Arrow('events').Dot('count') == cgen.Zero)\n ifEmpty.consequent.AddReturn(cgen.Py_None)\n ifEmpty.alternate.AddReturn(cgen.PyLong_FromLong(vGraph.Arrow('events').Dot('data').Sub(cgen.Zero).Dot('when')))\n\n def _generate_python_bytes_from_capnp(self):\n '''generate a c function to produce a python bytes object from a capnp structure.'''\n vCapn = cgen.Capn.Star().Var('capn')\n python_bytes_from_capn = self.program.AddFunction(\n name=self._python_bytes_from_capn_function_name(), retType=cgen.PyObject.Star(), args=[vCapn])\n\n vBuf = python_bytes_from_capn.AddDeclaration(cgen.UInt8.Star().Var('result_buf'))\n vWroteBytes = python_bytes_from_capn.AddDeclaration(cgen.MachineInt.Var('wrote_bytes'))\n pyBuffer = python_bytes_from_capn.AddDeclaration(cgen.PyObject.Star().Var('py_buffer_result'))\n vSize = python_bytes_from_capn.AddDeclaration(cgen.MachineInt.Var('n_bytes'), cgen.Constant(4096))\n\n python_bytes_from_capn.Newline()\n\n loop = python_bytes_from_capn.AddWhile(cgen.true)\n\n loop.AddAssignment(vBuf, cgen.malloc(vSize).Cast(vBuf.type))\n (loop.AddIf(vBuf == cgen.NULL).consequent.AddAssignment(None, self.pyerr_from_string(\"malloc failed\")).AddReturn(\n cgen.NULL))\n loop.AddAssignment(vWroteBytes, cgen.capn_write_mem(vCapn, vBuf, vSize, cgen.Zero))\n\n ifsuccess = loop.AddIf(vSize > vWroteBytes)\n ifsuccess.consequent.AddAssignment(pyBuffer, cgen.PyBytes_FromStringAndSize(\n vBuf.Cast(cgen.Char.Star()), vWroteBytes))\n (ifsuccess.consequent.AddIf(pyBuffer == cgen.NULL).consequent.AddAssignment(\n None, self.pyerr_from_string(\"Could not allocate a python bytes object.\")))\n ifsuccess.consequent.AddAssignment(None, cgen.free(vBuf))\n ifsuccess.consequent.AddReturn(pyBuffer)\n\n loop.AddAssignment(None, cgen.free(vBuf))\n loop.AddAssignment(vSize, vSize + vSize)\n\n def _generate_elapse(self):\n ms = cgen.UInt64.Var('ms')\n elapse = self._net.AddMethod(name=\"Elapse\", args=[ms])\n vGraph = elapse.SelfArg()\n\n vResult = self._generate_output_dictionary(elapse, vGraph)\n elapse.AddAssignment(vGraph.Arrow('cur_time'), vGraph.Arrow('cur_time') + ms)\n\n elapse.logf(\"Responding to %llu ms of events.\\n\", ms)\n\n whenHasEvents = elapse.AddIf(self._has_events(vGraph)).consequent\n\n self._generate_initialize_turn(whenHasEvents, vGraph, vResult)\n self._generate_events_loop(whenHasEvents, vGraph, vResult)\n self._generate_finalize_turn(whenHasEvents, vGraph)\n\n elapse.AddReturn(vResult)\n\n def _has_events(self, vGraph):\n return (cgen.BinOp(cgen.And, (vGraph.Arrow('events').Dot('count') > cgen.Zero),\n (vGraph.Arrow('events').Dot('data').Sub(cgen.Zero).Dot('when') <= vGraph.Arrow('cur_time'))))\n\n def _generate_events_loop(self, block, vGraph, vResult):\n loop = block.AddWhile(self._has_events(vGraph))\n for expr in self._top_exprs:\n loop.AddAssignment(self.vProcessedTransitions(vGraph, expr), cgen.kv_size(self.transitions_rvalue(vGraph, expr)))\n\n loop.AddAssignment(None, cgen.memset(vGraph.Arrow('turn').Dot('was_added'), cgen.Zero, self._n_exprs()))\n\n vEvent = loop.AddDeclaration(\n cgen.BasicType('struct event').Var('next_event'), 
cgen.event_queue_pop(vGraph.Arrow('events').Address()))\n loop.logf(\"Responding to event at time %llu.\\n\", vEvent.Dot('when'))\n loop.AddAssignment(None, vEvent.Dot('occur').Deref()(vGraph, vEvent.Dot('data')))\n\n queueLoop = loop.Newline().AddWhile(cgen.Zero != vGraph.Arrow('turn').Dot('remaining').Dot('count'))\n nextIndex = queueLoop.AddDeclaration(\n cgen.MachineInt.Var('next_index'), cgen.queue_pop(vGraph.Arrow('turn').Dot('remaining').Address()))\n reactFailed = queueLoop.AddIf(vGraph.Arrow('react_to_transitions').Sub(nextIndex)(vGraph)).consequent\n reactFailed.AddAssignment(None, cgen.Py_DECREF(vResult))\n reactFailed.AddAssignment(None, self._finalize_turn_function()(vGraph))\n reactFailed.AddReturn(cgen.NULL)\n\n serializeFailed = block.AddIf(self._serialize_output_transitions_function()(vGraph)).consequent\n serializeFailed.AddAssignment(None, cgen.Py_DECREF(vResult))\n serializeFailed.AddAssignment(vResult, cgen.NULL)\n\n def _generate_on_transitions(self):\n '''Generate the c function that implements the OnTransitions method of the Net object.'''\n vTransitionsDict = cgen.PyObject.Star().Var('input_transitions_dict')\n on_transitions = self._net.AddMethod(name='OnTransitions', args=[vTransitionsDict]) # We'll do our own arg parsing\n vGraph = on_transitions.SelfArg()\n\n vResult = self._generate_output_dictionary(on_transitions, vGraph)\n self._generate_initialize_turn(on_transitions, vGraph, vResult)\n self._generate_read_input_transitions(on_transitions, vGraph, vResult, vTransitionsDict)\n self._generate_queue_loop(on_transitions, vGraph, vResult)\n serializeFailed = on_transitions.AddIf(self._serialize_output_transitions_function()(vGraph)).consequent\n serializeFailed.AddAssignment(None, cgen.Py_DECREF(vResult))\n serializeFailed.AddAssignment(vResult, cgen.NULL)\n self._generate_finalize_turn(on_transitions, vGraph)\n on_transitions.AddReturn(vResult)\n\n def _generate_spy_method(self, expr, key):\n index = self.expr_index[expr]\n spy = self._net.AddMethod(name=f'Spy_{key}', args=[])\n vGraph = spy.SelfArg()\n\n ifHasState = spy.AddIf(vGraph.Arrow('n_missing_productions').Sub(index) == cgen.Zero)\n ifHasState.alternate.logf(\"Spy does not have a state. 
n_missing_productions = %d.\\n\",\n vGraph.Arrow('n_missing_productions').Sub(index))\n\n block = ifHasState.consequent\n ifHasState.alternate.AddReturn(cgen.Py_None)\n\n getBytes = self._write_output_state_function(expr)\n block.AddReturn(getBytes(vGraph))\n\n def _generate_output_dictionary(self, block, vGraph):\n vResult = block.Newline().AddDeclaration(cgen.PyObject.Star().Var('result'), cgen.PyDict_New())\n (block.AddIf(vResult == cgen.NULL).consequent.AddAssignment(\n None, self.pyerr_from_string(\"Failed to create output dictionary\")).AddReturn(cgen.NULL))\n block.Newline()\n return vResult\n\n def _generate_initialize_turn(self, block, vGraph, vResult):\n block.AddAssignment(vGraph.Arrow('turn').Dot('result'), vResult)\n\n vRemainingData = block.AddDeclaration(cgen.MachineInt.Array(self._n_exprs()).Var('data'))\n block.AddAssignment(None, self._initialize_turn_function()(vGraph, vRemainingData))\n\n def _initialize_turn_function(self):\n if self._initialize_turn is None:\n vGraph = self._graph_struct.Star().Var('graph')\n vRemainingData = cgen.MachineInt.Star().Var('data')\n self._initialize_turn = self.program.AddFunction(\n 'initialize_turn', cgen.Void, args=[vGraph, vRemainingData], predeclare=True)\n block = self._initialize_turn\n\n # Initialize the queue\n block.AddAssignment(None, cgen.kv_init(self.ptrsToFree(vGraph)))\n\n # Initialize processed_transitions\n block.AddAssignment(\n None,\n cgen.memset(\n vGraph.Arrow('turn').Dot('processed_transitions'), cgen.Zero,\n self._n_exprs() * cgen.MachineInt.Sizeof()))\n\n # initialize was_added\n block.AddAssignment(None, cgen.memset(vGraph.Arrow('turn').Dot('was_added'), cgen.Zero, self._n_exprs()))\n\n block.Newline().AddAssignment(vGraph.Arrow('turn').Dot('remaining').Dot('count'), cgen.Zero)\n block.AddAssignment(vGraph.Arrow('turn').Dot('remaining').Dot('data'), vRemainingData)\n\n block.Newline()\n\n return self._initialize_turn\n\n def _generate_read_input_transitions(self, block, vGraph, vResult, vTransitionsDict):\n vKey = block.AddDeclaration(cgen.PyObject.Star().Var('input_key'))\n vValue = block.AddDeclaration(cgen.PyObject.Star().Var('input_value'))\n vPos = block.AddDeclaration(cgen.Py_ssize_t.Var('loop_pos'), cgen.Zero)\n\n dictLoop = block.AddWhile(cgen.PyDict_Next(vTransitionsDict, vPos.Address(), vKey.Address(), vValue.Address()))\n\n condition = dictLoop\n for inputExpr in self._input_exprs:\n key = inputExpr.name\n input_index = self.expr_index[inputExpr]\n ifMatch = condition.AddIf(cgen.Zero == cgen.PyUnicode_CompareWithASCIIString(vKey, cgen.StrConstant(key)))\n (ifMatch.consequent.AddIf(\n vGraph.Arrow('n_missing_productions').Sub(input_index) != cgen.Zero).consequent.AddAssignment(\n None,\n self.pyerr(self.BadInputError,\n f'Transitions were given for a key \"{key}\" that has not been initialized.')).AddAssignment(\n None, cgen.Py_DECREF(vResult)).AddReturn(cgen.NULL))\n deserializeTransitions = cgen.Var(self._deserialize_transitions_function_name(input_index))\n ifMatch.consequent.AddAssignment(None, deserializeTransitions(vGraph, vValue))\n condition = ifMatch.alternate\n\n (condition.AddAssignment(\n None,\n self.pyerr(self.BadInputError, 'keys of the argument OnTransitions must correspond to inputs. 
Got \"%S\"',\n vKey)).AddAssignment(None, cgen.Py_DECREF(vResult)).AddReturn(cgen.NULL))\n\n def _generate_queue_loop(self, block, vGraph, vResult):\n queueLoop = block.Newline().AddWhile(cgen.Zero != vGraph.Arrow('turn').Dot('remaining').Dot('count'))\n nextIndex = queueLoop.AddDeclaration(\n cgen.MachineInt.Var('next_index'), cgen.queue_pop(vGraph.Arrow('turn').Dot('remaining').Address()))\n (queueLoop.AddIf(vGraph.Arrow('react_to_transitions').Sub(nextIndex)(vGraph)).consequent.AddAssignment(\n None, cgen.Py_DECREF(vResult)).AddAssignment(vResult, cgen.NULL).AddBreak())\n\n def vProcessedTransitions(self, vGraph, expr):\n return vGraph.Arrow('turn').Dot('processed_transitions').Sub(self.expr_index[expr])\n\n def _finalize_turn_function(self):\n if self._finalize_turn is None:\n vGraph = self._graph_struct.Star().Var('graph')\n self._finalize_turn = self.program.AddFunction(\"finalize_turn\", cgen.Void, args=[vGraph], predeclare=True)\n block = self._finalize_turn\n\n # free from ptrs_to_free\n with block.Newline().ForInt(cgen.kv_size(self.ptrsToFree(vGraph))) as (freeLoop, ptrsFreeIndex):\n freeLoop.AddAssignment(None, cgen.free(cgen.kv_A(self.ptrsToFree(vGraph), ptrsFreeIndex)))\n block.AddAssignment(None, cgen.kv_destroy(self.ptrsToFree(vGraph)))\n\n # free from vecs_to_free\n for expr in self._top_exprs:\n transitions = self.transitions_rvalue(vGraph, expr)\n block.AddAssignment(None, cgen.kv_destroy(transitions))\n block.AddAssignment(None, cgen.kv_init(transitions))\n\n block.Newline().AddAssignment(vGraph.Arrow('turn').Dot('result'), cgen.NULL)\n\n return self._finalize_turn\n\n def _generate_finalize_turn(self, block, vGraph):\n block.AddAssignment(None, self._finalize_turn_function()(vGraph))\n\n def _expr_can_react(self, expr):\n return expr.__class__ not in [expression.Input, recorded.RecordedUser]\n\n def _generate_react_to_transitions(self, expr):\n '''\n Generate a c function that implement ``expr`` reacting to transitions on its inputs.\n This function will read the transitions for the input exprs to ``expr`` (from the `transitions_rvalue` kvecs),\n and based on their values, write output transitions in `transitions_rvalue` for ``expr``.\n\n :param expr: Any expression in the input program. 
\n :type expr: `ConcreteExpression`\n '''\n if self._expr_can_react(expr):\n index = self.expr_index[expr]\n vGraph = self._graph_struct.Star().Var('graph')\n react = self.program.AddFunction(\n name=self._react_to_transitions_function_name(index),\n retType=cgen.UInt8, # Return 1 if there was an error\n args=[vGraph])\n\n # Update the state and write the transitions.\n expr.generate_react_to_transitions(\n self,\n react.Newline(),\n vGraph,\n )\n\n react.AddAssignment(None, self._after_transitions_function(expr)(vGraph))\n\n react.AddReturn(cgen.false)\n\n def _after_transitions_function(self, expr):\n if expr not in self._cached_after_transitions_function:\n index = self.expr_index[expr]\n vGraph = self._graph_struct.Star().Var('graph')\n block = self.program.AddFunction(f'after_transitions_{index}', cgen.Void, [vGraph], predeclare=True)\n self._cached_after_transitions_function[expr] = block\n\n if expr in self._output_exprs:\n isTurnOutput = vGraph.Arrow('turn').Dot('is_turn_output').Sub(index)\n whenNeedsToSetOutput = block.AddIf(isTurnOutput == cgen.Zero).consequent\n whenNeedsToSetOutput.AddAssignment(isTurnOutput, cgen.One)\n whenNeedsToSetOutput.AddAssignment(\n None,\n cgen.kv_push(cgen.Void.Star(),\n vGraph.Arrow('turn').Dot('turn_outputs'),\n self._write_output_transitions_function(expr).Address().Cast(cgen.Void.Star())))\n\n if expr.spy_keys:\n whenMaintainsState = block\n else:\n whenMaintainsState = block.AddIf(self._shall_maintain_state_function()(vGraph, cgen.Constant(index))).consequent\n transitions = self.transitions_rvalue(vGraph, expr)\n with whenMaintainsState.ForInt(cgen.kv_size(transitions)) as (loop, vIndex):\n ct = self.get_concrete_type(expr.type)\n if expr.__class__ == expression.Input and self.get_concrete_type(\n expr.type).__class__ == concrete_types.ConcreteProductType:\n generate_apply = ct.generate_product_apply_transition_forced\n else:\n generate_apply = ct.generate_apply_transition\n\n generate_apply(loop, self.state_lvalue(vGraph, expr), self.state_rvalue(vGraph, expr),\n cgen.kv_A(transitions, vIndex))\n\n for next_expr in self.expr_to_outputs[expr]:\n nextIndex = cgen.Constant(self.expr_index[next_expr])\n whenShouldAdd = block.Newline().AddIf(\n cgen.BinOp(cgen.And, (vGraph.Arrow('n_missing_productions').Sub(nextIndex) == cgen.Zero),\n (vGraph.Arrow('turn').Dot('was_added').Sub(nextIndex).Negate()))).consequent\n\n whenShouldAdd.AddAssignment(None, cgen.queue_push(vGraph.Arrow('turn').Dot('remaining').Address(), nextIndex))\n whenShouldAdd.AddAssignment(vGraph.Arrow('turn').Dot('was_added').Sub(nextIndex), cgen.One)\n\n block.AddAssignment(\n None,\n cgen.kv_push(cgen.Void.Star(),\n vGraph.Arrow('turn').Dot('vecs_to_free'),\n self.transitions_rvalue(vGraph, expr).Address().Cast(cgen.Void.Star())))\n\n return self._cached_after_transitions_function[expr]\n\n def _serialize_output_transitions_function(self):\n if self._serialize_output_transitions is None:\n vGraph = self._graph_struct.Star().Var('graph')\n block = self.program.AddFunction('serialize_output_transitions', cgen.UInt8, [vGraph])\n self._serialize_output_transitions = block\n\n vTurnOutputs = vGraph.Arrow('turn').Dot('turn_outputs')\n block.logf('Serializing %zu outputs.\\n', cgen.kv_size(vTurnOutputs))\n with block.ForInt(cgen.kv_size(vTurnOutputs)) as (loop, index):\n failed = loop.AddIf(\n cgen.kv_A(vTurnOutputs, index).Cast(cgen.BasicType('uint8_t (*)(struct Net *)')).Deref()\n (vGraph) != cgen.Zero).consequent\n failed.AddReturn(cgen.One)\n\n block.AddAssignment(\n None, 
cgen.memset(\n vGraph.Arrow('turn').Dot('is_turn_output'), cgen.Zero,\n self._n_exprs() * cgen.UInt8.Sizeof()))\n\n block.AddAssignment(None, cgen.kv_destroy(vTurnOutputs))\n block.AddAssignment(None, cgen.kv_init(vTurnOutputs))\n block.AddReturn(cgen.Zero)\n\n return self._serialize_output_transitions\n\n def _generate_deserialize_transitions(self, inputExpr):\n '''\n Generate a c function to convert from a capnproto representation of a transition on an input expression\n to the internal c representation inside the graph struct.\n '''\n index = self.expr_index[inputExpr]\n vGraph = self._graph_struct.Star().Var('graph')\n vPythonList = cgen.PyObject.Star().Var('user_input_list_of_bytes')\n deserialize_transitions = self.program.AddFunction(\n name=self._deserialize_transitions_function_name(index), retType=cgen.Void, args=[vGraph, vPythonList])\n\n vKVec = vGraph.Arrow('turn').Dot(self._transition_key_in_turn(index))\n\n vNumber = deserialize_transitions.AddDeclaration(\n cgen.Py_ssize_t.Var('n_transitions'), cgen.PyList_Size(vPythonList))\n vI = deserialize_transitions.AddDeclaration(cgen.Py_ssize_t.Var('i'), cgen.Zero)\n\n listLoop = deserialize_transitions.Newline().AddWhile(vI < vNumber)\n\n vPythonBytes = listLoop.AddDeclaration(cgen.PyObject.Star().Var('python_bytes'), cgen.PyList_GetItem(\n vPythonList, vI))\n listLoop.AddIf(cgen.NULL == vPythonBytes).consequent.AddReturnVoid()\n\n listLoop.Newline()\n\n vBuf = listLoop.AddDeclaration(cgen.Char.Star().Var('buf'))\n vBuflen = listLoop.AddDeclaration(cgen.Py_ssize_t.Var('buflen'))\n vCapn = listLoop.AddDeclaration(cgen.Capn.Var('capn'))\n (listLoop.AddIf(cgen.MinusOne == cgen.PyBytes_AsStringAndSize(vPythonBytes, vBuf.Address(), vBuflen.Address())).\n consequent.AddReturnVoid())\n\n (listLoop.AddIf(cgen.Zero != cgen.capn_init_mem(vCapn.Address(), vBuf.Cast(cgen.UInt8.Star()), vBuflen, cgen.Zero)).\n consequent.AddAssignment(\n None, self.pyerr(self.BadInputError,\n \"Failed to initialize struct capn when parsing a transitions message.\")).AddReturnVoid())\n\n listLoop.Newline()\n\n concreteInputType = self.get_concrete_type(inputExpr.type)\n ptr = listLoop.AddDeclaration(concreteInputType.capnp_transitions_type.c_ptr_type.Var(f'ptr'))\n listLoop.AddAssignment(ptr.Dot('p'), cgen.capn_getp(cgen.capn_root(vCapn.Address()), cgen.Zero, cgen.One))\n\n read_ctx = concrete_types.CapnpReadContext(\n compiler=self, block=listLoop, ptrsToFree=vGraph.Arrow('turn').Dot('ptrs_to_free'), ptr=ptr)\n for cblock, cexp in concreteInputType.generate_and_yield_capnp_to_c_transition(read_ctx):\n cblock.AddAssignment(None, cgen.kv_push(concreteInputType.c_transitions_type, vKVec, cexp))\n\n listLoop.AddAssignment(None, cgen.capn_free(vCapn.Address()))\n\n listLoop.Newline().AddAssignment(vI, vI + cgen.One)\n\n deserialize_transitions.AddAssignment(None, self._after_transitions_function(inputExpr)(vGraph))\n\n\nclass _Topsorter(object):\n '''\n Helper class to populate ReactiveCompiler._top_exprs via topological traversal of `ConcreteExpression`\n instances referenced by a list of root expressions.\n '''\n\n def __init__(self, root_exprs):\n '''\n :param list root_exprs: A list of `ConcreteExpression` instances in any order.\n '''\n self.root_exprs = root_exprs\n self.visited = set()\n self.active = set()\n self.result = []\n\n self.expr_to_inputs = {}\n self.expr_to_outputs = defaultdict(list)\n\n def topsort(self):\n '''\n Populate all the parameters of self by traversing expressions in ``self.root_exprs`` in topological order.\n '''\n for expr in 
self.root_exprs:\n self._visit(expr)\n\n for expr, inputExprs in self.expr_to_inputs.items():\n for inputExpr in inputExprs:\n self.expr_to_outputs[inputExpr].append(expr)\n\n return self.result\n\n def _visit(self, expr):\n '''Visit a single expression. It may have already been visited.'''\n if expr in self.visited:\n return\n elif expr in self.active:\n raise errors.InternalError(\"Cycle detected in normalized expression.\")\n else:\n self.active.add(expr)\n inputs = []\n self.expr_to_inputs[expr] = inputs\n try:\n for kid in self._yield_kids(expr):\n inputs.append(kid)\n self._visit(kid)\n finally:\n self.active.remove(expr)\n self.result.append(expr)\n self.visited.add(expr)\n\n def _yield_kids(self, expr):\n '''\n yield all \"kids\" of ``expr``\n A \"kid\" is any other expression ``k`` such that the value of ``expr`` depends directly on ``k``\n '''\n if expr.__class__ == expression.Applied:\n yield expr.arg\n elif expr.__class__ == expression.Product:\n for key, kid in expr.items:\n yield kid\n elif expr.__class__ == expression.Input:\n return\n elif expr.__class__ == expression.Project:\n yield expr.base\n elif expr.__class__ in [expression.Constant, recorded.RecordedUser]:\n pass\n else:\n raise errors.InternalError(f\"Unrecognized type of normalized expression {expr.__class__}.\")\n","repo_name":"koreiklein/dist_zero","sub_path":"dist_zero/reactive/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":56282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"73902119435","text":"\nfrom scipy.stats import zscore\nfrom Proyecto.DataSet.DataFrame import DataFrame\nimport pandas as pd\nimport copy as c\n\n\n\nclass CategoricalValues():\n\n # ----------------------One hot-------------------------------------\n # ------------------------------------------------------------------\n # input: list with the classes in the column\n # function: create a dictionary with zero values\n # output: dictionary\n # columns have the form [['value'],...,['value']]\n def get_dictionary_tasg(self, column_classes):\n dictionary = {}\n for item in column_classes:\n dictionary[item[0]] = 0\n return dictionary\n\n # ----------------------One hot-------------------------------------\n # ------------------------------------------------------------------\n # input: list with the classes in the column\n # function: create a dictionary with index values\n # output: dictionary\n # columns have the form [['value'],...,['value']]\n def get_dictionary_tasg_number(self, column_classes):\n dictionary = {}\n for i in range(len(column_classes)):\n dictionary[column_classes[i][0]] =float(i)\n\n return dictionary\n # ------------------------------------------------------------------\n # input: - data_list: a list with the column values\n # - a dictionary with the classes keys\n # function: build a matrix with the dumies columns\n # output: a matrix\n def biuld_dumies(self, data_list, dictionary_classes):\n dumies = []\n for item in data_list:\n classes_copy = c.deepcopy(dictionary_classes)\n classes_copy[item[0]] = 1\n dumies += [classes_copy]\n return dumies\n\n def biuld_dumies_index(self, data_list, dictionary_classes):\n dumies = []\n for item in data_list:\n classes_copy = c.deepcopy(dictionary_classes)\n classes_copy[item[0]] = 1\n dumies += [classes_copy]\n return dumies\n # ------------------------------------------------------------------\n # input:\n # function:\n # output:\n def dumies_tags(self, data_set, column_tag):\n classes = 
data_set.unique_values_in_column(column_tag)\n classes = classes.tolist()\n return self.get_dictionary_tasg(classes)\n\n # ------------------------------------------------------------------\n # input:\n # function:\n # output:\n def dumies_tags_index(self, data_set, column_tag):\n classes = data_set.unique_values_in_column(column_tag)\n classes = classes.tolist()\n return self.get_dictionary_tasg_number(classes)\n\n # ------------------------------------------------------------------\n # input:\n # function:\n # output:\n def one_hot(self, data_set, column_tag ):\n df = DataFrame()\n df.data_set = data_set\n data_list = df.get_all_values().tolist()\n dumies = self.dumies_tags(df, column_tag)\n dumies = self.biuld_dumies(data_list, dumies)\n return pd.DataFrame(dumies)\n\n # ------------------------------------------------------------------\n # input:\n # function:\n # output:\n def one_hot2(self, data_set, column_tag ):\n df = DataFrame()\n df.data_set = data_set\n data_list = df.get_all_values().tolist()\n dumies = self.dumies_tags_index(df,column_tag)\n\n for i in range(len(data_list)):\n data_list[i] = dumies[data_list[i][0]]\n\n dic = {str(column_tag):data_list}\n data = pd.DataFrame(dic)\n return data\n\n","repo_name":"anthonylle/IA-Proyecto1","sub_path":"Proyecto/Normalizer/CategoricalValues.py","file_name":"CategoricalValues.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"73493433994","text":"import numpy as np\nimport scipy as sp\nimport scipy.linalg\n\nimport matplotlib.pyplot as plt\n\nfrom dynamicmodels import DynamicModel\n\n# optional external module imports\ntry :\n from tqdm import trange\n pretty_range = trange\nexcept ModuleNotFoundError :\n print(\"TQDM isn't installed. Progress bars will be disabled.\")\n pretty_range = range\n\n\n# helper functions for constructing useful finite difference matrices\nD1 = lambda N : 0.5*sp.linalg.toeplitz([0, -1] + [0]*(N - 2), [0, 1] + [0]*(N - 2)) # first diff.\nD1_c = lambda N : 0.5*sp.linalg.circulant([0, -1] + [0]*(N - 3) + [1]) # circ. first diff.\nD2 = lambda N : sp.linalg.toeplitz([2, -1] + [0]*(N - 2), [2, -1] + [0]*(N - 2)) # second diff.\nD2_c = lambda N : sp.linalg.circulant([2, -1] + [0]*(N - 3) + [-1]) # circ. 
second diff.\n\nclass Puppet2d(DynamicModel) :\n N_state = None\n N_control = None\n\n # 14 postural free parameters -- peristaltic preset\n sign_a = 1 # sign of axial Hookean term\n n_a = 1 # axial damping coefficient \n a_a = 1.1 # axial skew cooperative coupling\n b_a = 0 # axial sym cooperative coupling\n A_a = -1 # axial alpha nonlinearity\n B_a = 10 # axial beta nonlinearity\n g_a = 0 # axial noise strength\n \n sign_t = -0.1 # sign of transverse Hookean term\n n_t = 0.1 # transverse damping coefficient\n a_t = 0 # transverse skew coupling strength\n b_t = 0 # transverse symmetric coupling strength\n d_t = 0 # axial--transverse quadratic coupling strength\n B_t = 0 # transverse beta nonlinearity\n g_t = 0.01 # transverse noise strength\n \n # 2 environmental free parameters -- peristaltic preset\n f_t = 0.0 \n f_a = 0.02\n\n channel_radius = 10\n channel_width = None\n constraint_force = 1\n\n def __init__(self, N=10, channel_radius=10, channel_width=0.2) :\n self.N = N # number of points/vertices in discrete model\n self.N_vertices = N\n self.N_edges = N - 1 # number of edges in discrete model\n self.N_internal_vertices = N - 2 # number of vertices in interior of domain (joints)\n self.N_joints = self.N_internal_vertices\n\n self.N_configuration = 2*N # 2 coordinates for each vertex\n self.N_state = 4*N # 2 coordinates and 2 momenta for each vertex\n self.N_control = self.N_edges + self.N_internal_vertices # no. tensions + no. torques\n\n self.space_axis = np.linspace(0, 1, self.N_vertices)\n self.space_axis_vertices = self.space_axis\n self.space_axis_edges = np.linspace(0, 1, self.N_edges)\n self.space_axis_internal_vertices = np.linspace(0, 1, self.N_internal_vertices)\n\n self.channel_width = channel_width\n self.channel_radius = channel_radius\n self.inner_radius = channel_radius\n self.center_radius = channel_radius + self.channel_width/2\n self.outer_radius = channel_radius + self.channel_width\n self.channel_circumference = 2*np.pi*self.center_radius\n\n # store some useful matrices\n self.R = np.array([[0, 1], [-1, 0]]) # 90 degree clockwise rotation matrix\n self.D1_a = D1_c(self.N_edges) # first difference matrix on edges\n self.D2_a = D2_c(self.N_edges) # second difference matrix on edges\n self.D1_t = D1_c(self.N_joints) # first difference matrix on internal vertices\n self.D2_t = D2_c(self.N_joints) # second difference matrix on internal vertices\n \n self.M_av = sp.linalg.toeplitz([1, 1] + [0]*(self.N_edges - 2), [1] + [0]*(self.N_edges - 1))[1:]\n\n def evolution_rule(self, time, state, inputs=None) :\n coordinates = state[:self.N_configuration] # flattened coordinate vector\n momenta = state[self.N_configuration:] # flattened momentum vector\n\n Dcoordinates = momenta # coordinate dynamics\n Dmomenta = self.forces(time, state, inputs) # momentum dynamics\n\n Dstate = np.concatenate([Dcoordinates, Dmomenta])\n return Dstate\n\n def analyse_trajectory(self) :\n print(\"analysing trajectory\")\n\n kinematic_names = [\"r\", \"p\", \"e\", \"l\", \"t\", \"n\", \"epsilon\", \"alpha\",\n \"theta\", \"kappa\", \"De\", \"Depsilon\", \"Dalpha\", \"Dtheta\", \"Dkappa\"]\n\n # compute kinematics for every frame of the final trajectory; store as a list\n # of arrays\n kinematic_quantities = []\n for i in pretty_range(len(self.trajectory)) :\n state = self.trajectory[i]\n kinematic_quantities.append(self.kinematics(state))\n\n # unpack kinematics into member attributes\n for i in pretty_range(len(kinematic_names)) :\n name = kinematic_names[i]\n setattr(self, name, 
np.array([arr[i] for arr in kinematic_quantities]))\n\n def kinematics(self, x) :\n # x is state vector; decompose into position and momentum components\n r = x[:2*self.N].reshape(self.N, 2)\n p = x[2*self.N:].reshape(self.N, 2)\n \n # configuration space kinematics\n e = np.diff(r, axis=0) # compute edge vectors\n l = np.linalg.norm(e, axis=1) # edge lengths\n t = (e.T/l).T # unit tangent vectors\n n = np.dot(self.R, t.T).T # unit normal vectors\n \n epsilon = l - 1 # strain (resting edge length = 1)\n \n alpha = np.arctan2(t[:, 1], t[:, 0]) # linkage angles (rad)\n theta = np.diff(alpha) # bending angles (rad)\n kappa = 2*np.tan(theta/2) # use DDG curvature\n \n # tangent space kinematics\n De = np.diff(p, axis=0) # edge velocity\n Depsilon = [np.dot(t_i, De_i) for t_i, De_i in zip(t, De)] # stretch rate\n Depsilon = np.array(Depsilon) # typecast\n Dalpha = [np.dot(n_i, -De_i)/l_i for n_i, De_i, l_i in zip(n, De, l)] # linkage angular velocity\n Dalpha = np.array(Dalpha) # typecast\n Dtheta = np.diff(Dalpha) # bending velocity\n Dkappa = Dtheta/(np.cos(theta/2)**2) # DDG curvature rate\n \n return r, p, e, l, t, n, epsilon, alpha, theta, kappa, De, Depsilon, Dalpha, Dtheta, Dkappa\n\n def forces(self, time, x, inputs=None) :\n \"\"\"\n Compute force as a function of phase space coordinates x.\n \"\"\"\n\n # decompose inputs into tensions and torques\n if inputs is None : inputs = np.zeros(self.N_control)\n input_tensions = inputs[:self.N_edges]\n input_torques = inputs[self.N_edges:]\n \n # compute kinematics from given phase space coordinate vector\n kinematic_quantities = self.kinematics(x)\n r, p, e, l, t, n, epsilon, alpha, theta, kappa, De, Depsilon, Dalpha, Dtheta, Dkappa = kinematic_quantities\n\n # compute generalised forces using useful kinematic variables\n tau_epsilon_local = self.sign_a*epsilon - self.A_a*epsilon**2 - self.B_a*epsilon**3 - self.n_a*Depsilon\n tau_epsilon_nonlocal = - self.a_a*np.dot(self.D1_a, epsilon) - self.b_a*np.dot(self.D2_a, epsilon)\n \n tau_kappa_local = (self.d_t*np.dot(self.M_av, epsilon) + self.sign_t)*kappa\\\n - self.B_t*kappa**3 - self.n_t*Dkappa\n tau_kappa_nonlocal = self.a_t*np.dot(self.D1_t, kappa) - self.b_t*np.dot(self.D2_t, kappa)\n \n tau_epsilon = tau_epsilon_local + tau_epsilon_nonlocal # total axial tension\n tau_kappa = tau_kappa_local + tau_kappa_nonlocal # total transverse torque\n \n # find force exerted on each vertex\n F_a = np.zeros((self.N, 2)) # will hold axial forces\n for i in range(self.N_edges) : # loop through edges\n F_a[i] += -tau_epsilon[i]*t[i] # edge force exerts force on previous vertex\n F_a[i + 1] += +tau_epsilon[i]*t[i] # edge force exerts force on next vertex\n \n F_t = np.zeros((self.N, 2)) # will hold transverse forces\n for i in range(self.N - 2) : # loop through joints\n # first couple\n F_t[i + 0] += -tau_kappa[i]*n[i]/l[i] # torque produces force on previous vertex\n F_t[i + 1] += +tau_kappa[i]*n[i]/l[i] # torque produces force on current vertex\n # second couple\n F_t[i + 1] += +tau_kappa[i]*n[i + 1]/l[i + 1] # torque produces force on current vertex\n F_t[i + 2] += -tau_kappa[i]*n[i + 1]/l[i + 1] # torque produces force on next vertex\n \n F_s = np.zeros((self.N, 2)) # will hold substrate interaction forces\n \n# TODO remove this -- it's only good for nematode simulations!\n# # substrate interaction at head/tail\n# F_s[0] = -self.f_a*t[0]*np.dot(t[0], p[0]) - self.f_t*n[0]*np.dot(n[0], p[0])\n# F_s[-1] = -self.f_a*t[-1]*np.dot(t[-1], p[-1]) - self.f_t*n[-1]*np.dot(n[-1], p[-1])\n# for i in range(N_t) : 
# loop through internal vertices\n# tt = (t[i] + t[i + 1])/np.linalg.norm(t[i] + t[i + 1]) # find tangent vector\n# nn = np.dot(R, tt) # find normal vector\n# F_s[i + 1] = -self.f_a*tt*np.dot(tt, p[i + 1]) - self.f_t*nn*np.dot(nn, p[i + 1])\n \n # isotropic Coulomb substrate interaction\n p_unit = (p.T/np.linalg.norm(p, axis=1)).T\n F_s = -self.f_a*p_unit\n \n # compute confinement forces due to track boundaries\n ry = r[:, 1] # y displacement of each point\n F_cy = - self.constraint_force*(ry > self.channel_width/2)\\\n + self.constraint_force*(ry < -self.channel_width/2)\n F_cx = np.zeros(F_cy.shape)\n F_c = np.array([F_cx, F_cy]).T\n\n F_c = self.constraint_forces(kinematic_quantities)\n\n F_total = F_a + F_t + F_s + F_c\n \n return F_total.flatten()\n\n def constraint_forces(self, kinematic_quantities) :\n # kinematics\n r, p, e, l, t, n, epsilon, alpha, theta, kappa, De, Depsilon, Dalpha, Dtheta, Dkappa = kinematic_quantities\n segment_radii = np.linalg.norm(r, axis=1)\n unit_vectors = (r.T/segment_radii).T\n\n # forces\n inner_force = ((segment_radii < self.inner_radius)*self.constraint_force*unit_vectors.T).T\n outer_force = -((segment_radii > self.outer_radius)*self.constraint_force*unit_vectors.T).T\n total_constraint_force = inner_force + outer_force\n return total_constraint_force\n\n\n def generate_initial_state(self, coordinate_std=0, momentum_std=0) :\n # our default configuration has all segments equally spaced, wrapped onto the\n # circular track\n axial_displacements = np.arange(self.N_vertices)\n track_angles = 2*np.pi*axial_displacements/self.channel_circumference\n\n template_rx = self.center_radius*np.cos(track_angles)\n template_ry = self.center_radius*np.sin(track_angles)\n template_r = np.array([template_rx, template_ry]).T\n\n # add noise to the template configuration\n initial_r = template_r + coordinate_std*np.random.randn(*template_r.shape)\n\n # generate noisy initial momentum, near rest\n template_px = np.zeros(self.N_vertices)\n template_py = np.zeros(self.N_vertices)\n template_p = np.array([template_px, template_py]).T\n initial_p = template_p + momentum_std*np.random.randn(*template_p.shape)\n\n initial_x = np.concatenate([initial_r.flatten(), initial_p.flatten()])\n\n return initial_x\n\n","repo_name":"janeloveless/QTEES-modelling","sub_path":"puppet2d.py","file_name":"puppet2d.py","file_ext":"py","file_size_in_byte":12149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"22496473233","text":"from django.contrib import admin\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom django.views import generic\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView,\n)\n\nrouter = routers.DefaultRouter()\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(router.urls)),\n path('', generic.RedirectView.as_view(\n url='/api/', permanent=False)),\n path('api/auth/', include(\n 'rest_framework.urls', namespace='rest_framework')),\n path('api/auth/token/obtain/', TokenObtainPairView.as_view()),\n path('api/auth/token/refresh/', TokenRefreshView.as_view()),\n path('api/auth/token/verify/', TokenVerifyView.as_view()),\n path('api/weddingplanner/', 
include('weddingplanner.urls')),\n]\n","repo_name":"emilyunderhill/weddingplanner","sub_path":"backend/backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"22137815535","text":"# 01 List\n\n# food = ['Dahi Bhallay', 'Biryani', 'Daal', 'Samosay', 'Shami', 'Palak Paneer']\n\n\n# print(food[0])\n# print(food[-1])\n\n# food[0] = 'Chiken Puloe'\n\n# print(food[0])\n\n\n# 02 Tuple\n# coordinates = (4.21, 9.29)\n# print(coordinates)\n# print(coordinates[0])\n\n\n# 03 Set\n\n# fruit_set = {'Dahi bhallay', 'Biryani', \"Dall\"} #we can not \n# print(fruit_set)\n\n# fruit_set.add('Samosa')\n# print(fruit_set)\n\n\n# 04 Dictionary\n\ncar = {\n 'brand': 'Ford',\n 'model': 'Mustang',\n 'year': 1964\n}\nprint(car)\ncar['year'] = 1998\nprint(car['year'])","repo_name":"xaqibshah/DataScienceLearning","sub_path":"Python_Ka_Chilla/python_practice/data_structure_index.py","file_name":"data_structure_index.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"14443334317","text":"import cv2 as cv\nimport numpy as np\n\n# img=cv.imread('./images/cat1.png')\n# cv.imshow('ori',img)\n\nblank = np.zeros((500, 500, 3), dtype='uint8')\n\n# rectangle\nrec=cv.rectangle(blank.copy(),(30,30),(370,370),255,-1)\ncv.imshow('rec',rec)\ncv.waitKey(0)\n\n# circle\ncir=cv.circle(blank.copy(),(200,200),200,255,-1) #position,radius,color,fill\ncv.imshow('cir',cir)\ncv.waitKey(0)\n\n# bitwise\nBitwise_and=cv.bitwise_and(rec,cir)\ncv.imshow('and',Bitwise_and)\ncv.waitKey(0)\n\nBitwise_or=cv.bitwise_or(rec,cir)\ncv.imshow('or',Bitwise_or)\ncv.waitKey(0)\n\nimg=cv.imread('./images/cat1.png')\n# create mask\nmask=np.zeros(img.shape[:2],dtype='uint8')\n\ncirMask=cv.circle(blank.copy(),(200,200),200,255,-1)\ncv.imshow('cirMask',cirMask)\ncv.waitKey(0)\n\n# cropped\nmaskedImg = cv.
bitwise_and(img,img,mask=cirMask)\ncv.imshow('masked',maskedImg)\n\n\n","repo_name":"Chathura99/Colab-Notebooks","sub_path":"openCV/advanced/bitWise.py","file_name":"bitWise.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"1648002231","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nloss = np.load(\"6layer_loss.npy\")\nloss = loss[100:]\nx = np.arange(len(loss))\n\nplt.plot(x, loss, label=\"6 layer conv\")\nplt.ylabel(\"Loss value\", fontsize=16)\n#plt.ylabel(\"Mean Absolute Error\", fontsize=16)\nplt.xlabel(\"Number of Images\", fontsize=16)\n\nplt.legend(loc='upper right')\nplt.tight_layout()\nplt.show()","repo_name":"yukeyi/MCDS-Capstone","sub_path":"code4step3/plot/plot_reward.py","file_name":"plot_reward.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"11386652267","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 28 12:38:22 2022\r\n\r\n@author: punja\r\n\"\"\"\r\n\r\n\r\n#MTCNN on MAFA\r\n\r\n#IMPORT NECESSARY LIBRARIES\r\nimport os \r\nfrom os import listdir\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom mtcnn.mtcnn import MTCNN\r\nimport numpy as np\r\nc1 = cv2.getTickCount() #START TIMER\r\nfolder= 'C:\\\\Users\\\\punja\\\\Documents\\\\SEM 8\\\\MAFA\\\\test-images\\\\t\\\\multi face' #FOLDER LOCATION WHERE IMAGES ARE STORED\r\nindexN=0\r\n\r\ndef face_extractor(img):\r\n detector = MTCNN() #LOAD MTCNN MODEL\r\n faces = detector.detect_faces(img) #GIVE IMAGE AS AN INPUT TO MODEL\r\n print('faces:',faces)\r\n \r\n if not faces: #NO FACES DETECTED (detect_faces RETURNS AN EMPTY LIST)\r\n return None\r\n\r\n for result in faces:\r\n x, y, w, h = result['box']\r\n x1, y1 = x + w, y + h\r\n cv2.rectangle(img, (x,y), (x1,y1), (0,0,0), 8) #DRAW RECTANGULAR BOUNDING BOX\r\n \r\n return img\n#cv2.imwrite('image0-pred.jpg', image)\r\n # plt.imshow(img) #UNREACHABLE AFTER return\r\n\r\nfor images in os.listdir(folder):\r\n count=0\r\n if (images.endswith(\".png\") or images.endswith(\".jpg\") or images.endswith(\".jpeg\")):\r\n print(images)\r\n #frame=cv2.imread(folder+images)\r\n \r\n f = os.path.join(folder, images)\r\n x=os.path.splitext(os.path.basename(f))[0]\r\n \r\n frame=cv2.imread(f)\r\n \r\n \r\n # print(frame)\r\n file_name_path1 = 'C:\\\\Users\\\\punja\\\\Documents\\\\SEM 8\\\\MAFA\\\\test-images\\\\t\\\\multi_face_out\\\\MT_'+str(indexN)+images+'.jpg' #path to store image\r\n image_90_clk = face_extractor(frame)\r\n \r\n \r\n #cv2.imwrite(file_name_path1,image_90_clk)\r\n \r\n indexN=indexN+1\r\n count=count+1 \r\n \r\nc2 = cv2.getTickCount() #STOP TIMER\r\ntime_taken = (c2 - c1)/ cv2.getTickFrequency() \r\nprint(f'The time taken for execution is {time_taken}') ","repo_name":"spthakkar/FAS_DDU_SSIP","sub_path":"MTCNN_2.py","file_name":"MTCNN_2.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"1812261699","text":"from typing import Callable, Any\nimport inspect\n\ncalls = 1\n\n\ndef run(func: Callable[[str], Any]) -> None:\n file = inspect.getmodulename(inspect.getfile(func))\n with open(f\"{file}.txt\", \"r\") as f:\n data = f.read()\n res = func(data)\n global calls\n print(f\"Day {file[3:]} - Problem {calls} - Result: {res}\")\n calls += 
1\n","repo_name":"vittoriom94/advent-of-code-22","sub_path":"advent.py","file_name":"advent.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"14022680107","text":"import itertools\nfrom django.contrib.admin.views.main import (\n ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,\n)\nfrom django.utils.safestring import mark_safe\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\nfrom django.template import Library\nfrom django.template.loader import get_template\nimport urllib.parse\n\nregister = Library()\n\nDOT = '.'\n\n\n@register.simple_tag\ndef adminlte_paginator_number(cl, i):\n \"\"\"\n Generate an individual page index link in a paginated list.\n \"\"\"\n if i == DOT or i == '…':\n #
  • {}
  • '\n return format_html(\n '
  • ')\n elif i == cl.page_num:\n return format_html(\n '
  • {}
  • ',\n i + 1)\n else:\n return format_html(\n '
  • {}
',\n cl.get_query_string({PAGE_VAR: i}),\n mark_safe(\n ' class=\"end\"' if i == cl.paginator.num_pages - 1 else ''),\n i + 1,\n )\n\n\ndef get_filter_id(spec):\n try:\n return getattr(spec, 'field_path')\n except AttributeError:\n try:\n return getattr(spec, 'parameter_name')\n except AttributeError:\n pass\n return spec.title\n\n\n@register.simple_tag\ndef admin_extra_filters(cl):\n \"\"\" Return the dict of used filters which is not included\n in list_filters form \"\"\"\n used_parameters = list(itertools.chain(*(s.used_parameters.keys()\n for s in cl.filter_specs)))\n return dict(\n (k, v) for k, v in cl.params.items() if k not in used_parameters)\n\n\n@register.simple_tag\ndef adminlte_admin_list_filter(cl, spec):\n tpl = get_template(spec.template)\n choices = list(spec.choices(cl))\n field_key = get_filter_id(spec)\n matched_key = field_key\n for choice in choices:\n query_string = choice['query_string'][1:]\n query_parts = urllib.parse.parse_qs(query_string)\n\n value = ''\n matches = {}\n for key in query_parts.keys():\n if key == field_key:\n value = query_parts[key][0]\n matched_key = key\n elif key.startswith(\n field_key + '__') or '__' + field_key + '__' in key:\n value = query_parts[key][0]\n matched_key = key\n\n if value:\n matches[matched_key] = value\n\n # Iterate matches, use first as actual values, additional for hidden\n i = 0\n for key, value in matches.items():\n if i == 0:\n choice['name'] = key\n choice['value'] = value\n # else:\n # choice['additional'] = '%s=%s' % (key, value)\n i += 1\n\n return tpl.render({\n 'field_name': field_key,\n 'title': spec.title,\n 'choices': choices,\n 'spec': spec,\n })\n","repo_name":"wuyue92tree/django-adminlte-ui","sub_path":"adminlteui/templatetags/adminlte_list.py","file_name":"adminlte_list.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":267,"dataset":"github-code","pt":"28"}
+{"seq_id":"39638592461","text":"\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def swapNodes(self, head: ListNode, k: int) -> ListNode:\n \n \n # kth from first, kth from second ---- swap\n array = []\n \n h1 = head\n while h1:\n array.append(h1.val)\n h1 = h1.next\n \n # print(\"array: \", array)\n \n first_value = array[k-1]\n last_value = array[-k]\n \n array[k-1] = last_value\n array[-k] = first_value\n \n # make linked list from it\n new_head = ListNode(array[0])\n h2 = new_head\n \n for i in range(1, len(array)):\n new_node = ListNode(array[i])\n h2.next = new_node\n h2 = h2.next\n \n return new_head\n \n \n","repo_name":"dear-s/LeetCode-Solutions","sub_path":"1721. Swapping Nodes in a Linked List.py","file_name":"1721. 
Swapping Nodes in a Linked List.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"32595341257","text":"#Programa de paso de por valor de refencia\n#Por copia\nx = 2\ndef modificarVariable(y):\n y += 5\n return y\nz = modificarVariable(x)\nprint(z)\nprint(x)\n\n#Por direccion\n\nv = [2,6,4]\ndef modificarVector(w):\n w.append(0)\n return w\nt = modificarVector(v)\nprint(t)\nprint(v)","repo_name":"KarenSSanchezP/Python_FrontEnd2_BID_2023","sub_path":"Sesion7/funciones2.py","file_name":"funciones2.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"14721060509","text":"#! /usr/bin/env python3\n\n##############################################################################################\n# BCAST_IDS: A NETWORK INTRUSION DETECTION SYSTEM\n# PREDICT WITH ISOLATION FOREST ALGORITHM\n#\n# Dpto de Redes\n# Gestion Tributaria Territorial\n# 2020\n##############################################################################################\n\nimport argparse\nimport sys\nimport os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.model_selection import train_test_split\nimport warnings\n\nname_columns = ['MAC', 'NUM_MACS', 'UCAST', 'MCAST', 'BCAST','ARP','IPF','IP_ICMP','IP_UDP','IP_TCP','IP_RESTO','IP6','ETH_RESTO','ARP_noIP','SSDP','ICMPv6']\n\n# Type the columns you want to delete in the detection phase\ndelete_columns = ['MAC']\n\n\n\"\"\" Load the model from disk \"\"\"\ndef load_model(filename):\n try:\n loaded_model = pickle.load(open(filename, 'rb'))\n return loaded_model\n except:\n return None\n\n\"\"\" Prediction of the activity of a set of MAC addresses. 
Returns a list of abnormal MACs in the current capture \"\"\"\ndef predict_capture(dataset):\n global name_columns\n loaded_model = load_model(\"./model_iso_forest.bin\")\n macs_atacando = list()\n if loaded_model != None:\n try:\n # Read the captured data\n dataFrame = pd.DataFrame(dataset,columns=name_columns)\n\n # Prepare the data\n to_model_columns=dataFrame.columns[1:19]\n dataFrame[to_model_columns] = dataFrame[to_model_columns].astype(int)\n\n # We delete the columns that we do not want\n dataFrame_aux = dataFrame.drop(delete_columns, axis=1)\n\n # Prediction\n prediction = loaded_model.predict(dataFrame_aux)\n dataFrame_aux['IF']=prediction\n\n # List of MACs with abnormal activity\n macs_atacando = dataFrame.loc[dataFrame_aux['IF']==-1]['MAC'].tolist()\n\n except FileNotFoundError:\n msg = \"Dataset {0} does not exist or there was a problem reading it\".format(dataset)\n print(msg)\n\n finally:\n return macs_atacando\n\n else:\n return macs_atacando\n\ndef predict_if(cad, filename):\n # Header:\n headers = \"MAC;NUM_MACS;UCAST;MCAST;BCAST;ARP;IPF;IP_ICMP;IP_UDP;IP_TCP;IP_RESTO;IP6;ETH_RESTO;ARP_noIP;SSDP;ICMPv6\"\n\n if filename == None:\n loaded_model = load_model(\"./model_iso_forest.bin\")\n else:\n loaded_model = load_model(filename)\n\n if loaded_model != None:\n # Convert the data into a DataFrame so we can predict the result:\n d = dict()\n for i in range(len(headers.split(\";\"))):\n if i == 0:\n d[headers.split(\";\")[i]]=[(cad.split(\";\"))[i]]\n else:\n d[headers.split(\";\")[i]]=[int(cad.split(\";\")[i])]\n df = pd.DataFrame(data=d)\n\n traff_dif = df.drop(delete_columns, axis=1)\n try:\n # Prediction result:\n prediction = loaded_model.predict(traff_dif)\n print(f\"Prediction taking into account these columns: {(traff_dif.columns.tolist())}:\")\n print(prediction)\n except:\n print(\"ERROR! The prediction could not be made\")\n else:\n print(f\"ERROR! Model not found in the current directory {os.getcwd()}\")\n\n\ndef predict_dataset_if(dataset, filename):\n global name_columns\n if filename == None:\n loaded_model = load_model(\"./model_iso_forest.bin\")\n else:\n loaded_model = load_model(filename)\n\n if loaded_model != None:\n try:\n dataFrame=pd.read_csv(dataset,sep=';',names=name_columns)\n dataFrame= dataFrame.fillna(0)\n to_model_columns=dataFrame.columns[1:19]\n dataFrame[to_model_columns] = dataFrame[to_model_columns].astype(int)\n dataFrame_aux = dataFrame.drop(delete_columns, axis=1)\n prediction = loaded_model.predict(dataFrame_aux)\n count_normal = 0;\n count_anomaly = 0;\n for p in prediction:\n if p == 1:\n count_normal += 1\n else:\n count_anomaly += 1\n\n print(f\"Prediction taking into account these columns: {(dataFrame_aux.columns.tolist())}\")\n print(\"------------------------------------\")\n print(f\"Quantity of normal MAC activity: {count_normal}\")\n print(f\"Quantity of abnormal MAC activity: {count_anomaly}\")\n print(\"------------------------------------\")\n\n # Print the detected anomalies to the screen\n outliers=dataFrame.loc[prediction==-1]\n print(\"\\t\\nANOMALIES:\")\n print(outliers.to_string())\n\n except FileNotFoundError:\n msg = \"Dataset does not exist or there was a problem in reading the columns {0}.\".format(dataset)\n print(msg)\n else:\n print(f\"ERROR! 
Model not found in the current directory {os.getcwd()}\")\n\nif __name__ == '__main__':\n text_help = \"Script to predict the activity of a MAC address or dataset using the Isolation Forest algorithm\"\n text_help += \"\\n\\t./predict_iso_forest.py -s \\\"MAC_ADDRESS;257;1;5;251;251;0;0;0;5;0;0;0;1;5;0;246;0;5\\\"\"\n text_help += \"\\n\\t./predict_iso_forest.py -s \\\"MAC_ADDRESS;257;1;5;251;251;0;0;0;5;0;0;0;1;5;0;246;0;5\\\" -m model_iso_forest.bin\"\n text_help += \"\\n\\t./predict_iso_forest.py -d dataset.csv\"\n text_help += \"\\n\\t./predict_iso_forest.py -d dataset.csv -m model_iso_forest.bin\"\n text_help += \"\\nOUTPUT\"\n text_help += \"\\n\\t[+] 1 -> normal activity \\n\"\n text_help += \"\\t[+] -1 -> abnormal activity \\n\\n\"\n ap = argparse.ArgumentParser(text_help)\n ap.add_argument(\"-s\", \"--actividadMAC\", required=False, help=\"Activity of a single MAC address\")\n ap.add_argument(\"-d\", \"--datasetMAC\", required=False, help=\"Dataset\")\n ap.add_argument(\"-m\", \"--model\", required=False, help=\"Machine Learning model\")\n args = ap.parse_args(args=None if sys.argv[1:] else ['--help'])\n\n if args.actividadMAC:\n model_file = vars(ap.parse_args())[\"model\"]\n cad = vars(ap.parse_args())[\"actividadMAC\"]\n predict_if(cad,model_file)\n if args.datasetMAC:\n model_file = vars(ap.parse_args())[\"model\"]\n dataset = vars(ap.parse_args())[\"datasetMAC\"]\n predict_dataset_if(dataset,model_file)\n","repo_name":"redesgtt/bcast_ids","sub_path":"predict_iso_forest.py","file_name":"predict_iso_forest.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"}
+{"seq_id":"12318231629","text":"\"\"\"Models for Kicad Library Plugin.\"\"\"\nfrom django.core import validators\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth.models import User\n\nfrom part.models import PartCategory, PartParameterTemplate\n\n\nclass SelectedCategory(models.Model):\n \"\"\"Categories which are used in Kicad.\"\"\"\n\n class Meta:\n app_label = 'inventree_kicad'\n verbose_name = 'KiCad Category'\n verbose_name_plural = 'KiCad Categories'\n\n category = models.OneToOneField(\n PartCategory,\n on_delete=models.CASCADE,\n related_name='get_enabled_kicad_categories',\n verbose_name=_('Category')\n )\n\n default_symbol = models.CharField(\n max_length=200,\n blank=True,\n verbose_name=_('Default Symbol'),\n help_text=_('Default symbol for this category, if not specified for an individual part'),\n )\n\n default_footprint = models.CharField(\n max_length=200,\n blank=True,\n verbose_name=_('Default Footprint'),\n help_text=_('Default footprint for this category, if not specified for an individual part'),\n )\n\n default_reference = models.CharField(\n max_length=20,\n blank=True,\n verbose_name=_('Default Reference'),\n help_text=_('Default reference for this category, if not specified for an individual part'),\n )\n\n default_value_parameter_template = models.ForeignKey(\n PartParameterTemplate,\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n verbose_name=_('Default Value Parameter Template'),\n help_text=_('Default value parameter template for this category, if not specified for an individual part'),\n )\n\n footprint_parameter_template = models.ForeignKey(\n PartParameterTemplate,\n on_delete=models.SET_NULL,\n related_name=\"footprint_kicad_categories\",\n blank=True,\n null=True,\n verbose_name=_('Footprint Parameter Template'),\n 
help_text=_('Footprint parameter template for this category. Overrides the KICAD_FOOTPRINT_PARAMETER setting for this category.'),\n )\n\n def __str__(self):\n \"\"\"Default name string which is returned when object is called\"\"\"\n return f'{self.category.pathstring}'\n\n\nclass FootprintParameterMapping(models.Model):\n \"\"\"Mapping entry to map from the footprint parameter value to a KiCad footprint name\"\"\"\n\n class Meta:\n app_label = \"inventree_kicad\"\n verbose_name = \"Footprint Mapping\"\n unique_together = (\"kicad_category\", \"parameter_value\")\n\n kicad_category = models.ForeignKey(SelectedCategory, on_delete=models.CASCADE)\n\n parameter_value = models.CharField(\n max_length=200,\n verbose_name=\"Footprint Parameter Value\",\n )\n\n kicad_footprint = models.CharField(\n max_length=200,\n verbose_name=\"KiCad Footprint\",\n )\n\n def __str__(self):\n \"\"\"Default name string which is returned when object is called\"\"\"\n return f\"{self.parameter_value} -> {self.kicad_footprint}\"\n\n\nclass ProgressIndicator(models.Model):\n \"\"\"Progress indicators which are used to display a loading bar inside a multiuser environment.\"\"\"\n\n class Meta:\n app_label = 'inventree_kicad'\n verbose_name = 'Progress Indicator'\n verbose_name_plural = 'Progress Indicators'\n\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE,\n related_name='get_progress_bar_users',\n verbose_name=_('User')\n )\n\n current_progress = models.IntegerField(\n default=0,\n validators=[validators.MinValueValidator(0), validators.MaxValueValidator(100)],\n help_text=_('Current progress')\n )\n\n file_name = models.CharField(\n max_length=100,\n default='',\n help_text=_('Name of currently processed file.')\n )\n\n def __str__(self):\n \"\"\"Default name string which is returned when object is called\"\"\"\n return f'{self.user.username}'\n","repo_name":"afkiwers/inventree_kicad","sub_path":"inventree_kicad/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"28"}
+{"seq_id":"38377488189","text":"option = input('Do you want a negative or positive number line: ')\noption = option.lower()\nnumber = int(input('Number: '))\nnumber_line = []\n\nif option == 'positive':\n if number > 0:\n for num in range(number + 1):\n number_line.append(num)\n else:\n print('NUMBER NOT POSITIVE')\nelif option == 'negative':\n if number < 0:\n for num in range(number, 1):\n number_line.append(num)\n else:\n print('NUMBER NOT NEGATIVE')\n\nprint(number_line)\n\n\n\n","repo_name":"datormx/PythonExercises","sub_path":"retos-platzi/Reto5_9_RectaNumerica.py","file_name":"Reto5_9_RectaNumerica.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"}
+{"seq_id":"2810916211","text":"import os\n\nSECRET_KEY = 'e1c6fe5819c4bc8857e2cbce963a7be5904692eba862d1a809f195710aff5cdc'\nSQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://std_1679_exam:1q2w3e4r@std-mysql.ist.mospolytech.ru/std_1679_exam'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nSQLALCHEMY_ECHO = True\nADMIN_ROLE_ID = 1\nMODER_ROLE_ID = 2\nUSER_ROLE_ID = 3\nUPLOAD_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'media', 
'images')","repo_name":"YuraKhoroshev/webexam","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"26740109634","text":"from itertools import product\nfrom re import S\nimport sys\nfrom src.utils import open_as_np\nfrom solver.geometric_median import geometric_median\nimport math\nimport solver.pixel as pix\nimport solver.pixel2 as pix2\nimport solver.costs as costs_m\nimport dataclasses\nimport numpy as np\nimport traceback\n\n@dataclasses.dataclass\nclass LogEntry:\n key: str\n pixel_size: int\n prog: pix.Prog\n prog_length: int\n cost: int\n similarity: int\n\n def total_cost(self):\n return self.cost + self.similarity\n\nclass PixelSolver3(pix2.PixelSolver2):\n BACKGROUND = np.array([255, 255, 255, 255])\n canvas: np.ndarray = None\n\n def __init__(self, problem_id, start_block, max_block_id, pixel_size, max_steps = -1, gravity_point=(400,400)):\n super().__init__(problem_id, start_block, max_block_id, pixel_size, max_steps=max_steps)\n self.gravity_point = gravity_point\n self.subimg = self.img[:,:]\n # create a matrix of indexes, such that indexes[x][y] -> (x,y)\n # we will take and flip slices of data, the matrix will help restore indexes\n self.indices = np.indices((400,400))\n self.subindices = self.indices[...]\n\n def init_colors(self):\n # _, (x0,y0), (x1,y1) = self.start_block\n self.canvas = np.zeros(np.shape(self.img), dtype=np.uint16)\n self.canvas[:,:] = self.BACKGROUND\n self.subcanvas = self.canvas[:,:]\n\n def get_pixel_color(self, x, y):\n return self.canvas[x, y]\n\n def compute_similarity(self):\n return costs_m.simil(self.img - self.canvas)\n\n def set_pixel_color(self, x, y, color:np.array):\n self.subcanvas[x:,y:] = color\n\n def pick_color(self, x, y, color_width, color_height):\n subpixel = self.subimg[x:x+self.pixel_size, y:y+self.pixel_size]\n color = [round(int(v)) for v in geometric_median(\n subpixel.reshape((subpixel.shape[0] * subpixel.shape[1], 4)), eps=1e-2)]\n\n color_cost = costs_m.get_cost(\n costs_m.COSTS.COLOR, color_width * color_height)\n new_simil = costs_m.float_simil(subpixel - color)\n old_simil = costs_m.float_simil(subpixel - self.subcanvas[x,y])\n if old_simil > new_simil + color_cost:\n return color\n else:\n return None\n\n def run(self):\n self.log_state(\"initial\")\n\n self.pixelize_block(self.start_block, self.max_steps)\n\n self.log_state(\"final\")\n\n def pixelize_block(self, block: pix.Block, max_steps):\n block_size_x, block_size_y = self.subindices[0].shape\n if block_size_x <= 0 or block_size_y <= 0:\n return\n _, (x0,y0), (x1,y1) = block\n width = block.width()\n height = block.height()\n mx, my = self.subindices[:,width//2,height//2]\n gx, gy = self.gravity_point\n dx = 1 if mx <= gx else -1\n dy = 1 if my <= gy else -1\n print('>>>', 'b', (x0,y0), (x1,y1), 'm', (mx, my), 'g', (gx, gy), 'd', (dx,dy))\n self.subimg = self.img[x0:x1,y0:y1][::dx,::dy]\n self.subcanvas = self.canvas[x0:x1,y0:y1][::dx,::dy]\n self.subindices = self.indices[:,x0:x1,y0:y1][:,::dx,::dy]\n # start corner point\n cx, cy = self.subindices[:,0,0]\n print(\n f\"pixelize_block {block} {cx} {cy} corner color {self.subcanvas[0,0]}, max_steps={max_steps}\")\n if max_steps == 0:\n return\n\n color = self.pick_color(0, 0, width, height)\n if color:\n self.prog.color(block.name, color, block.sq_size())\n self.subcanvas[0:,0:] = color\n\n # Horizontal row\n # for xs in range(x + self.pixel_size, x + width, 
self.pixel_size):\n for xs in range(self.pixel_size, width, self.pixel_size):\n # print(f\"Horizontal row {xs} {y}\")\n color = self.pick_color(xs, 0, width - xs, height)\n if color:\n abs_x = self.subindices[0,xs,0] + (1-dx)//2\n left, right = block.line_x(abs_x, self.prog)\n block_to_color = right if dx == 1 else left\n self.prog.color(block_to_color.name, color, right.sq_size())\n self.subcanvas[xs:,0:] = color\n cur_name = self.merge(left.name, right.name, left.sq_size(), right.sq_size())\n block = pix.Block(cur_name, block.begin, block.end)\n\n # Vertical row\n # for ys in range(y + self.pixel_size, y + height, self.pixel_size):\n for ys in range(self.pixel_size, height, self.pixel_size):\n # print(f\"Vertical row {x} {ys}\")\n color = self.pick_color(0, ys, width, height - ys)\n if color:\n abs_y = self.subindices[1,0,ys] + (1-dy)//2\n bottom, top = block.line_y(abs_y, self.prog)\n block_to_color = top if dy == 1 else bottom\n self.prog.color(block_to_color.name, color, top.sq_size())\n self.subcanvas[0:,ys:] = color\n cur_name = self.merge(bottom.name, top.name, bottom.sq_size(), top.sq_size())\n block = pix.Block(cur_name, block.begin, block.end)\n\n # if x == 120 and y == 120:\n # return\n\n self.log_state(f\"x{cx} y{cy}\")\n\n if block.width() > self.pixel_size and block.height() > self.pixel_size:\n split_pt = self.subindices[:, self.pixel_size+(dx-1)//2, self.pixel_size+(dy-1)//2]\n bot_left, bot_right, top_right, top_left = block.split(split_pt, self.prog)\n if (dx, dy) == (1,1):\n next_block = top_right\n elif (dx, dy) == (-1,1):\n next_block = top_left\n elif (dx, dy) == (-1,-1):\n next_block = bot_left\n elif (dx, dy) == (1,-1):\n next_block = bot_right\n\n self.pixelize_block(next_block, max_steps - 1)\n\ndef run_pixel_solver(problem_id, start_block, max_block_id, gravity_point, pixel_size, max_steps=-1):\n try:\n log_entries = []\n for ps in range(pixel_size//2, pixel_size*2, pixel_size//4):\n # for ps in [pixel_size]:\n solver = PixelSolver3(\n problem_id=problem_id,\n start_block=start_block,\n max_block_id=max_block_id,\n pixel_size=ps,\n max_steps=max_steps,\n gravity_point=gravity_point,\n )\n\n solver.run()\n log_entries.extend(solver.log)\n\n for entry in solver.log:\n print(f\"{entry} => {entry.total_cost()}\")\n\n best_entry: LogEntry = min(log_entries, key=lambda entry: entry.total_cost())\n print(f\"\\n\\nBEST: {best_entry} => {best_entry.total_cost()}\")\n\n cmds = best_entry.prog.cmds[:best_entry.prog_length]\n\n return cmds\n except Exception as err:\n traceback.print_exc()\n print(f\"run_pixel_solver() failed {err}\")\n return f\"Error: {err}\"\n # total = 0\n # for i, (cmd, cost) in enumerate(zip(solver.prog.cmds, solver.prog.costs)):\n # total += cost\n # print(f\"{i + 1}: [{total}] {cost}: {cmd}\")\n\nif __name__ == \"__main__\":\n problem_id = sys.argv[1]\n pixel_size = int(sys.argv[2])\n start = int(sys.argv[3]) if len(sys.argv) > 3 else 0\n\n cmds = run_pixel_solver(\n problem_id=problem_id,\n start_block=pix.Block(\"0\", begin=(0, 0), end=(400, 400)),\n max_block_id = start,\n pixel_size = pixel_size)\n\n with open(f\"solutions/pixel_solver2/{problem_id}.txt\", \"wt\") as f:\n f.write(\"\\n\".join(cmds))\n","repo_name":"pankdm/icfpc-2022","sub_path":"solver/pixel3.py","file_name":"pixel3.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"11868057443","text":"from magiconfig import ArgumentParser, MagiConfigOptions\nimport six\n\nif 
__name__==\"__main__\":\n parser = ArgumentParser(config_options=MagiConfigOptions())\n parser.add_argument(\"-f\",\"--foo\", dest=\"foo\", type=str, default=\"lorem\", help=\"foo arg\")\n parser.add_argument(\"-b\",\"--bar\", dest=\"bar\", type=float, required=True, help=\"bar arg\")\n parser.add_argument(\"-i\",\"--ipsum\", dest=\"ipsum\", action=\"store_true\", help=\"ipsum arg\")\n args = parser.parse_args()\n six.print_(args)\n parser.write_config(args,\"examples/config1_out.py\")\n","repo_name":"kpedro88/magiconfig","sub_path":"examples/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"28"} +{"seq_id":"39602662219","text":"from math import sqrt, log, floor, pow\n\n\ndef isPrime(n):\n for i in range(2, int(sqrt(n)) + 1):\n if isPrime(i) and n % i == 0:\n return False\n return True\n\n\ndef primeFactorization(num, primes):\n primeFactors = {}\n for prime in primes:\n while True:\n if num % prime == 0:\n primeFactors[prime] = primeFactors.get(prime, 0) + 1\n num //= prime\n else:\n break\n return primeFactors\n\n\nif __name__ == \"__main__\":\n n = int(input(\"Enter a number: \"))\n if isPrime(n):\n print(n, \"= 1 *\", n)\n else:\n primes = []\n for i in range(2, n // 2 + 1):\n if isPrime(i):\n primes.append(i)\n primeFactors = primeFactorization(n, primes)\n noOfPrimes = len(primeFactors)\n print(n, \"=\", end=\" \")\n for base, power in primeFactors.items():\n print(str(base) + \"^\" + str(power), end=\"\")\n if noOfPrimes != 1:\n print(\" * \", end=\"\")\n noOfPrimes -= 1\n print(\"\\nBase:Power Form: \", primeFactors, end=\"\")\n","repo_name":"rohithpala/Python","sub_path":"primeFactorization.py","file_name":"primeFactorization.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"30134696117","text":"from PyQt5 import QtWidgets, QtCore\nimport sys, os\nfrom buttonForm import buttonForm\n\nclass page(QtWidgets.QWidget):\n def __init__(self,shop,table,editform,parent=None):\n QtWidgets.QWidget.__init__(self,parent, shop)\n self.__VBox=QtWidgets.QVBoxLayout()\n self.__HBox=QtWidgets.QHBoxLayout()\n self.__table=table\n self.__editForm=editform\n self.__buttonForm=buttonForm(self)\n\n self.__VBox.addWidget(self.__table)\n self.__HBox.addWidget(self.__editForm)\n self.__HBox.addWidget(self.__buttonForm)\n self.__VBox.addLayout(self.__HBox)\n self.setLayout(self.__VBox)\n\n self.__table.curRowChSignal.connect(self.curRowCh)\n self.__buttonForm.editRecSignal.connect(self.editRec)\n self.__buttonForm.newRecSignal.connect(self.newRec)\n self.__buttonForm.delRecSignal.connect(self.delRec)\n\n def curRowCh(self):\n self.__editForm.setCurrentCode(self.__table.getCurrentCode())\n\n def update(self):\n self.__table.update()\n self.__editForm.setCurrentCode(self.__table.getCurrentCode())\n\n def newRec(self):\n self.__editForm.newClick()\n self.__table.update()\n\n def editRec(self):\n self.__editForm.editClick()\n self.__table.update()\n\n def delRec(self):\n self.__editForm.delClick()\n self.__table.update()\n","repo_name":"creepystaisy/OOP_4","sub_path":"page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"25123336244","text":"import sys\n\"\"\"\nBFS 사용\n\"\"\"\n\ndef bfs(tomato, visited, start, tomato_cnt, n, m):\n # 하, 상, 우, 좌\n x_way = [1, -1 
,0, 0]\n y_way = [0, 0, 1, -1]\n\n result = 0\n tomato_cnt += len(start)\n\n _next = start\n\n while _next:\n child = _next.copy()\n _next = []\n\n for c in child:\n x, y = c\n tomato[x][y] = 1\n visited[x][y] = True\n tomato_cnt -= 1\n\n for w in range(4):\n if x + x_way[w] in (-1, n) or y + y_way[w] in (-1, m):\n continue\n else:\n if tomato[x + x_way[w]][y + y_way[w]] == 0 and visited[x + x_way[w]][y + y_way[w]] == False:\n tomato[x + x_way[w]][y + y_way[w]] = 1\n visited[x + x_way[w]][y + y_way[w]] = True\n _next.append([x + x_way[w], y + y_way[w]])\n if _next:\n result += 1\n\n if tomato_cnt == 0:\n return result\n else:\n return -1\n\nm, n = map(int, sys.stdin.readline().split())\ntomato = [[int(j) for j in sys.stdin.readline().split()] for i in range(n)]\nvisited = [[False for i in range(m)] for i in range(n)]\nstart = []\ntomato_cnt = 0\n\nfor i in range(n):\n for j in range(m):\n if tomato[i][j] == 1:\n start.append([i, j])\n elif tomato[i][j] == 0:\n tomato_cnt += 1\n\nprint(bfs(tomato, visited, start, tomato_cnt, n, m))\n\n","repo_name":"devplutus/Python3","sub_path":"Baekjoon/7576-토마토.py","file_name":"7576-토마토.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17355477206","text":"people = {\n 'htanaka':'Haru Tanaka',\n 'zmin' : 'Zhang Min',\n 'afarooqi' : 'Ayesha Farooqi',\n}\n\n# show original people dictionary.\nprint(people)\n\n#Remove all data from the dictionary.\npeople.clear()\n\n#Show what's in people now.\nprint(people)\n","repo_name":"formallinguist/100_days_of_coding","sub_path":"dictionary_d1.py","file_name":"dictionary_d1.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"74826503436","text":"import os\nimport sys\nsys.path.append(\"../\")\nimport time\nimport numpy as np \nimport scipy as sp\nfrom scipy import signal\nimport py3lib.NetSSH as net \nimport py3lib.FileToArray as fil2a\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nimport logging\nimport datetime\nimport py3lib.fakeData as fakeData\n\nSETTING_FILEPATH = \"set\"\nSCAN_PRESET_FILE_NAME = \"set/scan_setting.txt\"\nSYS_PRESET_FILE_NAME = \"set/sys_setting.txt\"\nCAL_PRESET_FILE_NAME = \"set/cal_setting.txt\"\nHK_PRESET_FILE_NAME = \"set/hk_setting.txt\"\nENG_SETTING_FILE = \"set/eng_Setting.txt\"\n\nROW_FILEPATH = \"./ms1rawdata/\"\n\nMS1_FILE = \"MS1.txt\"\nFAKE_DATA = \"data.txt\"\nINIT_DATACOUNT = 10000\n\nUART_CMD = \"LD_LIBRARY_PATH=/opt/quantaser/lib ./UART \"\n\nADC_DATA_FILE = \"adc_data.bin\"\nISO_OUT_FILE = \"chirp_out.bin\"\nMSMS_OUT_FILE = \"msms_out.bin\"\nADC_CMD = \"/opt/redpitaya/bin/monitor 0x40200058\"\nADC_TIMEOUT = 1000\nDELTAT = 0.00024 #ms = 8*30 = 240 ns\nCHIRP_DATA_COUNT = 32768\nFREQ_SPACE = 125\n\nISO_ERROR_MSG1 = \"The mass should be between \"\nISO_ERROR_MSG2 = \"\\nPlease modify ISO Mass\"\n\nTEST_MODE = False\n\nclass qss005Action(QObject):\n\tms1_update_array = pyqtSignal(object)\n\tms1_single_finished = pyqtSignal()\n\n\tms1_update_total_array = pyqtSignal(object, object)\n\tms1_finished = pyqtSignal()\n\tdef __init__(self, loggername, paraent = None):\t\n\t\tsuper(QObject, self).__init__(paraent)\n\t\tself.loggername = loggername\n\t\tself.Qss005header = \"\"\n\t\tself.ssh = net.NetSSH(loggername)\n\t\tself.logger = logging.getLogger(loggername)\n\t\tself.ms1init()\n\t\tself.calibra_init()\n\t\tself.loadPreset()\n\t\tself.updateCalMass()\n\n# 
start of the function definitions shared by all QSS005 operations\n\tdef sshConnect(self, ip, port, usr, psswd):\n\t\tsshresult = self.ssh.connectSSH(ip, port, usr, psswd)\n\t\tftpresult = self.ssh.connectFTP()\n\t\tif TEST_MODE:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn (sshresult and ftpresult)\n\n\tdef loadPreset(self):\n\t\tif not os.path.isdir(SETTING_FILEPATH):\n\t\t\tos.mkdir(SETTING_FILEPATH)\n\n\t\tif os.path.exists(SCAN_PRESET_FILE_NAME):\n\t\t\tself.scanPreset = fil2a.TexTFileto1DList(SCAN_PRESET_FILE_NAME, self.loggername)\n\t\telse:\n\t\t\tself.logger.warning(\"scan file load failed\")\n\t\t\tself.scanPreset = [100, 1, 0, 0, 0, 0, 1, 1, 10, 0.5, 100, 30, 50, 10, 10, 10]\n\t\t\tself.savePreset(1)\n\n\t\tif os.path.exists(SYS_PRESET_FILE_NAME):\n\t\t\tself.sysPreset = fil2a.TexTFileto1DList(SYS_PRESET_FILE_NAME, self.loggername)\n\t\telse:\n\t\t\tself.logger.warning(\"system file load failed\")\n\t\t\tself.sysPreset = [1, 0.0, 1, 0, 0.1, 0, 0.1, 30, 0.67, 0.52]\n\t\t\tself.savePreset(2)\n\n\t\tif os.path.exists(CAL_PRESET_FILE_NAME):\n\t\t\tself.calibPreset = fil2a.TexTFileto1DList(CAL_PRESET_FILE_NAME, self.loggername)\n\t\telse:\n\t\t\tself.logger.warning(\"cal file load failed\")\n\t\t\n\t\tif os.path.exists(HK_PRESET_FILE_NAME):\n\t\t\tself.hkPreset = fil2a.TexTFileto1DList(HK_PRESET_FILE_NAME, self.loggername)\n\t\telse:\n\t\t\tself.logger.warning(\"hk file load failed\")\n\n\t\tif os.path.exists(ENG_SETTING_FILE):\n\t\t\tself.engSet = fil2a.TexTFileto1DList(ENG_SETTING_FILE, self.loggername)\n\t\telse:\n\t\t\tself.logger.warning(\"failed to load eng setting file\")\n\n\tdef savePreset(self, type):\n\t\tif (type == 4):\n\t\t\tparalist = self.engSet\n\t\t\tfilename = ENG_SETTING_FILE\n\t\telif (type == 3):\n\t\t\tparalist = self.calibPreset\n\t\t\tfilename = CAL_PRESET_FILE_NAME\n\t\telif (type == 2):\n\t\t\tparalist = self.sysPreset\n\t\t\tfilename = SYS_PRESET_FILE_NAME\n\t\telif (type == 1):\n\t\t\tparalist = self.scanPreset\n\t\t\tfilename = SCAN_PRESET_FILE_NAME\n\t\telse:\t#elif (type == 0):\n\t\t\tparalist = self.hkPreset\n\t\t\tfilename = HK_PRESET_FILE_NAME\n\n\t\tfil2a.array1DtoTextFile(filename, paralist, self.loggername)\n\n\tdef setQss005header(self, header):\n\t\tself.Qss005header = header\n\n# start of the function definitions for MS1\n\tdef ms1init(self):\n\t\tself.ms1singleRunFlag = False\n\t\tself.singleData = np.empty(0)\n\t\tself.cmd = \"\"\n\t\tself.cmd_delay_time = 0\n\t\tself.ms1noisefilter = False\n\t\tself.ms1filterLevel = 1\n\t\tself.ms1runFlag = False\n\t\tself.ms1TotalData = np.zeros(INIT_DATACOUNT)\n\t\t#self.ms1saveRaw = False\n\t\tself.ms1saveRawPath = ROW_FILEPATH\n\t\tself.rawfileindex = 0\n\t\tself.ms1datalen = INIT_DATACOUNT\n\t\tself.runLoop = 1\n\t\tself.polarity = 1\n\t\tself.pts = 0\n\t\tself.old_ch1_trapping_amp = 0\n\t\tself.old_ch2_freq_factor = 0\n\t\tself.old_ch2_final_freq = 0\n\t\tself.old_isoMassCenter = 0\n\t\tself.old_isoMassRange = 0\n\t\tself.old_iso_chirp_amp = 0\n\t\tself.old_msms_amp = 0\n\t\tself.ms1isChecked = False\n\t\tself.getFile_delay_time = 0\n\n\tdef ms1_setCmdAndValue(self, cmd, cmd_delay_time, ms1isChecked, getFile_delay_time):\n\t\tself.cmd = cmd\n\t\tself.cmd_delay_time = cmd_delay_time\n\t\tself.ms1isChecked = ms1isChecked\n\t\tself.getFile_delay_time = getFile_delay_time\n\n\tdef ms1_setNoiseAndLevel(self, enable, level):\n\t\tself.ms1noisefilter = enable\n\t\tself.ms1filterLevel = level\n\n\tdef ms1_setRowAndPath(self, row_path = \"\"):\n\t\t#self.ms1saveRaw = save_row\n\t\t#if (self.ms1saveRaw):\n\t\t\t#if (row_path != 
''):\n\t\tself.ms1saveRawPath = row_path\n\n\tdef resetIndex(self):\n\t\tself.rawfileindex = 0\n\t\tself.ms1TotalData = np.zeros(INIT_DATACOUNT)\n\t\tself.ms1datalen = INIT_DATACOUNT\n\n\t# def ms1fakeData(self): # NOT use\n\t# \tself.singleData = fil2a.TexTFileto1DList(FAKE_DATA, self.loggername)\n\n\tdef checkAndGetFile(self, filename, len):\n\t\tdata = np.empty(0)\n\t\tls_cmd = \"ls \" + filename\n\t\tTRIG_PASS_FLAG = False\n\t\ti = 0\n\t\twhile (not TRIG_PASS_FLAG) and (i < ADC_TIMEOUT):\n\t\t\tstdout = self.ssh.sendQuerry(ls_cmd)\n\t\t\toutput = stdout.readline()\n\t\t\tif output.find(filename, 0, len) == 0:\n\t\t\t\tTRIG_PASS_FLAG = True\n\t\t\ti = i + 1\n\t\t\t# print(i)\n\t\t\tif (i == 4):\n\t\t\t\tself.ssh.sendCmd(self.cmd)\n\t\t\t\tprint(\"i = \" + str(i))\n\t\t\t\tprint(\"re-send cmd : \" + self.cmd)\n\t\t\t\ti = 0\n\t\t\telse:\n\t\t\t\t# print(\"delay = \" + str(self.getFile_delay_time) )\n\t\t\t\ttime.sleep(self.getFile_delay_time)\n\n\t\tif not TRIG_PASS_FLAG:\n\t\t\tself.logger.error(\"ADC file time out\")\n\t\telse:\n\t\t\tself.ssh.getFtpFile(filename)\n\t\treturn TRIG_PASS_FLAG\n\n\tdef setPolarity(self, polarity):\n\t\tif polarity:\n\t\t\tself.polarity = -1\n\t\telse:\n\t\t\tself.polarity = 1\n\n\tdef ADCfiletoData(self):\n\t\tif (TEST_MODE):\n\t\t\tif (self.ms1isChecked):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tself.singleData = fil2a.BinFiletoArray(ISO_OUT_FILE, 4, 'f', self.loggername)*self.polarity\n\t\telse:\n\t\t\tself.singleData = fil2a.BinFiletoArray(ADC_DATA_FILE, 4, 'f', self.loggername)*self.polarity\n\n\t\tif len(self.singleData) > 0:\n\t\t\t# self.singleData = np.delete(self.singleData, 0)\n\t\t\tif (self.ms1noisefilter):\n\t\t\t\tself.singleData = sp.signal.medfilt(self.singleData, self.ms1filterLevel*2+1)\n\t\telse:\n\t\t\tself.logger.error(\"ADC File Empty\")\n\n\tdef sendSingleCmd(self):\n\t\t# rm ADC file\n\t\trm_cmd = \"rm \" + ADC_DATA_FILE\n\t\tself.ssh.sendCmd(rm_cmd)\n\t\tadc_cmd = ADC_CMD + \" 0\"\n\t\tself.ssh.sendCmd(self.cmd)\n\t\tself.ssh.sendCmd(adc_cmd, getpty = True, timedelay = self.cmd_delay_time)\n\n\tdef getData(self):\n\t\tif (TEST_MODE):\n\t\t\tif (0):\n\t\t\t\tself.ADCfiletoData()\n\t\t\telse:\n\t\t\t\t# print(\"action : \" + str(self.pts))\n\t\t\t\tfakeD = fakeData.QSS005MSData(self.pts)\n\t\t\t\tdefine = fakeD.genRandDefine(5, 6)\n\t\t\t\tfakeD.genPeak(define, 4)\n\t\t\t\tfakeD.genNoise(1)\n\t\t\t\tself.singleData = fakeD.data*self.polarity\n\t\telse:\n\t\t\tself.checkAndGetFile(ADC_DATA_FILE, 13)\n\t\t\tself.ADCfiletoData()\n\t\t# rm ADC file\n\t\trm_cmd = \"rm \" + ADC_DATA_FILE\n\t\tself.ssh.sendCmd(rm_cmd)\n\n\tdef sendStartCmd(self):\n\t\t# write MS1.txt \"0\" to stop\n\t\techo_cmd = \"echo \\\"0\\\" > \" + MS1_FILE\n\t\t# print(echo_cmd)\n\t\tself.ssh.sendCmd(echo_cmd)\n\n\tdef sendStopCmd(self):\n\t\t# write MS1.txt \"1\" to stop\n\t\techo_cmd = \"echo \\\"1\\\" > \" + MS1_FILE\n\t\t# print(echo_cmd)\n\t\tself.ssh.sendCmd(echo_cmd)\n\n\tdef ms1single(self):\n\t\ttime.sleep(0.1)\n\t\t# self.checkISOinit() # move to main : ms1BtnRun\n\t\tif self.ms1singleRunFlag:\n\t\t\t# self.sendStartCmd()\n\t\t\tself.sendStopCmd()\n\t\t\tself.sendSingleCmd()\n\t\t\t# t0 = time.time()\n\t\t\t# m0 = time.localtime(t0).tm_min\n\t\t\t# s0 = time.localtime(t0).tm_sec\n\t\t\tself.getData()\n\t\t\t# t1 = time.time()\n\t\t\t# m1 = time.localtime(t1).tm_min\n\t\t\t# s1 = time.localtime(t1).tm_sec\n\t\t\t# print(\"getData TIME = \" + str(m1-m0) + \":\" + 
str(s1-s0))\n\t\t\tself.ms1_update_array.emit(self.singleData)\n\t\tself.sendStopCmd()\n\t\tself.ms1_single_finished.emit()\n\t\tprint(\"----------\")\n\n\tdef ms1multiRun(self):\n\t\t# self.checkISOinit() # move to main : ms1BtnRunAll\n\t\tself.sendStartCmd()\n\t\tself.sendSingleCmd()\n\n\t\twhile (self.ms1runFlag and self.rawfileindex < self.runLoop):\n\t\t\t# t0 = time.time()\n\t\t\t# m0 = time.localtime(t0).tm_min\n\t\t\t# s0 = time.localtime(t0).tm_sec\n\t\t\tself.getData()\n\t\t\t# t1 = time.time()\n\t\t\t# m1 = time.localtime(t1).tm_min\n\t\t\t# s1 = time.localtime(t1).tm_sec\n\t\t\t# print(\"getData TIME = \" + str(m1-m0) + \":\" + str(s1-s0))\n\t\t\tnewdatalen = len(self.singleData)\n\t\t\tif (newdatalen > 0):\n\t\t\t\toutdata = self.singleData\n\t\t\t\tself.ms1datalen = min(newdatalen, self.ms1datalen)\t\n\t\t\t\tself.ms1TotalData = self.ms1TotalData[0:self.ms1datalen]\n\t\t\t\toutdata = outdata[0:self.ms1datalen]\n\t\t\t\tself.ms1TotalData += outdata\n\t\t\t\tif (self.ms1saveRawPath != ''):\n\t\t\t\t\tcurr_time = datetime.datetime.now()\n\t\t\t\t\tfname = self.ms1saveRawPath +\"/\"+curr_time.strftime(\"%Y_%m_%d_%H_%M_%S\")+\"_\"+str(self.rawfileindex)+\".txt\"\t\n\t\t\t\t\ttempdata = np.array([self.xplotdata[0:self.ms1datalen], outdata], np.float64)\n\t\t\t\t\ttempdata = np.transpose(tempdata)\n\t\t\t\t\theader = self.Qss005header+\"\\n\"+str(curr_time)+\"\\n\"+\"mass, signal\"\n\t\t\t\t\tfil2a.list2DtoTextFile(fname, tempdata,\",\",self.loggername, header = header)\n\t\t\t\tself.rawfileindex += 1\n\t\t\t\ttotalDataOut = self.ms1TotalData / self.rawfileindex\n\t\t\t\tself.ms1_update_total_array.emit(self.singleData, totalDataOut)\n\t\t# while end\n\t\tself.sendStopCmd()\n\t\tself.ms1_finished.emit()\n\t\tprint(\"----------\")\n\n# start the function define for calibration\n\tdef calibra_init(self):\n\t\tself.calibPreset = [1, 0]\n\t\tself.currData = np.zeros(INIT_DATACOUNT)\n\t\tself.xplotdata = np.zeros(INIT_DATACOUNT)\n\n\tdef calibra_findPeak(self, minHeight, minWidth, calib):\n\t\t# print(\"threshold = \" + str(minHeight))\n\t\t# print(\"noise_width = \" + str(minWidth))\n\t\tif calib:\n\t\t\tself.peaks, _= sp.signal.find_peaks(self.currData, height = minHeight, width = minWidth)\n\t\telse:\n\t\t\tself.peaks, _= sp.signal.find_peaks(self.singleData, height = minHeight, width = minWidth)\n\t\t\n\t\tself.logger.debug(\"lens of peak\" + str(self.peaks))\n\t\tself.logger.debug(str(self.peaks))\n\t\treturn self.peaks\n\n\tdef calibra_curveFit(self, calbratedata) :\n\t\tnum = len(calbratedata)\n\t\tfitIndex = []\n\t\tcalbIndex = []\n\t\tfor i in range(0, num):\n\t\t\tfitIndex.append(self.peaks[calbratedata[i][0]])\n\t\t\tcalbIndex.append(calbratedata[i][1])\n\t\tcalibPreset = np.polyfit(fitIndex, calbIndex, 1)\n\t\tself.calibPreset[0] = \"%2.4f\"%calibPreset[0]\n\t\tself.calibPreset[1] = \"%2.4f\"%calibPreset[1]\n\t\tself.logger.debug(str(self.calibPreset))\n\n\tdef updateCalMass(self):\n\t\tfor i in range(0, INIT_DATACOUNT):\n\t\t\tself.xplotdata[i] = i*float(self.calibPreset[0]) + float(self.calibPreset[1])\n\n\tdef checkParamChanged(self, ch2_freq_factor, ch2_final_freq, isoMassCenter, isoMassRange, \\\n\t\tch1_trapping_amp, rfVolGain, ch1_freq, r0, z0, iso_chirp_amp, msms_amp):\n\t\tif ( (self.old_ch1_trapping_amp == ch1_trapping_amp) \n\t\tand (self.old_ch2_freq_factor == ch2_freq_factor)\n\t\tand (self.old_ch2_final_freq == ch2_final_freq)\n\t\tand (self.old_isoMassCenter == isoMassCenter)\n\t\tand (self.old_isoMassRange == isoMassRange)\n\t\tand (self.old_iso_chirp_amp == 
iso_chirp_amp)\n\t\tand (self.old_msms_amp == msms_amp) ):\n\t\t\tself.paramChanged = False\n\t\telse:\n\t\t\tself.paramChanged = True\n\t\t# print(\"paramChanged = \" + str(self.paramChanged))\n\n\t\tself.ch1_trapping_amp = ch1_trapping_amp\n\t\tself.ch2_freq_factor = ch2_freq_factor\n\t\tself.ch2_final_freq = ch2_final_freq\n\t\tself.isoMassCenter = isoMassCenter\n\t\tself.isoMassRange = isoMassRange\n\n\t\tself.rfVolGain = rfVolGain\n\t\tself.ch1_freq = ch1_freq\n\t\tself.r0 = r0\n\t\tself.z0 = z0\n\t\tself.iso_chirp_amp = iso_chirp_amp\n\t\tself.msms_amp = msms_amp\n\n\tdef checkISOinit(self):\n\t\tif (self.paramChanged == True):\n\t\t\ttrapingV = self.ch1_trapping_amp * self.rfVolGain/1000\n\t\t\t#print(\"trapingVchekc:\"+str(self.ch1))\n\n\t\t\tisoMassMin = self.isoMassCenter - self.isoMassRange\n\t\t\tisoMassMax = self.isoMassCenter + self.isoMassRange\n\t\t\tfmax = self.ch2_freq_factor * self.ch1_freq\n\n\t\t\tresult, msg = self.isoInit(trapingV, self.ch1_freq, self.r0, self.z0, isoMassMin, isoMassMax, self.ch2_final_freq, fmax)\n\t\t\treturn result, msg\n\t\telse:\n\t\t\tmsg = \"No Change\"\n\t\t\treturn True, msg\n\n\n\tdef isoInit(self, trapingV, trapingF, r0, z0, isoMassMin, isoMassMax, fmin, fmax):\n\t\tself.freqlist = np.linspace(fmin, fmax, CHIRP_DATA_COUNT)\n\t\tself.tarray = np.linspace(0, DELTAT*(CHIRP_DATA_COUNT-1), CHIRP_DATA_COUNT)\n\t\t# print(\"trapingV: \"+str(trapingV) )\n\t\t# print(\"trapingF: \"+str(trapingF) )\n\t\t# print(\"r0: \"+str(r0) )\n\t\t# print(\"z0:\" +str(z0) )\n\t\t_, maxMass = self.massFreqTransfer(trapingV, trapingF, r0, z0, 0, fmin)\n\t\t_, minMass = self.massFreqTransfer(trapingV, trapingF, r0, z0, 0, fmax)\n\t\t# print(\"maxMass\" + str(maxMass))\n\t\t# print(\"minMass\" + str(minMass))\n\t\tif (isoMassMin < minMass) or (isoMassMax > maxMass):\n\t\t\tmaxMass_str = \"%2.2f\" % maxMass\n\t\t\tminMass_str = \"%2.2f\" % minMass\n\t\t\tmsg = ISO_ERROR_MSG1 + minMass_str + \" and \" + maxMass_str + ISO_ERROR_MSG2\n\t\t\t# print(msg)\n\t\t\tmsg = \"Error!\\n\" + msg\n\t\t\treturn False, msg\n\t\telse:\n\t\t\tself.calChirp(trapingV, trapingF, r0, z0, isoMassMin, isoMassMax)\n\t\t\tfil2a.ArraytoBinFile(ISO_OUT_FILE, self.isoChirpOut,'f')\n\t\t\tfil2a.ArraytoBinFile(MSMS_OUT_FILE, self.msmsOut,'f')\n\t\t\trm_iso_cmd = \"rm \" + ISO_OUT_FILE\n\t\t\trm_msms_cmd = \"rm \" + MSMS_OUT_FILE\n\t\t\tmsg = \"No Error\"\n\n\t\t\tif TEST_MODE:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tself.ssh.sendCmd(rm_iso_cmd)\n\t\t\t\tself.ssh.sendCmd(rm_msms_cmd)\n\t\t\t\tself.ssh.putFtpFile(ISO_OUT_FILE)\n\t\t\t\tself.ssh.putFtpFile(MSMS_OUT_FILE)\n\n\t\t\tself.old_ch1_trapping_amp = self.ch1_trapping_amp\n\t\t\tself.old_ch2_freq_factor = self.ch2_freq_factor\n\t\t\tself.old_ch2_final_freq = self.ch2_final_freq\n\t\t\tself.old_isoMassCenter = self.isoMassCenter\n\t\t\tself.old_isoMassRange = self.isoMassRange\n\t\t\tself.old_iso_chirp_amp = self.iso_chirp_amp\n\t\t\tself.old_msms_amp = self.msms_amp\n\n\t\t\treturn True, msg\n\n\tdef massFreqTransfer(self, trapingV, trapingF, r0, z0, mass, freq):\n\t\tprint(\"trapingV = \" + str(trapingV))\n\t\tprint(\"freq = \" + str(freq))\n\t\te = 1.602e-19 \n\t\tmol = 6.022e23\n\t\tpi = np.pi \n\t\ttrapingF = 1000*trapingF\n\t\tmassout1 = 8*e*mol*trapingV*1000\n\t\tmassout2 = (r0**2+2*z0**2)*1e-4*(2*pi*1050000)**2\n\t\tfreq = freq*1000\n\t\tif (mass == 0):\n\t\t\tprint(\"massFreqTransfer 1\")\n\t\t\ta = 1.5*(35-np.sqrt((1225-70*(1-np.cos(2*pi*freq/trapingF)))))\n\t\t\tqz = (16/np.power(pi,3))*np.sqrt(a)\t# Tina\n\t\t\t# qz = 
(4/np.power(pi,2))*np.sqrt(a)\t# GRC\n\t\t\tmassout = (massout1/massout2)/qz\n\t\t\tprint(\"massout = \" + str(massout))\n\t\t\tfout = freq \n\t\telse:\n\t\t\tprint(\"massFreqTransfer 2\")\n\t\t\tqz = (massout1/massout2)/mass\n\t\t\ta = np.power(qz/(16/np.power(pi,3)),2)\t# Tina\n\t\t\t# a = np.power(qz/(4/np.power(pi,2)),2)\t# GRC\n\t\t\tfout = trapingF*np.arccos(1-((1225-np.power((a/1.5)-35,2))/70))/(2*pi)\n\t\t\tprint(\"fout = \" + str(fout))\n\t\t\tmassout = mass\t\n\t\treturn fout, massout\n\n\n\t# def isoInit(self, trapingV, trapingF, r0, isoMassMin, isoMassMax, fmin, fmax):\n\t# \tprint(\"fmin = \"+str(fmin))\n\t# \tprint(\"fmax = \"+str(fmax))\n\t# \tself.freqlist = np.linspace(fmin, fmax, CHIRP_DATA_COUNT)\n\t# \tself.tarray = np.linspace(0, DELTAT*(CHIRP_DATA_COUNT-1), CHIRP_DATA_COUNT)\n\t# \tself.calChirp(trapingV, trapingF, r0, isoMassMin, isoMassMax)\n\t# \tfil2a.ArraytoBinFile(ISO_OUT_FILE, self.isoChirpOut,'f')\n\t# \tfil2a.ArraytoBinFile(MSMS_OUT_FILE, self.msmsOut,'f')\n\t# \trm_iso_cmd = \"rm \" + ISO_OUT_FILE\n\t# \trm_msms_cmd = \"rm \" + MSMS_OUT_FILE\n\t# \tself.ssh.sendCmd(rm_iso_cmd)\n\t# \tself.ssh.sendCmd(rm_msms_cmd)\n\t# \tself.ssh.putFtpFile(ISO_OUT_FILE)\n\t# \tself.ssh.putFtpFile(MSMS_OUT_FILE)\n\n\t# def massToFreq(self, trapingV, trapingF, r0, mass):\n\t# \tprint(\"r0 = \"+str(r0))\n\t# \tprint(\"mass = \"+str(mass))\n\t# \te = 1.6021766208e-19\n\t# \tmol = 6.022e23\n\t# \t# trapingV = trapingV/1000\n\n\t# \tqz1 = 8*e*mol*trapingV*1000\n\t# \ttrapingF = trapingF*1000\n\t# \tqz2 = mass*r0*r0*np.power(2*np.pi*trapingF,2)\n\t# \tqz = qz1/qz2\n\t# \tprint(\"trapingV = \"+str(trapingV))\n\t# \tprint(\"trapingF = \"+str(trapingF))\n\t# \t# print(\"qz in new module=\"+ str(qz))\n\n\t# \tfout =np.arccos(1-(1225-np.power(35-np.power(qz*np.pi*np.pi/4,2)*2/3, 2))/70)*trapingF/2/np.pi\n\t# \treturn fout \n\n\tdef calChirp(self, trapingV, trapingF, r0, z0, isoMassMin, isoMassMax):\n\t\t#isoFreq1 = self.massToFreq(trapingV, trapingF, r0, isoMassMin)\n\t\tisoFreq1, _=self.massFreqTransfer(trapingV, trapingF, r0, z0, isoMassMin, 0)\n\t\tisoFreq2, _=self.massFreqTransfer(trapingV, trapingF, r0, z0, isoMassMax, 0)\n\t\tprint(\"isoFreq1=\"+str(isoFreq1))\n\t\tprint(\"isoFreq2=\"+str(isoFreq2))\n\t\tself.isoChirpOut = np.zeros(len(self.freqlist))\n\t\tself.msmsOut = np.zeros(len(self.freqlist))\n\t\tfp1 = open(\"iso.txt\",\"w\")\n\t\tfp2 = open(\"msms.txt\",\"w\")\n\n\t\tfor f in self.freqlist:\n\t\t\tif (isoFreq2/1000) < f < (isoFreq1/1000):\n\t\t\t\tself.msmsOut = self.msmsOut + np.sin(2*np.pi*f*self.tarray)\n\t\t\t\tfp1.write(str(f)+\"\\n\")\n\t\t\telse:\n\t\t\t\tself.isoChirpOut = self.isoChirpOut + np.sin(2*np.pi*f*self.tarray)\n\t\t\t\tfp2.write(str(f)+\"\\n\")\n\t\tfp1.close()\n\t\tfp2.close()\n\t\tampmax_msms = max(self.msmsOut)\n\t\tampmax_iso = max(self.isoChirpOut)\n\t\tmsmsConst = self.msms_amp/ampmax_msms\n\t\tisoConst = self.iso_chirp_amp/ampmax_iso\n\t\tself.msmsOut = msmsConst*self.msmsOut\n\t\tself.isoChirpOut = isoConst*self.isoChirpOut\n\t\tself.setChrip = False\n\n#gauge \nclass qss005ActionHK(QObject):\n\tgauge_update_text = pyqtSignal(str)\n\tgauge_finished = pyqtSignal()\n\n\tdef __init__(self, ssh, loggername, paraent = None):\t\n\t\tsuper(QObject, self).__init__(paraent)\n\t\tself.loggername = loggername\n\t\tself.gauge_init()\n\t\tself.ssh = ssh\n\n\tdef gauge_init(self):\n\t\tself.gauge_runFlag = False\n\n\tdef gauge_readData(self):\n\t\tcmd = UART_CMD + \"1 \\\"@254PR1?;FF\\\"\"\n\t\tprint(cmd)\n\t\t#i = 0\n\t\tErrStr = \"\"\n\t\twhile 
(self.gauge_runFlag):\n\t\t\tstdout, stderr = self.ssh.sendQuerryWithError(cmd)\n\t\t\tErrStr = stderr.readline()\n\t\t\tprint(\"ErrStr=\"+ErrStr)\n\t\t\tif (ErrStr != \"\"):\n\t\t\t\tErrStr = \"ERROR\"\n\t\t\t\toutlist = []\n\t\t\t\tself.gauge_update_text.emit(ErrStr)\n\t\t\t\tcmd = \"ps aux | grep UART\"\n\t\t\t\tstdout = self.ssh.sendQuerry(cmd)\n\t\t\t\tline = stdout.readline()\n\t\t\t\t#print(line)\n\t\t\t\tif (line != \"\"):\n\t\t\t\t\tsubline = line.rstrip('\\n')\n\t\t\t\t\t#print(subline)\n\t\t\t\t\toutlist.append(subline.split(' '))\n\t\t\t\t\t#print(outlist)\n\t\t\t\t\tcmd = \"kill -9 \" + outlist[0][6]\n\t\t\t\t\t#print(cmd)\n\t\t\t\t\tstdout = self.ssh.sendQuerry(cmd)\n\t\t\t\tself.gauge_runFlag = False\n\t\t\telse:\n\t\t\t\toutput = stdout.readline()\n\t\t\t\tif (output != \"\"):\n\t\t\t\t\toutput2 = output[7:-4]\n\t\t\t\t\toutput = str(float(output2))\n\t\t\t\t\tself.gauge_update_text.emit(output)\n\t\t\t\t#print(i)\n\t\t\t\t#self.gauge_update_text.emit(str(i))\n\t\t\t\t#i = i + 1\n\t\t\t\ttime.sleep(1)\n\t\t# while end\n\t\tself.gauge_finished.emit()\n\nif __name__ == '__main__':\n\n\taa = qss005Action(\"test\")\n\tfout, massout = aa.massFreqTransfer(757, 800, 0.667, 0.522, 0, 10000)\n\tfout, massout = aa.massFreqTransfer(757, 800, 0.667, 0.522, massout, 0)\n\n\n","repo_name":"adamShiau/adamShiau_Python","sub_path":"QuanPY3/QSS005_UI/QSS005_Action2.py","file_name":"QSS005_Action2.py","file_ext":"py","file_size_in_byte":18606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"71853389834","text":"\"\"\"\nname: Cyril PARODI\ndate: 26/09/2019\nmodule: cpu.py\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtCore\nfrom .metric import MetricRequest, MetricCallback\nfrom src.utils.conversions import convert_human\n\nclass CPUDataProcessing(QtCore.QThread):\n cpu_ready_signal = QtCore.pyqtSignal(object)\n\n def __init__(self, _expr_dict):\n super(CPUDataProcessing, self).__init__()\n\n self.expr_dict = _expr_dict\n\n self.metrics = {}\n\n self.metric_process = MetricRequest()\n self.metric_post_process = MetricCallback()\n\n self.metric_process.add_observer(self.metric_post_process)\n\n def generate(self):\n nbr_cores_tuple = (\n \"Total number of Cores\",\n self.metric_process.process(self.expr_dict[\"panels\"][0][\"targets\"][0][\"expression\"])\n )\n self.metrics[\"CPU_NBR_CORES\"] = nbr_cores_tuple\n\n for core_index in range(int(nbr_cores_tuple[1])):\n core_freq = 0\n max_core_freq = 0\n\n final_core_freq_tuple = (\n f\"CPU Core {core_index + 1} Frequency\",\n convert_human(core_freq) + \"Hz / \" + convert_human(max_core_freq) + \"Hz\"\n )\n self.metrics[f\"CPU_CORE_{core_index}_FREQ\"] = final_core_freq_tuple\n\n return self.metrics\n\n def refresh(self):\n while True:\n nbr_cores_tuple = (\n \"Total number of Cores\",\n self.metric_process.process(self.expr_dict[\"panels\"][0][\n \"targets\"][0][\"expression\"])\n )\n self.metrics[\"CPU_NBR_CORES\"] = nbr_cores_tuple\n\n for core_index in range(int(nbr_cores_tuple[1])):\n core_freq = self.metric_process.process(\n self.expr_dict[\"panels\"][1][\"targets\"][0][\"expression\"],\n core_index\n )\n max_core_freq = self.metric_process.process(\n self.expr_dict[\"panels\"][1][\"targets\"][1][\"expression\"],\n core_index\n )\n\n core_freq = eval(core_freq)\n max_core_freq = eval(max_core_freq)\n\n final_core_freq_tuple = (\n f\"CPU Core {core_index + 1} Frequency\",\n convert_human(core_freq) + \"Hz / \" + convert_human(\n max_core_freq) + \"Hz\"\n )\n\n 
self.metrics[f\"CPU_CORE_{core_index}_FREQ\"] = \\\n final_core_freq_tuple\n\n self.cpu_ready_signal.emit(self.metrics)\n self.sleep(1)\n\n def run(self):\n self.refresh()","repo_name":"crlparodi/butterscotch","sub_path":"src/metrics/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"30786205879","text":"from integration.DBHandler import DBHandler\nfrom model.stopwords import Stopword\n\n\n\n# prints all words that occur more than @occurence_per_movie times for all moods.\ndef find_common_words(occurence_per_movie, mood):\n db = DBHandler()\n i = 0\n list = db.get_all_word(mood)\n moods = db.list_moods()\n movie_count_for_joy = db.get_count_movies_for_mood(\"joy\")\n movie_count_for_fear = db.get_count_movies_for_mood(\"fear\")\n movie_count_for_surprise = db.get_count_movies_for_mood(\"surprise\")\n movie_count_for_sadness = db.get_count_movies_for_mood(\"sadness\")\n\n for element in list:\n if db.get_word_count(element[0], 'fear')[0] / movie_count_for_fear > occurence_per_movie \\\n and db.get_word_count(element[0], 'sadness')[0] / movie_count_for_sadness > occurence_per_movie \\\n and db.get_word_count(element[0], 'surprise')[0] / movie_count_for_surprise > occurence_per_movie \\\n and db.get_word_count(element[0], 'joy')[0] / movie_count_for_joy > occurence_per_movie:\n print(\"\\\"\" + element[0] + \"\\\",\", end='')\n print(\"}\", end='', flush=True)\n\n\n# find_common_words(1, \"sadness\")\n\n\n# @param - mood - most common words for which mood\n# @param - stopwords - boolean, should stopwords be included or not\ndef latex_format(mood,stopwords):\n db = DBHandler()\n i = 0\n list = db.get_all_word(mood)\n stopword_list = Stopword().stopwords\n count_movies = db.get_count_movies_for_mood(mood)\n for x in list:\n if i == 10:\n break\n if stopwords or (x[0] not in stopword_list):\n print(x[0] + \" & \" + str(round(x[1]/count_movies,1)) + \"\\\\\" + \"\\\\\")\n i += 1\nlatex_format(\"joy\",True)\n","repo_name":"moodlabeler/mood-labeling-movies","sub_path":"scripts/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19267991727","text":"#9 - 7. Admin: An administrator is a special kind of user. Write a class called \n#Admin that inherits from the User class you wrote in Exercise 9-3 (page 162) \n#or Exercise 9-5 (page 167). Add an attribute, privileges, that stores a list \n#of strings like \"can add post\", \"can delete post\", \"can ban user\", and so on. \n#Write a method called show_privileges() that lists the administrator’s set of \n#privileges. Create an instance of Admin, and call your method.\n\n#import the Admin class\nfrom admin import Admin\n\nuser_1 = Admin('nina', 'amis', 'naoya@yahoo.com')\nuser_1.describe_user()\nuser_1.show_privileges()","repo_name":"Mart1nDimtrov/Python-Crash-Course","sub_path":"09. 
Classes/my_admin.py","file_name":"my_admin.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"30287162544","text":"from cock_information import *\r\n\r\nall_cocktail = cocktails()\r\n# Holds the cocktails that can be made and is used to show the recipe of the chosen cocktail.\r\nvalid_cocktail = []\r\n\r\n\r\n# Function that prints the details of a cocktail\r\ndef information(name):\r\n if name[1] == 1:\r\n chill = 'chilled'\r\n else:\r\n chill = 'not chilled'\r\n if name[2] == 0:\r\n check_garnish = 'no garnish'\r\n else:\r\n check_garnish = name[2]\r\n print(\"{}:\\nChilling - {}\\nGarnish - {}\\nIngredients -\".format(name[0], chill, check_garnish))\r\n for j in range(len(name[3])):\r\n print(name[3][j])\r\n\r\n\r\n# Function that searches for a cocktail by name\r\ndef search():\r\n cock_name = input(\"Enter the name of the cocktail you want to find, in English: \")\r\n check = 0\r\n for i in all_cocktail:\r\n if cock_name in i[0]:\r\n print(\"Found it\\n\")\r\n print(\"=====================================\")\r\n information(i) # print the cocktail details\r\n print(\"=====================================\")\r\n else:\r\n check += 1\r\n pass\r\n\r\n if check == len(all_cocktail):\r\n print(\"No matching cocktail found.\")\r\n\r\n\r\ndef valid_search(name):\r\n check = 0\r\n print(\"Printing the recipe for {}.\".format(name))\r\n for i in all_cocktail:\r\n if name == i[0]:\r\n print(\"=====================================\")\r\n information(i) # print the cocktail details\r\n print(\"=====================================\")\r\n else:\r\n check += 1\r\n pass\r\n\r\n if check == len(all_cocktail):\r\n print(\"No matching cocktail found.\")\r\n\r\n\r\n# Function that finds what can be made with the ingredients currently on hand\r\ndef available_to_make():\r\n inhand = ['peach liqueur', 'tequila', 'rum', 'gin', 'bodka',\r\n 'triple sec', 'blue currasso', 'kahlua', 'midori', 'grenadine syrup',\r\n 'creme de cassis', 'salt/pepper', 'sugar', 'club soda']\r\n\r\n global valid_cocktail\r\n\r\n for i in range(len(all_cocktail)):\r\n name = all_cocktail[i]\r\n stack = 0\r\n for j in range(len(name[3])):\r\n str1, num1 = name[3][j].split(\", \") # If 'not enough values to unpack' occurs, check that the data contains \",\".\r\n if str1 in inhand or 'juice' in str1:\r\n stack += 1\r\n else:\r\n break\r\n if stack >= len(name[3]):\r\n print(\"{} can be made with the ingredients you have.\".format(name[0]))\r\n valid_cocktail.append(name[0])\r\n\r\n\r\ndef main():\r\n while True:\r\n work = input(\"What would you like to do?\\n[1] Search cocktail recipes\\n[2] Find cocktails you can make with your ingredients\\n[x] Quit\\n--> \")\r\n if work == 'x':\r\n print(\"Exiting the program.\")\r\n break\r\n\r\n if work == '1':\r\n search()\r\n elif work == '2':\r\n available_to_make()\r\n print(\"==========================================================================\")\r\n print(\"Print a cocktail recipe?\\nEnter the number of the cocktail you want from the list of cocktails you can make.\")\r\n print(\"Enter x to cancel.\")\r\n while True:\r\n show_recipe = input(\"--> \")\r\n if show_recipe == 'x':\r\n break\r\n elif 1 <= int(show_recipe) <= len(valid_cocktail):\r\n valid_search(valid_cocktail[int(show_recipe) - 1])\r\n break\r\n else:\r\n print(\"Invalid value, please try again.\", end='')\r\n else:\r\n print(\"Invalid value, please try again.\", end='')\r\n work = 0\r\n print(\"\\n\")\r\n\r\n\r\nmain()\r\n","repo_name":"sanghyeon1/Cocktail_Recipe","sub_path":"Cocktail_Recipe.py","file_name":"Cocktail_Recipe.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"27613724875","text":"import random\r\n\r\nall_items = []\r\n\r\ndef main():\r\n command = input(\"\\nCommand: \")\r\n \r\n #walk\r\n if command == \"walk\":\r\n rand_item = 
random.randint(0,len(all_items)-1)\r\n \r\n print(\"While walking down a path, you see \" + all_items[rand_item] + \".\")\r\n \r\n if input(\"Do you want to grab it? (y/n): \").lower() == \"y\":\r\n f = open(\"inventory.txt\",\"r\")\r\n cnt = 0\r\n for l in f:\r\n if l != \"\\n\":\r\n cnt += 1\r\n f.close()\r\n \r\n if cnt < 4:\r\n f = open(\"inventory.txt\",\"a\")\r\n f.write(all_items[rand_item])\r\n print(\"You picked up a \" + all_items[rand_item] + \".\")\r\n else:\r\n print(\"You can't carry any more items. Drop something first.\")\r\n f.close()\r\n\r\n #show\r\n if command == \"show\":\r\n cnt = 1\r\n\r\n try:\r\n f = open(\"inventory.txt\")\r\n for line in f:\r\n print(str(cnt) + \". \" + line[:-1])\r\n cnt += 1\r\n except:\r\n print(\"You dont have any items.\")\r\n \r\n f.close()\r\n \r\n #drop\r\n if command == \"drop\":\r\n item_num = int(input(\"Number: \"))\r\n try:\r\n f = open(\"inventory.txt\")\r\n lines = f.readlines()\r\n item = lines[item_num-1]\r\n del lines[item_num-1]\r\n\r\n f = open(\"inventory.txt\",\"w\")\r\n for line in lines:\r\n f.write(line)\r\n print(\"You dropped \" + item)\r\n f.close()\r\n except:\r\n print(\"That item doesnt exist.\")\r\n\r\n \r\n\r\n if command == \"exit\":\r\n print(\"Bye!\")\r\n else:\r\n main()\r\n \r\n\r\nif __name__ == \"__main__\":\r\n f = open(\"wizard_all_items.txt\")\r\n for l in f:\r\n all_items.append(l)\r\n f.close()\r\n \r\n print(\"The Wizard Inventory program\")\r\n print(\"\\nCOMMAND MENU\")\r\n print(\"walk - Walk down the path\")\r\n print(\"show - Show all items\")\r\n print(\"drop - Drop an item\")\r\n print(\"exit - Exit program\")\r\n \r\n main()\r\n","repo_name":"snowlance7/Python-Projects","sub_path":"week3/Chapter 07 Starter Files/wizard_inventory.py","file_name":"wizard_inventory.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"19241016408","text":"import urllib\nimport urlparse\nimport math\n\nimport xbmc\nimport xbmcgui\nimport xbmcplugin\n\nfrom de.generia.kodi.plugin.backend.zdf.SearchResource import SearchResource \n\nfrom de.generia.kodi.plugin.frontend.base.Pagelet import Item \nfrom de.generia.kodi.plugin.frontend.base.Pagelet import Action \nfrom de.generia.kodi.plugin.frontend.base.Pagelet import Pagelet \n\nfrom de.generia.kodi.plugin.frontend.zdf.AbstractPage import AbstractPage\nfrom de.generia.kodi.plugin.frontend.zdf.Constants import Constants\n\nfrom de.generia.kodi.plugin.frontend.zdf.search.SearchHistory import HistoryEntry \n\n\nclass SearchPage(AbstractPage):\n searchHistory = None\n \n def __init__(self, searchHistory):\n super(SearchPage, self).__init__()\n self.searchHistory = searchHistory\n\n def service(self, request, response):\n pages = int(request.getParam('pages', -1))\n page = int(request.getParam('page', 1))\n \n query = dict(request.params)\n if 'pages' in query:\n del query['pages']\n \n query['from'] = ''\n query['to'] = ''\n query['sender'] = 'alle Sender'\n query['attrs'] = ''\n \n if 'q' not in query:\n self.info(\"Timer - getting search-string from keyboard ...\")\n start = self.context.log.start()\n text = self._getKeyboardInput()\n self.info(\"Timer - getting search-string from keyboard ... done. 
[{} ms]\", self.context.log.stop(start))\n if text is not None:\n query['q'] = text\n else:\n response.sendInfo(self._(32006))\n return\n\n self.info(\"Timer - loading results ...\")\n start = self.context.log.start()\n self._progress = xbmcgui.DialogProgress()\n try:\n msg = self._(32021)\n if pages != -1:\n msg = self._(32022, page, pages)\n self._progress.create(self._(32020), msg)\n self._progress.update(0, msg)\n self._loadResults(request, response, pages, page, query)\n \n # add search history entry\n self._saveQuery(query)\n \n #except:\n # self.warn(\"Timer - loading results ... exception\") \n finally:\n self.info(\"Timer - loading results ... done. [{} ms]\", self.context.log.stop(start))\n self._progress.close();\n\n def _saveQuery(self, query):\n if self.results > 0:\n contentTypes = None\n if 'contentTypes' in query:\n contentTypes = query['contentTypes']\n self.searchHistory.addEntry(HistoryEntry(query['q'].strip(), contentTypes))\n\n def _loadResults(self, request, response, pages, page, query):\n queryParams = urllib.urlencode(query)\n searchUrl = Constants.baseUrl + \"/suche?\" + queryParams\n \n self.info(\"searching url: '{}' ...\", searchUrl)\n searchPage = SearchResource(searchUrl)\n self._parse(searchPage)\n self.info(\"found '{}' results.\", len(searchPage.teasers))\n\n if len(searchPage.teasers) == 0:\n response.sendInfo(self._(32013))\n \n pages = int(math.ceil(float(searchPage.results) / float(searchPage.resultsPerPage)))\n \n self.results = 0\n self._addItems(response, searchPage.teasers)\n \n if len(searchPage.teasers) == 0:\n return\n \n if self.settings.loadAllSearchResults:\n self._addMoreResults(response, searchPage.moreUrl, pages, page)\n else:\n self._addMoreFolder(response, searchPage.moreUrl, pages, page)\n \n self._progress.update(percent=100)\n self.info(\"added '{}' result-items.\", self.results)\n\n\n def _addItems(self, response, teasers):\n self.debug(\"Timer - creating list items ...\")\n start = self.context.log.start()\n for teaser in teasers:\n if not self.settings.showOnlyPlayableSearchResults or teaser.playable: \n item = self._createItem(teaser)\n response.addItem(item)\n self.results += 1\n self.debug(\"Timer - creating list items ... done. 
[{} ms]\", self.context.log.stop(start))\n\n def _addMoreResults(self, response, moreUrl, pages, page):\n\n while moreUrl is not None and page < pages and not self._progress.iscanceled():\n moreUrl = moreUrl.replace('=', '=')\n moreUrl = moreUrl.replace('&', '&')\n\n page += 1\n percent = page*100/pages\n self._progress.update(percent, self._(32022, page, pages))\n\n searchUrl = Constants.baseUrl + moreUrl\n self.info(\"searching url: '{}' ...\", searchUrl)\n searchPage = SearchResource(searchUrl)\n self._parse(searchPage)\n \n if len(searchPage.teasers) > 0:\n self._addItems(response, searchPage.teasers)\n moreUrl = searchPage.moreUrl\n else: \n moreUrl = None\n self.info(\"found '{}' results.\", len(searchPage.teasers))\n\n def _addMoreFolder(self, response, moreUrl, pages, page):\n if page < pages: \n page += 1\n moreAction = self._getMoreAction(moreUrl, pages, page)\n response.addFolder(self._(32017, page, pages), moreAction)\n\n def _getMoreAction(self, moreUrl, pages, page):\n i = moreUrl.find('?')\n if i != -1:\n moreQuery = moreUrl[i+1:]\n moreQuery = moreQuery.replace('=', '=')\n moreQuery = moreQuery.replace('&', '&')\n searchArgs = urlparse.parse_qs(moreQuery)\n for key, value in searchArgs.iteritems():\n searchArgs[key] = value[0]\n searchArgs['pages'] = pages\n searchArgs['page'] = page\n moreAction = Action('SearchPage', searchArgs)\n return moreAction\n \n\n def _getKeyboardInput(self):\n keyboard = xbmc.Keyboard('', self._(32005))\n keyboard.doModal()\n text = None\n if keyboard.isConfirmed() and keyboard.getText():\n text = keyboard.getText()\n return text\n\n","repo_name":"generia/plugin.video.zdf_de_2016","sub_path":"de/generia/kodi/plugin/frontend/zdf/search/SearchPage.py","file_name":"SearchPage.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"28"} +{"seq_id":"37550471978","text":"import http.server\nimport http.client\nimport socketserver\nfrom pathlib import Path\nimport json\n\n# -- THE PORT\nPORT = 8080\nServer = \"rest.ensembl.org\"\nParams = \"?content-type=application/json\"\n\n# -- This is for preventing the error: \"Port already in use\"\nsocketserver.TCPServer.allow_reuse_address = True\n\n\ndef server(Request_line):\n # Connect with the server\n conn = http.client.HTTPConnection(Server)\n try:\n conn.request(\"GET\", Request_line)\n except ConnectionRefusedError:\n print(\"ERROR! Cannot connect to the Server\")\n exit()\n\n # Read the response message from the server\n r1 = conn.getresponse()\n\n # Read the response's body\n data = r1.read().decode(\"utf-8\")\n data1 = json.loads(data)\n\n return data1\n# Class with our Handler. It is a class derived from BaseHTTPRequestHandler\n# It means that our class inherits all of its methods and properties\n\n\nclass TestHandler(http.server.BaseHTTPRequestHandler):\n\n def do_GET(self):\n \"\"\"This method is called whenever the client invokes the GET method\n in the HTTP protocol request\"\"\"\n\n # Print the request line\n print(self.requestline)\n\n # Analyze the request line\n req_line = self.requestline.split(' ')\n\n # Get the path. It always starts with the / symbol\n path = req_line[1]\n\n # Split the path into the part before the ? and the part after\n arguments = path.split(\"?\")\n\n endpoint = arguments[0]\n\n if endpoint == \"/\": # Main page\n contents = Path(\"index.html\").read_text()\n status = 200\n # Option1\n elif endpoint == \"/listSpecies\":\n try:\n # Get what is after ? 
(limit=10)\n limit_numb = arguments[1]\n # Get the selected limit\n limit = limit_numb.split(\"=\")[1]\n Endpoint = \"/info/species\"\n # This is the req line to search the info\n Request_line = Endpoint + Params\n\n # Create a variable with the data, from the JSON received\n name_specie = server(Request_line)[\"species\"]\n\n count = 0\n for element in name_specie:\n count = count + 1\n contents = f\"\"\" \n <!DOCTYPE html>\n <html>\n <head>\n <title> List of species </title>\n </head>\n <body>\n <h3>\n    The total number of species in the Ensembl is: {count}\n    </h3>\n </body>\n </html>\n \"\"\"\n if limit == \"\":\n contents += f\"\"\"<a href=\"/\">Main page</a>\"\"\"\n status = 200\n\n elif int(limit) > count:\n contents = Path('Error.html').read_text()\n status = 404\n else:\n contents += f\"\"\"\n <h3>\n    The limit you have selected is: {limit}\n    </h3>\n <h3>\n    The names of the species are:\n    </h3>\n \"\"\"\n\n status = 200\n for element in name_specie[:int(limit)]: # From beginning to limit\n contents += f\"\"\"\n <p>\n    - {element[\"common_name\"]}\n    </p>\n \"\"\"\n contents += f\"\"\"<a href=\"/\">Main page</a>\"\"\"\n except ValueError:\n contents = Path('Error.html').read_text()\n status = 404\n\n # Option2\n elif endpoint == \"/infoKaryotype\":\n # Get what is after ? (specie=mouse)\n specie_name = arguments[1]\n # Get selected species\n name_specie = specie_name.split(\"=\")[1]\n Endpoint = \"/info/assembly/\"\n # This is the req line to search the info\n Request_line = Endpoint + name_specie + Params\n try:\n # Check if the req line is ok\n Request_line.isidentifier()\n # Create a variable with the data\n kar_sp = server(Request_line)\n contents = f\"\"\" \n <!DOCTYPE html>\n <html>\n <head>\n <title> Karyotype of a specific species </title>\n </head>\n <body>\n <h3>\n    The names of the chromosomes are:\n    </h3>\n </body>\n </html>\n \"\"\"\n status = 200\n for element in kar_sp[\"karyotype\"]:\n contents += f\"\"\"\n <p>\n    {element}\n    </p>\n \"\"\"\n contents += f\"\"\"<a href=\"/\">Main page</a>\"\"\"\n\n except KeyError:\n contents = Path(\"Error.html\").read_text()\n status = 404\n\n # Option3\n elif endpoint == \"/ChromLength\":\n # Get what is after ? (specie=mouse&chromo=18)\n sp_chromo = arguments[1]\n # Get the species that we select\n name_specie = sp_chromo.split(\"&\")[0].split(\"=\")[1]\n numb_chromo = sp_chromo.split(\"&\")[1].split(\"=\")[1]\n Endpoint = \"/info/assembly/\"\n # This is the req line to search the info\n Request_line = Endpoint + name_specie + \"/\" + numb_chromo + Params\n try:\n # Check if the req line is ok\n Request_line.isidentifier()\n # Create a variable with the data\n l_chromosome = server(Request_line)\n contents = f\"\"\" \n <!DOCTYPE html>\n <html>\n <head>\n <title> Length of the selected chromosome </title>\n </head>\n <body>\n <h3>\n    The length of the chromosome is: {l_chromosome[\"length\"]}\n    </h3>\n <a href=\"/\">Main page</a>\n </body>\n </html>\n \"\"\"\n status = 200\n\n except KeyError:\n contents = Path(\"Error.html\").read_text()\n status = 404\n\n # Generating the response message\n self.send_response(status) # -- Status line: OK!\n\n # Define the content-type header:\n self.send_header('Content-Type', 'text/html')\n self.send_header('Content-Length', len(str.encode(contents)))\n\n # The header is finished\n self.end_headers()\n\n # Send the response message\n self.wfile.write(str.encode(contents))\n\n return\n\nHandler = TestHandler\n\n# -- Open the socket server\nwith socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n print(\"Serving at PORT\", PORT)\n\n # -- Main loop: Attend the client. Whenever there is a new\n # -- client, the handler is called\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n print(\"\")\n print(\"Stopped by the user\")\n httpd.server_close()\n","repo_name":"luisgago08/2019-2020-PNE-Practices","sub_path":"Final-Project/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"37991069598","text":"\"\"\"\n *****************************************************************************\n FILE: turtle_drawing.py\n\n AUTHOR: Xingyu He\n\n ASSIGNMENT: Project 6 turtle_drawing\n\n DATE: Oct 8 2017\n\n DESCRIPTION: This program takes user input and executes the commands. Inputs \n can make the turtle draw upwards, downwards, left, and right. \n The turtle can change color and the length of each move. It can go\n directly to a location. \n *****************************************************************************\n\"\"\"\n\nimport turtle\n\n\ndistance = 30\n\n\ndef main():\n michelangelo = turtle.Turtle()\n turtle_drawing(michelangelo)\n\n\ndef turtle_drawing(t):\n \"\"\"This turtle takes commands and draws according to the commands \"\"\"\n\n # This allows you to change the value of the global variable distance\n global distance\n\n print(\"\\nThe commands are: w, a, s, d, color, distance, width, goto, quit\")\n\n user_input = input(\"Enter a command: \")\n\n if user_input == \"w\":\n t.setheading(90)\n t.forward(distance)\n elif user_input == \"a\":\n t.setheading(180)\n t.forward(distance)\n elif user_input == \"s\":\n t.setheading(270)\n t.forward(distance)\n elif user_input == \"d\":\n t.setheading(0)\n t.forward(distance)\n elif user_input == \"color\":\n #ask user input and change the color of t\n color_input = input(\"Enter a color: \")\n t.color(color_input)\n\n elif user_input == \"distance\":\n #ask user input and change the distance each time the turtle travels\n distance_input = input(\"Enter a distance: \")\n distance = float(distance_input)\n elif user_input == \"width\":\n #ask user input the width of turtle\n width_input = input(\"Enter width: \")\n t.width(float(width_input))\n elif user_input == \"goto\":\n #ask user input the coordinate t should be\n x_goto_input = input(\"Enter x-coordinate: \")\n y_goto_input = input(\"Enter y-coordinate: \")\n t.up()\n t.goto(float(x_goto_input),float(y_goto_input))\n t.down()\n elif user_input == \"quit\":\n return\n\n turtle_drawing(t)\n\n\nmain()\n","repo_name":"XingyuHe/cs110","sub_path":"turtles/turtle_drawing.py","file_name":"turtle_drawing.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"36278048092","text":"from CompanyTest import get_data, percentage_calc, find_stock_list_wrapper, 
inorder_percents\nfrom main_functions import update, display_by_date, display_week, display_by_ticker, menu, display_hundred\nimport datetime as dt\n\n# globals\ntoday_date = dt.datetime.date(dt.datetime.now())\n\ndef main():\n print('STOCK CALENDAR')\n choice = 0\n while choice != 6:\n choice = menu()\n if choice == 1:\n #UPDATE DATA\n update()\n elif choice == 2:\n display_week()\n elif choice == 3:\n date_to_compare = input(\"\\nEnter Date (Month-Day) (ex. 06-01): \")\n display_by_date(date_to_compare)\n elif choice == 4:\n ticker_to_compare = input(\"\\nEnter Ticker Symb: \")\n ticker_to_compare = ticker_to_compare.upper()\n display_by_ticker(ticker_to_compare)\n elif choice == 5:\n display_hundred()\n\n print(\"Thank you\")\nif __name__ == '__main__':\n main()\n","repo_name":"ericanderson333/SP500_Calendar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"37031315431","text":"from django.contrib import admin\n\n# Register your models here.\nfrom .models import *\n\n# 把模型注册到admin中\n@admin.register(UserFav)\nclass UserFavAdmin(admin.ModelAdmin):\n list_display = ['id', 'goods', 'add_time']\n list_display_links = ['goods']\n\n\nadmin.site.register(UserLeavingMessage)\nadmin.site.register(UserAddress)\n","repo_name":"lize240810/Shop","sub_path":"apps/user_operation/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17945469362","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Read the original CSV file\ndf_original = pd.read_csv('output.csv')\n\n# Get all unique values from 'translations_from' and 'translations_to' columns\nlanguages = pd.unique(df_original[['translations_from', 'translations_to']].values.ravel('K'))\n\n# Create a new dataframe with the languages as the only column\ndf_languages = pd.DataFrame({'languages': languages})\n\n# Add translations from count column\ndf_languages['translations_from_count'] = df_languages['languages'].apply(\n lambda x: df_original[df_original['translations_from'] == x]['article_count'].sum())\n\n# Add translations to count column\ndf_languages['translations_to_count'] = df_languages['languages'].apply(\n lambda x: df_original[df_original['translations_to'] == x]['article_count'].sum())\n\n# Sort the dataframe by 'translations_from_count' column in descending order\ndf_languages = df_languages.sort_values(by='translations_from_count', ascending=False)\n\n# Write the new CSV file\ndf_languages.to_csv('translation_ratio_on_the_basis_of_article_count.csv', index=False)\n\n# Read the CSV file into a Pandas DataFrame\ndf = pd.read_csv('translation_ratio_on_the_basis_of_article_count.csv')\n\n# Group languages by their coordinates using a dictionary\nlang_groups = {}\nfor i, row in df.iterrows():\n coord = (row['translations_from_count'], row['translations_to_count'])\n lang = row['languages']\n if coord not in lang_groups:\n lang_groups[coord] = [lang]\n else:\n lang_groups[coord].append(lang)\n\n# Create a list of labels where each label is either a single language name or a comma-separated list of language names\nlabels = []\nfor i, row in df.iterrows():\n coord = (row['translations_from_count'], row['translations_to_count'])\n if len(lang_groups[coord]) == 1:\n labels.append(lang_groups[coord][0])\n else:\n labels.append(', '.join(lang_groups[coord]))\n\n# 
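# The coordinate-grouping loop above is a dict-of-lists pattern; a minimal
# equivalent sketch using collections.defaultdict (the DataFrame and column
# names mirror the snippet above, the values are made up):
from collections import defaultdict
import pandas as pd

df = pd.DataFrame({
    'languages': ['en', 'fr', 'de'],
    'translations_from_count': [10, 5, 10],
    'translations_to_count': [3, 2, 3],
})

lang_groups = defaultdict(list)
for _, row in df.iterrows():
    coord = (row['translations_from_count'], row['translations_to_count'])
    lang_groups[coord].append(row['languages'])

# Points sharing a coordinate get one comma-separated label:
labels = [', '.join(lang_groups[(f, t)])
          for f, t in zip(df['translations_from_count'], df['translations_to_count'])]
print(labels)  # ['en, de', 'fr', 'en, de']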
Create a scatter plot\nplt.scatter(df['translations_from_count'], df['translations_to_count'])\nplt.xlabel('Translations from Count')\nplt.ylabel('Translations to Count')\n\n# Add annotations for each point\nfor i, label in enumerate(labels):\n plt.annotate(label, (df['translations_from_count'][i], df['translations_to_count'][i]))\n\nplt.show()","repo_name":"Abhishek02bhardwaj/Flow-Diagrams-Illustrating-Translation-Imbalances","sub_path":"translation_ratios_on_the _basis_of_article_count.py","file_name":"translation_ratios_on_the _basis_of_article_count.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"7309854960","text":"#! /usr/bin/python\n\n'''\nThis script takes the tables of miRNAs identified with \nmiRDeep-P and puts them into one long list and also makes a \nfasta file out of them that can be used to align/compare \nwith. This script collapses samples (output has no\nduplicates) and numbers of reads are converted into\npercentages (from the corresponding sample(s)). Percentages \nare calculated to mean percentages if a sequence is present \nin multiple samples.\nInput = Sample_#_mature-miRBase.csv\n Sample_#_mature-notinmiRBase.csv\n (where # is all the samples)\nOutput = miRDP_found-all.csv + miRDP_found-all.fasta\n'''\n\nfrom sys import argv\nimport os\n\n### argvs to be used are:\n# 1: folder with the files (Sample_#_mature-*.csv)\n# 2: output .csv\n# 3: output .fasta\n\ndef read_lists(read_folder):\n '''\n This function reads the separate files with miRBase hits.\n It returns a dictionary of data to be written by the next\n function.\n Input = miRBase_found_#.csv (files)\n Output = dictionary of data to be used\n '''\n inputfiles_miRBase = [read_folder + csv for csv in os.listdir(read_folder) if csv.endswith(\"-miRBase.csv\")]\n inputfiles_notIn = [read_folder + csv for csv in os.listdir(read_folder) if csv.endswith(\"_notinmiRBase.csv\")]\n ## automatically detect files to read from the supplied\n ## folder\n \n writedata = {}\n total_sequences = 0\n ## keep track of the number of sequences\n for inputfile in inputfiles_miRBase:\n with open(inputfile, 'r') as tables:\n tables.readline()\n ## skip the first line (header)\n for line in tables:\n line = line.strip('\\n').split('\\t')\n number = int(line[10])\n total_sequences += number\n print(total_sequences, \" in \", inputfile)\n \n for inputfile in inputfiles_notIn:\n with open(inputfile, 'r') as tables:\n tables.readline()\n ## skip the first line (header)\n for line in tables:\n line = line.strip('\\n').split('\\t')\n number = int(line[8])\n total_sequences += number\n print(total_sequences, \" in \", inputfile)\n\n for inputfile in inputfiles_miRBase:\n ## loop over each file from the list\n writelist = []\n ## keep a list of the data per file\n with open(inputfile, 'r') as tables:\n ## open each file\n tables.readline()\n ## skip the first line (header)\n for line in tables:\n line = line.strip('\\n').split('\\t')\n source_name = line[0]\n name = line[1]\n prec_len = line[2]\n MFEI = line[5]\n sequence = line[6]\n mismatches = line[7]\n length = line[8]\n mat_seq_arm = line[9]\n number = int(line[10])\n writelist.append([name, number, sequence])\n ## read each file and note name, number and \n ## sequence and append these to the list\n \n for lists in writelist:\n lists[1] = float(lists[1] / total_sequences * 100)\n \n if lists[2] not in writedata:\n writedata[lists[2]] = [lists[0], lists[1]]\n else:\n sum = 
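# Sketch of the collapse step the docstring above describes: per-sequence
# read counts become percentages of the grand total, and shares are added up
# when the same sequence appears in more than one input table. The
# total_sequences value and the [name, count, sequence] rows are made up.
total_sequences = 200
rows = [["miR-a", 50, "UGAGG"], ["miR-b", 100, "ACCGU"], ["miR-a2", 50, "UGAGG"]]

collapsed = {}
for name, count, seq in rows:
    pct = count / total_sequences * 100
    if seq not in collapsed:
        collapsed[seq] = [name, pct]
    else:
        collapsed[seq][1] += pct   # same sequence seen again: add its share

print(collapsed)  # {'UGAGG': ['miR-a', 50.0], 'ACCGU': ['miR-b', 50.0]}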
(writedata[lists[2]][1] + lists[1])\n writedata[lists[2]] = [writedata[lists[2]][0], sum]\n\n for inputfile in inputfiles_notIn:\n ## loop over each file from the list\n writelist = []\n ## keep a list of the data per file\n with open(inputfile, 'r') as tables:\n ## open each file\n tables.readline()\n ## skip the first line (header)\n for line in tables:\n line = line.strip('\\n').split('\\t')\n name = line[0]\n prec_len = line[1]\n MFEI = line[4]\n sequence = line[5]\n length = line[6]\n mat_seq_arm = line[7]\n number = int(line[8])\n writelist.append([name, number, sequence])\n ## read each file and note name, number and \n ## sequence and append these to the list\n \n for lists in writelist:\n lists[1] = float(lists[1] / total_sequences * 100)\n \n# if lists[2] not in writedata:\n# n = 1\n# # keep an 'n' to make a weighted average\n# writedata[lists[2]] = [lists[0], lists[1], n]\n# else:\n# n = writedata[lists[2]][2] + 1\n# average = (writedata[lists[2]][1] * n + lists[1]) / (n + 1)\n# writedata[lists[2]] = [writedata[lists[2]][0], average, n]\n\n if lists[2] not in writedata:\n writedata[lists[2]] = [lists[0], lists[1]]\n else:\n sum = (writedata[lists[2]][1] + lists[1])\n writedata[lists[2]] = [writedata[lists[2]][0], sum]\n\n print(\"Total number of sequences identified as miRNA:\", total_sequences)\n return writedata\n \ndef write_output(data, outputcsv, outputfasta):\n '''\n This function writes all the required data to a new,\n collapsed .csv file and to a fasta file. It takes the\n dictionary created by the function above.\n Input = dictionary of data to be used\n Output = miRBase_found_all.csv (file) +\n miRBase_found_all.fasta (file)\n '''\n sortlist = []\n with open(outputcsv, 'w') as csv:\n csv.write(\"Name\\tOccurrence (%)\\tSequence\\n\")\n for keys in data.keys():\n sortlist.append([data[keys][0], data[keys][1], keys])\n\n sortlist.sort(key=lambda x: x[1], reverse = True)\n ## I want to have the miRNAs sorted by their occurence\n ## and descending (from high to low)\n \n for lists in sortlist:\n csv.write(str(lists[0]) + '\\t' + '{0:.5f}'.format(lists[1]) + '\\t' + lists[2] + '\\n')\n\n with open(outputfasta, 'w') as fasta:\n for lists in sortlist:\n fasta.write('>' + lists[0] + '_x' + \"{0:.5f}%\".format(lists[1]) + '\\n' + lists[2].replace('U', 'T').replace('u', 't') + '\\n')\n \n print(\"The list and fasta file are ready!\")\n\ndef main(argv):\n '''\n This function simply serves to combine all previous\n functions into one working script.\n '''\n write_output(read_lists(argv[0]), argv[0]+argv[1], argv[0]+argv[2])\n \nif __name__ == \"__main__\":\n main(argv[1:])\n","repo_name":"Naktuinbouw/miRNA_discovery_pipeline","sub_path":"Scripts/miRDP_summary+fasta.py","file_name":"miRDP_summary+fasta.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"12363095279","text":"import argparse\nimport pathlib\nimport re\nimport subprocess\nimport os\nimport logging\nimport socket\nimport json\n\nimport simplejson as simplejson\n\nfrom hdfssb.common.buffer import *\nfrom hdfssb.common.hash import sha1_file\nfrom hdfssb.client.hdfssb_client import *\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(dest='command', help=\"Command: send or download\")\n parser.add_argument(dest='file_name')\n # parser.add_argument(dest='user')\n parser.add_argument(dest='url_ledger_node')\n parser.add_argument(dest='key_file')\n\n 
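# The fasta writer above sorts entries by occurrence (descending) and
# back-transcribes RNA to DNA (U -> T) before writing; a short sketch of
# both steps with made-up entries:
entries = [("miR-b", 1.5, "ACCGU"), ("miR-a", 50.0, "UGAGGuag")]
entries.sort(key=lambda e: e[1], reverse=True)
for name, pct, seq in entries:
    print(">{}_x{:.5f}%".format(name, pct))
    print(seq.replace('U', 'T').replace('u', 't'))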
args = parser.parse_args()\n\n if args.command == 'send':\n send_file(args.file_name, args.url_ledger_node, args.key_file)\n elif args.command == 'download':\n download_file(args.file_name, args.url_ledger_node, args.key_file)\n elif args.command == 'list_files':\n list_files(args.url_ledger_node, args.key_file)\n elif args.command == 'list_nodes':\n list_nodes(args.url_ledger_node, args.key_file)\n\n# export PYTHONPATH=../.. ; python3 rq3.py send SampleAudio_0.7mb.mp3 ddarczuk 192.168.0.150:31454 /project/keys/root.priv\n# export PYTHONPATH=../.. ; python3 rq3.py download SampleAudio_0.7mb.mp3 ddarczuk 192.168.0.150:31454 /project/keys/root.priv\n# export PYTHONPATH=../.. ; python3 rq3.py send SampleAudio_0.7mb.mp3 ddarczuk 192.168.0.150:8008 /project/keys/root.priv\n# export PYTHONPATH=../.. ; python3 rq3.py download SampleAudio_0.7mb.mp3 ddarczuk 192.168.0.150:31454 /project/keys/root.priv\n\n\ndef list_files(url_ledger_node, key_file):\n hdfssb_client = HdfssbClient(base_url=url_ledger_node, keyfile=key_file)\n print(str(hdfssb_client.list_files_decoded()).replace('\\'', '\"'))\n\n\ndef list_nodes(url_ledger_node, key_file):\n hdfssb_client = HdfssbClient(base_url=url_ledger_node, keyfile=key_file)\n print(str(hdfssb_client.list_nodes_decoded()).replace('\\'', '\"'))\n\n\ndef send_file(file_name, url_ledger_node, key_file):\n #key_file = 'root.priv'\n #url_ledger_node = '127.22.0.1:8008'\n #user = 'ddarczuk'\n #file_name = 'SampleAudio_0.7mb.mp3'\n HOST = socket.gethostname()\n PORT = 60000\n\n hdfssb_client = HdfssbClient(base_url=url_ledger_node, keyfile=key_file)\n public_key = hdfssb_client.get_public_key()\n user = public_key\n\n owner_folder = './tmp_send/' + user + '/'\n folder = owner_folder + file_name + '/'\n pathlib.Path(folder).mkdir(parents=True, exist_ok=True)\n\n # 1. Encode file by raptor\n\n # s = min(file_size - 1, 65528) # 65528 is 2^16 - 8, max uint16_t\n # s = s - s mod 8\n file_size = os.path.getsize(file_name)\n s = min(file_size - 1, 65528)\n s = s - (s % 8)\n\n ###\n # liczba nodów\n # liczba nodów które potrzebne jest\n # drop-rate > repair-symbols-rate\n # private public ilosć bloków\n # procent bloków do pirvate\n # (blocks - (blocks*drop_rate)) / nodes > repair-symbols-rate\n\n # ./python-libraptorq/rq --debug encode -s1600 -m200 --repair-symbols-rate 1 --drop-rate 0.5 README.rst README.rst.enc\n\n # launch your python2 script using bash\n encode = \"\"\"./hdfssb/client/python-libraptorq/rq \\\n --debug \\\n encode \\\n -s{s} \\\n -m200 \\\n --repair-symbols-rate 1 \\\n {path_src} \\\n {path_dst}\"\"\".format(s=s, path_src=file_name, path_dst=owner_folder + file_name+'.enc') #.split(\"\\n\") # --drop-rate 0.5 \\\n\n process = subprocess.Popen(encode, stdout=subprocess.PIPE, shell=True)\n output, error = process.communicate() # receive output from the python2 script\n\n # \"needed: >(\\d*),\"\n # dla małych plików jak mp3\n # file_encoded_map = os.read_fiel as map\n\n # x = re.search(r'(?P\\d*) symbols \\(needed: >(?P\\d*)', output.decode(\"utf-8\"))\n # needed = int(x['needed'])\n # symbols = int(x['symbols'])\n x = re.search(r'(?P\\d*) symbols \\(needed: >(?P\\d*)', output.decode(\"utf-8\"))\n symbols = int(x.group(1))\n needed = int(x.group(2))\n\n # 2. 
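# The symbol-size rule above, factored into a helper for clarity: clamp to
# 65528 (2**16 - 8, the largest value that still fits a uint16_t after the
# rounding) and round down to a multiple of 8. A sketch, not part of the
# original script.
def symbol_size(file_size: int) -> int:
    s = min(file_size - 1, 65528)
    return s - (s % 8)

assert symbol_size(100) == 96
assert symbol_size(10_000_000) == 65528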
Load encoded file and split blocks to files\n\n with open(owner_folder + file_name+'.enc', 'r') as myfile:\n mapa = json.load(myfile)\n\n for block in mapa['symbols']:\n block_string = json.dumps(block)\n hash_name = hashlib.sha1(str.encode(block_string)).hexdigest()\n with open(folder + hash_name, \"w+\") as file:\n file.write(block_string)\n # logging.info(\"Create block \", block_string)\n\n del mapa['symbols']\n logging.info(\"MAPA: \", mapa)\n\n # 3. Read ledger, to find nodes where send blocks\n\n nodes = hdfssb_client.list_nodes_decoded()\n logging.info(\"Nodes \", nodes)\n\n max_number_public_node = needed - 1\n max_number_private_node = symbols - needed + 1\n\n valid_nodes = []\n number_private_node = 0\n number_public_node = 0\n\n for node_name, dict_atribute in nodes.items():\n #node = node.decode().split(\",\")\n #if node[] # dodaj jesli jest wolne miejsce na nodzie i dodaj procent private i publick\n #valid_nodes.append(node[0])\n\n if dict_atribute['capacity'] < dict_atribute['taken_space'] + dict_atribute['reversed_space']:\n continue\n\n if dict_atribute['cluster'] == 'private' and number_private_node <= max_number_private_node:\n number_private_node += 1\n valid_nodes.append(node_name)\n elif dict_atribute['cluster'] == 'public' and number_public_node <= max_number_public_node:\n number_public_node += 1\n valid_nodes.append(node_name)\n\n\n # 4. Assign blocks to nodes\n\n dict_block_to_node = {}\n length = len(valid_nodes)\n i = 0\n files_names = os.listdir(folder)\n\n for block in files_names:\n dict_block_to_node[block] = valid_nodes[i]\n i += 1\n if i == length:\n i = 0\n\n # 5. Save information in ledger\n\n file_to_send = dict(file_name=file_name,\n owner=user,\n state=\"requested\",\n size=file_size,\n file_hash=sha1_file(file_name),\n blocks_of_file=dict_block_to_node,\n checksums=mapa['checksums']['sha256'],\n data_bytes=mapa['data_bytes'],\n oti_common=mapa['oti_common'],\n oti_scheme=mapa['oti_scheme'],\n last_update=str(time.time()))\n logging.info(file_to_send)\n tx = hdfssb_client.add_file(name=file_name, payload_object=file_to_send)\n logging.info(\"Add file \", tx)\n hdfssb_client.wait_for_transaction(tx)\n\n # 6. Send file\n # for block, node in os.listdir(folder):\n for block, node in dict_block_to_node.items():\n\n try:\n # exception if not name know socket.gaierror: [Errno -2] Name or service not known\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node, PORT))\n\n with s:\n sbuf = Buffer(s)\n\n print(block)\n\n sbuf.put_utf8(file_name)\n\n file_size = os.path.getsize(folder + block)\n sbuf.put_utf8(str(file_size))\n\n with open(folder + block, 'rb') as f:\n sbuf.put_bytes(f.read())\n print('File Sent')\n except Exception as e:\n logging.warning(\"NO node\")\n\n\ndef download_file(file_name, url_ledger_node, key_file):\n HOST = socket.gethostname()\n PORT = 60002\n #file_name = 'SampleAudio_0.7mb.mp3'\n #key_file = 'root.priv'\n #url_ledger_node = '127.22.0.1:8008'\n\n # 1. Find where file is\n\n hdfssb_client = HdfssbClient(base_url=url_ledger_node, keyfile=key_file)\n\n file_metadata = hdfssb_client.show_file(file_name)\n print(\"Show file \", file_metadata)\n\n file_size = file_metadata['size']\n\n # 2. 
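# The round-robin assignment above (index i wrapping at len(valid_nodes))
# can be expressed with itertools.cycle; a minimal equivalent sketch with
# made-up block and node names:
from itertools import cycle

files_names = ['blk-a', 'blk-b', 'blk-c', 'blk-d']
valid_nodes = ['node1', 'node2']

dict_block_to_node = dict(zip(files_names, cycle(valid_nodes)))
print(dict_block_to_node)
# {'blk-a': 'node1', 'blk-b': 'node2', 'blk-c': 'node1', 'blk-d': 'node2'}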
For each block, connect to node and download\n\n owner_folder = './tmp_download/' + file_metadata['owner'] + '/'\n folder = owner_folder + file_metadata['file_name'] + '/'\n pathlib.Path(folder).mkdir(parents=True, exist_ok=True)\n\n for block_hash, node in file_metadata[\"blocks_of_file\"].items():\n print(block_hash + \":\" + node)\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node, PORT))\n\n with s:\n sbuf = Buffer(s)\n\n sbuf.put_utf8(file_metadata[\"owner\"] + \"/\" + file_metadata[\"file_name\"] + \"/\" + block_hash)\n\n with open(folder + block_hash, 'wb+') as f:\n remaining = int(file_size) + 4000\n while remaining:\n chunk_size = 4096 if remaining >= 4096 else remaining\n chunk = sbuf.get_bytes(chunk_size)\n if not chunk: break\n f.write(chunk)\n remaining -= len(chunk)\n if remaining:\n print('File incomplete. Missing', remaining, 'bytes.')\n else:\n print('File received successfully.')\n print('Connection closed.')\n except Exception as e:\n logging.warning(\"NO node\")\n\n # 3. Restore raptor file\n\n mapa = {'checksums': {'sha256': file_metadata['checksums']},\n 'data_bytes': int(file_metadata['data_bytes']),\n 'oti_common': int(file_metadata['oti_common']),\n 'oti_scheme': int(file_metadata['oti_scheme']),\n 'symbols': []}\n\n time.sleep(5)\n\n for filename in os.listdir(folder):\n with open(folder + filename, 'r') as myfile:\n try:\n data = json.load(myfile)\n mapa['symbols'].append(data)\n except:\n logging.error(\"Not read blopck\", filename)\n\n # with open(owner_folder + 'enc', \"w+\") as json_file:\n # json.dump(mapa, json_file)\n\n encoded_file_name = owner_folder + file_name + '.encoded_file'\n\n with open(encoded_file_name, \"w\") as twitterDataFile:\n # magic happens here to make it pretty-printed\n twitterDataFile.write(simplejson.dumps(mapa, indent=4, sort_keys=True))\n\n # 4. Decode file\n\n decode = \"\"\"./hdfssb/client/python-libraptorq/rq \\\n --debug \\\n decode \\\n {path_src} \\\n {path_dst}\"\"\".format(path_src=encoded_file_name, path_dst=owner_folder + file_name + '.decoded')\n\n process = subprocess.Popen(decode, stdout=subprocess.PIPE, shell=True)\n output, error = process.communicate() # receive output from the python2 script\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Damenus/blockchain-hybridcloud","sub_path":"hdfssb/client/rq3.py","file_name":"rq3.py","file_ext":"py","file_size_in_byte":10539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"9764259549","text":"from logging import Logger\nfrom typing import Dict\n\nfrom lxml import etree\nfrom lxml.etree import Element, XMLSyntaxError\n\nfrom .element import LxmlElement\n\n\nclass LxmlParser:\n \"\"\"A wrapper of the lxml etree document tree and parser.\n\n Args:\n logger: The instance of the Python logger\n\n Attributes:\n _tree : The ``lxml.etree`` DOM\n _log: The Python logger\n\n \"\"\"\n\n def __init__(self, logger: Logger):\n\n self._tree: Element = None\n self._log: Logger = logger\n\n def parse(self, file: str, store_ids: bool = False) -> bool:\n \"\"\"Parses an XML file\n Args:\n file: The file name of the xml document to be parsed. The parser can only parse from a local file.\n store_ids: If set to True, the parser will create a hash table of the xml IDs\n\n Returns:\n The return value. 
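# The download loop above is the standard "read until N bytes" pattern; a
# hedged, dependency-free sketch using a plain socket in place of the
# project's Buffer wrapper (sock and file_size are assumed given):
def recv_exact(sock, file_size, out_path):
    remaining = file_size
    with open(out_path, 'wb') as f:
        while remaining:
            chunk = sock.recv(min(4096, remaining))
            if not chunk:            # peer closed early: file incomplete
                break
            f.write(chunk)
            remaining -= len(chunk)
    return remaining == 0            # True only if every byte arrived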
True for success, False otherwise.\n\n \"\"\"\n # Parsing the XML file.\n parsed = False\n try:\n my_parser = etree.XMLParser(\n remove_comments=False,\n remove_blank_text=True,\n ns_clean=True,\n collect_ids=store_ids,\n )\n self._tree = etree.parse(file, parser=my_parser)\n parsed = True\n except XMLSyntaxError as e:\n self._log.error(e)\n except OSError:\n self._log.error(\"Failed to open file %s\" % file, exc_info=True)\n return parsed\n\n def get_root(self) -> Element:\n \"\"\"The XML root\n\n Returns:\n The XML root node\n\n \"\"\"\n return self._tree.getroot()\n\n def lxml_version(self) -> str:\n \"\"\"lxml version tag\n\n Returns:\n The lxml version tag\n\n \"\"\"\n return etree.LXML_VERSION\n\n def doc_public_id(self) -> str:\n \"\"\"\n\n Returns:\n The XML document type\n\n \"\"\"\n return self._tree.docinfo.public_id\n\n def doc_url(self) -> str:\n \"\"\"\n Returns:\n The XML document url\n\n \"\"\"\n return self._tree.docinfo.URL\n\n def doc_encoding(self) -> str:\n \"\"\"\n\n Returns:\n The XML document encoding\n\n \"\"\"\n return self._tree.docinfo.encoding\n\n def doc_root_name(self) -> str:\n \"\"\"\n\n Returns:\n The XML document root name\n\n \"\"\"\n return self._tree.docinfo.root_name\n\n def doc_system_url(self) -> str:\n \"\"\"\n\n Returns:\n The XML document system URL\n\n \"\"\"\n return self._tree.docinfo.system_url\n\n def doc_xml_version(self) -> str:\n \"\"\"\n\n Returns:\n The XML document version\n\n \"\"\"\n return self._tree.docinfo.xml_version\n\n def get_namespaces(self) -> Dict:\n \"\"\"The dict of the defined namespaces of (prefix, namespace) as (key,value) pairs\n\n Returns:\n (prefix, namespace) as (key,value) pairs\n\n \"\"\"\n root = self.get_root()\n if __debug__:\n if root is None:\n raise AssertionError(f\"{__name__}: The root node is None\")\n return root.nsmap\n\n def get_target_namespace(self) -> str:\n \"\"\"The target namespace of the schema\n\n Returns:\n The target namespace as a str\n\n \"\"\"\n root = self.get_root()\n if __debug__:\n if root is None:\n raise AssertionError(f\"{__name__}: The root node is None\")\n return root.get(\"targetNamespace\")\n\n def get_referenced_files(self) -> Dict:\n \"\"\"The XML imports (xs:import tags)\n\n Returns:\n A dict of key, value pairs (namespace: location/URL) of all xs:import tags.\n \"\"\"\n root = self.get_root()\n if __debug__:\n if root is None:\n raise AssertionError(f\"{__name__}: The root node is None\")\n urls = {}\n references = LxmlElement.find_all_children_with_name(root, \"import\")\n for ref in references:\n loc = ref.get(\"schemaLocation\")\n ns = ref.get(\"namespace\")\n urls[ns] = loc\n return urls\n","repo_name":"OCXStandard/ocx-schema-reader","sub_path":"ocx_tools/schema_xml/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"16655334601","text":"\nimport pydot\n\ndef makeDisjoinsetGraph(partition_set):\n graphviz = pydot.Dot(graph_type='digraph')\n\n for n in partition_set.values():\n graphviz.add_node(pydot.Node(str(n.data)))\n\n for n in partition_set.values():\n graphviz.add_edge(pydot.Edge(pydot.Node(str(n.data)), pydot.Node(str(n.parent.data))))\n\n graphviz.write_png('disjoinset_graph.png')\n\ndef tupleToString(tuple):\n string = \"\"\n first = True\n for i in range(0, len(tuple) % 10):\n if tuple[i] != None:\n if first: first = False\n else: string += \", \"\n string += str(tuple[i])\n\n if len(tuple) > 10:\n string += \" and \" + 
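# Hedged usage sketch for the LxmlParser wrapper above; the import path is
# inferred from the record's sub_path and "schema.xsd" is a placeholder file.
import logging
from ocx_tools.schema_xml.parse import LxmlParser

log = logging.getLogger("schema")
parser = LxmlParser(log)
if parser.parse("schema.xsd", store_ids=True):    # False on syntax/OS errors
    print(parser.doc_root_name(), parser.get_target_namespace())
    print(parser.get_referenced_files())          # {namespace: schemaLocation}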
str(len(tuple)- len(tuple) % 10) + \" others\"\n return string\n\ndef getColor(tuple):\n if len(tuple) == 1:\n return \"green\"\n elif len(tuple) > 10:\n return \"red\"\n elif len(tuple) > 2:\n return \"yellow\"\n elif len(tuple) > 1:\n return \"lightblue\"\n\ndef printCondensedGraph(condensedFromSet, outputFile):\n graphviz = pydot.Dot(graph_type='digraph')\n graphviz_nodes = {}\n\n for n in condensedFromSet.nodes():\n print_node = pydot.Node(tupleToString(n), style=\"filled\", fillcolor=getColor(n))\n graphviz.add_node(print_node)\n graphviz_nodes[n] = print_node\n\n\n for e in condensedFromSet.edges():\n graphviz.add_edge(pydot.Edge(graphviz_nodes[e[0]], graphviz_nodes[e[1]]))\n\n graphviz.write_png(outputFile)","repo_name":"thedamfr/uqac-8INF809","sub_path":"tp1-condensation/graphviz.py","file_name":"graphviz.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19476934787","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom sklearn.ensemble import GradientBoostingRegressor,AdaBoostRegressor,BaggingRegressor,ExtraTreesRegressor,RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV\n\ntrain_data=pd.read_csv('train_data.csv')\ntest_data=pd.read_csv('test_data.csv')\n\nIDs=test_data['ID']\n\n#get the featuresets generated by imputing missing values\ntr1=pd.read_csv('featuresset/tr1.csv')\ntr2=pd.read_csv('featuresset/tr2.csv')\ntr3=pd.read_csv('featuresset/tr3.csv')\ntr4=pd.read_csv('featuresset/tr4.csv')\ntr5=pd.read_csv('featuresset/tr5.csv')\ntr6=pd.read_csv('featuresset/tr6.csv')\n\nts1=pd.read_csv('featuresset/ts1.csv')\nts2=pd.read_csv('featuresset/ts2.csv')\nts3=pd.read_csv('featuresset/ts3.csv')\nts4=pd.read_csv('featuresset/ts4.csv')\nts5=pd.read_csv('featuresset/ts5.csv')\nts6=pd.read_csv('featuresset/ts6.csv')\n\n\ndef get_train_test_set(tr,ts):\n ytrain=tr['Footfall']\n xtrain=tr.drop(['Footfall','Unnamed: 0'],1)\n xtest=ts.drop(['Unnamed: 0'],1)\n return(xtrain,ytrain,xtest)\n\n\nxtrain1,ytrain1,xtest1=get_train_test_set(tr1,ts1)\nxtrain2,ytrain2,xtest2=get_train_test_set(tr2,ts2)\nxtrain3,ytrain3,xtest3=get_train_test_set(tr3,ts3)\nxtrain4,ytrain4,xtest4=get_train_test_set(tr4,ts4)\nxtrain5,ytrain5,xtest5=get_train_test_set(tr5,ts5)\nxtrain6,ytrain6,xtest6=get_train_test_set(tr6,ts6)\n\n\n\n#initialize parameters for gbm using grid search\nparams_gbm=[{'min_samples_split':list(range(100,1000,100)),'max_depth':list(range(3,10,1))}]\n\n#grid search \ngsearch=GridSearchCV(estimator=GradientBoostingRegressor(n_estimators=250,learning_rate=0.1,max_depth=4,\n subsample=0.8,random_state=10),\n param_grid=params_gbm,scoring='neg_mean_squared_error',n_jobs=4,cv=5,verbose=10)\n\n\n#fit the gbm using grid search on all the approaches to get best fit\n\n#1st\ngsearch.fit(xtrain1,ytrain1)\nprint('Best parameter',gsearch.best_params_)\nprint('Grid scores on development set',gsearch.grid_scores_)\n\n\n#2nd\ngsearch.fit(xtrain2,ytrain2)\nprint('Best parameter',gsearch.best_params_)\nprint('Grid scores on development set',gsearch.grid_scores_)\n\n#3rd\ngsearch.fit(xtrain3,ytrain3)\nprint('Best parameter',gsearch.best_params_)\nprint('Grid scores on development set',gsearch.grid_scores_)\n\n#4th\ngsearch.fit(xtrain4,ytrain4)\nprint('Best parameter',gsearch.best_params_)\nprint('Grid scores on development set',gsearch.grid_scores_)\n\n#5th\ngsearch.fit(xtrain5,ytrain5)\nprint('Best parameter',gsearch.best_params_)\nprint('Grid scores on development 
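# Note: tupleToString() above previews len(tuple) % 10 names, so a tuple of
# exactly 10 entries returns an empty string and a tuple of 20 prints only
# the "and N others" tail; a sketch of the presumably intended behaviour
# (first 10 names, then a remainder count):
def tuple_to_string(items):
    shown = [str(x) for x in items[:10] if x is not None]
    rest = max(len(items) - 10, 0)
    return ", ".join(shown) + (" and {} others".format(rest) if rest else "")

print(tuple_to_string(tuple(range(12))))
# '0, 1, 2, 3, 4, 5, 6, 7, 8, 9 and 2 others'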
set',gsearch.grid_scores_)\n\n#6th\ngsearch.fit(xtrain6,ytrain6)\nprint('Best parameter',gsearch.best_params_)\nprint('Grid scores on development set',gsearch.grid_scores_)\n\n","repo_name":"sanuamb/Predicting-Footfall","sub_path":"Predicting_Footfall_GBM_Grid_Search.py","file_name":"Predicting_Footfall_GBM_Grid_Search.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"38670768039","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import cross_val_score, GridSearchCV, train_test_split\nfrom sklearn.preprocessing import OrdinalEncoder, MinMaxScaler, StandardScaler\n\ndf.drop_duplicates(keep='first', inplace=True)\ndf_cats = df.select_dtypes(include=['object']).copy()\nfor i in df_cats.columns:\n mode = df_cats[i].mode()[0]\n df[i].replace('unknown',mode, inplace=True)\ndf['y'].replace(['no', 'yes'],[0,1], inplace=True)\ndf.rename(columns={\"y\":\"subscribe\"}, inplace=True)\nnp.random.seed(42)\n#Modeling\nenc = OrdinalEncoder()\ndf_cat = df.select_dtypes(include='object')\ndf_cats = df_cat.columns.tolist()\nenc.fit(df[df_cats])\ndf[df_cats] = enc.transform(df[df_cats])\ndf_model = df[[\"subscribe\", \"contact\", \"duration\",\"pdays\",\"previous\", \"poutcome\",\"emp.var.rate\", \"cons.price.idx\",\"euribor3m\", \"nr.employed\"]]\nX = df_model.drop(['subscribe'],axis=1)\ny = df_model['subscribe']\n\n#split data to train ,test and sample\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 42, stratify = y)\n\nscaler = StandardScaler()\nX_train_trf = scaler.fit_transform(X_train)\nX_test_trf = scaler.transform(X_test)\n\n#Define Model\ntry:\n import xgboost as xgb\nexcept ImportError as ex:\n print(\"Error: the xgboost library is not installed.\")\n xgboost = None\n\nif xgb is not None: \n xgb_model = xgb.XGBClassifier(verbosity = 0)\n\nwarnings.simplefilter(action='ignore', category=UserWarning)\n#train the model with our data train\nxgb_model.fit(X_train_trf, y_train)\npredictions_XGB = xgb_model.predict(X_test_trf)\n\n#Tuning\nwarnings.simplefilter(action='ignore', category=UserWarning)\nC = [0.1, 0.5, 1]\nsolver = ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']\nverbose = [1, 2, 3]\nparams_grid = {'C': C,\n 'solver': solver,\n 'verbose': verbose}\n\nmodel_log = xgb.XGBClassifier(verbosity = 0)\nmodel_log_gridCV = GridSearchCV(model_log, params_grid, scoring=\"f1\", cv=3, verbose=2, n_jobs=-1)\n\nmodel_log_gridCV.fit(X_train_trf, y_train)\n\nfinal_model = model_log_gridCV.best_params_\nXGB_Best = xgb.XGBClassifier(**final_model)\n\nXGB_Best.fit(X_train_trf, y_train)\n\npred_result = []\n\nmodel_pred = XGB_Best.predict(X_test_trf)\npred_result.append(model_pred)","repo_name":"naufalwj/Customer-Term-Deposit-Subscriptions-Predictions","sub_path":"apps/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6587980097","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import print_function, unicode_literals, division, absolute_import\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint('Tensorflow version >> ', tf.__version__)\n\nprint('Import the Fashion MNIST dataset')\nfashion_mnist = keras.datasets.fashion_mnist\n\nprint('Splitting the dataset...')\n(train_images, train_labels), 
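# Note on the grid search above: grid_scores_ was removed in scikit-learn
# 0.20; on modern versions the equivalent inspection uses best_params_ and
# cv_results_. A self-contained sketch with toy data and a reduced grid:
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV

X, y = make_regression(n_samples=200, n_features=5, random_state=0)
search = GridSearchCV(
    GradientBoostingRegressor(n_estimators=50, random_state=10),
    param_grid={'max_depth': [3, 4], 'min_samples_split': [100, 200]},
    scoring='neg_mean_squared_error', cv=3)
search.fit(X, y)
print(search.best_params_)
print(search.cv_results_['mean_test_score'])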
(test_images, test_labels) = fashion_mnist.load_data()\n\n# The images are 28x28 NumPy arrays, with pixel values ranging from 0 to 255.\n# The labels are an array of integers, ranging from 0 to 9.\n\nprint('Class labels ...')\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\nprint(class_names)\n\nprint('Training shape >> ', train_images.shape)\nprint('Testing shape >> ', test_images.shape)\n\nprint('Sample plot...')\nplt.figure()\nplt.imshow(train_images[0])\nplt.colorbar()\nplt.grid(False)\nplt.show()\n\n# Scale these values to a range of 0 to 1 before feeding them to the neural network model.\n# To do so, divide the values by 255. It's important that the training set and the testing \n# set be preprocessed in the same way:\nprint('Scaling...')\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# Check data is in correct format or not.\n\nprint('Checking data is in correct format or not???')\nplt.figure(figsize=(10, 10))\n\nfor i in range(25):\n plt.subplot(5, 5, i + 1)\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[train_labels[i]])\nplt.show()\n\n# Build the model.\nprint('Building neural network using KERAS library...')\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(10, activation='softmax')\n])\n\n# The first layer in this network, tf.keras.layers.Flatten, transforms the format \n# of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional \n# array (of 28 * 28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up. \n# This layer has no parameters to learn; it only reformats the data. \n\n# After the pixels are flattened, the network consists of a sequence of two tf.keras.layers.Dense layers. \n# These are densely connected, or fully connected, neural layers. The first Dense layer has 128 nodes (or neurons). 
\n# The second (and last) layer is a 10-node softmax layer that returns an array of 10 probability scores that sum to 1.\n# Each node contains a score that indicates the probability that the current image belongs to one of the 10 classes.\n\nprint('Setting optimize, loss function ....')\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nprint('Start model training....')\nmodel.fit(train_images, train_labels, epochs=10)\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint('\\nTest accuracy:', test_acc)\n\npredictions = model.predict(test_images)\n\n\ndef plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array, true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100 * np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, true_label = predictions_array, true_label[i]\n plt.grid(False)\n plt.xticks(range(10))\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')\n\n\n# Verify predictions\nprint('Verifying predictions by plotting input vs predicted label...')\ni = 0\nplt.figure(figsize=(6, 3))\nplt.subplot(1, 2, 1)\nplot_image(i, predictions[i], test_labels, test_images)\nplt.subplot(1, 2, 2)\nplot_value_array(i, predictions[i], test_labels)\nplt.show()\n\n# Plot the first X test images, their predicted labels, and the true labels.\n# Color correct predictions in blue and incorrect predictions in red.\nnum_rows = 5\nnum_cols = 3\nnum_images = num_rows * num_cols\nplt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))\nfor i in range(num_images):\n plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)\n plot_image(i, predictions[i], test_labels, test_images)\n plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)\n plot_value_array(i, predictions[i], test_labels)\nplt.tight_layout()\nplt.show()\n","repo_name":"AbhishekKargawal/Tensorflow-ImageClassification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"20767451605","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport logging\nimport sqlite3\nimport sys\nimport json\nimport os\nimport shutil\nimport re\n\nlogging.basicConfig(level=logging.INFO)\n\nDEBUGTOFILE = False\ndebugFile = \"\"\n\n\n\"\"\" Yes, dirty but no time to something better \"\"\"\ng_sourceBaseDirectory = \"\"\ng_destinationBaseDirectory = \"\"\n\n\n\"\"\"\n/*\n * This file is part of ChronoJump\n *\n * ChronoJump is free software; you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation; either version 2 of the License, or\n * (at your option) any later version.\n *\n * ChronoJump is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program; if not, write to the Free Software\n * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n *\n * Copyright (C) 2016-2017 Carles Pina i Estany \n * Copyright (C) 2019-2021 Xavier de Blas \n */\n\"\"\"\n\n\"\"\"\n/*\n * note there is a diagram on diagrams/processes/import\n */\n\"\"\"\n\n\nclass Row:\n \"\"\" A row represents a row in a table: it has column-names and their values.\n It can contain column names that are not in the database (this can be used\n to store other information if needed) \"\"\"\n\n def __init__(self):\n self._row = {}\n\n def set(self, column_name, value):\n \"\"\" Sets the value to the column_name \"\"\"\n self._row[column_name] = value\n\n def get(self, column_name):\n \"\"\" Returns the value of column_name. Raise an exception if column_name in this row doesn't exist \"\"\"\n return self._row[column_name]\n\n def has_column(self, column_name):\n \"\"\" Returns true if the row has the column column_name \"\"\"\n return column_name in self._row\n\n def columns(self):\n \"\"\" Returns a list of columns in this row \"\"\"\n return self._row.keys()\n\n def __eq__(self, other):\n # noinspection PyProtectedMember\n return self._row == other._row\n\n\nclass Table:\n \"\"\" This class has Table operations: inserts rows, removes duplicates, updates sessionIDs, etc. \"\"\"\n\n def __init__(self, table_name):\n self._table_data = []\n self._table_name = table_name\n\n def insert_row(self, row):\n self._table_data.append(row)\n\n def concatenate_table(self, other):\n \"\"\" Concatenates other in this table. It doesn't change the table names \"\"\"\n self._table_data += other\n\n def remove_duplicates(self):\n \"\"\" Remove duplicate rows of the table. The order of the rows in the table could change \"\"\"\n new_data = []\n\n for index, element in enumerate(self._table_data):\n if element not in self._table_data[index + 1:]:\n new_data.append(element)\n\n self._table_data = new_data\n\n @property\n def name(self):\n \"\"\" Property holding the table name \"\"\"\n return self._table_name\n\n def update_session_ids(self, new_session_id):\n \"\"\" Updates all the sessionID of each row to new_session_id \"\"\"\n changed = False\n\n for row in self._table_data:\n row.set(\"sessionID\", new_session_id)\n changed = True\n\n if len(self._table_data) > 0:\n assert changed\n\n def update_ids(self, column_to_update, referenced_table, old_referenced_column, new_referenced_column):\n \"\"\" For each row: matches column_to_update values with a row in referenced_table old_referenced_column values.\n If they are the same it updates column_to_update with new_referenced_column\n \"\"\"\n for row_to_update in self._table_data:\n old_id = row_to_update.get(column_to_update)\n for row_referenced in referenced_table:\n old_column_name = old_referenced_column\n\n if row_referenced.has_column(old_column_name) and row_referenced.get(old_referenced_column) == old_id:\n row_to_update.set(column_to_update, row_referenced.get(new_referenced_column))\n\n def __iter__(self):\n return iter(self._table_data)\n\n def __len__(self):\n return len(self._table_data)\n\n def __getitem__(self, index):\n return self._table_data[index]\n\n\nclass Database:\n \"\"\" A database represents the database and read/writes tables. 
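# remove_duplicates() above keeps a row only if no equal row appears later
# in the list, i.e. for duplicates it is the *last* occurrence that
# survives; a plain-list sketch of the same comparison:
data = [{'id': 1}, {'id': 2}, {'id': 1}]
new_data = [e for i, e in enumerate(data) if e not in data[i + 1:]]
print(new_data)  # [{'id': 2}, {'id': 1}]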
\"\"\"\n\n def __init__(self, source_path, read_only):\n self._is_opened = False\n self._cursor = None\n self._conn = None\n\n self.open(source_path, read_only)\n self._is_opened = True\n\n def __del__(self):\n self.close()\n\n def open(self, filename, read_only):\n \"\"\"Opens the database specified by filename. On Python3 If read_only is True\n the database is opened in read only mode\n \"\"\"\n if sys.version_info >= (3, 0):\n if read_only:\n mode = \"ro\"\n else:\n mode = \"rw\"\n\n uri = \"file:{}?mode={}\".format(filename, mode)\n self._conn = sqlite3.connect(uri, uri=True)\n else:\n # On Python2 there is no uri support. This opens\n # the database always on rw\n self._conn = sqlite3.connect(filename)\n\n self._conn.execute(\"pragma foreign_keys=ON\")\n self._cursor = self._conn.cursor()\n\n def close(self):\n if self._is_opened:\n self._conn.commit()\n self._conn.close()\n self._is_opened = False\n\n def column_names(self, table, skip_columns=None):\n \"\"\" Returns a list with the column names of the table. Doesn't return columns mentioned in skip_columns \"\"\"\n\n self._cursor.execute(\"PRAGMA table_info({})\".format(table))\n result = self._cursor.fetchall()\n\n names = []\n\n for row in result:\n column_name = row[1]\n if skip_columns is None or column_name not in skip_columns:\n names.append(column_name)\n\n assert len(names) > 0\n return names\n\n def read(self, table_name, where_condition, join_clause=\"\", group_by_clause=\"\", extra_tables=\"\"):\n \"\"\" Returns a new table with the contents of this table with where_condition. \"\"\"\n column_names = self.column_names(table_name)\n\n column_names_with_prefixes = self._add_prefix(column_names, \"{}.\".format(table_name))\n\n where_condition = \" WHERE {} \".format(where_condition)\n assert '\"' not in where_condition # Easy way to avoid problems - where_condition is only used by us (programmers) and\n # it doesn't depend on user data.\n\n if group_by_clause != \"\":\n group_by = \" GROUP BY {}\".format(group_by_clause)\n else:\n group_by = \"\"\n\n table_names_str = table_name\n if extra_tables != \"\":\n table_names_list = [table_names_str] + extra_tables\n table_names_str = \",\".join(table_names_list)\n\n format_data = {\"column_names\": \",\".join(column_names_with_prefixes), \"table_names_str\": table_names_str,\n \"join_clause\": join_clause, \"where\": where_condition, \"group_by\": group_by}\n\n sql = \"SELECT {column_names} FROM {table_names_str} {join_clause} {where} {group_by}\".format(**format_data)\n self._execute_query_and_log(sql, [])\n\n results = self._cursor.fetchall()\n\n table = Table(table_name)\n\n for row in results:\n table_row = Row()\n for i, col in enumerate(row):\n table_row.set(column_names[i], col)\n\n table.insert_row(table_row)\n\n return table\n\n def write(self, table, matches_columns, avoids_duplicate_column=None):\n \"\"\" Writes table into the database.\n\n Inserts the data and modifies table adding new_unique_id. This is the new uniqueID\n if the row has been inserted or the old one if the row has been reused. 
This\n depends on avoids_duplicate_column.\n\n For example, if matches_columns = [\"Name\"] it will insert a new row\n in the table if the name didn't exist and will add new_unique_id\n with this unique id.\n If name already existed it will NOT insert anything in the table\n but will add a new_unique_id with the ID of this person.\n\n If matches_columns is None it means that will insert the data\n regardless of any column.\n \"\"\"\n\n for row in table:\n if type(matches_columns) == list:\n where = \"\"\n where_values = []\n for column in matches_columns:\n if where != \"\":\n where += \" AND \"\n where += \"{} = ?\".format(column)\n where_values.append(row.get(column))\n\n format_data = {'table_name': table.name,\n 'where_clause': \" WHERE {}\".format(where)\n }\n\n sql = \"SELECT uniqueID FROM {table_name} {where_clause}\".format(**format_data)\n self._execute_query_and_log(sql, where_values)\n\n results = self._cursor.fetchall()\n\n \"\"\" If we don't find results on a name (typical avoids_duplicate_column field),\n then try also on name + \" (%)\" with like statement,\n maybe we can check also if % matched content is a number.\n Better description here:\n https://gitlab.gnome.org/GNOME/chronojump/-/issues/691\n Following code fixes it\n \"\"\"\n if avoids_duplicate_column is not None and len(results) == 0:\n where = \"\"\n where_values = []\n for column in matches_columns:\n if where != \"\":\n where += \" AND \"\n if column == avoids_duplicate_column:\n where += \"{} like ?\".format(column)\n where_values.append(row.get(column) + \" (%)\")\n else:\n where += \"{} = ?\".format(column)\n where_values.append(row.get(column))\n\n format_data = {'table_name': table.name,\n 'where_clause': \" WHERE {}\".format(where)\n }\n\n sql = \"SELECT uniqueID FROM {table_name} {where_clause}\".format(**format_data)\n self._execute_query_and_log(sql, where_values)\n\n results = self._cursor.fetchall()\n # TODO: need to delete the results: name + (notanumber)\n\n if matches_columns is None or len(results) == 0:\n # Needs to insert it\n self._avoid_duplicate_value(table_name=table.name, column_name=avoids_duplicate_column, data_row=row)\n\n new_id = self._write_row(table.name, row)\n row.set('importer_action', 'inserted')\n\n else:\n # Uses the existing id as new_unique_id\n new_id = results[0][0]\n row.set('importer_action', 'reused')\n\n row.set('new_uniqueID', new_id)\n\n if table.name == \"ForceSensorExercise\":\n self.copyExerciseImages (\"forceSensor\", str(row.get('uniqueID')), str(new_id))\n\n self._print_summary(table)\n\n @staticmethod\n def copyExerciseImages (exImageDir, oldIdStr, newIdStr):\n # to use the global variable on this function\n global g_sourceBaseDirectory\n global g_destinationBaseDirectory\n\n imageOriginPath = os.path.join (g_sourceBaseDirectory, \"multimedia\", \"exercises\", exImageDir, oldIdStr)\n imageDestinationPath = os.path.join (g_destinationBaseDirectory, \"multimedia\", \"exercises\", exImageDir, newIdStr)\n\n if os.path.exists (imageOriginPath + \".png\"):\n shutil.copy (imageOriginPath + \".png\", imageDestinationPath + \".png\")\n\n if os.path.exists (imageOriginPath + \".jpg\"):\n shutil.copy (imageOriginPath + \".jpg\", imageDestinationPath + \".jpg\")\n\n # same for small\n imageOriginPath = os.path.join (g_sourceBaseDirectory, \"multimedia\", \"exercises\", exImageDir, \"small\", oldIdStr)\n imageDestinationPath = os.path.join (g_destinationBaseDirectory, \"multimedia\", \"exercises\", exImageDir, \"small\", newIdStr)\n\n if os.path.exists (imageOriginPath + 
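# Minimal sketch of the reuse-or-insert rule that the write() docstring
# above describes, with a plain list standing in for the destination table
# (names and ids are illustrative):
existing = [{'uniqueID': 7, 'name': 'Alice'}]

def write_row(row, matches_columns):
    for old in existing:
        if all(old[c] == row[c] for c in matches_columns):
            return old['uniqueID'], 'reused'        # row matched: keep old id
    new_id = max(r['uniqueID'] for r in existing) + 1
    existing.append(dict(row, uniqueID=new_id))     # no match: insert
    return new_id, 'inserted'

print(write_row({'name': 'Alice'}, ['name']))  # (7, 'reused')
print(write_row({'name': 'Bob'},   ['name']))  # (8, 'inserted')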
\".png\"):\n shutil.copy (imageOriginPath + \".png\", imageDestinationPath + \".png\")\n\n if os.path.exists (imageOriginPath + \".jpg\"):\n shutil.copy (imageOriginPath + \".jpg\", imageDestinationPath + \".jpg\")\n\n @staticmethod\n def increment_suffix(value):\n suffix = re.match(\"(.*) \\(([0-9]+)\\)\", value)\n\n if suffix is None:\n return u\"{} (1)\".format(value)\n else:\n base_name = suffix.group(1)\n counter = int(suffix.group(2))\n counter += 1\n return u\"{} ({})\".format(base_name, counter)\n\n @staticmethod\n def _add_prefix(list_of_elements, prefix):\n \"\"\" Returns a copy of list_of_elements prefixing each element with prefix. \"\"\"\n result = []\n\n for element in list_of_elements:\n result.append(\"{}{}\".format(prefix, element))\n\n return result\n\n @staticmethod\n def _print_summary(table):\n \"\"\" Prints a summary of which rows has been inserted, which ones reused, during the write operation \"\"\"\n inserted_ids = []\n reused_ids = []\n for row in table:\n if row.get('importer_action') == 'inserted':\n inserted_ids.append(row.get('uniqueID'))\n\n elif row.get('importer_action') == 'reused':\n reused_ids.append(row.get('uniqueID'))\n else:\n assert False\n\n print(\"{table_name}\".format(table_name=table.name))\n print(\"\\tinserted: {inserted_counter} uniqueIDs: {inserted}\".format(inserted_counter=len(inserted_ids),\n inserted=inserted_ids))\n print(\n \"\\treused: {reused_counter} uniqueIDs: {reused}\".format(reused_counter=len(reused_ids),\n reused=reused_ids))\n\n def _write_row(self, table_name, row, skip_columns=None):\n \"\"\" Inserts the row into the table. Returns the new_id. By default skips uniqueID \"\"\"\n\n if skip_columns is None:\n skip_columns = [\"uniqueID\"]\n\n values = []\n column_names = []\n place_holders = []\n table_column_names = self.column_names(table_name)\n\n for column_name in row.columns():\n if column_name in skip_columns or column_name not in table_column_names:\n continue\n\n values.append(row.get(column_name))\n column_names.append(column_name)\n place_holders.append(\"?\")\n\n sql = \"INSERT INTO {table_name} ({column_names}) VALUES ({place_holders})\".format(table_name=table_name,\n column_names=\",\".join(\n column_names),\n place_holders=\",\".join(\n place_holders))\n self._execute_query_and_log(sql, values)\n\n new_id = self._cursor.lastrowid\n\n return new_id\n\n def _avoid_duplicate_value(self, table_name, column_name, data_row):\n \"\"\" Makes sure that data_row[column_name] doesn't exist in table_name (accessing the database).\n If it exists it changes data_row[column_name] to the same with (1) or (2)\"\"\"\n if column_name is None:\n return\n\n original_value = data_row.get(column_name)\n\n while True:\n sql = \"SELECT count(*) FROM {table_name} WHERE {column}=?\".format(table_name=table_name, column=column_name)\n binding_values = [data_row.get(column_name)]\n self._execute_query_and_log(sql, binding_values)\n\n results = self._cursor.fetchall()\n\n if results[0][0] == 0:\n break\n else:\n data_row.set(column_name, self.increment_suffix(data_row.get(column_name)))\n data_row.set('new_' + column_name, data_row.get(column_name))\n data_row.set('old_' + column_name, original_value)\n\n def _execute_query_and_log(self, sql, where_values):\n logging.debug(\"SQL: {} - values: {}\".format(sql, where_values))\n if(DEBUGTOFILE):\n debugFile.write(\"\\n\")\n debugFile.write(\"SQL: {} - values: {}\".format(sql, where_values))\n\n self._cursor.execute(sql, where_values)\n\n\nclass ImportSession:\n def __init__(self, source_path, 
destination_path, source_base_directory, source_temp_directory):\n \"\"\" Creates the object to import the session source_session from source_db into destination_db. \"\"\"\n\n logging.debug(\"source path:\" + source_path)\n logging.debug(\"destination path:\" + destination_path)\n\n self.source_path = source_path\n self.destination_path = destination_path\n self.source_base_directory = source_base_directory\n self.source_temp_directory = source_temp_directory\n\n self.source_db = Database(source_path, read_only=True)\n self.destination_db = Database(destination_path, read_only=False)\n\n self.source_session = None\n self.new_session_id = None\n\n self.persons77 = None\n\n def import_into_session(self, source_session, destination_session):\n self.source_session = source_session\n self.new_session_id = destination_session\n self.import_data()\n\n def import_as_new_session(self, source_session):\n self.source_session = source_session\n self.new_session_id = self._import_session()\n\n self._import_sport()\n self._import_speciality()\n self.import_data()\n\n def import_data(self):\n self.persons77 = self._import_persons77()\n\n self._import_person_session77()\n\n self._import_jumps()\n self._import_runs()\n self._import_pulse()\n trigger = self._import_encoder()\n triggerForceSensor = self._import_forceSensor()\n triggerRunEncoder = self._import_runEncoder()\n\n trigger.concatenate_table(triggerForceSensor)\n trigger.concatenate_table(triggerRunEncoder)\n trigger.remove_duplicates()\n self.destination_db.write(table=trigger, matches_columns=None)\n\n self._print_status(self, \"allData\")\n\n def _import_session(self):\n \"\"\"\n Imports the Session information saved in self._source_session (only table Session).\n Returns the new session ID.\n \"\"\"\n\n session = self.source_db.read(table_name=\"Session\",\n where_condition=\"Session.uniqueID={}\".format(self.source_session))\n\n number_of_matching_sessions = len(session)\n\n if number_of_matching_sessions == 0:\n print(\"Trying to import {session} from {source_file} and it doesn't exist. Cancelling...\".format(\n session=self.source_session,\n source_file=self.source_path))\n sys.exit(1)\n elif number_of_matching_sessions > 1:\n print(\"Found {number_of_sessions} in {source_file} which is not possible. 
Cancelling...\".format(\n number_of_sessions=number_of_matching_sessions,\n source_file=self.source_path))\n sys.exit(1)\n\n self.destination_db.write(table=session, matches_columns=None,\n avoids_duplicate_column=\"name\")\n\n return session[0].get('new_uniqueID')\n\n def _import_sport(self):\n sports = self.source_db.read(table_name=\"sport\",\n where_condition=\"Sport.uniqueID=Session.personsSportID AND Session.uniqueID={}\".format(self.source_session),\n extra_tables=[\"Session\"])\n\n self.destination_db.write(table=sports,\n matches_columns=[\"name\", \"userDefined\", \"hasSpeciallities\", \"graphLink\"])\n\n def _import_speciality(self):\n # It should change the hasSpeciallities: maybe in the original database didn't have but now after\n # doing this it will have speciallities\n specialities = self.source_db.read(table_name=\"speciallity\",\n where_condition=\"Sport.uniqueID=Session.personsSportID AND Speciallity.sportId=Sport.uniqueID AND Session.uniqueID={}\".format(self.source_session),\n extra_tables=[\"Sport\", \"Session\"])\n\n self.destination_db.write(table=specialities,\n matches_columns=[\"sportID\", \"name\"])\n\n\n def _import_persons77(self):\n self._print_status(self, \"persons\")\n\n persons77 = self.source_db.read(table_name=\"Person77\",\n where_condition=\"personSession77.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN personSession77 ON personSession77.personID=Person77.uniqueID\",\n group_by_clause=\"Person77.uniqueID\")\n\n self.destination_db.write(table=persons77,\n matches_columns=[\"name\"])\n\n return persons77\n\n def _import_jumps(self):\n self._print_status(self, \"jumps\")\n # Imports JumpType table\n jump_types = self.source_db.read(table_name=\"JumpType\",\n where_condition=\"Session.uniqueID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN Jump ON JumpType.name=Jump.type LEFT JOIN Session ON Jump.sessionID=Session.uniqueID\",\n group_by_clause=\"JumpType.uniqueID\")\n\n self.destination_db.write(table=jump_types,\n matches_columns=self.destination_db.column_names(\"JumpType\", [\"uniqueID\"]),\n avoids_duplicate_column=\"name\")\n\n # Imports JumpRjType table\n jump_rj_types = self.source_db.read(table_name=\"JumpRjType\",\n where_condition=\"Session.uniqueID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN JumpRj ON JumpRjType.name=JumpRj.type LEFT JOIN Session on JumpRj.sessionID=Session.uniqueID\",\n group_by_clause=\"JumpRjType.uniqueID\")\n\n self.destination_db.write(table=jump_rj_types,\n matches_columns=self.destination_db.column_names(\"JumpRjType\", [\"uniqueID\"]),\n avoids_duplicate_column=\"name\")\n\n # Imports JumpRj table (with the new Person77's uniqueIDs)\n jump_rj = self.source_db.read(table_name=\"JumpRj\",\n where_condition=\"JumpRj.sessionID={}\".format(self.source_session))\n\n jump_rj.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n jump_rj.update_session_ids(self.new_session_id)\n jump_rj.update_ids(\"type\", jump_rj, \"old_name\", \"new_name\")\n\n self.destination_db.write(table=jump_rj, matches_columns=self.destination_db.column_names(\"JumpRj\", skip_columns=[\"uniqueID\", \"personID\"]))\n\n # Imports Jump table (with the new Person77's uniqueIDs)\n jump = self.source_db.read(table_name=\"Jump\",\n where_condition=\"Jump.sessionID={}\".format(self.source_session))\n\n jump.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n jump.update_session_ids(self.new_session_id)\n jump.update_ids(\"type\", jump_types, \"old_name\", 
\"new_name\")\n\n self.destination_db.write(table=jump, matches_columns=self.destination_db.column_names(\"Jump\", skip_columns=[\"uniqueID\", \"personID\"]))\n\n def _import_runs(self):\n self._print_status(self, \"runs\")\n # Imports RunTypes table\n run_types = self.source_db.read(table_name=\"RunType\",\n where_condition=\"Session.uniqueID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN Run ON RunType.name=Run.type LEFT JOIN Session ON Run.sessionID=Session.uniqueID\",\n group_by_clause=\"RunType.uniqueID\")\n\n self.destination_db.write(table=run_types,\n matches_columns=self.destination_db.column_names(\"RunType\", [\"uniqueID\"]),\n avoids_duplicate_column=\"name\")\n\n # Imports RunIntervalTypes table\n run_interval_types = self.source_db.read(table_name=\"RunIntervalType\",\n where_condition=\"Session.uniqueID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN RunInterval ON RunIntervalType.name=RunInterval.type LEFT JOIN Session on RunInterval.sessionID=Session.uniqueID\",\n group_by_clause=\"RunIntervalType.uniqueID\")\n\n self.destination_db.write(table=run_interval_types,\n matches_columns=self.destination_db.column_names(\"RunIntervalType\", [\"uniqueID\"]),\n avoids_duplicate_column=\"name\")\n\n # Imports Run table (with the new Person77's uniqueIDs)\n run = self.source_db.read(table_name=\"Run\",\n where_condition=\"Run.sessionID={}\".format(self.source_session))\n run.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n run.update_session_ids(self.new_session_id)\n run.update_ids(\"type\", run_types, \"old_name\", \"new_name\")\n self.destination_db.write(table=run,\n matches_columns=self.destination_db.column_names(\"Run\", skip_columns=[\"uniqueID\", \"personID\"]))\n\n # Imports RunInterval table (with the new Person77's uniqueIDs)\n run_interval = self.source_db.read(table_name=\"RunInterval\",\n where_condition=\"RunInterval.sessionID={}\".format(self.source_session))\n run_interval.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n run_interval.update_session_ids(self.new_session_id)\n run_interval.update_ids(\"type\", run_interval_types, \"old_name\", \"new_name\")\n self.destination_db.write(table=run_interval,\n matches_columns=self.destination_db.column_names(\"RunInterval\", skip_columns=[\"uniqueID\", \"personID\"]))\n\n def _import_pulse(self):\n #self._print_status(self, \"pulses\")\n # Imports PulseTypes table\n pulse_types = self.source_db.read(table_name=\"PulseType\",\n where_condition=\"Session.uniqueID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN Pulse ON PulseType.name=Pulse.type LEFT JOIN Session on Pulse.sessionID=Session.uniqueID\",\n group_by_clause=\"PulseType.uniqueID\")\n\n self.destination_db.write(table=pulse_types,\n matches_columns=self.destination_db.column_names(\"PulseType\", [\"uniqueID\"]),\n avoids_duplicate_column=\"name\")\n\n # Imports Pulse table\n pulse = self.source_db.read(table_name=\"Pulse\",\n where_condition=\"Pulse.sessionID={}\".format(self.source_session))\n pulse.update_session_ids(self.new_session_id)\n pulse.update_ids(\"type\", pulse_types, \"old_name\", \"new_name\")\n self.destination_db.write(pulse, self.destination_db.column_names(\"Pulse\", skip_columns=[\"uniqueID\", \"personID\"]))\n\n def _import_person_session77(self):\n # Imports PersonSession77\n person_session_77 = self.source_db.read(table_name=\"PersonSession77\",\n where_condition=\"PersonSession77.sessionID={}\".format(self.source_session))\n 
person_session_77.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n person_session_77.update_session_ids(self.new_session_id)\n\n # Inserts the person_session_77 table but not for personsIDs that already existed in this session. This is\n # the case if a user imports a session into an existing session and the persons would be already imported.\n self.destination_db.write(table=person_session_77, matches_columns=[\"sessionID\", \"personID\"])\n\n def _import_encoder(self):\n self._print_status(self, \"encoder\")\n # Imports EncoderExercise\n encoder_exercise_from_encoder = self.source_db.read(table_name=\"EncoderExercise\",\n where_condition=\"Encoder.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN Encoder ON Encoder.exerciseID=EncoderExercise.uniqueID\",\n group_by_clause=\"EncoderExercise.uniqueID\")\n\n encoder_exercise_from_encoder_1rm = self.source_db.read(table_name=\"EncoderExercise\",\n where_condition=\"Encoder1RM.sessionID={}\".format(\n self.source_session),\n join_clause=\"LEFT JOIN Encoder1RM ON Encoder1RM.exerciseID=EncoderExercise.uniqueID\",\n group_by_clause=\"EncoderExercise.uniqueID\")\n\n encoder_exercise = Table(\"encoderExercise\")\n encoder_exercise.concatenate_table(encoder_exercise_from_encoder)\n encoder_exercise.concatenate_table(encoder_exercise_from_encoder_1rm)\n encoder_exercise.remove_duplicates()\n\n self.destination_db.write(table=encoder_exercise,\n matches_columns=self.destination_db.column_names(\"EncoderExercise\", [\"uniqueID\"]),\n avoids_duplicate_column=\"name\")\n\n # Imports Encoder1RM\n encoder_1rm = self.source_db.read(table_name=\"Encoder1RM\",\n where_condition=\"Encoder1RM.sessionID={}\".format(self.source_session))\n encoder_1rm.update_session_ids(self.new_session_id)\n encoder_1rm.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n encoder_1rm.update_ids(\"exerciseID\", encoder_exercise, \"uniqueID\", \"new_uniqueID\")\n self.destination_db.write(table=encoder_1rm,\n matches_columns=None)\n\n # Imports Encoder\n encoder = self.source_db.read(table_name=\"Encoder\",\n where_condition=\"Encoder.sessionID={}\".format(self.source_session))\n encoder.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n encoder.update_ids(\"exerciseID\", encoder_exercise, \"uniqueID\", \"new_uniqueID\")\n encoder.update_session_ids(self.new_session_id)\n\n self._import_encoder_files(encoder)\n\n self.destination_db.write(table=encoder,\n matches_columns=self.destination_db.column_names(\"encoder\", skip_columns=[\"uniqueID\", \"personID\", \"exerciseID\"]))\n\n # Imports EncoderSignalCurve\n encoder_signal_curve_signals = self.source_db.read(table_name=\"EncoderSignalCurve\",\n where_condition=\"Encoder.signalOrCurve='signal' AND Encoder.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN Encoder ON Encoder.uniqueID=EncoderSignalCurve.SignalID\")\n\n encoder_signal_curve_curves = self.source_db.read(table_name=\"EncoderSignalCurve\",\n where_condition=\"Encoder.signalOrCurve='curve' AND Encoder.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN Encoder ON Encoder.uniqueID=EncoderSignalCurve.curveID\")\n\n encoder_signal_curve = Table(\"encoderSignalCurve\")\n encoder_signal_curve.concatenate_table(encoder_signal_curve_signals)\n encoder_signal_curve.concatenate_table(encoder_signal_curve_curves)\n encoder_signal_curve.remove_duplicates()\n\n print(\"encoder_signal_curve before update_ids\")\n for row in encoder_signal_curve:\n 
print(str(row.get(\"signalID\")) + \" \" + str(row.get(\"curveID\")))\n\n encoder_signal_curve.update_ids(\"signalID\", encoder, \"uniqueID\", \"new_uniqueID\")\n encoder_signal_curve.update_ids(\"curveID\", encoder, \"uniqueID\", \"new_uniqueID\")\n\n print(\"encoder_signal_curve after update_ids\")\n for row in encoder_signal_curve:\n print(str(row.get(\"signalID\")) + \" \" + str(row.get(\"curveID\")))\n\n self.destination_db.write(table=encoder_signal_curve,\n avoids_duplicate_column=None,\n matches_columns=None)\n\n # Imports trigger (can be encoder, forceSensor or raceanalyzer. Right now force sensor is not programmed)\n trigger = self.source_db.read(table_name=\"trigger\",\n where_condition=\"mode='ENCODER' AND Encoder.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN Encoder ON Encoder.uniqueID=trigger.modeID\")\n\n trigger.update_ids(\"modeID\", encoder, \"uniqueID\", \"new_uniqueID\")\n return trigger #to be concatenated and written after forceSensor and runEncoder\n\n\n def _import_forceSensor(self):\n self._print_status(self, \"forceSensor\")\n # Imports ForceSensorExercise\n # based on encoder exercise code because rest of the code exercises and tests are linked by names\n # but on encoder and forceSensor is linked by ex.uniqueID\n\n if(DEBUGTOFILE):\n debugFile.write(\" start _import_forceSensor\\n\")\n\n forceSensor_exercise = self.source_db.read(table_name=\"ForceSensorExercise\",\n where_condition=\"ForceSensor.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN ForceSensor ON ForceSensor.exerciseID=ForceSensorExercise.uniqueID\",\n group_by_clause=\"ForceSensorExercise.uniqueID\")\n\n\n self.destination_db.write(table=forceSensor_exercise,\n matches_columns=self.destination_db.column_names(\"ForceSensorExercise\", [\"uniqueID\"]),\n avoids_duplicate_column=\"name\")\n\n # Imports ForceSensor\n forceSensor = self.source_db.read(table_name=\"ForceSensor\",\n where_condition=\"ForceSensor.sessionID={}\".format(self.source_session))\n forceSensor.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n forceSensor.update_ids(\"exerciseID\", forceSensor_exercise, \"uniqueID\", \"new_uniqueID\")\n forceSensor.update_session_ids(self.new_session_id)\n\n\n self._import_forceSensor_or_runEncoder_files(forceSensor, \"forceSensor\")\n\n self.destination_db.write(table=forceSensor,\n matches_columns=self.destination_db.column_names(\"forceSensor\", skip_columns=[\"uniqueID\", \"personID\", \"exerciseID\"]))\n\n # Imports trigger (can be encoder, forceSensor or raceanalyzer).\n trigger = self.source_db.read(table_name=\"trigger\",\n where_condition=\"mode='FORCESENSOR' AND ForceSensor.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN ForceSensor ON ForceSensor.uniqueID=trigger.modeID\")\n\n trigger.update_ids(\"modeID\", forceSensor, \"uniqueID\", \"new_uniqueID\")\n\n if(DEBUGTOFILE):\n debugFile.write(\" end _import_forceSensor\\n\")\n\n return trigger\n\n\n\n def _import_runEncoder(self):\n self._print_status(self, \"runEncoder\")\n # Imports RunEncoderExercise\n # VERY similar to _import_forceSensor\n\n if(DEBUGTOFILE):\n debugFile.write(\" start _import_runEncoder\\n\")\n\n runEncoder_exercise = self.source_db.read(table_name=\"RunEncoderExercise\",\n where_condition=\"RunEncoder.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN RunEncoder ON RunEncoder.exerciseID=RunEncoderExercise.uniqueID\",\n group_by_clause=\"RunEncoderExercise.uniqueID\")\n\n\n 
self.destination_db.write(table=runEncoder_exercise,\n matches_columns=self.destination_db.column_names(\"RunEncoderExercise\", [\"uniqueID\"]),\n avoids_duplicate_column=\"name\")\n\n\n # Imports RunEncoder\n runEncoder = self.source_db.read(table_name=\"RunEncoder\",\n where_condition=\"RunEncoder.sessionID={}\".format(self.source_session))\n runEncoder.update_ids(\"personID\", self.persons77, \"uniqueID\", \"new_uniqueID\")\n runEncoder.update_ids(\"exerciseID\", runEncoder_exercise, \"uniqueID\", \"new_uniqueID\")\n runEncoder.update_session_ids(self.new_session_id)\n\n\n self._import_forceSensor_or_runEncoder_files(runEncoder, \"runEncoder\")\n\n self.destination_db.write(table=runEncoder,\n matches_columns=self.destination_db.column_names(\"runEncoder\", skip_columns=[\"uniqueID\", \"personID\", \"exerciseID\"]))\n\n # Imports trigger (can be encoder, forceSensor or raceanalyzer).\n trigger = self.source_db.read(table_name=\"trigger\",\n where_condition=\"mode='RACEANALYZER' AND RunEncoder.sessionID={}\".format(self.source_session),\n join_clause=\"LEFT JOIN RunEncoder ON RunEncoder.uniqueID=trigger.modeID\")\n\n trigger.update_ids(\"modeID\", runEncoder, \"uniqueID\", \"new_uniqueID\")\n\n if(DEBUGTOFILE):\n debugFile.write(\" end _import_runEncoder\\n\")\n\n return trigger\n\n\n\n\n\n @staticmethod\n def _encoder_filename(person_id, original_filename):\n \"\"\" original_filename is like 1-Carmelo-89-2014-12-03_12-48-54.txt. It only replaces the person_id (1 in this case)\"\"\"\n filename = original_filename.split(\"-\", 1)\n filename[0] = str(person_id)\n return \"-\".join(filename)\n\n @staticmethod\n def _encoder_url(session_id, signal_or_curve):\n return os.path.join(\"encoder\", str(session_id), \"data\", signal_or_curve)\n\n @staticmethod\n def _forceSensor_filename(person_id, original_filename):\n \"\"\" original_filename is like 19_some person_2019-05-26_15-09-25.csv. It only replaces the person_id (19 in this case),\n but as we originally do not have a database for forceSensor and runEncoder, we just wrote the name; in that case: add the id before\"\"\"\n pattern = r'\\A\\d+_' # \\A for the beginning of the name, then digits and then the _\n result = re.match(pattern, original_filename)\n if result:\n filename = original_filename.split(\"_\", 1)\n filename[0] = str(person_id)\n return \"_\".join(filename)\n else:\n return str(person_id) + \"_\" + original_filename\n\n @staticmethod\n def _forceSensor_url(session_id):\n return os.path.join(\"forceSensor\", str(session_id))\n\n @staticmethod\n def _runEncoder_filename(person_id, original_filename):\n \"\"\" original_filename is like 1-Carmelo-89-2014-12-03_12-48-54.csv. It only replaces the person_id (1 in this case)\"\"\"\n filename = original_filename.split(\"-\", 1)\n filename[0] = str(person_id)\n return \"-\".join(filename)\n\n @staticmethod\n def _runEncoder_url(session_id):\n return os.path.join(\"raceAnalyzer\", str(session_id))\n\n @staticmethod\n def _normalize_path(path):\n \"\"\"\n The path that is read from the database might use Windows separators but\n we might be on a Linux system (or OS-X). 
This function should replace the directory\n separators to the system's ones.\n\n It assumes that the \"/\" and \"\\\" characters are only used to separate directories.\n \"\"\"\n if os.sep == \"/\":\n # We are on Linux, OS-X or some other system with \"/\" separators.\n # If the path had \"\\\" then replace them to \"/\".\n return path.replace(\"\\\\\", \"/\")\n elif os.sep == \"\\\\\":\n return path.replace(\"/\", \"\\\\\")\n\n @staticmethod\n def _print_status(self, name):\n statusPath = self._normalize_path(self.source_temp_directory + \"/status/\")\n if not os.path.exists(statusPath):\n os.makedirs(statusPath)\n open(statusPath + name + \".txt\", 'a').close()\n\n @staticmethod\n def _fix_strange_chars(name):\n \"\"\"\n check if there are ñ and change to ñ or backwards.\n yes, these characters are different,\n copy on browser url to see differences.\n \"\"\"\n if \"ñ\" in name:\n name = name.replace(\"ñ\", \"ñ\")\n elif \"ñ\" in name:\n name = name.replace(\"ñ\", \"ñ\")\n\n return name\n\n def _import_encoder_files(self, encoder_table):\n if self.source_base_directory is None:\n # We are skipping to copy the Encoding files. This is used in unit tests.\n return\n\n for row in encoder_table:\n # Gets information from row\n person_id = row.get(\"personID\")\n original_filename = row.get(\"filename\")\n original_url = self._normalize_path(row.get(\"url\"))\n session_id = row.get(\"sessionID\")\n signal_or_curve = row.get(\"signalOrCurve\")\n\n # Prepares the new filename and destination_url\n filename=self._encoder_filename(person_id, original_filename)\n destination_url = self._encoder_url(session_id, signal_or_curve)\n\n # Sets it to the row\n row.set(\"filename\", filename)\n row.set(\"url\", destination_url)\n\n # Copies the files to the new place\n destination_directory = os.path.join(self.destination_path, \"..\", \"..\", destination_url)\n destination_directory = os.path.abspath(destination_directory) # os.makedirs() can't handle directories with \"..\"\n\n destination_filename = os.path.join(destination_directory, filename)\n source_file = os.path.join(self.source_base_directory, original_url, original_filename)\n\n if not os.path.isdir(destination_directory):\n os.makedirs(destination_directory)\n\n \"\"\"\n if sys.version_info >= (3, 0):\n print (\"python 3\")\n else:\n print (\"python 2\")\n \"\"\"\n\n if os.path.exists(source_file):\n shutil.copy(source_file, destination_filename)\n else:\n source_file = self._fix_strange_chars(source_file)\n if os.path.exists(source_file):\n shutil.copy(source_file, destination_filename)\n\n\n # If on origin there are no curves, curve folder will not be created\n # and after import curves will not be saved on clicking at capture treeview\n # create \"curve\" folder if not exists\n destination_url = self._encoder_url(session_id, \"curve\")\n destination_directory = os.path.join(self.destination_path, \"..\", \"..\", destination_url)\n destination_directory = os.path.abspath(destination_directory) # os.makedirs() can't handle directories with \"..\"\n if not os.path.isdir(destination_directory):\n os.makedirs(destination_directory)\n\n # valid for forceSensor and runEncoder files, theses are the values on tableName\n def _import_forceSensor_or_runEncoder_files(self, table, tableName):\n if self.source_temp_directory is None:\n # We are skipping to copy the Encoding files. 
This is used in unit tests.\n return\n\n if(DEBUGTOFILE):\n debugFile.write(\" at import_forceSensor_or_runEncoder_files\\n\")\n debugFile.write(tableName)\n\n for row in table:\n #if(DEBUGTOFILE):\n # debugFile.write(\" row: \")\n # debugFile.write(row.get(\"url\"))\n\n # Gets information from row\n person_id = row.get(\"personID\")\n original_filename = row.get(\"filename\")\n original_url = self._normalize_path(row.get(\"url\"))\n session_id = row.get(\"sessionID\")\n\n if(DEBUGTOFILE):\n debugFile.write(\"original_filename: \" + original_filename + \"\\n\")\n\n # Prepares the new filename and destination_url\n filename = \"\"\n destination_url = \"\"\n if tableName == \"forceSensor\":\n filename = self._forceSensor_filename(person_id, original_filename)\n destination_url = self._forceSensor_url(session_id)\n else:\n filename = self._runEncoder_filename(person_id, original_filename)\n destination_url = self._runEncoder_url(session_id)\n\n # Sets it to the row\n row.set(\"filename\", filename)\n row.set(\"url\", destination_url)\n\n if(DEBUGTOFILE):\n debugFile.write(\"filename: \" + filename + \"\\n\")\n\n # Copies the files to the new place\n destination_directory = os.path.join(self.destination_path, \"..\", \"..\", destination_url)\n destination_directory = os.path.abspath(destination_directory) # os.makedirs() can't handle directories with \"..\"\n\n destination_filename = os.path.join(destination_directory, filename)\n source_file = os.path.join(self.source_temp_directory, original_url, original_filename)\n\n if not os.path.isdir(destination_directory):\n os.makedirs(destination_directory)\n\n if os.path.exists(source_file):\n shutil.copy(source_file, destination_filename)\n else:\n source_file = self._fix_strange_chars(source_file)\n if os.path.exists(source_file):\n shutil.copy(source_file, destination_filename)\n\n\ndef json_information(database_path):\n information = {}\n information['sessions'] = []\n\n database = Database(database_path, read_only=True)\n\n sessions = database.read(table_name=\"Session\", where_condition=\"1=1\")\n\n for session in sessions:\n data = {'uniqueID': session.get('uniqueID'),\n 'date': session.get('date'),\n 'place': session.get('place'),\n 'comments': session.get('comments'),\n 'name': session.get('name')\n }\n information['sessions'].append(data)\n\n preferences = database.read(table_name=\"Preferences\", where_condition=\"name='databaseVersion'\")\n information['databaseVersion'] = preferences[0].get(\"value\")\n\n return information\n\n\ndef show_json_information(database_path):\n information = json_information(database_path)\n information_str = json.dumps(information, sort_keys=True, indent=4)\n\n print(information_str)\n\n\ndef process_command_line():\n parser = argparse.ArgumentParser(\n description=\"Imports a session from one Chronojump database file into another one\")\n parser.add_argument(\"--source\", type=str, required=True,\n help=\"chronojump.sqlite that we are importing from\")\n parser.add_argument(\"--source_base_directory\", type=str, required=False,\n help=\"Directory where the encoder/ directory (amongst database/, logs/ and multimedia/) can be found\\n\" +\n \"By default it is the parent directory of --source\")\n parser.add_argument(\"--source_temp_directory\", type=str, required=False,\n help=\"Directory where the temporary forceSensor and runEncoder files are\\n\" +\n \"they are in a temp folder because the names have been changed\")\n parser.add_argument(\"--destination\", type=str, required=False,\n help=\"chronojump.sqlite that we import 
to\")\n parser.add_argument(\"--source_session\", type=int, required=False,\n help=\"Session from source that will be imported to the session specified by --destination-session\\n\"\n \"or to a new session if no --destination-session is specified\")\n parser.add_argument(\"--destination_session\", type=int, required=False,\n help=\"Imports the [source_session] into the [destination_session]. If not specified imports as\\n\"\n \"new session.\")\n parser.add_argument(\"--json_information\", required=False, action='store_true',\n help=\"Shows information of the source database\")\n parser.add_argument(\"--debug_to_file\", type=str, required=False,\n help=\"path to print debug info to file\")\n args = parser.parse_args()\n\n if args.json_information:\n show_json_information(args.source)\n else:\n if args.destination and args.source_session:\n if args.source_base_directory:\n source_base_directory = args.source_base_directory\n else:\n source_base_directory = os.path.join(args.source, \"../..\")\n\n # to use the global variable on this function\n global DEBUGTOFILE\n global debugFile\n if args.debug_to_file == \"NONE\":\n DEBUGTOFILE = False\n elif args.debug_to_file != \"\":\n DEBUGTOFILE = True\n debugFile = open(args.debug_to_file, 'w')\n\n # to use the global variable on this function\n global g_sourceBaseDirectory\n global g_destinationBaseDirectory\n\n g_sourceBaseDirectory = source_base_directory\n g_sourceBaseDirectory = os.path.abspath (g_sourceBaseDirectory)\n if(DEBUGTOFILE):\n debugFile.write(\"g_sourceBaseDirectory: \" + g_sourceBaseDirectory + \"\\n\")\n\n g_destinationBaseDirectory = os.path.join (args.destination, \"..\", \"..\")\n g_destinationBaseDirectory = os.path.abspath (g_destinationBaseDirectory)\n if(DEBUGTOFILE):\n debugFile.write(\"g_destinationBaseDirectory: \" + g_destinationBaseDirectory + \"\\n\")\n\n importer = ImportSession(args.source, args.destination, source_base_directory, args.source_temp_directory)\n\n if args.destination_session is None:\n importer.import_as_new_session(args.source_session)\n else:\n importer.import_into_session(args.source_session, args.destination_session)\n\n if(DEBUGTOFILE):\n debugFile.close()\n else:\n print(\"if --information not used --source, --destination and --source_session parameters are required\")\n\n\nif __name__ == \"__main__\":\n process_command_line()\n","repo_name":"GNOME/chronojump","sub_path":"src/chronojump-importer/chronojump_importer.py","file_name":"chronojump_importer.py","file_ext":"py","file_size_in_byte":51430,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"28"} +{"seq_id":"24860609327","text":"\"\"\"empty message\n\nRevision ID: e9e3df51c342\nRevises: d550de214e83\nCreate Date: 2023-11-03 15:46:21.059169\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e9e3df51c342'\ndown_revision = 'd550de214e83'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('viaje',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('nombre', sa.String(length=250), nullable=True),\n sa.Column('destino', sa.String(length=250), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('viaje')\n # ### end Alembic commands ###\n","repo_name":"JonnyJaccob/MultiParadigTeam9","sub_path":"PracticasQQ/Practica2/Pract3/myapp/migrations/versions/e9e3df51c342_.py","file_name":"e9e3df51c342_.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"20213978809","text":"#!/usr/bin/python\n\n# Simple Logo interpreter written in Python using turtle module and parser\n# generator: PLY (Python Lex Yacc). See also: dokumentacja.pdf\n# Authors: Michal Mrowczyk, Damian Kudas\n# Feel free to report any possible bug to: mrowczyk@student.agh.edu.pl\n# Convention: In comments we often use names: procedure, function, subroutine\n# They all basically have the same notion in our program\n\nimport sys;\nimport ply.lex as lex;\nimport ply.yacc as yacc;\nimport turtle\nimport math\nimport random\nimport copy\nimport AST\n\n# If 0 then normal, if 1 then interactive\nmode = None\n\n# Representing Abstract Syntax Tree (AST)\ntree = None\n\n# Type of variable: possibly integer or float\ntyp = None\n\n# Built in looping variable in repeat statement (used by Logo interpreter)\nrepcount = -1\n\n# Dictionary of pairs: varname --> varvalue (also called: environment)\n# varname - name of variable\n# varvalue - value of variable\nvardict = {}\n\n# Global flag depicting if there is syntax error in current parsing\nerror = False\n\n# Global name representing information in which function interpreter is in\n# given time\nfunkyname = \"__main__\"\n\n# Denoting whether or not current function returns value (based on\n# interpreter evaluations)\nreturning = False\n\ndef addToClass(cls):\n \"\"\"\n Adds function to class: cls\n Used to inject eval functions into\n AST module classes\n \"\"\"\n def decorator(func):\n setattr(cls,func.__name__,func)\n return func\n return decorator\n\nclass Interpreter:\n \"\"\"\n Main class containing interpreter functionality\n It defines eval functions for AST module classes\n \"\"\"\n @addToClass(AST.Prog)\n def eval(self):\n # Evaluating program is equivalent to sequential evaluation of\n # it's instructions\n for instruction in self.instructions:\n instruction.eval(self.functions)\n \n @addToClass(AST.Var)\n def eval(self, context):\n global vardict, mode\n \n # Checking if variable is defined during interpreter pass.\n # If not defined then actions are taken based on interpreting mode:\n # possibly interactive or interpreting file.\n if self.name in vardict.keys():\n return vardict[self.name]\n else:\n print(\"Undefined variable: \" + self.name)\n if mode == 0:\n sys.exit(-1)\n return 0\n \n @addToClass(AST.Make)\n def eval(self, context):\n global vardict, funkyname, returning\n \n # Make statement's eval evaluates expression and assign it's value to\n # global variable dictionary vardict\n val = self.expr.eval(context)\n name = self.name.name\n vardict[name] = val\n \n # If make statement assigned value to variable with current function\n # name then updating returning flag\n if name == funkyname:\n returning = True\n \n @addToClass(AST.Call)\n def eval(self, context):\n global vardict, repcount, mode, error, returning, funkyname\n \n # Main point - calling procedure\n # There is an attempt to implement closures.\n # Admittedly it is not the clearest way to handle function call\n # so code is bit convoluted...\n \n # Protecting global vardict against local changes\n tmp = vardict.copy()\n \n # Protecting returning state of outer functions\n tmpreturning = 
returning\n \n # Protecting funkyname for outer function\n tmpfunkyname = funkyname\n \n # Setting returning to be False since no make \"this_func_name\n # statement has been seen during this call\n returning = False\n \n # Setting this funkyname to name of this function\n funkyname = self.name\n \n # Result of calling this procedure / function\n result = None\n \n # Obtaining names of locally defined procedures / functions\n localprocs = []\n for proc in context:\n localprocs.append(proc.name)\n \n # Traversing vardict in order to find possible function names\n # Note that Python empowers us to treat functions like ordinary\n # variables in environment, so we use this approach heavily...\n for elem in vardict.keys():\n if isinstance(vardict[elem], AST.Func):\n localprocs.append(elem)\n \n # Evaluating arguments for procedure call.\n # This list will be passed into environment of called subroutine\n argvalues = []\n for arg in self.args:\n val = arg.eval(context)\n argvalues.append(val)\n \n # Checking if it is procedure defined locally (in Logo program):\n if self.name in localprocs:\n funcobject = None\n \n # Trying to obtain function from vardict based on function name\n # This is done mainly for closures purposes \n for elem in vardict.keys():\n if elem == self.name and isinstance(vardict[elem], AST.Func):\n funcobject = vardict[elem]\n\n # Obtaining function object from context\n # Note that those are the functions which are defined at the\n # beggining of the program or defined in interactive mode\n # using ' to func_name (args) end ' statement\n if funcobject == None:\n for func in context:\n if func.name == self.name:\n funcobject = func\n \n # Counter for argvalues list elements.\n i = 0\n \n # Obtaining names of formal parameters for function we try to call\n names = [p.name for p in funcobject.params]\n \n # Checking if funcobject params are unique\n # Node that uniqueness checking is actually performed only when\n # function / procedure is called and not when definition happens!\n if len(names) != len(set(names)):\n print(\"Duplicated param name in function declaration: \" + self.name)\n if mode == 0:\n sys.exit(-1)\n else:\n error = True\n \n # Setting up environment for funcobject call\n # It actually contains consists of two steps\n # 1) Obtaining closure dictionary (if not None) and updating environment\n # 2) Taking values of arguments passed to function and updating environment\n # Please note that what is understood as closure dictionary params\n # may be overriden by arguments that are passed to function indirectly.\n cdict = funcobject.closuredict\n if cdict != None:\n for elem in cdict.keys():\n vardict[elem] = cdict[elem]\n for p in funcobject.params:\n vardict[p.name] = argvalues[i]\n i += 1\n \n # Calling funcobject with argvalues as arguments:\n for instr in funcobject.instructions:\n instr.eval(context)\n \n # Setting up the result of the call (if function call returned value)\n # Please note that there is no way to check whether or not\n # one branch of if-else statement returns something and other one not\n # (at least using our approach). Also Python interpreter is not\n # capable of doing such checks. 
(At least Python 2)\n # Also there is a check that function's returned value was actually assigned\n # inside this function\n if (self.name in vardict.keys()) and returning:\n result = vardict[self.name]\n else:\n result = None\n # Checking for built in subroutines:\n else:\n # Stringifying argvalues for eval call.\n # Note that more elegant solution could require Python\n # join function to be employed\n strparams = \"(\"\n for val in argvalues:\n strparams += str(val)\n strparams += ','\n \n # Removing trailing comma and replacing it with ')'\n strparams = strparams[0:len(strparams)-1] + \")\"\n \n # Handling case when there are no args:\n if len(argvalues) == 0:\n strparams = \"()\"\n \n # Checking if it is turtle subroutine\n if self.name in dir(turtle):\n result = eval(\"turtle.\" + self.name + strparams)\n # Checking if it is math subroutine\n elif self.name in dir(math):\n result = eval(\"math.\" + self.name + strparams)\n # Checking if it is random subroutine \n elif self.name in dir(random):\n result = eval(\"random.\" + self.name + strparams)\n # Checking if it is repcount built in subroutine\n elif self.name == 'repcount':\n if repcount < 0:\n print(\"Repcount undefined\")\n if mode == 0:\n sys.exit(-1)\n else:\n error = True\n result = repcount\n # If function was not recognized...\n else:\n print(\"Unrecognized function: \" + self.name)\n if mode == 0:\n sys.exit(-1)\n else:\n error = True\n \n # Recreating some globals in spirit of calling convention...\n vardict = tmp.copy()\n returning = tmpreturning\n funkyname = tmpfunkyname\n \n return result\n \n @addToClass(AST.Repeat)\n def eval(self, context):\n global vardict, repcount\n \n # Protecting vardict from local changes (useful when implementing scoping)\n tmp = vardict.copy()\n \n # Evaluating number of iterations...\n rang = self.times.eval(context)\n \n # ... And evaluating instructions that many times.\n for i in range(rang):\n repcount = i\n for ins in self.instructions:\n ins.eval(context)\n repcount = -1\n vardict = tmp.copy()\n \n @addToClass(AST.IfElse)\n def eval(self, context):\n global vardict\n \n # Protecting vardict from local changes (useful when implementing scoping)\n tmp = vardict.copy()\n \n # Evaluating condition and based on that evaluating different list of instructions\n cond = self.condition.eval(context)\n if cond:\n for i in self.instr1:\n i.eval(context)\n else:\n for i in self.instr2:\n i.eval(context)\n \n vardict = tmp.copy()\n \n @addToClass(AST.For)\n def eval(self, context):\n global vardict\n \n # Protecting vardict from local changes (useful when implementing scoping)\n tmp = vardict.copy()\n \n # Preparing start, stop and step for for statement\n start = self.start.eval(context)\n stop = self.stop.eval(context)\n step = self.step.eval(context)\n \n # Simulating Logo for loop using Python while loop\n i = start\n while i <= stop:\n vardict[self.varname] = i\n for ins in self.instructions:\n ins.eval(context)\n i += step\n \n vardict = tmp.copy()\n \n @addToClass(AST.Arithm)\n def eval(self, context):\n \n # Evaluating recursively arithmetic expression\n val1 = self.expr1.eval(context)\n val2 = self.expr2.eval(context)\n return eval(str(val1) + self.operator + str(val2))\n \n @addToClass(AST.Compar)\n def eval(self, context):\n \n # Performing comparison operation in Logo using Python\n # comparison capabilities. 
Note that there is special\n # treatment of equality checking\n val1 = self.expr1.eval(context)\n val2 = self.expr2.eval(context)\n if self.operator == \"=\":\n return eval(str(val1) + self.operator + \"=\" + str(val2))\n else:\n return eval(str(val1) + self.operator + str(val2))\n \n @addToClass(AST.Const)\n def eval(self, context):\n return self.val\n \n # One thing to reckon is that when creating closure one have\n # to remember current vardict state and save it under closuredict\n @addToClass(AST.Func)\n def eval(self, context):\n global vardict\n \n # Note that because there is really no special calling convention\n # treatment it is possible that closures assigned in inner function\n # (using closure mechanisms) will be visible in outer functions\n # It may feel buggy but it's intentional, that once we evaluated function\n # we keep it in global state the same way we deal with other user defined\n # functions.\n self.closuredict = vardict.copy()\n vardict[self.name] = self\n \n # When evaluating Closure object we return Func object for it\n @addToClass(AST.Closure)\n def eval(self, context):\n global vardict\n return vardict[self.name]\n \n# Literals used by interpreter\nliterals = ':\"[](),+-*/=<>'\n\n# Tokens which are not literals\ntokens = [ \"WORD\", \"FLOAT\", \"INTEGER\", \n \"NEQ\", \"LE\", \"GE\" ];\n\n# Keywords or reserved words in Logo language\nreserved = {\n 'ifelse' : 'IFELSE',\n 'repeat' : 'REPEAT',\n 'make' : 'MAKE',\n 'for' : 'FOR',\n 'to' : 'TO',\n 'end' : 'END'\n}\ntokens += reserved.values()\n\n# Ignoring tabs and spaces\nt_ignore = ' \\t'\n\n# Ignoring comments which are created using ';' delimeter\nt_ignore_COMMENT = r'\\;.*'\n\n# Handling new lines\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n# On scanner error\ndef t_error(t):\n global mode, error\n print (\"Illegal character {0} at line {1}\".format(t.value[0], t.lexer.lineno))\n if mode == 0:\n sys.exit(-1)\n else:\n error = True\n \n# Lexems:\ndef t_WORD(t):\n r\"[a-zA-Z_]\\w*\"\n if t.value in reserved:\n t.type = reserved[t.value]\n return t\n \ndef t_FLOAT(t):\n r\"\\d+(\\.\\d*)|\\.\\d+\"\n global typ\n typ = t.type\n return t\n\ndef t_INTEGER(t):\n r\"\\d+\"\n global typ\n typ = t.type\n return t\n\ndef t_LE(t):\n r\"<=\"\n return t\n\ndef t_GE(t):\n r\">=\"\n return t\n\ndef t_NEQ(t):\n r\"!=\"\n return t\n\n# Precedence definitions\nprecedence = (\n (\"nonassoc\", '<', '>', '=', 'NEQ', 'LE', 'GE'),\n (\"left\", '+', '-'),\n (\"left\", '*', '/') )\n\n# Handling parser errors\ndef p_error(p):\n global mode, error\n if p != None:\n print(\"Syntax error at token: {0} in line: {1}\".format(p.type, p.lineno))\n if mode == 0:\n sys.exit(-1)\n else:\n error = True\n\n# Parsing stuff (consult chapter 2 in dokumentacja.pdf for more info about grammar)\ndef p_program(p):\n \"\"\"program : instr_list\n | func_list\n | func_list instr_list\"\"\"\n global tree\n \n if tree == None:\n if len(p) == 2:\n if isinstance(p[1][0], AST.Func):\n tree = AST.Prog(p[1], [])\n else: \n tree = AST.Prog([], p[1])\n else:\n tree = AST.Prog(p[1], p[2])\n else:\n if len(p) == 2:\n if isinstance(p[1][0], AST.Func):\n tree = AST.Prog(combine(tree.functions, p[1]), [])\n else:\n tree = AST.Prog(tree.functions, p[1])\n else:\n tree = AST.Prog(combine(tree.functions, p[1]), p[2])\n p[0] = tree \n\ndef p_func_list(p):\n \"\"\"func_list : func\n | func_list func\"\"\"\n if len(p) == 2:\n p[0] = p[1]\n else:\n p[0] = p[1] + p[2]\n \ndef p_instr_list(p):\n \"\"\"instr_list : instr\n | instr_list instr\"\"\"\n if len(p) 
== 2:\n p[0] = p[1]\n else:\n p[0] = p[1] + p[2]\n \ndef p_func(p):\n \"\"\"func : TO WORD '(' params ')' extended_instr_list END\"\"\"\n p[0] = [AST.Func(p[2], p[4], p[6])]\n \ndef p_params(p):\n \"\"\"params :\n | ref\n | params ref\"\"\"\n if len(p) == 1:\n p[0] = []\n elif len(p) == 2:\n if not isinstance(p[1], list):\n p[1] = [p[1]]\n p[0] = p[1]\n else:\n if not isinstance(p[1], list):\n p[1] = [p[1]]\n if not isinstance(p[2], list):\n p[2] = [p[2]]\n p[0] = p[1] + p[2]\n \ndef p_extended_instr_list(p):\n \"\"\"extended_instr_list : instr\n | func\n | extended_instr_list instr\n | extended_instr_list func\"\"\"\n if len(p) == 2:\n p[0] = p[1]\n else:\n p[0] = p[1] + p[2]\n \ndef p_ref(p):\n \"\"\"ref : ':' WORD\"\"\"\n p[0] = AST.Var(p[2])\n \ndef p_instr(p):\n \"\"\"instr : MAKE quoted expr\n | REPEAT expr '[' instr_list ']'\n | FOR '[' WORD expr expr expr ']' '[' instr_list ']'\n | FOR '[' WORD expr expr ']' '[' instr_list ']'\n | IFELSE condition '[' instr_list ']' '[' instr_list ']'\n | WORD '(' expr_list ')'\"\"\"\n if len(p) == 4:\n p[0] = [AST.Make(p[2], p[3])]\n elif len(p) == 5:\n p[0] = [AST.Call(p[1], p[3])]\n elif len(p) == 6:\n p[0] = [AST.Repeat(p[2], p[4])]\n elif len(p) == 9:\n p[0] = [AST.IfElse(p[2], p[4], p[7])]\n elif len(p) == 10:\n p[0] = [AST.For(p[3], p[4], p[5], AST.Const(1, \"integer\"), p[8])]\n else:\n p[0] = [AST.For(p[3], p[4], p[5], p[6], p[9])]\n \ndef p_expr_list(p):\n \"\"\"expr_list :\n | expr\n | expr_list ',' expr\"\"\"\n if len(p) == 1:\n p[0] = []\n elif len(p) == 2:\n if not isinstance(p[1], list):\n p[1] = [p[1]]\n p[0] = p[1]\n else:\n if not isinstance(p[1], list):\n p[1] = [p[1]]\n if not isinstance(p[3], list):\n p[3] = [p[3]]\n p[0] = p[1] + p[3]\n \ndef p_expr(p):\n \"\"\"expr : const\n | name\n | ref\n | WORD '(' expr_list ')'\n | expr '+' expr\n | expr '-' expr\n | expr '*' expr\n | expr '/' expr\n | '(' expr ')'\"\"\"\n if len(p) == 2:\n p[0] = p[1]\n elif len(p) == 5:\n p[0] = AST.Call(p[1], p[3])\n elif p[1] == '(':\n p[0] = p[2]\n else:\n p[0] = AST.Arithm(p[2], p[1], p[3])\n\ndef p_condition(p):\n \"\"\"condition : expr '=' expr\n | expr '<' expr\n | expr '>' expr\n | expr NEQ expr\n | expr GE expr\n | expr LE expr\"\"\"\n p[0] = AST.Compar(p[2], p[1], p[3])\n \ndef p_quoted(p):\n \"\"\"quoted : '\"' WORD\"\"\"\n p[0] = AST.Var(p[2])\n \ndef p_name(p):\n \"\"\"name : WORD \"\"\"\n p[0] = AST.Closure(p[1])\n \ndef p_const(p):\n \"\"\"const : FLOAT\n | INTEGER\"\"\"\n global typ, mode, error\n if typ == 'FLOAT':\n p[0] = AST.Const(float(p[1]), \"float\")\n elif typ == 'INTEGER':\n p[0] = AST.Const(int(p[1]), \"integer\")\n else:\n print(\"Unrecognized type!\")\n if mode == 0:\n sys.exit(-1)\n else:\n error = True\n\ndef combine(oldlist, newlist):\n \"\"\"Combines oldlist and newlist of functions\n by replacing functions from oldlist which have the same name with\n functions from newlist\"\"\"\n result = []\n for f1 in oldlist:\n app = True\n for f2 in newlist:\n if f1.name == f2.name:\n app = False\n if app:\n result.append(f1)\n \n for f in newlist:\n result.append(f)\n \n return result\n\ndef treeinfo(tree):\n \"\"\"\n Debugging function used to display something from constructed AST\n \"\"\"\n print(tree)\n for f in tree.functions:\n print(\"Function: \" + f.name)\n print(\"Params: \")\n for p in f.params:\n print(p.name)\n print(\"Instructions: \")\n for i in f.instructions:\n print(i)\n \n print(\"Module instructions: \") \n for i in tree.instructions:\n print(i)\n \ndef main():\n global mode, error\n lexer = lex.lex()\n parser 
= yacc.yacc()\n \n # Normal mode: \n if len(sys.argv) > 1:\n mode = 0\n file = open(sys.argv[1])\n text = file.read()\n parser.parse(text, lexer=lexer)\n # treeinfo(tree)\n # Evaluating or interpreting program\n tree.eval() \n raw_input(\"Type anything to exit the program...\")\n else:\n # Interactive mode:\n mode = 1\n while True:\n error = False\n text = raw_input(\"<< \")\n if text == \"exit\":\n break\n elif text == \"help\":\n print(\"Type exit to exit the program.\")\n print(\"Type logo command to continue.\")\n else:\n parser.parse(text, lexer=lexer)\n if not error: \n tree.eval()\n else:\n print(\"Syntax error has occurred\")\n \nif __name__ == '__main__':\n main()\n","repo_name":"michal3141/logo_interpreter","sub_path":"logo/logo.py","file_name":"logo.py","file_ext":"py","file_size_in_byte":21323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"2246742339","text":"import logging\nimport options.iex\nimport os\nimport csv\n\nroot = logging.getLogger()\nroot.setLevel(logging.INFO)\n\nBATCH_SIZE = 500\n\n\ndef update_membership(data_path):\n logging.info('starting to update universe.symbols')\n\n symbols = iex.get_all_symbols()\n\n with open(f'{data_path}/universe.csv', 'w') as w:\n writer = csv.writer(w)\n for i, params in enumerate(symbols):\n symbol = params['symbol']\n if (i != 0) and (i % 100) == 0:\n logging.info(f'processed: {i}')\n\n if (i != 0) and (i % BATCH_SIZE) == 0:\n logging.info(f'batch {i / BATCH_SIZE}')\n in_universe = iex.has_dividends(symbol) and iex.has_options(symbol)\n\n if in_universe:\n writer.writerow([symbol])\n\n\nif __name__ == '__main__':\n iex = options.iex.IEX()\n iex.token = os.getenv('IEX_TOKEN')\n update_membership(os.getenv('DATA_PATH'))\n","repo_name":"gstvolvr/options","sub_path":"options/daos/yearly/update_universe.py","file_name":"update_universe.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"41694637392","text":"from django.shortcuts import redirect, render\nfrom django.contrib.auth.models import User\n\nfrom account.forms import StartForm, NameForm\nfrom account.models import Account\n\n\ndef start(request):\n context = {}\n if request.POST:\n form = StartForm(request.POST)\n if form.is_valid():\n new_account = form.save(commit=False)\n email = form.cleaned_data.get('email').lower()\n password = form.cleaned_data.get('phone')\n first_name = form.cleaned_data.get('first_name').lower()\n last_name = form.cleaned_data.get('last_name').lower()\n new_user = User.objects.create_user(username=email, email=email, password=password)\n print(new_user)\n new_account.user = new_user\n new_account.save()\n request.session['email'] = new_user.email \n return redirect(\"questions\")\n # return redirect(\"register\")\n\n else:\n context['form'] = form\n return render(request, 'account/start.html', context)\n\n\ndef register(request):\n context = {}\n # del form.fields[\"email\"]\n # del form.fields[\"phone\"]\n email = request.session.get('email')\n request.session['email'] = email\n user = User.objects.get(username=email)\n account = Account.objects.get(user=user)\n\n if request.POST:\n print('Reauest.POST Method')\n form = NameForm(request.POST or None, instance=account)\n print(form)\n if form.is_valid():\n updated_account = form.save(commit=False)\n first_name = form.cleaned_data.get('first_name').lower()\n last_name = form.cleaned_data.get('last_name').lower()\n 
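# Store the lowercased names on the Account instance that form.save(commit=False) returned above.\n 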
updated_account.first_name = first_name\n updated_account.last_name = last_name\n updated_account.save()\n return redirect(\"questions\")\n else:\n context['form'] = form\n\n return render(request, 'account/register.html', context)\n","repo_name":"Francis-Ebere-Emeafu/gcosfinance","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"26396212044","text":"# library of OpenCV\nimport cv2\n\n# first, we define the main program as a function\ndef main():\n\n # here 0 means the first webcam, you can use 1 to specify the second, and so on\n _cap = cv2.VideoCapture(0)\n \n # if you want to specify the webcam resolution\n #x (horizontal)\n #x = 1024\n #_cap.set(3,x);\n #y (vertical)\n #y=768\n #_cap.set(4,y)\n\n try:\n while(True): # repeatedly do the following until 1) 'q' is pressed or 2) an exception occurs\n \n ret, _frame = _cap.read() # read a frame from the webcam\n \n _my_name = \"Dan Shor\"\n \n cv2.putText(_frame,'Name: - %s'%_my_name,(100, 100),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)\n \n cv2.imshow(\"My Mirror\", _frame) # show the image in a window\n \n if cv2.waitKey(1) & 0xFF == ord('q'): # specify how to quit the program\n break\n \n except (KeyboardInterrupt, SystemExit):\n print(\"wrong exit\") \n\n _cap.release() # When everything is done, release the capture\n \n cv2.destroyAllWindows() # destroy the cv windows\n \n print(\"Main program finished\") # specify that the main program is finished\n\n# here is the start of the main program\nif __name__ == '__main__':\n main()","repo_name":"Dutchie3719/SST","sub_path":"Assignment 1/SST_001_Mirror.py","file_name":"SST_001_Mirror.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"38632906375","text":"'''\nAgenda:\nWorking with radio buttons & check boxes.\n1.) check whether a radio button & check box is selected or not : isSelected()\n2.) 
Click radio button & checkbox\n'''\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\ndriver=webdriver.Chrome(executable_path='C:\\\\Users\\\\Dell\\\\PycharmProjects\\\\selenium\\\\drivers\\\\chromedriver.exe')\n# driver=webdriver.Firefox(executable_path='C:\\\\Users\\\\Dell\\\\PycharmProjects\\\\selenium\\\\drivers\\\\geckodriver.exe')\n# driver=webdriver.Ie(executable_path='C:\\\\Users\\\\Dell\\\\PycharmProjects\\\\selenium\\\\drivers\\\\IEDriverServer.exe')\n\ndriver.get('https://fs2.formsite.com/meherpavan/form2/index.html?')\nstatus=driver.find_element_by_id('RESULT_RadioButton-7_0').is_selected()\nprint(\"Radio button status is:\",status) # False\n\n\ndriver.find_element_by_css_selector(\"[for='RESULT_RadioButton-7_0']\").click() # selected radio button\n\nstatus = driver.find_element_by_css_selector(\"[name='RESULT_RadioButton-7']\").is_selected()\nprint(\"Radio button status is:\",status,'\\n') #True\n\n\n\n\n\nstatus=driver.find_element_by_id('RESULT_CheckBox-8_0').is_selected()\nprint(\"Check box status is:\",status) # False\n\n\ndriver.find_element_by_css_selector(\"[for='RESULT_CheckBox-8_0']\").click() # selected check box\n\n# driver.find_element_by_css_selector(\"[for='RESULT_CheckBox-8_6']\").click() # selected check box\n\n\nstatus=driver.find_element_by_id('RESULT_CheckBox-8_0').is_selected()\nprint(\"Check box status is:\",status) #True\n\n\n\n","repo_name":"pks369/selenium-py","sub_path":"radioButton&Checkboxes_08.py","file_name":"radioButton&Checkboxes_08.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"43951214000","text":"from typing import Dict, List\nimport requests\n\nfrom model.bible_model import Book, Chapter, Testament, Verse\nfrom dotenv import dotenv_values\n\nclass BibleService():\n \n def get_token(self):\n config = dotenv_values(\".env\") \n return config['TOKEN']\n\n async def get_chapter(self, book_name: str, chapter_number: int) -> List[Chapter]:\n url = f'https://www.abibliadigital.com.br/api/verses/nvi/{book_name}/{chapter_number}'\n response = requests.get(url, headers={\n 'Authorization': 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6Ik1vbiBBcHIgMTEgMjAyMiAwOTo1NjowMiBHTVQrMDAwMC5mbGF2aW9qbWVuZGVzQGdtYWlsLmNvbSIsImlhdCI6MTY0OTY3MDk2Mn0.AVgPcTMfIrFduhHt5v0ytVmGbHJGENtnK1RIujM4bBE'})\n data = response.json()\n chapter_model = Chapter()\n for chapter in data['verses']:\n chapter_model.verses.append(chapter['text'])\n return chapter_model\n\n async def get_bible(self) -> Dict[str, Testament]:\n r = requests.get(url='https://www.abibliadigital.com.br/api/books', headers={\n 'Authorization': 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6Ik1vbiBBcHIgMTEgMjAyMiAwOTo1NjowMiBHTVQrMDAwMC5mbGF2aW9qbWVuZGVzQGdtYWlsLmNvbSIsImlhdCI6MTY0OTY3MDk2Mn0.AVgPcTMfIrFduhHt5v0ytVmGbHJGENtnK1RIujM4bBE'})\n data = r.json()\n books = {'vt': Testament(abbrev=\"vt\", name=\"Velho Testamento\"), 'nt': Testament(\n abbrev=\"vt\", name=\"Novo Testamento\")}\n\n for book in data:\n book_model = Book(\n name=book['name'], abbrev=book['abbrev']['pt'], chapters=book['chapters'])\n if book['testament'] == 'VT':\n books['vt'].books.append(book_model)\n else:\n books['nt'].books.append(book_model)\n\n return books\n\n async def get_random_verse(self) -> Verse:\n r = requests.get(url='https://www.abibliadigital.com.br/api/verses/nvi/random', headers={\n 'Authorization': 'Bearer 
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6Ik1vbiBBcHIgMTEgMjAyMiAwOTo1NjowMiBHTVQrMDAwMC5mbGF2aW9qbWVuZGVzQGdtYWlsLmNvbSIsImlhdCI6MTY0OTY3MDk2Mn0.AVgPcTMfIrFduhHt5v0ytVmGbHJGENtnK1RIujM4bBE'})\n data = r.json()\n verse_model = Verse(book=data['book']['name'], chapter=data['chapter'], verse=data['number'], text=data['text'])\n return verse_model","repo_name":"flaviojmendes/bible-api","sub_path":"service/bible_service.py","file_name":"bible_service.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"17750848032","text":"def parse(input):\n input = input.replace(\"#\", \"1\").replace(\".\", \"0\")\n lines = input.strip().split(\"\\n\")\n\n algo = lines[0]\n image = [list(line) for line in lines[2:]]\n return algo, image\n\n\ndef solve(input):\n algo, image = input\n border = \"0\"\n image = pad(image, 3, border)\n for k in range(50):\n image, border = compute(algo, image, border)\n image = pad(image, 2, border)\n if k == 1:\n part1 = sum(\n sum(int(c) for c in row[1:-1]) for row in image[1:-1]\n )\n\n return part1, sum(\n sum(int(c) for c in row[1:-1]) for row in image[1:-1]\n )\n\n\ndef pad(image, n, b=\"0\"):\n # This is a bit of a hack. Basically we want to pad around the edges of\n # the current image so there are at least n rows with the border state\n # b. The easy way to do that is to add n rows each time. But that makes\n # calculations slower. A better way is to \"top up\" the border by detecting\n # the number of border rows already present.\n current = 0\n for row in image:\n if all(c == b for c in row):\n current += 1\n else:\n break\n\n current2 = 0\n for row in reversed(image):\n if all(c == b for c in row):\n current2 += 1\n else:\n break\n\n n -= min(current, current2)\n\n zeros = [b for _ in range(len(image[0]) + 2 * n)]\n return [zeros for _ in range(n)] + [\n [b for _ in range(n)] + row + [b for _ in range(n)]\n for row in image\n ] + [zeros for _ in range(n)]\n\n\ndef compute(algo, image, border):\n\n new = [[\"0\" for _ in range(len(image[0]))] for _ in range(len(image))]\n\n border_shading = algo[int(border * 9, 2)]\n\n for r in range(len(image)):\n for c in range(len(image[0])):\n new[r][c] = algo[get_binary(image, r, c, border)]\n\n # Colour the border too\n for row in image:\n row[0] = row[-1] = border_shading\n\n image[0] = [border_shading for _ in range(len(image[0]))]\n image[-1] = [border_shading for _ in range(len(image[0]))]\n\n return new, border_shading\n\n\ndef get_binary(image, r, c, border):\n res = []\n for rr in (r-1, r, r+1):\n for cc in (c-1, c, c+1):\n try:\n res.append(image[rr][cc])\n except IndexError:\n res.append(border)\n\n return int(\"\".join(res), 2)\n\n\ndef print_image(image):\n for row in image:\n print(\"\".join(row[:175]).replace(\"0\", \".\").replace(\"1\", \"#\"))\n","repo_name":"olliemath/AdventOfCode","sub_path":"2021/python/day_20/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"6847656642","text":"'''\nCreated on 13/8/2014\n\n@author: victor\n'''\nimport unittest\nimport hivprotmut.sequences.test.data as data\nimport os\nfrom hivprotmut.sequences.fastaFile import FastaFile, FastaFileHandler\nimport cStringIO\n\nclass TestFasta(unittest.TestCase):\n \n def test_open_and_close(self):\n fasta_handler = FastaFile.open(os.path.join(data.__path__[0], \"HIV.fasta\"), \"r\")\n \n 
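# Read the same file directly so the test can compare it with what the handler exposes.\n 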
expected_contents = open(os.path.join(data.__path__[0], \"HIV.fasta\"),\"r\").read()\n fasta_contents = fasta_handler.handler.read()\n fasta_handler.close()\n \n self.assertEqual(expected_contents, fasta_contents)\n self.assertRaises(IOError, fasta_handler.close)\n\n def test_write(self):\n sequence = open(os.path.join(data.__path__[0], \"HIV.seq\"), \"r\").read()\n expected_contents = open(os.path.join(data.__path__[0], \"HIV.fasta\"),\"r\").read()\n handler = cStringIO.StringIO()\n fasta_handler = FastaFileHandler(handler)\n fasta_handler.write(\"HIV\", sequence)\n self.assertEqual(expected_contents, handler.getvalue())\n fasta_handler.close()\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","repo_name":"victor-gil-sepulveda/PhD-HIVProteaseMutation","sub_path":"hivprotmut/sequences/test/testFastaFileHandler.py","file_name":"testFastaFileHandler.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"22929426130","text":"a=input().split()\nx=int(a[0])\ny=int(a[1])\n\n\nfor i in range(x+1,y):\n t=i\n sum=0\n while(t!=0):\n r=t%10\n sum=sum+(r*r*r)\n t=t//10\n if(sum==i):\n print(i)\n else:\n continue\n \n \n","repo_name":"Dheepashree/guvi2","sub_path":"armst_print.py","file_name":"armst_print.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"3833280133","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport time\nfrom requests.adapters import HTTPAdapter\nfrom requests.exceptions import ConnectionError\n\nfrom multiprocessing import Pool\n\nheaders = {'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'} # imitates browser behavior\n\n\ndef get_html(url):\n # r = requests.get(url) # Response\n # return r.text # returns the HTML code\n\n session = requests.Session() # keeps the session alive over time (imitating a human)\n request = session.get(url, headers=headers) # imitates opening the page in a browser\n return request.content\n\n\ndef get_html_for_page(url):\n adapter = HTTPAdapter(max_retries=3)\n session = requests.Session()\n # use `adapter` for all requests that start with the given URL\n session.mount(url, adapter)\n\n try:\n r = session.get(url)\n return r.content\n except ConnectionError as ce:\n print(ce)\n\n\ndef get_all_links(html):\n soup = BeautifulSoup(html, 'lxml')\n\n tds = soup.find('div', class_=\"cmc-table\").find_all('td',\n class_=\"cmc-table__cell cmc-table__cell--sticky cmc-table__cell--sortable cmc-table__cell--left cmc-table__cell--sort-by__name\")\n links = []\n for td in tds:\n a = td.find('a').get('href')\n link = 'https://coinmarketcap.com' + a\n links.append(link)\n\n return links\n\n\ndef get_page_data(html):\n soup = BeautifulSoup(html, 'lxml')\n try:\n name = soup.find('div', class_=\"cmc-details-panel-header\").find('h1').text\n except:\n name = ''\n try:\n price = soup.find('div', class_=\"cmc-details-panel-price jta9t4-0 fcilTk\").find('span',\n class_=\"cmc-details-panel-price__price\").text.strip()\n except:\n price = ''\n\n data = {'name': name, 'price': price}\n\n return data\n\n\ndef write_csv(data):\n with open('coinmarketcap.csv', 'a') as f:\n writer = csv.writer(f)\n 
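# Each parsed coin is appended to the CSV as a single \"name,price\" row.\n 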
writer.writerow((data['name'],\n data['price']))\n\n print(data['name'], 'parsed')\n\n\ndef make_all(url):\n html = get_html_for_page(url)\n data = get_page_data(html)\n write_csv(data)\n\n\ndef main():\n url = \"https://coinmarketcap.com/all/views/all/\"\n all_links = get_all_links(get_html(url))\n\n # for url in all_links:\n # html = get_html_for_page(url)\n # data = get_page_data(html)\n # write_csv(data)\n\n with Pool(40) as p:\n p.map(make_all, all_links)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ShostakAnton/parsing_coinmarketcap","sub_path":"coinmarketcap.py","file_name":"coinmarketcap.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"1010496517","text":"import pygame\nimport time\nfrom keys import Key\nfrom copy import deepcopy\n\nclass KeyState:\n\tpressed: bool = False\n\treleased: bool = False\n\tcode: int = -1\n\n\tdef __init__(self, code, pressed, released):\n\t\tself.code = code\n\t\tself.pressed = pressed\n\t\tself.released = released\n\n\tdef __deepcopy__(self, memo):\n\t\tcls = self.__class__;\n\t\tresult = cls.__new__(cls)\n\t\tmemo[id(self)] = result\n\t\tfor k, v in self.__dict__.items():\n\t\t\tsetattr(result, k, deepcopy(v, memo))\n\t\treturn result\n\nclass InputManager:\n\t_currkeys: dict = {}\n\t_prevkeys: dict = {}\n\t_start: float = 0\n\n\tdef __init__(self):\n\t\tfor key in Key:\n\t\t\tself._currkeys.update({int(key): KeyState(int(key), False, True)})\n\t\t\tself._prevkeys.update({int(key): KeyState(int(key), False, True)})\n\n\tdef update(self):\n\t\t\"\"\"\\\n\t\t\\nMust be called after methods that\\\n\t\t\\ncall is_pressed is_justpressed or is_justreleased.\\\n\t\t\\nIn other words at the end of your update method.\\\n\t\t\"\"\"\n\t\tself._prevkeys = deepcopy(self._currkeys)\n\n\tdef setstate(self, event):\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tcode = event.key\n\t\t\tself._currkeys[code].pressed = True\n\t\t\tself._currkeys[code].released = False\n\n\t\tif event.type == pygame.KEYUP:\n\t\t\tcode = event.key\n\t\t\tself._currkeys[code].released = True\n\t\t\tself._currkeys[code].pressed = False\n\n\tdef is_justpressed(self, key: Key):\n\t\tcurrstate = self._currkeys[int(key)].pressed\n\t\tprevstate = self._prevkeys[int(key)].pressed\n\n\t\tif currstate and currstate != prevstate:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef is_justreleased(self, key: Key):\n\t\tcurrstate = self._currkeys[int(key)].released\n\t\tprevstate = self._prevkeys[int(key)].released\n\n\t\tif currstate and currstate != prevstate:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef is_pressed(self, key: Key, **keywords):\n\t\tdelay = keywords.get(\"delay\", 0)\n\t\tresult = self._currkeys[int(key)].pressed\n\t\tif result and (time.time() - self._start) >= delay:\n\t\t\tself._start = time.time()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False","repo_name":"MonkeyToiletLadder/Olex","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"896825107","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 8 18:37:57 2018\n\n@author: tnye\n\"\"\"\n\n# Third party imports\nimport numpy as np\nimport pandas as pd\n\n\n# Path to rake values that original data frame code could not grab. 
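\n# Each line of that file is expected to look like \"eventID,rake\"; the parsing loop below relies on that layout.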
+{"seq_id":"896825107","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 8 18:37:57 2018\n\n@author: tnye\n\"\"\"\n\n# Third party imports\nimport numpy as np\nimport pandas as pd\n\n\n# Path to rake values that the original data frame code could not grab.\nrake_path = '/Users/tnye/PROJECTS/Duration/data/missing_rake.csv'\n\n# Import list of rake angles.\nrake = []\nwith open(rake_path) as f:\n    lines = f.readlines()\nfor i in range(len(lines)):\n    lines[i] = lines[i].split(',')\n    rake.append((lines[i][0], float(lines[i][1])))\nrake_names = []\nfor i in range(len(lines)):\n    rake_names.append(lines[i][0])\n\n# Read in data frame.\ndf = pd.read_csv('/Users/tnye/PROJECTS/Duration/data/dataframes/duration_data.csv')\n\n# Import event IDs and rake angles.\nevids = np.array(df['USGS_eventID'])\ndfrake = np.array(df['rake_angle'])\n\n# Fill in missing rake angles from the text file. List membership avoids the\n# false partial matches that a substring test on a comma-joined string could produce.\nfor i in range(len(evids)):\n    if evids[i] in rake_names:\n        for data in rake:\n            if data[0] == evids[i]:\n                df.at[i, 'rake_angle'] = data[1]  # .at replaces the removed DataFrame.set_value\n\n# Add changes to data frame\ndf.to_csv('/Users/tnye/PROJECTS/Duration/data/dataframes/duration_data.csv', index=False)\n","repo_name":"taranye96/duration2","sub_path":"add_rake.py","file_name":"add_rake.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
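+# [Editor's aside, not an original dataset record.] DataFrame.at, used in the add_rake.py fix above,
+# is pandas' fast label-based scalar setter and the modern replacement for the removed set_value.
+# The toy frame below is illustrative only:
+import pandas as pd
+
+demo = pd.DataFrame({'rake_angle': [0.0, 0.0]})
+demo.at[1, 'rake_angle'] = 90.0  # equivalent to the old demo.set_value(1, 'rake_angle', 90.0)
+assert demo.loc[1, 'rake_angle'] == 90.0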
+{"seq_id":"23774683310","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# This is the template for BMI3 Coding Challenge (A)\n\n# Technical Instructions\n# Timing:\n# You have 2 hours to complete this assignment.\n# How many questions:\n# There are 11 questions (question sections 1-6); you only need to choose\n# one out of the two for each question section 1-5 (i.e. in total, you only\n# need to answer 6 questions). If you answer more than 6 questions, the staff\n# will only mark the first question of the two within the same question section.\n\n\n# Installation before the exam:\n# As stated in the final exam instructions (released 2 weeks before the final),\n# you should install the numpy, pandas, matplotlib and seaborn python packages before the exam.\n# If you have any problem installing these packages, please contact the teaching faculties\n# or TAs as soon as possible.\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom typing import List, Optional\n\n# Which python modules you can use:\n# You can use any module you feel is useful unless it's specified in the question\n# that you cannot use it.\n\n# SECTION 1 (15 points)\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 1.1: Debug the code finding overlapping sequences    ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n#\n# sequence_1 = 'TNEKLFFGSGTQLSVL'\n# sequence_2 = 'CASSGGPENEKLFF'\n#\n# def overlap_with_truncation(sequence_1: str, sequence_2: str):\n#     for n in range(len(sequence_1)):\n#         for i in range(1,n):\n#             if sequence_2.endswith(sequence_1[i:n]):\n#                 return sequence_1[i:n]\n#\n# print(overlap_with_truncation(sequence_1, sequence_2))\n#\n# #Explanation (less than 50 words):\n#\n# #Correct Code:\n# def overlap_with_truncation_corrected(sequence_1: str, sequence_2: str):\n#     # REMOVE THE FOLLOWING LINE AND WRITE YOUR CODE HERE\n#     raise NotImplementedError()\n#\n# print(overlap_with_truncation_corrected(sequence_1, sequence_2))\n\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 1.2: Debug the code adding ones to each element      ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n# Note: You should not use the `copy` module in your correction\n\na = [[1, 2], [2, 3], [3, 4]]\n\n\ndef add_one(arr: List[List[int]]):\n    ret = []\n    for i in arr:\n        i[0] += 1\n        ret.append(i)\n    return arr, ret\n\n\nprint(add_one(a))\n\n\n# Explanation (less than 50 words):\n# This code only adds 1 to the first element of each sublist, and it mutates the input in place,\n# so `arr` and `ret` hold the very same list objects: both come back as [[2, 2], [3, 3], [4, 4]].\n\n# Correct Code:\ndef add_one_corrected(arr: List[List[int]]):\n    modified = []\n    for i in arr:\n        modified_sublist = []\n        for number in i:\n            modified_sublist.append(number + 1)\n        modified.append(modified_sublist)\n    return arr, modified\n\n\nprint(add_one_corrected(a))\n\n# SECTION 2 (15 points)\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 2.1: Sorting genomic locations                       ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\n# example_input_2_1 = [\n#     'chr11-9159454-9159490',\n#     'chr8-37559304-37559340',\n#     'chr1-28764748-28764784',\n#     'chr7-142219166-142219202',\n#     'chr1-205493625-205493661'\n# ]\n#\n#\n# def sort_genomic_locations(locations: List[str]):\n#     # REMOVE THE FOLLOWING LINE AND WRITE YOUR CODE HERE\n#     raise NotImplementedError()\n#\n#\n# print(sort_genomic_locations(example_input_2_1))\n\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 2.2: Sorting amino acid sequences                    ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\nmolecular_weights = {\n    'A': 89.1,\n    'R': 174.2,\n    'N': 132.12,\n    'D': 133.11,\n    'C': 121.16,\n    'E': 147.13,\n    'Q': 146.15,\n    'G': 75.07,\n    'H': 155.16,\n    'O': 131.13,\n    'I': 131.18,\n    'L': 131.18,\n    'K': 146.19,\n    'M': 149.21,\n    'F': 165.19,\n    'P': 115.13,\n    'U': 139.11,\n    'S': 105.09,\n    'T': 119.12,\n    'W': 204.23,\n    'Y': 181.19,\n    'V': 117.15\n}\n\nexample_input_2_2 = [\n    'FEAOHLGPQF',\n    'GORPDWMDO',\n    'TSICEERU',\n    'AMUTATQE',\n    'IWEUEGWVY',\n    'NGMKKOHYDU',\n    'RUNWYIRCD',\n    'AVUYEYOVTM',\n    'FURGIWDEF',\n    'DLQPRYPAS',\n]\n\n# Implement the merge sort\ndef MergeSort(alist):\n    if len(alist) <= 1:\n        return alist[:]\n    else:\n        medium = len(alist)//2\n        left_list = MergeSort(alist[:medium])\n        right_list = MergeSort(alist[medium:])\n        return Merge(left_list, right_list)
\n\n\ndef Merge(left_list, right_list):\n    result = []\n    i = 0\n    j = 0\n    while i < len(left_list) and j < len(right_list):\n        if left_list[i] < right_list[j]:\n            result.append(left_list[i])\n            i += 1\n        else:\n            result.append(right_list[j])\n            j += 1\n    while i < len(left_list):\n        result.append(left_list[i])\n        i += 1\n    while j < len(right_list):\n        result.append(right_list[j])\n        j += 1\n    return result\n\n\ndef sortby_molecular_weight(sequences: List[str]):\n    res = []\n    weights = []\n    for sequence in sequences:\n        one_weight = 0\n        for aa in sequence:\n            one_weight += molecular_weights[aa]\n        weights.append(one_weight)\n    unsorted_weights = weights[:]\n    sorted_weights = MergeSort(weights)\n    # Map each sorted weight back to its sequence (assumes the weights are distinct,\n    # since .index() always returns the first match).\n    for w in sorted_weights:\n        res.append(sequences[unsorted_weights.index(w)])\n    print(res)\n    return res\n\n\nsortby_molecular_weight(example_input_2_2)\n\n\n# SECTION 3 (15 points)\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 3.1: Maximal number of slicing windows.              ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\nexample_input_3_1 = dict(\n    nums=[1, 3, 0, 0, 5, 3, 6, 7],\n    k=3\n)\n\n\ndef maximal_number_of_slicing_windows(nums: List[int], k: int):\n    max_numbers = []\n    for i in range(len(nums)-k+1):\n        max_number = float('-inf')\n        # Use a separate name for the window values so the window index i is not shadowed\n        for value in nums[i: i+k]:\n            if value > max_number:\n                max_number = value\n        max_numbers.append(max_number)\n    print(max_numbers)\n    return max_numbers\n\n\nmaximal_number_of_slicing_windows(example_input_3_1['nums'], example_input_3_1['k'])\n\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 3.2: Wild Card Matching                              ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n#\n# example_input_3_2 = dict(\n#     s=\"TCATGTGATTGTAGGGGCTGTGTGGTCTGAAATCTGTGGGACAGGCCAGCAGGCTGGAAACTCAGGTAGGAGTTGATGCTGGGGGTTTTTCGTTTTGTTTGTTTAGTTTTGGTTTTGGTTTGGGGACTTTTGGAGACTGGGTCTCACTCCTGTCGCCCAGGCTAGAGTGCAGTGGGAGCAATCACAGCTCACTGCAGCCTTGACTTCCTGGGCTCAGGTGATTCTCCCACCTCAGCCTCCCGAGTAGCTGGGATTACAGGTGTGAGCCACCATGCTCGGCTATTTTTTTTTTTTGTATTTTTAGTAGAGACAGACTTTTTCCATATTGCCCAGGCTGGTCTCAAAACTTCCGAGCTCAAGCAATCTTCCCTCCTCGGCCTCCCAAAGTGCAGGGATTACAGGCATGAGCCACTGTGCCTG\",\n#     pattern=\"DCAWG\"\n# )\n#\n#\n# def wild_card_matching(s: str, pattern: str):\n#     # REMOVE THE FOLLOWING LINE AND WRITE YOUR CODE HERE\n#     raise NotImplementedError()\n#\n#\n# wild_card_matching(example_input_3_2['s'], example_input_3_2['pattern'])\n\n# SECTION 4 (20 points)\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 4.1 Lowest common ancestors of trie tree             ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\n# example_input_4_1 = dict(\n#     trie=['AT', 'CG', 'AC', 'GT', 'G', 'C', None, None, 'TA', 'C', None, 'G', None, None, None, None, 'GA', 'T', None,\n#           'T', None, None],\n#     node_1='ATG',\n#     node_2='ATC'\n# )\n#\n#\n# def lca_trie(trie: List[Optional[str]], node_1: str, node_2: str):\n#     # REMOVE THE FOLLOWING LINE AND WRITE YOUR CODE HERE\n#     raise NotImplementedError()\n#\n#\n# print(lca_trie(example_input_4_1['trie'], example_input_4_1['node_1'], example_input_4_1['node_2']))\n\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 4.2 Graph With A Cycle                               ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\nexample_input_4_2 = [\n    ('v1', 'v2'),\n    ('v2', 'v3'),\n    ('v3', 'v4'),\n    ('v3', 'v7'),\n    ('v4', 'v5'),\n    ('v5', 'v6'),\n    ('v7', 'v8'),\n    ('v8', 'v9'),\n    ('v9', 'v3')\n]\n\n\ndef Generate_graph(graph: dict, graph_line: tuple) -> dict:\n    start, end = graph_line
\n    if start not in graph:\n        graph[start] = [end]\n    else:\n        graph[start].append(end)\n    return graph\n\n\ndef graph_with_cycle(graph):\n    # Generate the graph\n    graph_dict = {}\n    for i in graph:\n        graph_dict = Generate_graph(graph_dict, i)\n    # print(graph_dict)\n    # Store the visited vertices\n    visit, cur_visit = set(), set()\n    for vertex in graph_dict:\n        if vertex not in visit:\n            if dfs(vertex, graph_dict, visit, cur_visit):\n                return True\n    return False\n\n\ndef dfs(vertex, graph_dict, visit, cur_visit):\n    cur_visit.add(vertex)\n    if vertex in graph_dict:\n        for neighbor in graph_dict[vertex]:\n            if neighbor in cur_visit:\n                # Back edge to a vertex on the current DFS path: a cycle is found.\n                # (Reaching a vertex already in `visit` only means it was fully explored\n                # earlier and is not a cycle by itself.)\n                return True\n            if neighbor not in visit:\n                if dfs(neighbor, graph_dict, visit, cur_visit):\n                    return True\n    cur_visit.remove(vertex)\n    visit.add(vertex)\n    # No cycle is found\n    return False\n\n\nprint(graph_with_cycle(example_input_4_2))\n\n# SECTION 5 (25 points)\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 5.1 Finding Substring                                ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\nexample_input_5_1 = dict(\n    matrix=[\n        ['A', 'C', 'C', 'G', 'T', 'C', 'T', 'T', 'A', 'T'],\n        ['C', 'C', 'C', 'A', 'G', 'T', 'A', 'G', 'G', 'T'],\n        ['T', 'G', 'G', 'G', 'A', 'T', 'G', 'G', 'G', 'C'],\n        ['C', 'A', 'T', 'C', 'G', 'C', 'A', 'G', 'T', 'G'],\n        ['C', 'C', 'T', 'T', 'T', 'T', 'C', 'T', 'G', 'C']\n    ],\n    substring=\"AGATGA\")\n\n# DFS: test whether the rest of the substring can be found in the matrix\ndef check_substring(matrix, i, j, substring):\n    if not substring:\n        return []\n\n    # Check the position below\n    if i < len(matrix)-1 and matrix[i+1][j] == substring[0]:\n        path = check_substring(matrix, i+1, j, substring[1:])\n        if path is not None:\n            return [(i+1, j)] + path\n\n    # Check the position to the right\n    if j < len(matrix[0])-1 and matrix[i][j+1] == substring[0]:\n        path = check_substring(matrix, i, j+1, substring[1:])\n        if path is not None:\n            return [(i, j+1)] + path\n\n    # The substring was not found\n    return None\n\n\ndef find_substring(matrix: List[List[str]], substring: str):\n    # m: rows, n: columns\n    m = len(matrix)\n    n = len(matrix[0])\n    has_path = False\n\n    for i in range(m):\n        for j in range(n):\n            # Test whether the start position matches\n            if matrix[i][j] == substring[0]:\n                path = check_substring(matrix, i, j, substring[1:])\n                if path is not None:\n                    has_path = True\n                    # Add the start position of the path\n                    path = [(i, j)] + path\n                    return has_path, path\n\n    # No path was found; return the same (flag, path) shape as the success case\n    return has_path, None\n\n\nprint(find_substring(example_input_5_1['matrix'], example_input_5_1['substring']))\n\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 5.2 Building De Bruijin Graph                        ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n#\n# example_input_5_2 = [\n#     'CTGTGCAACCGATGTGCTTA',\n#     'AAGCGTCCCCAGCCTGTATT'\n# ]\n#\n#\n# def build_de_bruijin_graph(sequences: List[str]):\n#     # REMOVE THE FOLLOWING LINE AND WRITE YOUR CODE HERE\n#     raise NotImplementedError()\n#\n#\n# print(build_de_bruijin_graph(example_input_5_2))\n\n# SECTION 6 (10 points)\n# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n# ┃ Question 6 Short Answer Question                              ┃\n# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\n# Use a specific example to illustrate how bioinformatics algorithms could be used\n# to solve a real-life biomedical problem. Note: you cannot use your ICA mini-project\n# as an example (10 points) [no more than 150 words]
\n\n# Answer:\n# One specific example of applying bioinformatics algorithms to a real-life biomedical problem is the analysis of genetic mutations in disease.\n# Specifically, if a patient with a genetic disease has had RNA-seq performed, we can use BWT-based algorithms to align the patient's sequencing reads\n# to the reference genome, which helps us identify the mutations.\n# Another example: in tumors, single-cell RNA-seq is an excellent approach for studying the tumor microenvironment.\n# However, it has very high dimensionality (a large number of cells and genes), which makes visualization and downstream analysis difficult.\n# We can first apply a linear dimensionality reduction such as PCA, then a non-linear dimensionality reduction such as t-SNE or UMAP,\n# and finally use clustering algorithms to visualize the cell populations.\n\n\n","repo_name":"jianzhang-lu/Undergraduate","sub_path":"ZJE_courses/6. BMI3/Workshop/L14 Exam/0049_answer.py","file_name":"0049_answer.py","file_ext":"py","file_size_in_byte":16196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
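+# [Editor's aside, not an original dataset record.] A quick self-check for the corrected cycle
+# detector in the exam answers above: an acyclic chain must give False, and the provided
+# example (v3 -> v7 -> v8 -> v9 -> v3) must give True. Runs against the names defined in that record.
+assert graph_with_cycle([('a', 'b'), ('b', 'c')]) is False
+assert graph_with_cycle(example_input_4_2) is True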
+{"seq_id":"73902281674","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Data visualization with Python/Matplotlib: 1D, 2D and 3D\n# \n# Example: simulation of Brownian motion in one, two and three dimensions. The time axis, as a further dimension, is always part of the visualization as well.\n# \n# The required modules Numpy and MatPlotLib are already included in most standard installations; the Scipy module has to be installed additionally. Installing Scipy with Anaconda (in the Anaconda Prompt (Windows) or Terminal (Linux or MacOS)):\n# \n# conda install scipy\n# \n\n# #### Import modules\n\n# In[1]:\n\n\n# Import Numpy\nimport numpy as np\n\n\n# In[2]:\n\n\n# Import Matplotlib\nimport matplotlib.pyplot as plt\n\n\n# In[3]:\n\n\n# Scipy: import the required functions\nfrom scipy.stats import norm\n\n\n# ## Simulating Brownian motion: basics\n# \n# Brownian motion: explanation at Wolfram Alpha: \n# https://www.wolframalpha.com/input?i=Brownian+motion\n# \n# Link to the SciPy Cookbook:\n# https://scipy-cookbook.readthedocs.io/items/BrownianMotion.html\n# \n# From Scipy the function scipy.stats.norm() is needed: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html\n# It is used to generate normally distributed random numbers.\n\n# The data can be generated more efficiently if the SciPy function is called only once per simulation. For this, the function brownian(x0, n, dt, delta, out=None) is defined. Parameters of the function:\n# \n# x0 = initial position\n# n = number of steps\n# dt = time step\n# delta = parameter for the speed of the Brownian motion\n# out = output array (generated if not specified otherwise)\n\n# In[4]:\n\n\n# Function computing the Brownian motion:\ndef brownian(x0, n, dt, delta, out=None):\n\n    x0 = np.asarray(x0)  # points on the x axis\n\n    # For every element of x0, draw n numbers from a normal distribution\n    r = norm.rvs(size=x0.shape + (n,), scale=delta*np.sqrt(dt))\n\n    # Create the output array (if not provided)\n    if out is None:\n        out = np.empty(r.shape)\n\n    # The Brownian motion is computed as the cumulative sum of the n samples\n    np.cumsum(r, axis=-1, out=out)\n\n    # Initial condition\n    out += np.expand_dims(x0, axis=-1)\n\n    return out\n\n\n# ## Brownian motion in 1D\n\n# The parameters of the simulation are defined first.\n\n# In[5]:\n\n\n# Define parameters\ndelta = 0.25  # parameter for the speed of the Brownian motion\nT = 10.0  # total time\nn = 500  # number of steps\ndt = T/n  # time step\nm = 10  # number of realizations (trajectories)\n\n\n# The arrays for the trajectories are initialized and then computed by calling the function brownian().\n\n# In[6]:\n\n\n# Initialize and compute the trajectories\nx = np.empty((m,n+1))  # initialize the array\n# Initial conditions for x\nx[:, 0] = 0.0\n# Compute the trajectories\nbrownian(x[:,0], n, dt, delta, out=x[:,1:])\n# Create the time axis\nt = np.linspace(0.0, n*dt, n+1)\n\n\n# The m realizations of the Brownian motion can now be visualized:\n\n# In[7]:\n\n\n# Visualize the trajectories\nfor k in range(m):\n    plt.plot(t, x[k])\nplt.xlabel('Time [arbitrary units]', fontsize=16)\nplt.ylabel('Position x [arbitrary units]', fontsize=16)\nplt.grid(True)\nplt.savefig('Brownian.png')\nplt.show()\n\n\n# ## Brownian motion in 2D\n# \n# Simulating Brownian motion in 2D requires two random numbers per time step (one for each dimension).\n\n# In[8]:\n\n\n# Define parameters\ndelta = 0.25  # parameter for the speed of the Brownian motion\nT = 10.0  # total time\nn = 200  # number of steps\ndt = T/n  # time step\nnD = 2  # number of dimensions\n\n\n# In[9]:\n\n\n# Initialize the array\nx = np.zeros((nD,n+1))\n\n\n# In[10]:\n\n\n# Compute the trajectory\nbrownian(x[:,0], n, dt, delta, out=x[:,1:])\n\n\n# #### Visualization in 2D\n# \n# The visualization shows the whole trajectory.
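\n\n# In[ ]:\n\n\n# Editor's aside (not in the original notebook): each increment above is drawn from N(0, delta**2*dt),\n# so after n steps the variance of the end points should be close to delta**2 * n * dt.\ncheck = brownian(np.zeros(10000), 100, 0.1, 0.25)\nprint('empirical variance:', check[:, -1].var(), ' expected:', 0.25**2 * 100 * 0.1)\n\n\n#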
 The time dimension is indicated by marking the start and end points.\n\n# In[11]:\n\n\n# Plot the trajectory in 2D\nplt.plot(x[0],x[1]) \n\n# Mark the start and end points\nplt.plot(x[0,0],x[1,0], 'go')  # start point green\nplt.plot(x[0,-1], x[1,-1], 'ro')  # end point red\n\n# Label the axes and the plot\nplt.title('2D Brownian motion')  # title\nplt.xlabel('x', fontsize=16)  # x-axis label\nplt.ylabel('y', fontsize=16)  # y-axis label\nplt.axis('equal')\nplt.grid(True)  # show a grid in the plot\nplt.legend([\"Trajectory\",\"Start point\",\"End point\"], fontsize=12)  # label the data sets\nplt.show()\n\n\n# ## Brownian motion in 3D\n# \n# Simulating Brownian motion in 3D requires three random numbers per time step (one for each dimension).\n\n# Some additional Matplotlib functions are needed for the 3D plotting:\n\n# In[12]:\n\n\nfrom pylab import rcParams\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\n\n\n# In[13]:\n\n\n# Define parameters\ndelta = 0.25  # parameter for the speed of the Brownian motion\nT = 10.0  # total time\nn = 100  # number of steps\ndt = T/n  # time step\nnD = 3  # number of dimensions\n\n\n# In[14]:\n\n\n# Initialize the array\nx = np.zeros((nD,n+1))\n\n\n# In[15]:\n\n\n# Compute the trajectory\nbrownian(x[:,0], n, dt, delta, out=x[:,1:])\nx.shape\nx[0]\n\n\n# #### Visualization of the Brownian motion in 3D\n\n# In[16]:\n\n\nrcParams['figure.figsize'] = 14, 14  # size of the plot\nplt.rcParams.update({'font.size': 10})  # size of the labels\n\nax = plt.figure().add_subplot(projection='3d')  # initialize the 3D plot\nxdata, ydata, zdata = x[:3,:]  # split the simulation data into 3 dimensions\n\n# Mark the start and end points\nax.plot(xdata[0], ydata[0], zdata[0],'go')  # start point green\nax.plot(xdata[n], ydata[n], zdata[n], 'ro')  # end point red\n\nax.set_xlabel('x axis', fontsize=15)  # x-axis label\nax.set_ylabel('y axis', fontsize=15)  # y-axis label\nax.set_zlabel(\"z axis\", fontsize=15, rotation=90)  # z-axis label\n\nax.plot3D(xdata, ydata, zdata)  # plot the data\nax.set_title('3D Brownian motion', fontsize=22)  # title of the plot\n\nax.legend([\"Start point\",\"End point\", \"Trajectory\"], fontsize=15)  # label the data\n\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ubnpl/Datenkompetenz_HS2023","sub_path":"01_Visualisierung_Basics/Brownian_motion_1D_2D_3D.py","file_name":"Brownian_motion_1D_2D_3D.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"23606646994","text":"import copy\r\n\r\nfrom lhhload import myDataset\r\nfrom torch.utils.data import DataLoader\r\n\r\n# encoding: utf-8\r\nimport sys\r\nimport argparse\r\nimport os\r\nimport shutil\r\nimport socket\r\nimport time\r\n\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.nn as nn\r\nimport torch.nn.init as init\r\nimport torch.nn.parallel\r\nimport torch.optim as optim\r\nimport torch.utils.data\r\nimport torchvision.utils as vutils\r\nfrom tensorboardX import SummaryWriter\r\nfrom torch.autograd import Variable\r\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\r\nfrom torch.utils.data import DataLoader\r\nimport cv2\r\nimport os\r\n# import utils.transformed as transforms\r\nfrom torchvision import transforms\r\n# from data.ImageFolderDataset import MyImageFolder\r\nfrom models.HidingUNet import UnetGenerator\r\nfrom models.RevealNet import 
RevealNet\r\nfrom torchvision.datasets import ImageFolder\r\nimport pdb\r\nimport math\r\nimport random\r\nimport numpy as np\r\nfrom skimage.metrics import structural_similarity as SSIM, peak_signal_noise_ratio as PSNR\r\nimport cv2\r\nimport PerceptualSimilarity.models\r\nfrom tqdm import tqdm\r\nfrom detection import *\r\nimport datetime\r\n\r\nnow_time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M')\r\n\r\n\r\ndef weights_init(m):\r\n classname = m.__class__.__name__\r\n if classname.find('Conv') != -1:\r\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')\r\n elif classname.find('BatchNorm') != -1:\r\n m.weight.data.fill_(1.0)\r\n m.bias.data.fill_(0)\r\n\r\n\r\ndef save_checkpoint(state, save_path):\r\n filename = os.path.join(save_path, 'best_loss_model.pth')\r\n torch.save(state, filename)\r\n\r\n\r\nHnet = UnetGenerator(input_nc=3, output_nc=3, num_downs=5, norm_layer=nn.BatchNorm2d,\r\n output_function=nn.Tanh)\r\nRnet = RevealNet(input_nc=3, output_nc=3, nhf=64,\r\n norm_layer=nn.BatchNorm2d, output_function=nn.Sigmoid)\r\n# Hnet.apply(weights_init)\r\n# Rnet.apply(weights_init)\r\n\r\nHnet = torch.nn.DataParallel(Hnet).cuda()\r\nRnet = torch.nn.DataParallel(Rnet).cuda()\r\n\r\ncheckpoint = torch.load(\"./training/main_udh/checkPoints/\" + \"checkpoint.pth.tar\")\r\nHnet.load_state_dict(checkpoint['H_state_dict'])\r\nRnet.load_state_dict(checkpoint['R_state_dict'], False)\r\ntrain_cover_dir = r'D:\\code\\DIPW-main\\DIPW\\dataset\\dataset_before_process\\cover'\r\ntrain_watermark_dir = r'D:\\code\\DIPW-main\\DIPW\\dataset\\dataset_before_process\\watermark'\r\ntamp_c_dir = r'D:\\code\\DIPW-main\\DIPW\\dataset\\trainingdata\\temp_container_mini'\r\ntamp_w_dir = r'D:\\code\\DIPW-main\\DIPW\\dataset\\trainingdata\\temp_watermark_mini'\r\nrev_dir = r'D:\\code\\DIPW-main\\DIPW\\dataset\\temp_rev'\r\n\r\nwidth = 128\r\nhigh = 128\r\nepoch = 100\r\nbatch_size = 20\r\nresize = True\r\ntrain_dataset = myDataset(train_cover_dir, train_watermark_dir, width, high, resize=resize)\r\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=0)\r\n\r\n# val_dataset = myDataset(train_cover_dir, train_watermark_dir, width, high, resize=False)\r\n# val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=0)\r\n\r\nsave_path = 'D:\\code\\DIPW-main\\DIPW\\ckpt\\checkpoint_' + now_time\r\n\r\nif not os.path.exists(save_path):\r\n os.makedirs(save_path)\r\noptimizer = optim.Adam(Rnet.parameters(), lr=0.001)\r\nL1_loss = nn.L1Loss().cuda()\r\nL2_loss = nn.MSELoss().cuda()\r\na = 0.75\r\nloss = 10000\r\n\r\npsnr_p = np.zeros((batch_size, 3))\r\npsnr_w = np.zeros((batch_size, 3))\r\nssim_p = np.zeros(batch_size)\r\nssim_w = np.zeros(batch_size)\r\npredictor = Predictor(\r\n model, exp, COCO_CLASSES, trt_file, decoder,\r\n args.device, args.fp16, args.legacy,\r\n)\r\n\r\n\r\nclass AverageMeter(object):\r\n \"\"\"\r\n Computes and stores the average and current value.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n\r\n# print(\"dataset generate\")\r\n# for patch, cover, watermark, box, cover_name, water_name in train_loader:\r\n# Hnet.eval()\r\n# patch = patch.cuda()\r\n# cover = cover.cuda()\r\n# watermark = watermark.cuda()\r\n# itm_secret_img = Hnet(watermark)\r\n# container_patch = 
itm_secret_img + patch\r\n#\r\n# container_patch_numpy = container_patch.clone().cpu().detach().numpy()\r\n# container_patch_numpy = container_patch_numpy.transpose(0, 2, 3, 1)\r\n#\r\n# cover_numpy = cover.clone().cpu().detach().numpy()\r\n# cover_numpy = cover_numpy.transpose(0, 2, 3, 1)\r\n#\r\n# watermark_numpy = watermark.clone().cpu().detach().numpy()\r\n# watermark_numpy = watermark_numpy.transpose(0, 2, 3, 1)\r\n#\r\n# cover_name_numpy = cover_name.clone().cpu().detach().numpy()\r\n#\r\n# water_name_numpy = water_name.clone().cpu().detach().numpy()\r\n#\r\n# N, _, _, _ = container_patch.shape\r\n# for B1 in range(batch_size):\r\n#     box_p = box[B1]\r\n#     cover_numpy[B1][box_p[0]:box_p[1], box_p[2]:box_p[3]] = container_patch_numpy[B1]\r\n#     watermark_show = cv2.cvtColor(watermark_numpy[B1], cv2.COLOR_RGB2BGR)\r\n#     container_patch_show = cv2.cvtColor(cover_numpy[B1], cv2.COLOR_RGB2BGR)\r\n#     # print('======save container======')\r\n#\r\n#     cover_name_str = '%012d' % cover_name_numpy[B1]\r\n#     water_name_str = '%012d' % water_name_numpy[B1]\r\n#     print(cover_name_str)\r\n#     cv2.imwrite(tamp_c_dir + \"//\" + cover_name_str + '.png', container_patch_show * 255)\r\n#     cv2.imwrite(tamp_w_dir + \"//\" + cover_name_str + '.png', watermark_show * 255)\r\n# print('======generation done======')\r\n\r\ntest_dataset = myDataset(tamp_c_dir, tamp_w_dir, width, high, resize=resize)\r\ntest_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=0)\r\nmin_loss_val = 10\r\nbest_model = None\r\nprint('======Rnet training======')\r\nfor i in range(epoch):\r\n    batch_time = AverageMeter()\r\n    data_time = AverageMeter()\r\n    Hlosses = AverageMeter()\r\n    Rlosses = AverageMeter()\r\n    SumLosses = AverageMeter()\r\n    Hdiff = AverageMeter()\r\n    Rdiff = AverageMeter()\r\n    Psnr = AverageMeter()\r\n\r\n    for patch_r, cover_r, watermark_r, box_r, cover_r_name, water_r_name in test_loader:\r\n        # cover_name_numpy = cover_r_name.clone().cpu().detach().numpy()\r\n        # cover_name_str = '%012d' % cover_name_numpy\r\n        # water_name_str = '%012d' % water_name_numpy\r\n        # print(cover_name_str)\r\n        Rnet.train()\r\n        optimizer.zero_grad()\r\n        patch_r = patch_r.cuda()\r\n        watermark_r = watermark_r.cuda()\r\n        rev_secret_img = Rnet(patch_r)\r\n        # L_1 = L1_loss(watermark_r, rev_secret_img)\r\n        L_2 = L2_loss(watermark_r, rev_secret_img)\r\n        L_R = L_2 * 100\r\n        L_R.backward()\r\n        optimizer.step()\r\n\r\n        diffR = (rev_secret_img - watermark_r).abs().mean() * 255\r\n        rev_secret_img_numpy = rev_secret_img.clone().cpu().detach().numpy()\r\n        rev_secret_img_numpy = rev_secret_img_numpy.transpose(0, 2, 3, 1)\r\n\r\n        watermark_r_numpy = watermark_r.clone().cpu().detach().numpy()\r\n        watermark_r_numpy = watermark_r_numpy.transpose(0, 2, 3, 1)\r\n\r\n        actual_batch = rev_secret_img_numpy.shape[0]  # the last batch may be smaller than batch_size\r\n        for i1 in range(actual_batch):\r\n            rev_secret_img_show = cv2.cvtColor(rev_secret_img_numpy[i1], cv2.COLOR_RGB2BGR)\r\n            # cv2.imwrite(os.path.join(rev_dir, str(i1) + '.jpg'), rev_secret_img_show * 255)\r\n            # cv2.imwrite(os.path.join(save_path, str(i1) + '.jpg'), rev_secret_numpy[i1]*255)\r\n            psnr_p[i1, 0] = PSNR(rev_secret_img_numpy[i1, :, :, 0], watermark_r_numpy[i1, :, :, 0])\r\n            psnr_p[i1, 1] = PSNR(rev_secret_img_numpy[i1, :, :, 1], watermark_r_numpy[i1, :, :, 1])\r\n            psnr_p[i1, 2] = PSNR(rev_secret_img_numpy[i1, :, :, 2], watermark_r_numpy[i1, :, :, 2])\r\n        # # print(\"Avg. PSNR P:\", psnr_p.mean().item())
PSNR P:\", psnr_p.mean().item())\r\n\r\n Rlosses.update(L_R.item(), batch_size) # R loss\r\n Psnr.update(psnr_p.mean().item(), batch_size)\r\n Rdiff.update(diffR.item(), batch_size)\r\n log = '[%d/%d]\\t Loss_R: %.6f ADP: %.4f PSNR: %.4f' % (\r\n i, epoch, Rlosses.val, Rdiff.val, Psnr.val)\r\n # if i % batch_size == 0:\r\n print(log)\r\n if Rlosses.val < min_loss_val:\r\n min_loss_val = Rlosses.val\r\n best_model = copy.deepcopy(Rnet)\r\n torch.save(Rnet, os.path.join(save_path, 'DIPW_' + str(i) + '_epoch.pth'))\r\ntorch.save(best_model, os.path.join(save_path, 'DIPW_best_loss.pth'))\r\n","repo_name":"1024yy/DIPW","sub_path":"DIPW_train.py","file_name":"DIPW_train.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"5179197589","text":"from javax.swing import JButton,JLabel,JTextField,JPanel,JFrame\nfrom java.awt import Color\n\nimport adminform as af\nimport teacherloginedstudentattendence as tlsa\nimport studentattendenceform as saf\n\nframe= None\ntfStudentNameChoice = None\nvalue = None\n\ndef getStudentName(check): \n global frame\n global tfStudentNameChoice\n global value\n \n value = check\n frame = JFrame(\"Student Name \")\n frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)\n frame.setSize(500,250)\n frame.setLocation(200,200)\n frame.setLayout(None)\n frame.setVisible(True)\n \n panel = JPanel()\n panel.setSize(500,250)\n panel.setLocation(0,0)\n panel.setLayout(None)\n panel.setVisible(True)\n panel.setBackground(Color.LIGHT_GRAY)\n \n heading = JLabel(\"Student Name\")\n heading.setBounds(200,30,150,40)\n \n lbStudentNameChoice = JLabel(\"Get Student Name\")\n tfStudentNameChoice = JTextField()\n \n lbStudentNameChoice.setBounds(50,70,150,30)\n tfStudentNameChoice.setBounds(220,70,150,30)\n \n btnEnter = JButton(\"Enter\",actionPerformed=clickStudentNameChoice)\n btnCancel = JButton(\"Cancel\",actionPerformed=clickBtnCancel)\n \n btnEnter.setBounds(350,150,100,30)\n btnCancel.setBounds(50,150,100,30)\n \n panel.add(heading)\n panel.add(lbStudentNameChoice)\n panel.add(tfStudentNameChoice)\n panel.add(btnEnter)\n panel.add(btnCancel)\n frame.add(panel)\n\ndef clickBtnCancel(event):\n global frame\n frame.dispose()\n\ndef clickStudentNameChoice(event):\n # call the method in admin form with course name in textfield\n global tfStudentNameChoice\n global frame\n global value\n \n studentName = tfStudentNameChoice.getText()\n frame.dispose()\n if(value == \"for student id password\"):\n af.takeStudentName(studentName) \n elif(value == \" get student name for specific student attendence in month\"):\n saf.takeStudentNameForSpecificStudentAttendenceInMonth(studentName)\n elif(value ==\"pay student fee\"):\n af.takeStudentNameForPaidFee(studentName)\n elif(value ==\"for teacher logined show specific student attendence in month\"):\n tlsa.takeStudentNameForStudentAttendenceInTeacherLogined(studentName)","repo_name":"chandansingh954/Jython-student-management-system","sub_path":"student management system/gui/getstudentname.py","file_name":"getstudentname.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"34996179624","text":"import torch\nimport numpy as np\nfrom skimage.metrics import peak_signal_noise_ratio\nfrom model.IRCNN import IRCNN\nimport cv2\nimport os\n# 生成去噪后的图像\ndef test(image, noise_image, net, DEVICE):\n net.eval()\n with torch.no_grad():\n output_x = 
\n\n    output_x = output_x.cpu().numpy()\n    GT = image.cpu().numpy()\n    batch, _, _, _ = GT.shape\n    psnr = peak_signal_noise_ratio(output_x[0, 0, :, :], GT[0, 0, :, :], data_range=1)\n    # print(psnr)\n    output_x = np.squeeze(output_x, axis=0)\n    output_x = np.squeeze(output_x, axis=0)\n    return output_x, psnr\n\nif __name__ == '__main__':\n    test_root = 'data/Set12/'\n    model_path = 'model/50/sigma50epoch145loss220.7874.pth'\n    save_img_path = 'experiment/Set12_50'\n    if not os.path.exists(save_img_path):\n        os.mkdir(save_img_path)\n    sigma = 50\n    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    print(f'Using device {DEVICE}')\n\n    net = IRCNN(1).to(DEVICE)\n    net.load_state_dict(torch.load(model_path))\n\n    names = os.listdir(test_root)\n    for name in names:\n        img_path = os.path.join(test_root, name)\n        image = cv2.imread(img_path, 0)\n        image = (image/255).astype('float32')\n        image = torch.from_numpy(image)\n        # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n        # add the batch and channel dimensions\n        noise = torch.randn(image.size()).mul_(sigma/255.0)\n        noise_image = image + noise\n        noise_img = noise_image.unsqueeze(0)\n        noise_image = noise_img.unsqueeze(0)\n        img = image.unsqueeze(0)\n        image = img.unsqueeze(0)\n        clean_img, psnr = test(image, noise_image, net, DEVICE)\n        print('{} PSNR = {:.2f}'.format(name, psnr))  # format string now matches its two arguments\n        # save the image\n        save_path = os.path.join(save_img_path, name)\n        cv2.imwrite(save_path, clean_img*255)","repo_name":"zsxpdsyz/DPPIR-python","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
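+# [Editor's aside, not an original dataset record.] The IRCNN test above uses residual learning:
+# the network predicts the noise, so the clean estimate is noisy - net(noisy). Sketched in plain
+# numpy, with a perfect noise prediction standing in for the network:
+import numpy as np
+
+rng = np.random.default_rng(0)
+clean = rng.random((8, 8))
+noise = rng.normal(0.0, 0.1, (8, 8))
+restored = (clean + noise) - noise  # residual subtraction, as in output_x above
+assert np.allclose(restored, clean)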
+{"seq_id":"41754479295","text":"from bpy.types import Panel\nimport bpy\n\ntry:\n    import blenderbim.tool as tool\n    import blenderbim.bim.helper\n    import blenderbim.bim.module.pset.data.Data\nexcept:\n    pass\nfinally:\n\n    class GU_PT_IFC_PCV(Panel):\n        bl_label = \"Point Cloud Visualizer\"\n        bl_space_type = \"PROPERTIES\"\n        bl_region_type = \"WINDOW\"\n        bl_context = \"object\"\n\n        @classmethod\n        def poll(cls, context):\n            if not context.active_object:\n                return False\n            return tool.Ifc.get() and blenderbim.bim.helper.get_obj_ifc_definition_id(\n                context, context.active_object.name, \"Object\"\n            )\n\n        def draw(self, context):\n            self.draw_store_update(self.layout)\n\n        @staticmethod\n        def draw_store_update(layout, obj=None):\n            row = layout.row(align=True)\n            if obj:\n                row.context_pointer_set(\"active_object\", obj)\n            row.operator(\"ifc.pcv_store_settings\", icon=\"IMPORT\")\n            row.operator(\"ifc.pcv_update_settings\", icon=\"EXPORT\")\n\n    def pcv_topbar_menu(self, context):\n        for obj in bpy.context.scene.objects:\n            element = tool.Ifc.get_entity(obj)\n            if not element:\n                continue\n            pset = tool.Pset.get_element_pset(element, \"PointCloudVisualizerProps\")\n            if not pset:\n                continue\n            box = self.layout.box()\n            box.label(text=element.Name)\n            row = box.row(align=True)\n            op = row.operator(\"gu.select_and_set_active\", text=\"\", icon=\"RESTRICT_SELECT_OFF\")\n            row.prop(obj, \"hide_viewport\", text=\"\", icon=\"HIDE_OFF\")\n            op.object_name = obj.name\n            GU_PT_IFC_PCV.draw_store_update(row, obj)\n            row.context_pointer_set(\"object\", obj)\n            row.context_pointer_set(\"active_object\", obj)  # Not necessary but keep it if pcv author changes their mind\n            row.operator(\"point_cloud_visualizer.mechanist_draw\")\n            for prop in pset.HasProperties:\n                if prop.Name == \"Clip Object Name\":\n                    value = prop.NominalValue.wrappedValue\n                    if value:\n                        op = row.operator(\"gu.select_and_set_active\", text=\"\", icon=\"SNAP_PEEL_OBJECT\")\n                        op.object_name = value\n                    break\n\n    def register():\n        bpy.types.PCV_PT_view3d_menu.append(pcv_topbar_menu)\n\n    def unregister():\n        try:\n            bpy.types.PCV_PT_view3d_menu.remove(pcv_topbar_menu)\n        except AttributeError:\n            pass\n","repo_name":"Gorgious56/gorgious_utilities","sub_path":"ifc/pcv/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"28"}
+{"seq_id":"30891468269","text":"\nclass Solution:\n    def mergeAlternately(self, word1: str, word2: str) -> str:\n        n, m = len(word1), len(word2)\n        arr = [None] * (m+n)\n        \n        i, j, index, first = 0, 0, 0, True\n        while i < n and j < m:\n            if first:\n                arr[index] = word1[i]\n                first = not first\n                i, index = i+1, index+1\n            else:\n                arr[index] = word2[j]\n                first = not first\n                j, index = j+1, index+1\n        \n        while i < n:\n            arr[index] = word1[i]\n            i, index = i+1, index+1\n        \n        while j < m:\n            arr[index] = word2[j]\n            j, index = j+1, index+1\n        \n        return ''.join(arr)\n        \n","repo_name":"rissh/Data-Structures","sub_path":"LeetCoding Challenge 2023/April 2023/Merge Strings Alternately.py","file_name":"Merge Strings Alternately.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"39780613785","text":"import json\nimport psycopg2\n\n\ndef row_creator(device, devices_type):\n    result = ()\n    result += (None, devices_type + str(device[\"name\"]))\n    # The ifs in this function could be collapsed into a single loop,\n    # but because of port_channel_id there would have to be more conditions,\n    # so I feel it's down to personal preference.\n    if \"description\" in device:\n        result += (device[\"description\"],)\n    else:\n        result += (None,)\n\n    result += (str(device), None, None)\n\n    if \"Cisco-IOS-XE-ethernet:channel-group\" in device:\n        result += (device[\"Cisco-IOS-XE-ethernet:channel-group\"][\"number\"],)\n    else:\n        result += (None,)\n\n    if \"mtu\" in device:\n        result += (device[\"mtu\"],)\n    else:\n        result += (None,)\n\n    return result\n\n# other types could be added here in the future\nacceptable_types = [\"Port-channel\", \"TenGigabitEthernet\", \"GigabitEthernet\"]\n\nwith open(\"configClear_v2.json\", \"r\") as file:\n    data = json.load(file)\n    # a correct dbname and password must be filled in for the code to work properly\n    conn = psycopg2.connect(\"dbname=yyy user=postgres password=XXX\")\n    cur = conn.cursor()\n    for devices_type in acceptable_types:\n        path = data[\"frinx-uniconfig-topology:configuration\"][\"Cisco-IOS-XE-native:native\"][\"interface\"][devices_type]\n        for device in path:\n            cur.execute(\"\"\"INSERT INTO jsontosql (connection, name, description, config_json,\n                        type, infra_type, port_channel_id, max_frame_size)\n                        VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\"\", (row_creator(device, devices_type)))\n    conn.commit()\n\ncur.close()\nconn.close()\n","repo_name":"MarSeljr/hw_repo","sub_path":"jsontosql.py","file_name":"jsontosql.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"21348184942","text":"import sys\nfrom PyQt5 import QtWidgets\n\n\nclass MyWindow(QtWidgets.QWidget):\n    def __init__(self):\n        super().__init__()\n        self.resize(600, 600)\n        self.setWindowTitle('QVBoxLayout')\n\n        layout = QtWidgets.QVBoxLayout()\n\n        btn1 = QtWidgets.QPushButton('one')\n        btn2 = 
QtWidgets.QPushButton('two')\n btn3 = QtWidgets.QPushButton('three')\n\n layout.addStretch(1)\n layout.addWidget(btn1)\n layout.addWidget(btn2)\n layout.addWidget(btn3)\n layout.addStretch(2)\n\n self.setLayout(layout)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n\n w = MyWindow()\n w.show()\n\n app.exec_()\n","repo_name":"Nostalgic7312/TwitterPyQt","sub_path":"Train/06_BoxLayout.py","file_name":"06_BoxLayout.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"74085141195","text":"from django.db.models import Max\nfrom django.utils import timezone\nimport logging\nfrom wamtram.models import (\n TrtPlaces,\n TrtPersons,\n TrtEntryBatches,\n TrtTurtles,\n TrtObservations,\n TrtTagOrders,\n TrtTags,\n TrtPitTags,\n TrtMeasurementTypes,\n TrtMeasurements,\n TrtDamage,\n TrtRecordedTags,\n TrtRecordedPitTags,\n TrtSamples,\n TrtIdentification,\n)\nfrom users.models import User\nfrom .models import (\n TurtleSpecies,\n Location,\n Place,\n EntryBatch,\n Turtle,\n TurtleObservation,\n TagOrder,\n TurtleTag,\n TurtlePitTag,\n MeasurementType,\n TurtleMeasurement,\n TurtleDamage,\n TurtleTagObservation,\n TurtlePitTagObservation,\n TurtleSample,\n TurtleIdentification,\n)\n\n\nLOGGER = logging.getLogger(\"turtles\")\nTRT_SPECIES_MAP = {\n 'FB': {'scientific_name': 'Natator depressus', 'common_name': 'Flatback Turtle', 'old_species_code': 'F', 'hide_dataentry': False},\n 'GN': {'scientific_name': 'Chelonia mydas', 'common_name': 'Green Turtle', 'old_species_code': 'G', 'hide_dataentry': False},\n 'HK': {'scientific_name': 'Eretmochelys imbricata', 'common_name': 'Hawksbill Turtle', 'old_species_code': 'H', 'hide_dataentry': False},\n 'LB': {'scientific_name': 'Dermochelys coriacea', 'common_name': 'Leatherback Turtle', 'old_species_code': 'LB', 'hide_dataentry': False},\n 'LO': {'scientific_name': 'Caretta caretta', 'common_name': 'Loggerhead Turtle', 'old_species_code': 'LO', 'hide_dataentry': False},\n 'OR': {'scientific_name': 'Lepidochelys olivacea', 'common_name': 'Olive Ridley Turtle', 'old_species_code': 'OR', 'hide_dataentry': False},\n}\nTRT_LOCATION_MAP = {\n 'AI': {'name': 'Airlie Island'},\n 'AR': {'name': 'Ashmore Reef area'},\n 'BR': {'name': 'Browse Island'},\n 'BW': {'name': 'Barrow Island'},\n 'BZ': {'name': 'Brazil coastal'},\n 'CD': {'name': 'Cape Domett'},\n 'CL': {'name': 'Cape Lambert'},\n 'DA': {'name': 'Dampier Archipelago area'},\n 'DH': {'name': 'Dirk Hartog Island'},\n 'DI': {'name': 'Dorre Island'},\n 'EG': {'name': 'Exmouth Gulf area'},\n 'EI': {'name': 'Eastern Indian Ocean region'},\n 'EM': {'name': 'Eighty Mile Beach - WA'},\n 'GA': {'name': 'Gascoyne coastal - Not Ningaloo MP'},\n 'GC': {'name': 'Gulf of Carpentaria area'},\n 'IN': {'name': 'Indonesian territory'},\n 'IR': {'name': 'Imperieuse Reef - Rowley Shoals'},\n 'KS': {'name': 'King Sound area'},\n 'LA': {'name': 'Lacepede Islands'},\n 'LO': {'name': 'Lowendal Islands area'},\n 'MB': {'name': 'Monte Bello Islands'},\n 'MI': {'name': 'Montgomery Islands (Yawajaba: Yawijibaya people)'},\n 'MN': {'name': 'Mundabullangana coast'},\n 'MU': {'name': 'Muiron Islands'},\n 'NI': {'name': 'Ningaloo MP coastal'},\n 'NK': {'name': 'North Kimberley coastal'},\n 'NT': {'name': 'Northern Territory coastal'},\n 'NW': {'name': 'North West Cape area'},\n 'PB': {'name': 'Pilbara offshore & coastal area'},\n 'PE': {'name': 'Perth area'},\n 'PH': {'name': 'Port Hedland coastal'},\n 'QD': {'name': 
'Queensland'},\n 'RI': {'name': 'Rosemary Island - Dampier Archipelago'},\n 'SB': {'name': 'Shark Bay area'},\n 'SC': {'name': 'Southern WA coastal'},\n 'SE': {'name': 'S-Eastern WA coastal'},\n 'SR': {'name': 'Scott Reef'},\n 'SW': {'name': 'S-Western WA coastal'},\n 'TH': {'name': 'Thevenard Island'},\n 'VA': {'name': 'Varanus Island - Lowendals'},\n 'WC': {'name': 'Mid-Western WA coastal'},\n 'WK': {'name': 'West Kimberley coastal'},\n 'XX': {'name': 'Not otherwise assigned'},\n}\n\n\ndef import_wamtram(reload=False):\n \"\"\"Utility function to import/convert data from wamtram (SQL Server) to turtle_data (local).\n The function is idempotent, and may be run multiple times safely without creating duplicate data.\n\n If `reload` is False, some existing records will be skipped (those having the PK brought across).\n \"\"\"\n admin = User.objects.get(pk=1)\n\n LOGGER.info(\"Importing species\")\n TurtleSpecies.objects.get_or_create(\n scientific_name='Unknown', common_name='Not recorded - uncertain', old_species_code=None, hide_dataentry=False\n )\n for sp in TRT_SPECIES_MAP.values():\n TurtleSpecies.objects.get_or_create(**sp)\n LOGGER.info(f\"TurtleSpecies object count: {TurtleSpecies.objects.count()}\")\n\n LOGGER.info(\"Importing locations\")\n for loc in TRT_LOCATION_MAP.values():\n Location.objects.get_or_create(**loc)\n LOGGER.info(f\"Location object count: {Location.objects.count()}\")\n\n LOGGER.info(\"Importing places\")\n for pl in TrtPlaces.objects.all():\n if not Place.objects.filter(name=pl.place_name).exists():\n # First, match the newly-created Location obj.\n location = Location.objects.get(name=pl.location_code.location_name)\n Place.objects.get_or_create(\n location=location,\n name=pl.place_name,\n rookery=True if pl.rookery == \"Y\" else False if pl.rookery == \"N\" else None,\n beach_approach=pl.beach_approach,\n aspect=pl.aspect,\n point=pl.get_point(),\n comments=pl.comments,\n )\n LOGGER.info(f\"Place object count: {Place.objects.count()}\")\n\n LOGGER.info(\"Importing measurement types\")\n for t in TrtMeasurementTypes.objects.all():\n MeasurementType.objects.get_or_create(\n short_desc=t.measurement_type,\n description=t.description,\n unit=t.measurement_units,\n minimum_value=t.minimum_value,\n maximum_value=t.maximum_value,\n comments=t.comments,\n )\n LOGGER.info(f\"MeasurementType object count: {MeasurementType.objects.count()}\")\n\n LOGGER.info(\"Importing persons\")\n for p in TrtPersons.objects.all():\n name = p.get_name()\n if not User.objects.filter(name__iexact=name.lower(), is_active=True).exists():\n User.objects.create(\n username=p.email if p.email else \"_\".join([p.first_name, p.surname if p.surname else \"\"]),\n first_name=p.first_name,\n last_name=p.surname if p.surname else \"\",\n email=p.email if p.email else \"\",\n name=name,\n phone=p.mobile if p.mobile else p.telephone,\n role=\" \".join([p.specialty if p.specialty else \"\", p.comments if p.comments else \"\"]).strip(),\n )\n elif User.objects.filter(name__iexact=name.lower(), is_active=True).count() > 1:\n LOGGER.info(f\"POSSIBLE DUPLICATE USER: {p}\")\n LOGGER.info(f\"User object count: {User.objects.count()}\")\n\n LOGGER.info(\"Importing entry batches\")\n for b in TrtEntryBatches.objects.all():\n if b.entered_person_id:\n try:\n person = TrtPersons.objects.get(person_id=b.entered_person_id)\n name = f\"{person.first_name} {person.surname}\".strip()\n user = User.objects.get(name__iexact=name.lower(), is_active=True)\n except:\n user = admin\n else:\n user = admin\n\n if 
EntryBatch.objects.filter(pk=b.entry_batch_id).exists():\n if reload:\n eb = EntryBatch.objects.get(pk=b.entry_batch_id)\n eb.entry_date = b.entry_date.date() if b.entry_date else None\n eb.entered_by = user\n eb.filename = b.filename\n eb.comments = b.comments\n eb.pr_date_convention = b.pr_date_convention\n eb.save()\n else:\n continue\n else:\n EntryBatch.objects.get_or_create(\n pk=b.entry_batch_id,\n entry_date=b.entry_date.date() if b.entry_date else None,\n entered_by=user,\n filename=b.filename,\n comments=b.comments,\n pr_date_convention=b.pr_date_convention,\n )\n LOGGER.info(f\"EntryBatch object count: {EntryBatch.objects.count()}\")\n\n LOGGER.info(\"Importing tag orders\")\n for o in TrtTagOrders.objects.all():\n if TagOrder.objects.filter(pk=o.tag_order_id).exists():\n if reload:\n to = TagOrder.objects.get(pk=o.tag_order_id)\n to.order_number = o.order_number\n to.order_date = o.order_date.date() if o.order_date else None\n to.tag_prefix = o.tag_prefix\n to.start_tag_number = o.start_tag_number\n to.end_tag_number = o.end_tag_number\n to.total_tags = o.total_tags\n to.date_received = o.date_received.date() if o.date_received else None\n to.paid_by = o.paid_by\n to.comments = o.comments\n to.save()\n else:\n continue\n else:\n TagOrder.objects.get_or_create(\n pk=o.tag_order_id,\n order_number=o.order_number,\n order_date=o.order_date.date() if o.order_date else None,\n tag_prefix=o.tag_prefix,\n start_tag_number=o.start_tag_number,\n end_tag_number=o.end_tag_number,\n total_tags=o.total_tags,\n date_received=o.date_received.date() if o.date_received else None,\n paid_by=o.paid_by,\n comments=o.comments,\n )\n LOGGER.info(f\"TagOrder object count: {TagOrder.objects.count()}\")\n\n LOGGER.info(\"Importing turtles\")\n count = 0\n bobp = User.objects.get(username='bobp')\n turtle_ids = TrtTurtles.objects.values_list('turtle_id', flat=True)\n\n for id in turtle_ids:\n # Fast-path skip existing records, no reload.\n if Turtle.objects.filter(pk=id).exists() and not reload:\n continue\n else:\n t = TrtTurtles.objects.get(turtle_id=id)\n if t.species_code_id == \"?\":\n species = TurtleSpecies.objects.get(scientific_name='Unknown')\n elif t.species_code_id == \"0\":\n species = None\n else:\n species = TurtleSpecies.objects.get(scientific_name=t.species_code.scientific_name)\n if t.location_code:\n location = Location.objects.get(name=TRT_LOCATION_MAP[t.location_code_id]['name'])\n else:\n location = None\n if t.entered_by == 'bobp':\n entered_by = bobp\n else:\n entered_by = admin\n if t.sex == 'I':\n sex = 'U' # Unknown\n else:\n sex = t.sex\n\n if Turtle.objects.filter(pk=t.turtle_id).exists():\n if reload:\n tu = Turtle.objects.get(pk=t.turtle_id)\n tu.created = t.date_entered if t.date_entered else timezone.now()\n tu.entered_by = entered_by\n tu.species = species\n tu.sex = sex\n tu.status = t.turtle_status.turtle_status if t.turtle_status else None\n tu.name = t.turtle_name\n tu.location = location\n tu.cause_of_death = t.cause_of_death_id if t.cause_of_death else None\n tu.re_entered_population = t.re_entered_population\n tu.comments = t.comments\n tu.original_turtle_id = t.original_turtle_id\n tu.entry_batch = EntryBatch.objects.get(pk=t.entry_batch_id) if t.entry_batch_id else None\n tu.mund_id = t.mund_id\n tu.identification_confidence = t.identification_confidence\n tu.save()\n else:\n continue\n else:\n tu = Turtle.objects.get_or_create(\n pk=t.turtle_id,\n created=t.date_entered if t.date_entered else timezone.now(),\n entered_by=entered_by,\n species=species,\n 
status=t.turtle_status.turtle_status if t.turtle_status else None,\n name=t.turtle_name,\n location=location,\n cause_of_death=t.cause_of_death_id if t.cause_of_death else None,\n re_entered_population=t.re_entered_population,\n comments=t.comments,\n original_turtle_id=t.original_turtle_id,\n entry_batch=EntryBatch.objects.get(pk=t.entry_batch_id) if t.entry_batch_id else None,\n mund_id=t.mund_id,\n identification_confidence=t.identification_confidence,\n sex=sex,\n )[0]\n\n for ti in TrtIdentification.objects.filter(turtle=t):\n TurtleIdentification.objects.get_or_create(\n turtle_id=tu.pk,\n identification_type=ti.identification_type.identification_type,\n identifier=ti.identifier,\n comments=ti.comments,\n )\n\n count += 1\n if count % 1000 == 0:\n LOGGER.info(f\"{count} imported\")\n LOGGER.info(f\"Turtle object count: {Turtle.objects.count()}\")\n LOGGER.info(f\"TurtleIdentification object count: {TurtleIdentification.objects.count()}\")\n\n LOGGER.info(\"Importing tags\")\n count = 0\n tag_serials = TrtTags.objects.values_list('tag_id', flat=True)\n tag_serials = [(t.replace(\" \", \"\").strip(), t) for t in tag_serials]\n\n for serials in tag_serials:\n # Fast-path skip existing records, no reload.\n if TurtleTag.objects.filter(serial=serials[0]).exists() and not reload:\n continue\n else:\n t = TrtTags.objects.get(tag_id=serials[1])\n\n if t.custodian_person_id and TrtPersons.objects.filter(person_id=t.custodian_person_id).exists():\n person = TrtPersons.objects.get(person_id=t.custodian_person_id)\n custodian = User.objects.get(name__iexact=person.get_name(), is_active=True)\n else:\n custodian = None\n if t.field_person_id and TrtPersons.objects.filter(person_id=t.field_person_id).exists():\n person = TrtPersons.objects.get(person_id=t.field_person_id)\n field_person = User.objects.get(name__iexact=person.get_name(), is_active=True)\n else:\n field_person = None\n serial = serials[0]\n\n if TurtleTag.objects.filter(serial=serial).exists():\n if reload:\n tag = TurtleTag.objects.get(serial=serial)\n tag.turtle_id = t.turtle_id\n tag.issue_location = t.issue_location\n tag.custodian = custodian\n tag.side = t.side\n tag.status = t.tag_status.tag_status\n tag.return_date = t.return_date.date() if t.return_date else None\n tag.return_condition = t.return_condition\n tag.comments = t.comments\n tag.field_person = field_person\n tag.tag_order_id = t.tag_order_id if TagOrder.objects.filter(pk=t.tag_order_id).exists() else None\n tag.save()\n else:\n continue\n else:\n TurtleTag.objects.get_or_create(\n serial=serial,\n turtle_id=t.turtle_id,\n issue_location=t.issue_location,\n custodian=custodian,\n side=t.side,\n status=t.tag_status.tag_status,\n return_date=t.return_date.date() if t.return_date else None,\n return_condition=t.return_condition,\n comments=t.comments,\n field_person=field_person,\n tag_order_id=t.tag_order_id if TagOrder.objects.filter(pk=t.tag_order_id).exists() else None,\n )\n count += 1\n if count % 1000 == 0:\n LOGGER.info(f\"{count} imported\")\n LOGGER.info(f\"TurtleTag object count: {TurtleTag.objects.count()}\")\n\n LOGGER.info(\"Importing pit tags\")\n count = 0\n tag_serials = TrtPitTags.objects.values_list('pit_tag_id', flat=True)\n tag_serials = [(t.replace(\" \", \"\").strip(), t) for t in tag_serials]\n\n for serials in tag_serials:\n # Fast-path skip existing records, no reload.\n if TurtlePitTag.objects.filter(serial=serials[0]).exists() and not reload:\n continue\n else:\n t = TrtPitTags.objects.get(pit_tag_id=serials[1])\n if t.custodian_person_id 
and TrtPersons.objects.filter(person_id=t.custodian_person_id).exists():\n person = TrtPersons.objects.get(person_id=t.custodian_person_id)\n custodian = User.objects.get(name__iexact=person.get_name(), is_active=True)\n else:\n custodian = None\n if t.field_person_id and TrtPersons.objects.filter(person_id=t.field_person_id).exists():\n person = TrtPersons.objects.get(person_id=t.field_person_id)\n field_person = User.objects.get(name__iexact=person.get_name(), is_active=True)\n else:\n field_person = None\n serial = serials[0]\n\n if TurtlePitTag.objects.filter(serial=serial).exists():\n if reload:\n tag = TurtlePitTag.objects.get(serial=serial)\n tag.turtle_id = t.turtle_id\n tag.issue_location = t.issue_location\n tag.custodian = custodian\n tag.status = t.pit_tag_status\n tag.return_date = t.return_date.date() if t.return_date else None\n tag.return_condition = t.return_condition\n tag.comments = t.comments\n tag.field_person = field_person\n tag.tag_order_id = t.tag_order_id if TagOrder.objects.filter(pk=t.tag_order_id).exists() else None\n tag.batch_number = t.batch_number\n tag.box_number = t.box_number\n tag.save()\n else:\n continue\n else:\n TurtlePitTag.objects.get_or_create(\n serial=serial,\n turtle_id=t.turtle_id,\n issue_location=t.issue_location,\n custodian=custodian,\n status=t.pit_tag_status,\n return_date=t.return_date.date() if t.return_date else None,\n return_condition=t.return_condition,\n comments=t.comments,\n field_person=field_person,\n tag_order_id=t.tag_order_id if TagOrder.objects.filter(pk=t.tag_order_id).exists() else None,\n batch_number=t.batch_number,\n box_number=t.box_number,\n )\n count += 1\n if count % 1000 == 0:\n LOGGER.info(f\"{count} imported\")\n LOGGER.info(f\"TurtlePitTag object count: {TurtlePitTag.objects.count()}\")\n\n LOGGER.info(\"Importing observations\")\n count = 0\n turtle_observation_ids = TrtObservations.objects.values_list('observation_id', flat=True)\n\n for id in turtle_observation_ids:\n # Fast-path skip existing records, no reload.\n if TurtleObservation.objects.filter(pk=id).exists() and not reload:\n continue\n else:\n obs = TrtObservations.objects.get(observation_id=id)\n if obs.measurer_person:\n measurer = User.objects.get(name__iexact=obs.measurer_person.get_name(), is_active=True)\n else:\n measurer = None\n if obs.measurer_reporter_person:\n measurer_reporter = User.objects.get(name__iexact=obs.measurer_reporter_person.get_name(), is_active=True)\n else:\n measurer_reporter = None\n if obs.tagger_person:\n tagger = User.objects.get(name__iexact=obs.tagger_person.get_name(), is_active=True)\n else:\n tagger = None\n if obs.reporter_person:\n tagger_reporter = User.objects.get(name__iexact=obs.reporter_person.get_name(), is_active=True)\n else:\n tagger_reporter = None\n if obs.place_code and Place.objects.filter(name=obs.place_code.place_name).count() == 1:\n place = Place.objects.get(name=obs.place_code.place_name)\n else:\n place = None # There are a couple of places with identical names but different locations.\n if obs.entered_by_person:\n entered_by = User.objects.get(name__iexact=obs.entered_by_person.get_name(), is_active=True)\n else:\n entered_by = admin\n if obs.clutch_completed and obs.clutch_completed == 'y':\n clutch_completed = 'Y'\n elif obs.clutch_completed and obs.clutch_completed == 'n':\n clutch_completed = 'U'\n else:\n clutch_completed = 'U'\n\n if TurtleObservation.objects.filter(pk=obs.observation_id).exists():\n if reload:\n o = TurtleObservation.objects.get(pk=obs.observation_id)\n o.created = 
obs.date_entered if obs.date_entered else timezone.now()\n o.entered_by = entered_by\n o.turtle_id = obs.turtle_id\n o.observed = obs.get_observation_datetime_utc()\n o.observation_date_old = obs.observation_date_old.date() if obs.observation_date_old else None\n o.alive = True if obs.alive == \"Y\" else False if obs.alive == \"N\" else None\n o.measurer = measurer\n o.measurer_reporter = measurer_reporter\n o.tagger = tagger\n o.tagger_reporter = tagger_reporter\n o.place = place\n o.place_description = obs.place_description\n o.point = obs.get_point()\n o.activity = obs.activity_code.activity_code if obs.activity_code else None\n o.beach_position = obs.beach_position_code.beach_position_code if obs.beach_position_code else None\n o.condition = obs.condition_code.condition_code if obs.condition_code else None\n o.nesting = True if obs.nesting == \"Y\" else False if obs.nesting == \"N\" else None\n o.clutch_completed = clutch_completed\n o.number_of_eggs = obs.number_of_eggs\n o.egg_count_method = obs.egg_count_method.egg_count_method if obs.egg_count_method else None\n o.action_taken = obs.action_taken\n o.comments = obs.comments\n o.original_observation_id = obs.original_observation_id\n o.entry_batch = EntryBatch.objects.get(pk=obs.entry_batch_id) if obs.entry_batch else None\n o.comment_fromrecordedtagstable = obs.comment_fromrecordedtagstable\n o.scars_left = obs.scars_left\n o.scars_right = obs.scars_right\n o.transferid = obs.transferid\n o.mund = obs.mund\n o.scars_left_scale_1 = obs.scars_left_scale_1\n o.scars_left_scale_2 = obs.scars_left_scale_2\n o.scars_left_scale_3 = obs.scars_left_scale_3\n o.scars_right_scale_1 = obs.scars_right_scale_1\n o.scars_right_scale_2 = obs.scars_right_scale_2\n o.scars_right_scale_3 = obs.scars_right_scale_3\n o.cc_length_not_measured = obs.cc_length_not_measured\n o.cc_notch_length_not_measured = obs.cc_notch_length_not_measured\n o.cc_width_not_measured = obs.cc_width_not_measured\n o.tagscarnotchecked = obs.tagscarnotchecked\n o.didnotcheckforinjury = obs.didnotcheckforinjury\n o.date_convention = obs.date_convention\n o.status = obs.observation_status\n o.corrected_date = obs.corrected_date.date() if obs.corrected_date else None\n o.curation_status = TurtleObservation.CURATION_STATUS_IMPORTED\n o.save()\n else:\n continue\n else:\n o = TurtleObservation.objects.create(\n pk=obs.observation_id,\n created=obs.date_entered if obs.date_entered else timezone.now(),\n entered_by=entered_by,\n turtle_id=obs.turtle_id,\n observed=obs.get_observation_datetime_utc(),\n observation_date_old=obs.observation_date_old.date() if obs.observation_date_old else None,\n alive=True if obs.alive == \"Y\" else False if obs.alive == \"N\" else None,\n measurer=measurer,\n measurer_reporter=measurer_reporter,\n tagger=tagger,\n tagger_reporter=tagger_reporter,\n place=place,\n place_description=obs.place_description,\n point=obs.get_point(),\n activity=obs.activity_code.activity_code if obs.activity_code else None,\n beach_position=obs.beach_position_code.beach_position_code if obs.beach_position_code else None,\n condition=obs.condition_code.condition_code if obs.condition_code else None,\n nesting=True if obs.nesting == \"Y\" else False if obs.nesting == \"N\" else None,\n clutch_completed=clutch_completed,\n number_of_eggs=obs.number_of_eggs,\n egg_count_method=obs.egg_count_method.egg_count_method if obs.egg_count_method else None,\n action_taken=obs.action_taken,\n comments=obs.comments,\n original_observation_id=obs.original_observation_id,\n 
entry_batch=EntryBatch.objects.get(pk=obs.entry_batch_id) if obs.entry_batch else None,\n comment_fromrecordedtagstable=obs.comment_fromrecordedtagstable,\n scars_left=obs.scars_left,\n scars_right=obs.scars_right,\n transferid=obs.transferid,\n mund=obs.mund,\n scars_left_scale_1=obs.scars_left_scale_1,\n scars_left_scale_2=obs.scars_left_scale_2,\n scars_left_scale_3=obs.scars_left_scale_3,\n scars_right_scale_1=obs.scars_right_scale_1,\n scars_right_scale_2=obs.scars_right_scale_2,\n scars_right_scale_3=obs.scars_right_scale_3,\n cc_length_not_measured=obs.cc_length_not_measured,\n cc_notch_length_not_measured=obs.cc_notch_length_not_measured,\n cc_width_not_measured=obs.cc_width_not_measured,\n tagscarnotchecked=obs.tagscarnotchecked,\n didnotcheckforinjury=obs.didnotcheckforinjury,\n date_convention=obs.date_convention,\n status=obs.observation_status,\n corrected_date=obs.corrected_date.date() if obs.corrected_date else None,\n curation_status=TurtleObservation.CURATION_STATUS_IMPORTED,\n )\n\n for m in TrtMeasurements.objects.filter(observation=obs):\n mtype = MeasurementType.objects.get(short_desc=m.measurement_type.measurement_type)\n try:\n TurtleMeasurement.objects.get_or_create(\n observation=o,\n measurement_type=mtype,\n value=m.measurement_value,\n comments=m.comments,\n )\n except Exception:\n pass # Ignore records that fail to import and continue with the next one.\n\n for d in TrtDamage.objects.filter(observation=obs):\n try:\n TurtleDamage.objects.get_or_create(\n observation=o,\n body_part=d.body_part_id,\n damage=d.damage_code_id,\n cause=d.damage_cause_code_id,\n comments=d.comments,\n )\n except Exception:\n pass # Ignore records that fail to import and continue with the next one.\n\n for t in TrtRecordedTags.objects.filter(observation_id=obs.pk):\n try:\n tag = TurtleTag.objects.get(serial=t.tag_id)\n TurtleTagObservation.objects.get_or_create(\n tag=tag,\n observation=o,\n status=t.tag_state,\n position=t.tag_position,\n barnacles=t.barnacles,\n comments=t.comments,\n )\n except Exception:\n pass # Ignore records that fail to import and continue with the next one.\n\n for t in TrtRecordedPitTags.objects.filter(observation_id=obs.pk):\n try:\n pit_tag = TurtlePitTag.objects.get(serial=t.pit_tag_id)\n TurtlePitTagObservation.objects.get_or_create(\n tag=pit_tag,\n observation=o,\n status=t.pit_tag_state.pit_tag_state,\n position=t.pit_tag_position,\n checked=t.checked,\n comments=t.comments,\n )\n except Exception:\n pass # Ignore records that fail to import and continue with the next one.\n\n for t in TrtSamples.objects.filter(observation_id=obs.pk):\n try:\n TurtleSample.objects.get_or_create(\n observation=o,\n tissue_type=t.tissue_type.tissue_type,\n label=t.sample_label,\n sample_date=t.sample_date.date() if t.sample_date else None,\n arsenic=t.arsenic,\n selenium=t.selenium,\n zinc=t.zinc,\n cadmium=t.cadmium,\n copper=t.copper,\n lead=t.lead,\n mercury=t.mercury,\n comments=t.comments,\n )\n except Exception:\n pass # Ignore records that fail to import and continue with the next one.\n\n count += 1\n if count % 1000 == 0:\n LOGGER.info(f\"{count} imported\")\n\n LOGGER.info(f\"TurtleObservation object count: {TurtleObservation.objects.count()}\")\n LOGGER.info(f\"TurtleMeasurement object count: {TurtleMeasurement.objects.count()}\")\n LOGGER.info(f\"TurtleDamage object count: {TurtleDamage.objects.count()}\")\n LOGGER.info(f\"TurtleTagObservation object count: {TurtleTagObservation.objects.count()}\")\n LOGGER.info(f\"TurtlePitTagObservation object count: {TurtlePitTagObservation.objects.count()}\")\n LOGGER.info(f\"TurtleSample object count: {TurtleSample.objects.count()}\")\n\n LOGGER.info(\"Complete\")\n LOGGER.info(\"Set sequence values for: EntryBatch, TagOrder, Turtle, TurtleObservation\")\n entrybatch_id_max = 
EntryBatch.objects.aggregate(Max('pk'))['pk__max']\n LOGGER.info(f\"SELECT setval('tagging_entrybatch_id_seq', {entrybatch_id_max}, true);\")\n tagorder_id_max = TagOrder.objects.aggregate(Max('pk'))['pk__max']\n LOGGER.info(f\"SELECT setval('tagging_tagorder_id_seq', {tagorder_id_max}, true);\")\n turtle_id_max = Turtle.objects.aggregate(Max('pk'))['pk__max']\n LOGGER.info(f\"SELECT setval('tagging_turtle_id_seq', {turtle_id_max}, true);\")\n turtleobservation_id_max = TurtleObservation.objects.aggregate(Max('pk'))['pk__max']\n LOGGER.info(f\"SELECT setval('tagging_turtleobservation_id_seq', {turtleobservation_id_max}, true);\")\n ident_id_max = TurtleIdentification.objects.aggregate(Max('pk'))['pk__max']\n LOGGER.info(f\"SELECT setval('tagging_turtleidentification_id_seq', {ident_id_max}, true);\")\n","repo_name":"dbca-wa/wastd","sub_path":"tagging/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":32129,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"3483800879","text":"import cv2\nfrom cvzone.HandTrackingModule import HandDetector\n\n# Hand tracking module derived from https://www.computervision.zone/courses/multiple-hand-gesture-control/\n\nclass HandTracking:\n def __init__(self):\n self.cap = cv2.VideoCapture(0)\n self.detector = HandDetector(detectionCon=0.8, maxHands=2)\n self.lmList1 = None\n self.centerPoint1 = None\n self.fingers1 = None\n\n def update(self):\n success, img = self.cap.read()\n hands, img = self.detector.findHands(img) # With Draw\n\n if hands:\n # Hand 1\n hand1 = hands[0]\n self.lmList1 = hand1[\"lmList\"] # List of 21 Landmarks points\n self.centerPoint1 = hand1[\"center\"] # center of the hand cx,cy\n self.fingers1 = self.detector.fingersUp(hand1)\n\n\n if len(hands) == 2:\n hand2 = hands[1]\n\n fingers2 = self.detector.fingersUp(hand2)\n\n # cv2.imshow(\"Image\", img)\n # cv2.waitKey(1)\n\n def get_point(self, point = -1):\n if self.lmList1 is not None:\n if 0 <= point < 21:\n return self.lmList1[point]\n else:\n return self.lmList1\n\n def get_center(self):\n return self.centerPoint1\n\n def get_finger_up(self, finger=1):\n if self.fingers1 is not None:\n return self.fingers1[finger] == 1\n else:\n return False\n\n\nif __name__ == \"__main__\":\n HandTracker = HandTracking()\n while True:\n HandTracker.update()\n\n","repo_name":"Marro64/ZombieGame","sub_path":"hand_tracking/hand_tracking.py","file_name":"hand_tracking.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"71316450955","text":"# INF360 - Programming in Python\r\n# Dustin Riley\r\n# Assignment 3\r\n\r\n# import pprint #not sure if pprint is allowed\r\n# (5/5 points) Create a dictionary for each vehicle that contains the fields/keys and values listed above.\r\nv1 = {'Name':\"Ka\", 'Year Introduced':\"1996\", 'Production of the Current Model':\"2014\", 'Generation':\"3rd\", 'Vehicle Information':\"Developed by Ford Brazil as a super mini car\"}\r\nv2 = {'Name':\"Fiesta\", 'Year Introduced':\"1976\", 'Production of the Current Model':\"2017\", 'Generation':\"7th\", 'Vehicle Information':\"Ford's long running subcompact line based on global B-car Platform\"}\r\nv3 = {'Name':\"Focus\", 'Year Introduced':\"1998\", 'Production of the Current Model':\"2018\", 'Generation':\"3rd\", 'Vehicle Information':\"Ford's Compact car based on global C-car platform\"}\r\nv4 = {'Name':\"Mondeo\", 'Year Introduced':\"1992\", 
'Production of the Current Model':\"2012\", 'Generation':\"2nd\", 'Vehicle Information':\"Mid sized passenger sedan with \\\"One-Ford\\\" design based on CD4 platform\"}\r\nv5 = {'Name':\"Fusion\", 'Year Introduced':\"2005\", 'Production of the Current Model':\"2014\", 'Generation':\"5th\", 'Vehicle Information':\"Similar to Mondeo\"}\r\nv6 = {'Name':\"Taurus\", 'Year Introduced':\"1986\", 'Production of the Current Model':\"2009\", 'Generation':\"6th\", 'Vehicle Information':\"Full sized car based on D3 platform\"}\r\nv7 = {'Name':\"Fiesta ST\", 'Year Introduced':\"2013\", 'Production of the Current Model':\"2013\", 'Generation':\"1st\", 'Vehicle Information':\"Fiesta's high performance factory tune\"}\r\nv8 = {'Name':\"Focus RS\", 'Year Introduced':\"2015\", 'Production of the Current Model':\"2015\", 'Generation':\"1st\", 'Vehicle Information':\"Special high performance Focus developed by SVT\"}\r\nv9 = {'Name':\"Mustang\", 'Year Introduced':\"1964\", 'Production of the Current Model':\"2014\", 'Generation':\"6th\", 'Vehicle Information':\"Ford's long running pony/muscle car\"}\r\nv10 = {'Name':\"GT\", 'Year Introduced':\"2004\", 'Production of the Current Model':\"2016\", 'Generation':\"2nd\", 'Vehicle Information':\"Ford's limited production super car inspired by the legendary race car GT40\"}\r\n\r\ndef createDict(dictList): # (5/5 points) Write a function that will take a list of these dictionaries and return a new dictionary with the 'name' value as the key, and the dictionary as the value.\r\n newDict = {}\r\n for i in range(len(dictList)): # new dictionary's key = the 'Name' key's value from list item i\r\n newDict[dictList[i].get('Name')] = dictList[i]\r\n return newDict\r\n\r\ndef sortList(newDict): # (5/5 points) Write a function that will go through the newly created dictionary and return a list of all the cars' names, sorted alphabetically.\r\n newList = list(newDict.keys()) # creates a list out of the dictionary's keys\r\n newList.sort() # sorts the list\r\n return newList\r\n\r\n\r\ndef nameAndYear(newDict): # (5/5 points) Write a function that will go through the newly created dictionary and return a dictionary of all the cars' names and year introduced.\r\n nameAndYearDict = {}\r\n for k, v in newDict.items():\r\n nameAndYearDict[k] = {v['Year Introduced']} # v is a dictionary; the ['Year Introduced'] key gets the value from inside it\r\n return nameAndYearDict\r\n\r\n
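# Editorial note (added; not part of the original assignment): the index-based\r\n# loops above could also be written as dict comprehensions, storing the year\r\n# directly instead of a one-element set, e.g.:\r\n# newDict = {d['Name']: d for d in dictList}\r\n# nameAndYearDict = {k: v['Year Introduced'] for k, v in newDict.items()}\r\n\r\n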
dictList = [v1,v2,v3,v4,v5,v6,v7,v8,v9,v10] # assigning the dictionaries into the list\r\nnewDict = createDict(dictList)\r\nsortedList = sortList(newDict)\r\nnameAndYearDict = nameAndYear(newDict)\r\n\r\nfor i in range(len(sortedList)):\r\n print(sortedList[i]) # (5/5 points) Use a print statement to show the results of the function from step 3, each on their own line.\r\n\r\ntempDict = {}\r\nfor k, v in nameAndYearDict.items(): # filling new dictionary with nameAndYearDict year:name\r\n tempDict[str(v)] = {k}\r\n# pprint.pprint(tempDict) # not sure if pprint is allowed\r\ntemp = sorted(tempDict.items()) # sorted() sorts by key (which is why I made a new year:name dictionary)\r\nfor k, v in temp: # [2:-2] gets rid of {''}\r\n print(k[2:-2] + \" : \" + str(v)[2:-2]) # (5/5 points) Use a print statement to show the results of the function from step 4 to display in the format: year : name. Sort by year introduced.\r\n\r\n# Getting .sort() and sorted() to work correctly was a pain; they work differently.\r\n# .sort() works on lists and modifies the list in place; you also cannot do (return list.sort()).\r\n# sorted() sorts dictionaries by their keys but does not modify the dictionary itself, so the result must be saved to a variable.\r\n","repo_name":"DustinBRiley/Python-3","sub_path":"DustinRileyAssignment3.py","file_name":"DustinRileyAssignment3.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"12931371229","text":"from academy.models import Subscription\n\ndef get_subscription(request):\n if request.user.is_authenticated:\n try:\n subscription = Subscription.objects.get(user=request.user)\n # print(subscription[0])\n return { 'sub': subscription }\n except Exception as ex:\n print(ex)\n return { 'sub': None }\n else:\n return { 'sub': None }\n","repo_name":"kenoalords/komotion","sub_path":"academy/context/subscription.py","file_name":"subscription.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"31138840756","text":"import os\nimport boto3\n\nDIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef send_email(subject='', text='', html=''):\n ses = boto3.client(\n 'ses',\n region_name=os.getenv('SES_REGION_NAME'),\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),\n )\n\n ses.send_email(\n Source=os.getenv('SES_EMAIL_SOURCE'),\n Destination={'ToAddresses': [os.getenv('SES_EMAIL_DEST')]},\n Message={\n 'Subject': {'Data': subject},\n 'Body': {\n 'Text': {'Data': text},\n 'Html': {'Data': html}\n }\n }\n )\n\n\ndef send_timings_email():\n send_email(subject='Train Times',\n text=open(f'{DIR}/templates/email.txt', 'r').read(),\n html=open(f'{DIR}/templates/email.html', 'r').read())\n print('Completed')\n","repo_name":"richardtatum/train-times","sub_path":"send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"44851314728","text":"import warnings\nfrom collections.abc import MutableMapping\nfrom copy import copy as _copy\nfrom typing import TYPE_CHECKING, cast, overload, Any, Iterable, Iterator, \\\n List, MutableSequence, Optional, Tuple, Union\nfrom xml.etree import ElementTree\n\nfrom .. 
import limits\nfrom ..exceptions import XMLSchemaValueError\nfrom ..names import XSD_GROUP, XSD_SEQUENCE, XSD_ALL, XSD_CHOICE, XSD_ELEMENT, \\\n XSD_ANY, XSI_TYPE, XSD_ANY_TYPE, XSD_ANNOTATION\nfrom ..aliases import ElementType, NamespacesType, SchemaType, IterDecodeType, \\\n IterEncodeType, ModelParticleType, SchemaElementType, ComponentClassType\nfrom ..translation import gettext as _\nfrom ..helpers import get_qname, local_name, raw_xml_encode\nfrom ..converters import ElementData\n\nfrom .exceptions import XMLSchemaModelError, XMLSchemaModelDepthError, \\\n XMLSchemaValidationError, XMLSchemaChildrenValidationError, \\\n XMLSchemaTypeTableWarning\nfrom .xsdbase import ValidationMixin, XsdComponent, XsdType\nfrom .particles import ParticleMixin, OccursCalculator\nfrom .elements import XsdElement, XsdAlternative\nfrom .wildcards import XsdAnyElement, Xsd11AnyElement\nfrom .models import ModelVisitor, iter_unordered_content, iter_collapsed_content\n\nif TYPE_CHECKING:\n from .complex_types import XsdComplexType\n\nANY_ELEMENT = ElementTree.Element(\n XSD_ANY,\n attrib={\n 'namespace': '##any',\n 'processContents': 'lax',\n 'minOccurs': '0',\n 'maxOccurs': 'unbounded'\n })\n\nGroupDecodeType = List[Tuple[Union[str, int], Any, Optional[SchemaElementType]]]\nGroupEncodeType = Tuple[Optional[str], List[ElementType]]\n\n\nclass XsdGroup(XsdComponent, MutableSequence[ModelParticleType],\n ParticleMixin, ValidationMixin[ElementType, GroupDecodeType]):\n \"\"\"\n Class for XSD 1.0 *model group* definitions.\n\n .. \n Content: (annotation?, (all | choice | sequence)?)\n \n\n .. \n Content: (annotation?, element*)\n \n\n .. \n Content: (annotation?, (element | group | choice | sequence | any)*)\n \n\n .. \n Content: (annotation?, (element | group | choice | sequence | any)*)\n \n \"\"\"\n parent: Optional[Union['XsdComplexType', 'XsdGroup']]\n model: str\n mixed: bool = False\n ref: Optional['XsdGroup']\n restriction: Optional['XsdGroup'] = None\n\n # For XSD 1.1 openContent processing\n interleave: Optional[Xsd11AnyElement] = None # if openContent with mode='interleave'\n suffix: Optional[Xsd11AnyElement] = None # if openContent with mode='suffix'/'interleave'\n\n _ADMITTED_TAGS = {XSD_GROUP, XSD_SEQUENCE, XSD_ALL, XSD_CHOICE}\n\n def __init__(self, elem: ElementType,\n schema: SchemaType,\n parent: Optional[Union['XsdComplexType', 'XsdGroup']] = None) -> None:\n\n self._group: List[ModelParticleType] = []\n if parent is not None and parent.mixed:\n self.mixed = parent.mixed\n super(XsdGroup, self).__init__(elem, schema, parent)\n\n def __repr__(self) -> str:\n if self.name is None:\n return '%s(model=%r, occurs=%r)' % (\n self.__class__.__name__, self.model, list(self.occurs)\n )\n elif self.ref is None:\n return '%s(name=%r, model=%r, occurs=%r)' % (\n self.__class__.__name__, self.prefixed_name, self.model, list(self.occurs)\n )\n else:\n return '%s(ref=%r, model=%r, occurs=%r)' % (\n self.__class__.__name__, self.prefixed_name, self.model, list(self.occurs)\n )\n\n @overload\n def __getitem__(self, i: int) -> ModelParticleType: ...\n\n @overload\n def __getitem__(self, s: slice) -> MutableSequence[ModelParticleType]: ...\n\n def __getitem__(self, i: Union[int, slice]) \\\n -> Union[ModelParticleType, MutableSequence[ModelParticleType]]:\n return self._group[i]\n\n def __setitem__(self, i: Union[int, slice], o: Any) -> None:\n self._group[i] = o\n\n def __delitem__(self, i: Union[int, slice]) -> None:\n del self._group[i]\n\n def __len__(self) -> int:\n return len(self._group)\n\n def 
insert(self, i: int, item: ModelParticleType) -> None:\n self._group.insert(i, item)\n\n def clear(self) -> None:\n del self._group[:]\n\n def is_emptiable(self) -> bool:\n if self.model == 'choice':\n return self.min_occurs == 0 or not self or any(item.is_emptiable() for item in self)\n else:\n return self.min_occurs == 0 or not self or all(item.is_emptiable() for item in self)\n\n def is_single(self) -> bool:\n if self.max_occurs != 1 or not self:\n return False\n elif len(self) > 1 or not isinstance(self[0], XsdGroup):\n return True\n else:\n return self[0].is_single()\n\n def is_pointless(self, parent: 'XsdGroup') -> bool:\n \"\"\"\n Returns `True` if the group may be eliminated without affecting the model,\n `False` otherwise. A group is pointless if one of those conditions is verified:\n\n - the group is empty\n - minOccurs == maxOccurs == 1 and the group has one child\n - minOccurs == maxOccurs == 1 and the group and its parent have a sequence model\n - minOccurs == maxOccurs == 1 and the group and its parent have a choice model\n\n Ref: https://www.w3.org/TR/2004/REC-xmlschema-1-20041028/#coss-particle\n\n :param parent: effective parent of the model group.\n \"\"\"\n if not self:\n return True\n elif self.min_occurs != 1 or self.max_occurs != 1:\n return False\n elif len(self) == 1:\n return True\n elif self.model == 'sequence' and parent.model != 'sequence':\n return False\n elif self.model == 'choice' and parent.model != 'choice':\n return False\n else:\n return True\n\n @property\n def effective_min_occurs(self) -> int:\n if not self.min_occurs or not self:\n return 0\n\n effective_items: List[Any]\n min_occurs: int\n effective_items = [e for e in self.iter_model() if e.effective_max_occurs != 0]\n if not effective_items:\n return 0\n elif self.model == 'choice':\n min_occurs = min(e.effective_min_occurs for e in effective_items)\n return self.min_occurs * min_occurs\n elif self.model == 'all':\n min_occurs = max(e.effective_min_occurs for e in effective_items)\n return min_occurs\n\n not_emptiable_items = [e for e in effective_items if e.effective_min_occurs]\n if not not_emptiable_items:\n return 0\n elif len(not_emptiable_items) > 1:\n return self.min_occurs\n\n min_occurs = not_emptiable_items[0].effective_min_occurs\n return self.min_occurs * min_occurs\n\n @property\n def effective_max_occurs(self) -> Optional[int]:\n if self.max_occurs == 0 or not self:\n return 0\n\n effective_items: List[Any]\n max_occurs: int\n\n model_items = [(e, e.effective_max_occurs) for e in self.iter_model()]\n effective_items = [x for x in model_items if x[1] != 0]\n if not effective_items:\n return 0\n elif self.max_occurs is None:\n return None\n elif self.model == 'choice':\n if any(x[1] is None for x in effective_items):\n return None\n else:\n max_occurs = max(x[1] for x in effective_items)\n return self.max_occurs * max_occurs\n\n not_emptiable_items = [x for x in effective_items if x[0].effective_min_occurs]\n if not not_emptiable_items:\n if any(x[1] is None for x in effective_items):\n return None\n else:\n max_occurs = max(x[1] for x in effective_items)\n return self.max_occurs * max_occurs\n\n elif len(not_emptiable_items) > 1:\n if self.model == 'sequence':\n return self.max_occurs\n elif all(x[1] is None for x in not_emptiable_items):\n return None\n else:\n max_occurs = min(x[1] for x in not_emptiable_items if x[1] is not None)\n return max_occurs\n elif not_emptiable_items[0][1] is None:\n return None\n else:\n return self.max_occurs * cast(int, not_emptiable_items[0][1])\n\n 
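# Illustrative note (added; not part of the upstream xmlschema source):\n # for a 'choice' group with minOccurs=2 whose particles have effective\n # minimum occurs of 0, 1 and 3, effective_min_occurs is 2 * min(0, 1, 3) == 0,\n # because the emptiest branch may be chosen at every occurrence.\n\n 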
def has_occurs_restriction(\n self, other: Union[ModelParticleType, ParticleMixin, 'OccursCalculator']) -> bool:\n\n if not self:\n return True\n elif isinstance(other, XsdGroup):\n return super(XsdGroup, self).has_occurs_restriction(other)\n\n # Group particle compared to element particle\n if self.max_occurs is None or any(e.max_occurs is None for e in self):\n if other.max_occurs is not None:\n return False\n elif self.model == 'choice':\n return self.min_occurs * min(e.min_occurs for e in self) >= other.min_occurs\n else:\n return self.min_occurs * sum(e.min_occurs for e in self) >= other.min_occurs\n\n elif self.model == 'choice':\n if self.min_occurs * min(e.min_occurs for e in self) < other.min_occurs:\n return False\n elif other.max_occurs is None:\n return True\n else:\n value: int\n try:\n value = max(e.max_occurs for e in self) # type: ignore[type-var, assignment]\n except TypeError:\n return False\n else:\n return self.max_occurs * value <= other.max_occurs\n\n else:\n if self.min_occurs * sum(e.min_occurs for e in self) < other.min_occurs:\n return False\n elif other.max_occurs is None:\n return True\n else:\n try:\n value = sum(e.max_occurs for e in self) # type: ignore[misc]\n except TypeError:\n return False\n else:\n return self.max_occurs * value <= other.max_occurs\n\n def iter_model(self) -> Iterator[ModelParticleType]:\n \"\"\"\n A generator function iterating elements and groups of a model group.\n Skips pointless groups, iterating deeper through them.\n Raises `XMLSchemaModelDepthError` if the *depth* of the model is over\n `limits.MAX_MODEL_DEPTH` value.\n \"\"\"\n iterators: List[Iterator[ModelParticleType]] = []\n particles = iter(self)\n\n while True:\n for item in particles:\n if isinstance(item, XsdGroup) and item.is_pointless(parent=self):\n iterators.append(particles)\n particles = iter(item)\n if len(iterators) > limits.MAX_MODEL_DEPTH:\n raise XMLSchemaModelDepthError(self)\n break\n else:\n yield item\n else:\n try:\n particles = iterators.pop()\n except IndexError:\n return\n\n def iter_elements(self) -> Iterator[SchemaElementType]:\n \"\"\"\n A generator function iterating model's elements. 
Raises `XMLSchemaModelDepthError`\n if the overall depth of the model groups is over `limits.MAX_MODEL_DEPTH`.\n \"\"\"\n if self.max_occurs == 0:\n return\n\n iterators: List[Iterator[ModelParticleType]] = []\n particles = iter(self)\n\n while True:\n for item in particles:\n if isinstance(item, XsdGroup):\n iterators.append(particles)\n particles = iter(item)\n if len(iterators) > limits.MAX_MODEL_DEPTH:\n raise XMLSchemaModelDepthError(self)\n break\n else:\n yield item\n else:\n try:\n particles = iterators.pop()\n except IndexError:\n return\n\n def get_subgroups(self, item: ModelParticleType) -> List['XsdGroup']:\n \"\"\"\n Returns a list of the groups that represent the path to the enclosed particle.\n Raises an `XMLSchemaModelError` if *item* is not a particle of the model group.\n \"\"\"\n subgroups: List[Tuple[XsdGroup, Iterator[ModelParticleType]]] = []\n group, children = self, iter(self)\n\n while True:\n for child in children:\n if child is item:\n _subgroups = [x[0] for x in subgroups]\n _subgroups.append(group)\n return _subgroups\n elif isinstance(child, XsdGroup):\n if len(subgroups) > limits.MAX_MODEL_DEPTH:\n raise XMLSchemaModelDepthError(self)\n subgroups.append((group, children))\n group, children = child, iter(child)\n break\n else:\n try:\n group, children = subgroups.pop()\n except IndexError:\n msg = _('{!r} is not a particle of the model group')\n raise XMLSchemaModelError(self, msg.format(item)) from None\n\n def overall_min_occurs(self, item: ModelParticleType) -> int:\n \"\"\"Returns the overall min occurs of a particle in the model.\"\"\"\n min_occurs = item.min_occurs\n\n for group in self.get_subgroups(item):\n if group.model == 'choice' and len(group) > 1:\n return 0\n min_occurs *= group.min_occurs\n\n return min_occurs\n\n def overall_max_occurs(self, item: ModelParticleType) -> Optional[int]:\n \"\"\"Returns the overall max occurs of a particle in the model.\"\"\"\n max_occurs = item.max_occurs\n\n for group in self.get_subgroups(item):\n if max_occurs == 0:\n return 0\n elif max_occurs is None:\n continue\n elif group.max_occurs is None:\n max_occurs = None\n else:\n max_occurs *= group.max_occurs\n\n return max_occurs\n\n def copy(self) -> 'XsdGroup':\n group: XsdGroup = object.__new__(self.__class__)\n group.__dict__.update(self.__dict__)\n group.errors = self.errors[:]\n group._group = self._group[:]\n return group\n\n __copy__ = copy\n\n def _parse(self) -> None:\n self.clear()\n self._parse_particle(self.elem)\n\n if self.elem.tag != XSD_GROUP:\n # Local group (sequence|all|choice)\n if 'name' in self.elem.attrib:\n msg = _(\"attribute 'name' not allowed in a local group\")\n self.parse_error(msg)\n self._parse_content_model(self.elem)\n\n elif self._parse_reference():\n assert self.name is not None\n try:\n xsd_group = self.maps.lookup_group(self.name)\n except KeyError:\n self.parse_error(_(\"missing group %r\") % self.prefixed_name)\n xsd_group = self.schema.create_any_content_group(parent=self)\n\n if isinstance(xsd_group, XsdGroup):\n self.model = xsd_group.model\n if self.model == 'all':\n if self.max_occurs != 1:\n msg = _(\"maxOccurs must be 1 for 'all' model groups\")\n self.parse_error(msg)\n if self.min_occurs not in (0, 1):\n msg = _(\"minOccurs must be (0 | 1) for 'all' model groups\")\n self.parse_error(msg)\n if self.xsd_version == '1.0' and isinstance(self.parent, XsdGroup):\n msg = _(\"in XSD 1.0 an 'all' model group cannot be nested\")\n self.parse_error(msg)\n self._group.append(xsd_group)\n self.ref = xsd_group\n else:\n # 
Disallowed circular definition, substitute with any content group.\n msg = _(\"Circular definition detected for group %r\")\n self.parse_error(msg % self.name, xsd_group[0])\n self.model = 'sequence'\n self.mixed = True\n self._group.append(self.schema.xsd_any_class(ANY_ELEMENT, self.schema, self))\n\n else:\n attrib = self.elem.attrib\n try:\n self.name = get_qname(self.target_namespace, attrib['name'])\n except KeyError:\n pass\n else:\n if self.parent is not None:\n msg = _(\"attribute 'name' not allowed in a local group\")\n self.parse_error(msg)\n else:\n if 'minOccurs' in attrib:\n msg = _(\"attribute 'minOccurs' not allowed in a global group\")\n self.parse_error(msg)\n if 'maxOccurs' in attrib:\n msg = _(\"attribute 'maxOccurs' not allowed in a global group\")\n self.parse_error(msg)\n\n content_model = self._parse_child_component(self.elem, strict=True)\n if content_model is not None:\n if self.parent is None:\n if 'minOccurs' in content_model.attrib:\n msg = _(\"attribute 'minOccurs' not allowed in a global group\")\n self.parse_error(msg, content_model)\n if 'maxOccurs' in content_model.attrib:\n msg = _(\"attribute 'maxOccurs' not allowed in a global group\")\n self.parse_error(msg, content_model)\n\n if content_model.tag in {XSD_SEQUENCE, XSD_ALL, XSD_CHOICE}:\n self._parse_content_model(content_model)\n else:\n msg = _('unexpected tag %r')\n self.parse_error(msg % content_model.tag, content_model)\n\n def _parse_content_model(self, content_model: ElementType) -> None:\n self.model = local_name(content_model.tag)\n if self.model == 'all':\n if self.max_occurs != 1:\n msg = _(\"maxOccurs must be 1 for 'all' model groups\")\n self.parse_error(msg)\n if self.min_occurs not in (0, 1):\n msg = _(\"minOccurs must be (0 | 1) for 'all' model groups\")\n self.parse_error(msg)\n\n child: ElementType\n for child in content_model:\n if child.tag == XSD_ANNOTATION or callable(child.tag):\n continue\n elif child.tag == XSD_ELEMENT:\n # Build inner elements later, to avoid circularity.\n self.append(self.schema.xsd_element_class(child, self.schema, self, False))\n elif content_model.tag == XSD_ALL:\n self.parse_error(_(\"'all' model can contain only elements\"))\n elif child.tag == XSD_ANY:\n self._group.append(XsdAnyElement(child, self.schema, self))\n elif child.tag in (XSD_SEQUENCE, XSD_CHOICE):\n self._group.append(XsdGroup(child, self.schema, self))\n elif child.tag == XSD_GROUP:\n try:\n ref = self.schema.resolve_qname(child.attrib['ref'])\n except (KeyError, ValueError, RuntimeError) as err:\n if 'ref' not in child.attrib:\n msg = _(\"missing attribute 'ref' in local group\")\n self.parse_error(msg, child)\n else:\n self.parse_error(err, child)\n continue\n\n if ref != self.name:\n xsd_group = XsdGroup(child, self.schema, self)\n if xsd_group.model == 'all':\n msg = _(\"'all' model can appear only at the 1st level of a model group\")\n self.parse_error(msg)\n else:\n self._group.append(xsd_group)\n elif self.redefine is None:\n msg = _(\"Circular definition detected for group %r\")\n self.parse_error(msg % self.name)\n else:\n if child.get('minOccurs', '1') != '1' or child.get('maxOccurs', '1') != '1':\n msg = _(\"Redefined group reference cannot have \"\n \"minOccurs/maxOccurs other than 1\")\n self.parse_error(msg)\n self._group.append(self.redefine)\n\n def build(self) -> None:\n for item in self._group:\n if isinstance(item, XsdElement):\n item.build()\n\n if self.redefine is not None:\n for group in self.redefine.iter_components(XsdGroup):\n group.build()\n\n @property\n def 
built(self) -> bool:\n for item in self:\n if isinstance(item, XsdElement):\n if not item.built:\n return False\n elif isinstance(item, XsdAnyElement):\n continue\n elif item.parent is None:\n continue\n elif item.parent is not self.parent and \\\n isinstance(item.parent, XsdType) and item.parent.parent is None:\n continue\n elif not item.ref and not item.built:\n return False\n\n return True if self.model else False\n\n @property\n def validation_attempted(self) -> str:\n if self.built:\n return 'full'\n elif any(item.validation_attempted == 'partial' for item in self):\n return 'partial'\n else:\n return 'none'\n\n @property\n def schema_elem(self) -> ElementType:\n return self.parent.elem if self.parent is not None else self.elem\n\n def iter_components(self, xsd_classes: Optional[ComponentClassType] = None) \\\n -> Iterator[XsdComponent]:\n if xsd_classes is None or isinstance(self, xsd_classes):\n yield self\n for item in self:\n if item.parent is None:\n continue\n elif item.parent is not self.parent and isinstance(item.parent, XsdType) \\\n and item.parent.parent is None:\n continue\n yield from item.iter_components(xsd_classes)\n\n if self.redefine is not None and self.redefine not in self:\n yield from self.redefine.iter_components(xsd_classes)\n\n def admits_restriction(self, model: str) -> bool:\n if self.model == model:\n return True\n elif self.model == 'all':\n return model == 'sequence'\n elif self.model == 'choice':\n return model == 'sequence' or len(self.ref or self) <= 1\n else:\n return model == 'choice' or len(self.ref or self) <= 1\n\n def is_empty(self) -> bool:\n return not self.mixed and (not self._group or self.max_occurs == 0)\n\n def is_restriction(self, other: ModelParticleType, check_occurs: bool = True) -> bool:\n if not self._group:\n return True\n elif not isinstance(other, ParticleMixin):\n raise XMLSchemaValueError(\"the argument 'other' must be an XSD particle\")\n elif not isinstance(other, XsdGroup):\n return self.is_element_restriction(other)\n elif not other:\n return False\n elif len(other) == other.min_occurs == other.max_occurs == 1:\n if len(self) > 1:\n return self.is_restriction(other[0], check_occurs)\n elif self.ref is None and isinstance(self[0], XsdGroup) \\\n and self[0].is_pointless(parent=self):\n return self[0].is_restriction(other[0], check_occurs)\n\n # Compare model with model\n if self.model != other.model and self.model != 'sequence' and \\\n (len(self) > 1 or self.ref is not None and len(self[0]) > 1):\n return False\n elif self.model == other.model or other.model == 'sequence':\n return self.is_sequence_restriction(other)\n elif other.model == 'all':\n return self.is_all_restriction(other)\n else: # other.model == 'choice':\n return self.is_choice_restriction(other)\n\n def is_element_restriction(self, other: ModelParticleType) -> bool:\n if self.xsd_version == '1.0' and isinstance(other, XsdElement) and \\\n not other.ref and other.name not in self.schema.substitution_groups:\n return False\n elif not self.has_occurs_restriction(other):\n return False\n elif self.model == 'choice':\n if other.name in self.maps.substitution_groups and \\\n all(isinstance(e, XsdElement) and e.substitution_group == other.name\n for e in self):\n return True\n return any(e.is_restriction(other, False) for e in self)\n else:\n min_occurs = 0\n max_occurs: Optional[int] = 0\n for item in self.iter_model():\n if isinstance(item, XsdGroup):\n return False\n elif item.min_occurs == 0 or item.is_restriction(other, False):\n min_occurs += item.min_occurs\n 
if max_occurs is not None:\n if item.max_occurs is None:\n max_occurs = None\n else:\n max_occurs += item.max_occurs\n continue\n return False\n\n if min_occurs < other.min_occurs:\n return False\n elif max_occurs is None:\n return other.max_occurs is None\n elif other.max_occurs is None:\n return True\n else:\n return max_occurs <= other.max_occurs\n\n def is_sequence_restriction(self, other: 'XsdGroup') -> bool:\n if not self.has_occurs_restriction(other):\n return False\n\n check_occurs = other.max_occurs != 0\n\n # Same model: declarations must simply preserve order\n other_iterator = iter(other.iter_model())\n for item in self.iter_model():\n for other_item in other_iterator:\n if other_item is item or item.is_restriction(other_item, check_occurs):\n break\n elif other.model == 'choice':\n if item.max_occurs != 0:\n continue\n elif not other_item.is_matching(item.name):\n continue\n elif all(e.max_occurs == 0 for e in self.iter_model()):\n return False\n else:\n break\n elif not other_item.is_emptiable():\n return False\n else:\n return False\n\n if other.model != 'choice':\n for other_item in other_iterator:\n if not other_item.is_emptiable():\n return False\n return True\n\n def is_all_restriction(self, other: 'XsdGroup') -> bool:\n if not self.has_occurs_restriction(other):\n return False\n\n check_occurs = other.max_occurs != 0\n if self.ref is None:\n restriction_items = [x for x in self]\n else:\n restriction_items = [x for x in self[0]]\n\n for other_item in other.iter_model():\n for item in restriction_items:\n if other_item is item or item.is_restriction(other_item, check_occurs):\n break\n else:\n if not other_item.is_emptiable():\n return False\n continue\n restriction_items.remove(item)\n\n return not bool(restriction_items)\n\n def is_choice_restriction(self, other: 'XsdGroup') -> bool:\n if self.ref is None:\n if self.parent is None and other.parent is not None:\n return False # not allowed restriction in XSD 1.0\n restriction_items = [x for x in self]\n elif other.parent is None:\n restriction_items = [x for x in self[0]]\n else:\n return False # not allowed restriction in XSD 1.0\n\n check_occurs = other.max_occurs != 0\n max_occurs: Optional[int] = 0\n other_max_occurs: Optional[int] = 0\n\n for other_item in other.iter_model():\n for item in restriction_items:\n if other_item is item or item.is_restriction(other_item, check_occurs):\n if max_occurs is not None:\n if item.max_occurs is None:\n max_occurs = None\n else:\n max_occurs += item.max_occurs\n\n if other_max_occurs is not None:\n if other_item.max_occurs is None:\n other_max_occurs = None\n else:\n other_max_occurs = max(other_max_occurs, other_item.max_occurs)\n break\n else:\n continue\n restriction_items.remove(item)\n\n if restriction_items:\n return False\n elif other_max_occurs is None:\n if other.max_occurs != 0:\n return True\n other_max_occurs = 0\n elif other.max_occurs is None:\n if other_max_occurs != 0:\n return True\n other_max_occurs = 0\n else:\n other_max_occurs *= other.max_occurs\n\n if max_occurs is None:\n return self.max_occurs == 0\n elif self.max_occurs is None:\n return max_occurs == 0\n else:\n return other_max_occurs >= max_occurs * self.max_occurs\n\n def check_dynamic_context(self, elem: ElementType,\n xsd_element: SchemaElementType,\n model_element: SchemaElementType,\n namespaces: NamespacesType) -> None:\n\n if model_element is not xsd_element and isinstance(model_element, XsdElement):\n if 'substitution' in model_element.block \\\n or xsd_element.type and 
xsd_element.type.is_blocked(model_element):\n reason = _(\"substitution of %r is blocked\") % model_element\n raise XMLSchemaValidationError(model_element, elem, reason)\n\n alternatives: Union[Tuple[()], List[XsdAlternative]] = []\n if isinstance(xsd_element, XsdAnyElement):\n if xsd_element.process_contents == 'skip':\n return\n\n try:\n xsd_element = self.maps.lookup_element(elem.tag)\n except LookupError:\n if self.schema.meta_schema is None:\n # Meta-schema groups ignore xsi:type (issue #350)\n return\n\n try:\n type_name = elem.attrib[XSI_TYPE].strip()\n except KeyError:\n return\n else:\n xsd_type = self.maps.get_instance_type(\n type_name, self.any_type, namespaces\n )\n else:\n alternatives = xsd_element.alternatives\n try:\n type_name = elem.attrib[XSI_TYPE].strip()\n except KeyError:\n xsd_type = xsd_element.type\n else:\n xsd_type = self.maps.get_instance_type(\n type_name, xsd_element.type, namespaces\n )\n\n else:\n if XSI_TYPE not in elem.attrib or self.schema.meta_schema is None:\n xsd_type = xsd_element.type\n else:\n alternatives = xsd_element.alternatives\n try:\n type_name = elem.attrib[XSI_TYPE].strip()\n except KeyError:\n xsd_type = xsd_element.type\n else:\n xsd_type = self.maps.get_instance_type(\n type_name, xsd_element.type, namespaces\n )\n\n if model_element is not xsd_element and \\\n isinstance(model_element, XsdElement) and model_element.block:\n for derivation in model_element.block.split():\n if xsd_type is not model_element.type and \\\n xsd_type.is_derived(model_element.type, derivation):\n reason = _(\"usage of {0!r} with type {1} is blocked by \"\n \"head element\").format(xsd_element, derivation)\n raise XMLSchemaValidationError(self, elem, reason)\n\n if XSI_TYPE not in elem.attrib or self.schema.meta_schema is None:\n return\n\n # If it's a restriction the context is the base_type's group\n group = self.restriction if self.restriction is not None else self\n\n # Dynamic EDC check of matched element\n for e in group.iter_elements():\n if not isinstance(e, XsdElement):\n continue\n elif e.name == elem.tag:\n other = e\n else:\n for other in e.iter_substitutes():\n if other.name == elem.tag:\n break\n else:\n continue\n\n if len(other.alternatives) != len(alternatives) or \\\n not xsd_type.is_dynamic_consistent(other.type):\n reason = _(\"{0!r} that matches {1!r} is not consistent with local \"\n \"declaration {2!r}\").format(elem, xsd_element, other)\n raise XMLSchemaValidationError(self, reason)\n\n if not all(any(a == x for x in alternatives) for a in other.alternatives) or \\\n not all(any(a == x for x in other.alternatives) for a in alternatives):\n msg = _(\"Maybe a not equivalent type table between elements \"\n \"{0!r} and {1!r}.\").format(self, xsd_element)\n warnings.warn(msg, XMLSchemaTypeTableWarning, stacklevel=3)\n\n def match_element(self, name: str) -> Optional[SchemaElementType]:\n \"\"\"\n Try a model-less match of a child element. 
Returns the\n matched element, or `None` if there is no match.\n \"\"\"\n for xsd_element in self.iter_elements():\n if xsd_element.is_matching(name, group=self):\n return xsd_element\n return None\n\n def iter_decode(self, obj: ElementType, validation: str = 'lax', **kwargs: Any) \\\n -> IterDecodeType[GroupDecodeType]:\n \"\"\"\n Creates an iterator for decoding an Element content.\n\n :param obj: an Element.\n :param validation: the validation mode, can be 'lax', 'strict' or 'skip'.\n :param kwargs: keyword arguments for the decoding process.\n :return: yields a list of 3-tuples (key, decoded data, decoder), \\\n eventually preceded by a sequence of validation or decoding errors.\n \"\"\"\n result_list: GroupDecodeType = []\n cdata_index = 1 # keys for CDATA sections are positive integers\n\n if not self._group and self.model == 'choice' and self.min_occurs:\n reason = _(\"an empty 'choice' group with minOccurs > 0 cannot validate any content\")\n yield self.validation_error(validation, reason, obj, **kwargs)\n yield result_list\n return\n\n if not self.mixed:\n # Check element CDATA\n if obj.text and obj.text.strip() or \\\n any(child.tail and child.tail.strip() for child in obj):\n if len(self) == 1 and isinstance(self[0], XsdAnyElement):\n pass # [XsdAnyElement()] equals to an empty complexType declaration\n else:\n reason = _(\"character data between child elements not allowed\")\n yield self.validation_error(validation, reason, obj, **kwargs)\n cdata_index = 0 # Do not decode CDATA\n\n if cdata_index and obj.text is not None:\n text = str(obj.text.strip())\n if text:\n result_list.append((cdata_index, text, None))\n cdata_index += 1\n\n level = kwargs['level'] = kwargs.pop('level', 0) + 1\n over_max_depth = 'max_depth' in kwargs and kwargs['max_depth'] <= level\n if level > limits.MAX_XML_DEPTH:\n reason = _(\"XML data depth exceeded (MAX_XML_DEPTH=%r)\") % limits.MAX_XML_DEPTH\n self.validation_error('strict', reason, obj, **kwargs)\n\n try:\n namespaces = kwargs['namespaces']\n except KeyError:\n namespaces = default_namespace = None\n else:\n try:\n default_namespace = namespaces.get('')\n except AttributeError:\n default_namespace = None\n\n errors: List[Tuple[int, ModelParticleType, int, Optional[List[SchemaElementType]]]]\n xsd_element: Optional[SchemaElementType]\n expected: Optional[List[SchemaElementType]]\n\n model = ModelVisitor(self)\n errors = []\n broken_model = False\n\n for index, child in enumerate(obj):\n if callable(child.tag):\n continue # child is a \n\n while model.element is not None:\n if model.element.max_occurs == 0:\n xsd_element = None\n else:\n xsd_element = model.element.match(\n child.tag, group=self, occurs=model.occurs\n )\n\n if xsd_element is None:\n if self.interleave is not None and self.interleave.is_matching(\n child.tag, default_namespace, self, model.occurs):\n xsd_element = self.interleave\n break\n\n for particle, occurs, expected in model.advance(False):\n errors.append((index, particle, occurs, expected))\n model.clear()\n broken_model = True # the model is broken, continues with raw decoding.\n xsd_element = self.match_element(child.tag)\n break\n else:\n continue\n break\n\n try:\n self.check_dynamic_context(child, xsd_element, model.element, namespaces)\n except XMLSchemaValidationError as err:\n yield self.validation_error(validation, err, obj, **kwargs)\n\n for particle, occurs, expected in model.advance(True):\n errors.append((index, particle, occurs, expected))\n break\n else:\n if self.suffix is not None and \\\n 
self.suffix.is_matching(child.tag, default_namespace, self):\n xsd_element = self.suffix\n else:\n xsd_element = self.match_element(child.tag)\n if xsd_element is None:\n errors.append((index, self, 0, None))\n broken_model = True\n elif not broken_model:\n errors.append((index, xsd_element, 0, []))\n broken_model = True\n\n if xsd_element is None:\n if kwargs.get('keep_unknown') and 'converter' in kwargs:\n for result in self.any_type.iter_decode(child, validation, **kwargs):\n result_list.append((child.tag, result, None))\n continue\n elif 'converter' not in kwargs:\n # Validation-only mode: do not append results\n for result in xsd_element.iter_decode(child, validation, **kwargs):\n if isinstance(result, XMLSchemaValidationError):\n yield result\n continue\n elif over_max_depth:\n if 'depth_filler' in kwargs:\n func = kwargs['depth_filler']\n result_list.append((child.tag, func(xsd_element), xsd_element))\n continue\n\n for result in xsd_element.iter_decode(child, validation, **kwargs):\n if isinstance(result, XMLSchemaValidationError):\n yield result\n else:\n result_list.append((child.tag, result, xsd_element))\n\n if cdata_index and child.tail is not None:\n tail = str(child.tail.strip())\n if tail:\n if result_list and isinstance(result_list[-1][0], int):\n tail = result_list[-1][1] + ' ' + tail\n result_list[-1] = result_list[-1][0], tail, None\n else:\n result_list.append((cdata_index, tail, None))\n cdata_index += 1\n\n if model.element is not None:\n index = len(obj)\n for particle, occurs, expected in model.stop():\n errors.append((index, particle, occurs, expected))\n\n if errors:\n source = kwargs.get('source')\n for index, particle, occurs, expected in errors:\n error = XMLSchemaChildrenValidationError(\n self, obj, index, particle, occurs, expected, source, namespaces\n )\n if validation == 'strict':\n raise error\n yield error\n\n yield result_list\n\n def iter_encode(self, obj: ElementData, validation: str = 'lax', **kwargs: Any) \\\n -> IterEncodeType[GroupEncodeType]:\n \"\"\"\n Creates an iterator for encoding data to a list containing Element data.\n\n :param obj: an ElementData instance.\n :param validation: the validation mode: can be 'lax', 'strict' or 'skip'.\n :param kwargs: keyword arguments for the encoding process.\n :return: yields a couple with the text of the Element and a list of child \\\n elements, eventually preceded by a sequence of validation errors.\n \"\"\"\n level = kwargs['level'] = kwargs.get('level', 0) + 1\n errors = []\n text = raw_xml_encode(obj.text)\n children: List[ElementType] = []\n try:\n indent = kwargs['indent']\n except KeyError:\n indent = 4\n\n padding = '\\n' + ' ' * indent * level\n\n try:\n converter = kwargs['converter']\n except KeyError:\n converter = kwargs['converter'] = self.schema.get_converter(**kwargs)\n\n default_namespace = converter.get('')\n model = ModelVisitor(self)\n index = cdata_index = 0\n wrong_content_type = False\n over_max_depth = 'max_depth' in kwargs and kwargs['max_depth'] <= level\n\n content: Iterable[Any]\n if not obj.content:\n content = []\n elif isinstance(obj.content, MutableMapping) or kwargs.get('unordered'):\n content = iter_unordered_content(obj.content, self, default_namespace)\n elif not isinstance(obj.content, MutableSequence):\n wrong_content_type = True\n content = []\n elif not isinstance(obj.content[0], tuple):\n if len(obj.content) > 1 or text is not None:\n wrong_content_type = True\n else:\n text = raw_xml_encode(obj.content[0])\n content = []\n elif converter.losslessly:\n content 
= obj.content\n else:\n content = iter_collapsed_content(obj.content, self, default_namespace)\n\n for index, (name, value) in enumerate(content):\n if isinstance(name, int):\n if not children:\n text = padding + value if text is None else text + value + padding\n elif children[-1].tail is None:\n children[-1].tail = padding + value\n else:\n children[-1].tail += value + padding\n cdata_index += 1\n continue\n\n xsd_element: Optional[SchemaElementType]\n if self.interleave and self.interleave.is_matching(name, default_namespace, group=self):\n xsd_element = self.interleave\n value = get_qname(default_namespace, name), value\n else:\n while model.element is not None:\n if model.element.max_occurs == 0:\n xsd_element = None\n else:\n xsd_element = model.element.match(\n name, group=self, occurs=model.occurs\n )\n\n if xsd_element is None:\n for particle, occurs, expected in model.advance():\n errors.append((index - cdata_index, particle, occurs, expected))\n continue\n elif isinstance(xsd_element, XsdAnyElement):\n value = get_qname(default_namespace, name), value\n\n for particle, occurs, expected in model.advance(True):\n errors.append((index - cdata_index, particle, occurs, expected))\n break\n else:\n if self.suffix and self.suffix.is_matching(name, default_namespace, group=self):\n xsd_element = self.suffix\n value = get_qname(default_namespace, name), value\n else:\n errors.append((index - cdata_index, self, 0, []))\n xsd_element = self.match_element(name)\n if isinstance(xsd_element, XsdAnyElement):\n value = get_qname(default_namespace, name), value\n elif xsd_element is None:\n if name.startswith('{') or ':' not in name:\n reason = _('{!r} does not match any declared element '\n 'of the model group').format(name)\n else:\n reason = _('{0} has an unknown prefix {1!r}').format(\n name, name.split(':')[0]\n )\n yield self.validation_error(validation, reason, value, **kwargs)\n continue\n\n if over_max_depth:\n continue\n\n for result in xsd_element.iter_encode(value, validation, **kwargs):\n if isinstance(result, XMLSchemaValidationError):\n yield result\n else:\n children.append(result)\n\n if model.element is not None:\n for particle, occurs, expected in model.stop():\n errors.append((index - cdata_index + 1, particle, occurs, expected))\n\n if children:\n if children[-1].tail is None:\n children[-1].tail = padding[:-indent] or '\\n'\n else:\n children[-1].tail = children[-1].tail.strip() + (padding[:-indent] or '\\n')\n\n cdata_not_allowed = not self.mixed and text and text.strip() and self and \\\n (len(self) > 1 or not isinstance(self[0], XsdAnyElement))\n\n if errors or cdata_not_allowed or wrong_content_type:\n attrib = {k: raw_xml_encode(v) for k, v in obj.attributes.items()}\n elem = converter.etree_element(obj.tag, text, children, attrib)\n\n if wrong_content_type:\n reason = _(\"wrong content type {!r}\").format(type(obj.content))\n yield self.validation_error(validation, reason, elem, **kwargs)\n\n if cdata_not_allowed:\n reason = _(\"character data between child elements not allowed\")\n yield self.validation_error(validation, reason, elem, **kwargs)\n\n for index, particle, occurs, expected in errors:\n error = XMLSchemaChildrenValidationError(\n validator=self,\n elem=elem,\n index=index,\n particle=particle,\n occurs=occurs,\n expected=expected,\n namespaces=converter.namespaces,\n )\n if validation == 'strict':\n raise error\n\n error.elem = None # replace with the element of the encoded tree\n yield error\n\n yield text, children\n\n\nclass Xsd11Group(XsdGroup):\n 
\"\"\"\n Class for XSD 1.1 *model group* definitions.\n\n .. The XSD 1.1 model groups differ from XSD 1.0 groups for the 'all' model,\n that can contains also other groups.\n .. \n Content: (annotation?, (element | any | group)*)\n \n \"\"\"\n def _parse_content_model(self, content_model: ElementType) -> None:\n self.model = local_name(content_model.tag)\n if self.model == 'all':\n if self.max_occurs not in (0, 1):\n msg = _(\"maxOccurs must be (0 | 1) for 'all' model groups\")\n self.parse_error(msg)\n if self.min_occurs not in (0, 1):\n msg = _(\"minOccurs must be (0 | 1) for 'all' model groups\")\n self.parse_error(msg)\n\n for child in content_model:\n if child.tag == XSD_ELEMENT:\n # Builds inner elements later, for avoid circularity.\n self.append(self.schema.xsd_element_class(child, self.schema, self, False))\n elif child.tag == XSD_ANY:\n self._group.append(Xsd11AnyElement(child, self.schema, self))\n elif child.tag in (XSD_SEQUENCE, XSD_CHOICE, XSD_ALL):\n self._group.append(Xsd11Group(child, self.schema, self))\n elif child.tag == XSD_GROUP:\n try:\n ref = self.schema.resolve_qname(child.attrib['ref'])\n except (KeyError, ValueError, RuntimeError) as err:\n if 'ref' not in child.attrib:\n msg = _(\"missing attribute 'ref' in local group\")\n self.parse_error(msg, child)\n else:\n self.parse_error(err, child)\n continue\n\n if ref != self.name:\n xsd_group = Xsd11Group(child, self.schema, self)\n self._group.append(xsd_group)\n if (self.model != 'all') ^ (xsd_group.model != 'all'):\n msg = _(\"an xs:{0} group cannot include a reference to an \"\n \"xs:{1} group\").format(self.model, xsd_group.model)\n self.parse_error(msg)\n self.pop()\n\n elif self.redefine is None:\n msg = _(\"Circular definition detected for group %r\")\n self.parse_error(msg % self.name)\n else:\n if child.get('minOccurs', '1') != '1' or child.get('maxOccurs', '1') != '1':\n msg = _(\"Redefined group reference cannot have \"\n \"minOccurs/maxOccurs other than 1\")\n self.parse_error(msg)\n self._group.append(self.redefine)\n\n def admits_restriction(self, model: str) -> bool:\n if self.model == model or self.model == 'all':\n return True\n elif self.model == 'choice':\n return model == 'sequence' or len(self.ref or self) <= 1\n else:\n return model == 'choice' or len(self.ref or self) <= 1\n\n def is_restriction(self, other: ModelParticleType, check_occurs: bool = True) -> bool:\n if not self._group:\n return True\n elif not isinstance(other, ParticleMixin):\n raise XMLSchemaValueError(\"the argument 'base' must be a %r instance\" % ParticleMixin)\n elif not isinstance(other, XsdGroup):\n return self.is_element_restriction(other)\n elif not other:\n return False\n elif len(other) == other.min_occurs == other.max_occurs == 1:\n if len(self) > 1:\n return self.is_restriction(other[0], check_occurs)\n elif self.ref is None and isinstance(self[0], XsdGroup) \\\n and self[0].is_pointless(parent=self):\n return self[0].is_restriction(other[0], check_occurs)\n\n if other.model == 'sequence':\n return self.is_sequence_restriction(other)\n elif other.model == 'all':\n return self.is_all_restriction(other)\n else: # other.model == 'choice':\n return self.is_choice_restriction(other)\n\n def has_occurs_restriction(\n self, other: Union[ModelParticleType, ParticleMixin, 'OccursCalculator']) -> bool:\n if not isinstance(other, XsdGroup):\n return super().has_occurs_restriction(other)\n elif not self:\n return True\n elif self.effective_min_occurs < other.effective_min_occurs:\n return False\n\n effective_max_occurs = 
effective_max_occurs = self.effective_max_occurs\n if effective_max_occurs == 0:\n return True\n elif effective_max_occurs is None:\n return other.effective_max_occurs is None\n\n try:\n return effective_max_occurs <= other.effective_max_occurs # type: ignore[operator]\n except TypeError:\n return True\n\n def is_sequence_restriction(self, other: XsdGroup) -> bool:\n if not self.has_occurs_restriction(other):\n return False\n\n check_occurs = other.max_occurs != 0\n\n item_iterator = iter(self.iter_model())\n item = next(item_iterator, None)\n\n for other_item in other.iter_model():\n if item is not None and item.is_restriction(other_item, check_occurs):\n item = next(item_iterator, None)\n elif not other_item.is_emptiable():\n break\n else:\n if item is None:\n return True\n\n # Restriction check failed: try another check without removing pointless groups\n item_iterator = iter(self)\n item = next(item_iterator, None)\n\n for other_item in other.iter_model():\n if item is not None and item.is_restriction(other_item, check_occurs):\n item = next(item_iterator, None)\n elif not other_item.is_emptiable():\n break\n else:\n if item is None:\n return True\n\n # Restriction check failed again: try checking other items against self\n other_items = other.iter_model()\n for other_item in other_items:\n if self.is_restriction(other_item, check_occurs):\n return all(x.is_emptiable() for x in other_items)\n elif not other_item.is_emptiable():\n return False\n else:\n return False\n\n def is_all_restriction(self, other: XsdGroup) -> bool:\n restriction_items = [x for x in self.iter_model()]\n\n base_items = [x for x in other.iter_model()]\n\n # If the base includes more than one wildcard, calculate and append a\n # wildcard union for validating wildcard unions in the restriction\n wildcards: List[XsdAnyElement] = []\n for w1 in base_items:\n if isinstance(w1, XsdAnyElement):\n for w2 in wildcards:\n if w1.process_contents == w2.process_contents and w1.occurs == w2.occurs:\n w2.union(w1)\n w2.extended = True\n break\n else:\n wildcards.append(_copy(w1))\n\n base_items.extend(w for w in wildcards if hasattr(w, 'extended'))\n\n if self.model != 'choice':\n restriction_wildcards = [e for e in restriction_items if isinstance(e, XsdAnyElement)]\n\n for other_item in base_items:\n min_occurs, max_occurs = 0, other_item.max_occurs\n for k in range(len(restriction_items) - 1, -1, -1):\n item = restriction_items[k]\n\n if item.is_restriction(other_item, check_occurs=False):\n if max_occurs is None:\n min_occurs += item.min_occurs\n elif item.max_occurs is None or max_occurs < item.max_occurs or \\\n min_occurs + item.min_occurs > max_occurs:\n continue\n else:\n min_occurs += item.min_occurs\n max_occurs -= item.max_occurs\n\n restriction_items.remove(item)\n if not min_occurs or max_occurs == 0:\n break\n else:\n if self.model == 'all' and restriction_wildcards:\n if not isinstance(other_item, XsdGroup) and other_item.type \\\n and other_item.type.name != XSD_ANY_TYPE:\n\n for w in restriction_wildcards:\n if w.is_matching(other_item.name, self.target_namespace):\n return False\n\n if min_occurs < other_item.min_occurs:\n break\n else:\n if not restriction_items:\n return True\n return False\n\n # Restriction with a choice model: this is a more complex case\n # because the non-emptiable elements of the base group have\n # to be included in each item of the choice group.\n not_emptiable_items = {x for x in base_items if x.min_occurs}\n\n for other_item in base_items:\n min_occurs, max_occurs = 0, other_item.max_occurs\n for k in 
range(len(restriction_items) - 1, -1, -1):\n item = restriction_items[k]\n\n if item.is_restriction(other_item, check_occurs=False):\n if max_occurs is None:\n min_occurs += item.min_occurs\n elif item.max_occurs is None or max_occurs < item.max_occurs or \\\n min_occurs + item.min_occurs > max_occurs:\n continue\n else:\n min_occurs += item.min_occurs\n max_occurs -= item.max_occurs\n\n if not_emptiable_items:\n if len(not_emptiable_items) > 1:\n continue\n if other_item not in not_emptiable_items:\n continue\n\n restriction_items.remove(item)\n if not min_occurs or max_occurs == 0:\n break\n\n if min_occurs < other_item.min_occurs:\n break\n else:\n if not restriction_items:\n return True\n\n if any(not isinstance(x, XsdGroup) for x in restriction_items):\n return False\n\n # If the remaining items are groups try to verify if they are all\n # restrictions of the 'all' group and if each group contains all\n # not emptiable elements.\n for group in restriction_items:\n if not group.is_restriction(other):\n return False\n\n for item in not_emptiable_items:\n for e in group:\n if e.name == item.name:\n break\n else:\n return False\n else:\n return True\n\n def is_choice_restriction(self, other: XsdGroup) -> bool:\n restriction_items = [x for x in self.iter_model()]\n has_not_empty_item = any(e.max_occurs != 0 for e in restriction_items)\n\n check_occurs = other.max_occurs != 0\n max_occurs: Optional[int] = 0\n other_max_occurs: Optional[int] = 0\n\n for other_item in other.iter_model():\n for item in restriction_items:\n if other_item is item or item.is_restriction(other_item, check_occurs):\n if max_occurs is not None:\n effective_max_occurs = item.effective_max_occurs\n if effective_max_occurs is None:\n max_occurs = None\n elif self.model == 'choice':\n max_occurs = max(max_occurs, effective_max_occurs)\n else:\n max_occurs += effective_max_occurs\n\n if other_max_occurs is not None:\n effective_max_occurs = other_item.effective_max_occurs\n if effective_max_occurs is None:\n other_max_occurs = None\n else:\n other_max_occurs = max(other_max_occurs, effective_max_occurs)\n break\n elif item.max_occurs != 0:\n continue\n elif not other_item.is_matching(item.name):\n continue\n elif has_not_empty_item:\n break\n else:\n return False\n else:\n continue\n restriction_items.remove(item)\n\n if restriction_items:\n return False\n elif other_max_occurs is None:\n if other.max_occurs != 0:\n return True\n other_max_occurs = 0\n elif other.max_occurs is None:\n if other_max_occurs != 0:\n return True\n other_max_occurs = 0\n else:\n other_max_occurs *= other.max_occurs\n\n if max_occurs is None:\n return self.max_occurs == 0\n elif self.max_occurs is None:\n return max_occurs == 0\n else:\n return other_max_occurs >= max_occurs * self.max_occurs\n","repo_name":"sissaschool/xmlschema","sub_path":"xmlschema/validators/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":63200,"program_lang":"python","lang":"en","doc_type":"code","stars":367,"dataset":"github-code","pt":"28"} +{"seq_id":"10063904028","text":"# Input\ncredit = int(input())\nyears = int(input())\ninterest = float(input())\n\n# Calculate fee\nr = interest / 1200\nmonthly_fee = round((credit * r) / (1 - ((1 + r) ** (-12 * years))))\ntotal_fee = round(monthly_fee * years * 12)\npaid_interest = round(total_fee - credit)\npercentage_interest = round(((paid_interest * 100) / credit), 2)\n\n# Output\nprint(\n f\"Crédito por $ {credit} a un plazo de {years} años,\",\n f\"con una tasa de {interest} %\",\n)\nprint(f\"Cuota 
mensual a pagar: $ {monthly_fee}\")\nprint(f\"Monto total pagado: $ {total_fee}\")\nprint(f\"Intereses pagados: $ {paid_interest}\")\nprint(f\"Porcentaje que representan los intereses: {percentage_interest} %\")\n","repo_name":"LuckJMG/USM-IWI131","sub_path":"SMOJ/UVA 2/credito_hipotecario.py","file_name":"credito_hipotecario.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"75175492554","text":"# https://www.codewars.com/kata/525f3eda17c7cd9f9e000b39\n\ndef zero(*arg):\n if arg:\n return eval(f'0{arg[0]}')\n return 0\n\n\ndef one(*arg):\n if arg:\n return eval(f'1{arg[0]}')\n return 1\n\n\ndef two(*arg):\n if arg:\n return eval(f'2{arg[0]}')\n return 2\n\n\ndef three(*arg):\n if arg:\n return eval(f'3{arg[0]}')\n return 3\n\n\ndef four(*arg):\n if arg:\n return eval(f'4{arg[0]}')\n return 4\n\n\ndef five(*arg):\n if arg:\n return eval(f'5{arg[0]}')\n return 5\n\n\ndef six(*arg):\n if arg:\n return eval(f'6{arg[0]}')\n return 6\n\n\ndef seven(*arg):\n if arg:\n return eval(f'7{arg[0]}')\n return 7\n\n\ndef eight(*arg):\n if arg:\n return eval(f'8{arg[0]}')\n return 8\n\n\ndef nine(*arg):\n if arg:\n return eval(f'9{arg[0]}')\n return 9\n\n\ndef plus(arg):\n return '+' + str(arg)\n\n\ndef minus(arg):\n return '-' + str(arg)\n\n\ndef times(arg):\n return '*' + str(arg)\n\n\ndef divided_by(arg):\n return '//' + str(arg)\n\n\n# https://www.codewars.com/kata/5264d2b162488dc400000001\n\ndef spin_words(sentence):\n sentence += ' '\n list = []\n a = ''\n b = ''\n for i in sentence:\n if i == \" \":\n list.append(a)\n a = \"\"\n else:\n a += i\n for y in list:\n if len(y) >= 5:\n b += y[::-1]\n else:\n b += y\n b += \" \"\n return b[:-1]\n\n\n# https://www.codewars.com/kata/515bb423de843ea99400000a\n\n\nclass PaginationHelper:\n\n # The constructor takes in an array of items and a integer indicating\n # how many items fit within a single page\n def __init__(self, collection, items_per_page):\n self.collection = collection\n self.items_per_page = items_per_page\n self.amount = len(self.collection)\n self.pages = self.amount // self.items_per_page + 1\n\n # returns the number of items within the entire collection\n def item_count(self):\n return self.amount\n\n # returns the number of pages\n def page_count(self):\n return self.pages\n\n # returns the number of items on the current page. page_index is zero based\n # this method should return -1 for page_index values that are out of range\n def page_item_count(self, page_index):\n if page_index < self.pages:\n if page_index == self.pages - 1:\n return self.amount % self.items_per_page\n return self.items_per_page\n return -1\n\n # determines what page an item is on. 
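The `page_count` arithmetic in the `PaginationHelper` above (`self.amount // self.items_per_page + 1`) over-counts by one page whenever the collection size divides evenly, and `page_item_count` then reports `amount % items_per_page == 0` items on the real last page. A minimal standalone sketch of the usual ceiling-division fix; the function names here are illustrative, not part of the kata record:

def page_count(amount, items_per_page):
    # Ceiling division: rounds up without floats, unlike `amount // per_page + 1`.
    return -(-amount // items_per_page)

def page_item_count(amount, items_per_page, page_index):
    pages = page_count(amount, items_per_page)
    if not 0 <= page_index < pages:
        return -1                                   # out-of-range page index
    if page_index < pages - 1:
        return items_per_page                       # every page but the last is full
    return amount - items_per_page * (pages - 1)    # last page: full or partial

assert page_count(24, 6) == 4 and page_item_count(24, 6, 3) == 6   # divides evenly
assert page_count(25, 6) == 5 and page_item_count(25, 6, 4) == 1   # partial last page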
Zero based indexes.\n # this method should return -1 for item_index values that are out of range\n def page_index(self, item_index):\n if item_index >= 0:\n if item_index < self.amount:\n return item_index // self.items_per_page\n return -1\n\n\n# https://www.codewars.com/kata/51b66044bce5799a7f000003/python\nclass RomanNumerals:\n\n @staticmethod\n def to_roman(val):\n amount_1 = (val // 1000) * \"M\"\n amount_2 = \"CM\" if val % 1000 // 900 == 1 else \"\"\n amount_3 = \"D\" if val % 1000 // 500 == 1 and val % 1000 // 900 == 0 else \"\"\n amount_4 = \"CD\" if val % 500 // 400 == 1 and val % 1000 // 900 == 0 else \"\"\n amount_5 = \"\" if val % 500 // 400 == 1 else (val % 500 // 100) * \"C\"\n amount_6 = \"XC\" if val % 100 // 90 == 1 else \"\"\n amount_7 = \"L\" if val % 100 // 90 == 0 and val % 100 // 50 == 1 else \"\"\n amount_8 = \"XL\" if val % 50 // 40 == 1 and val % 100 // 90 == 0 else \"\"\n amount_9 = \"\" if val % 50 // 40 == 1 else (val % 50 // 10) * \"X\"\n amount_10 = \"IX\" if val % 10 == 9 else \"\"\n amount_11 = \"V\" if val % 10 // 5 == 1 and val % 10 // 9 == 0 else \"\"\n amount_12 = \"IV\" if val % 5 == 4 and val % 10 < 9 else \"\"\n amount_13 = \"\" if val % 5 == 4 else (val % 5) * \"I\"\n\n return ''.join(amount_1 + amount_2 + amount_3 + amount_4 + amount_5 + amount_6 + amount_7 + amount_8\n + amount_9 + amount_10 + amount_11 + amount_12 + amount_13)\n\n @staticmethod\n def from_roman(roman_num):\n number = 0\n if \"IV\" in roman_num:\n number += 4\n if \"IX\" in roman_num:\n number += 9\n if \"IV\" not in roman_num and \"V\" in roman_num:\n number += 5\n if \"IV\" not in roman_num and \"I\" in roman_num and \"IX\" not in roman_num:\n for i in roman_num:\n if i == \"I\":\n number += 1\n if \"XL\" in roman_num:\n number += 40\n if \"XC\" in roman_num:\n number += 90\n if \"X\" in roman_num and \"XL\" not in roman_num and \"XC\" not in roman_num:\n for i in roman_num:\n if i == \"X\":\n number += 10\n if \"IX\" in roman_num:\n number -= 10\n\n if \"L\" in roman_num and \"XL\" not in roman_num:\n number += 50\n if \"CD\" in roman_num:\n number += 400\n if \"CM\" in roman_num:\n number += 900\n if \"C\" in roman_num and \"CM\" not in roman_num and \"CD\" not in roman_num:\n for i in roman_num:\n if i == \"C\":\n number += 100\n if \"XC\" in roman_num:\n number -= 100\n\n if \"D\" in roman_num and \"CD\" not in roman_num:\n number += 500\n if \"M\" in roman_num:\n for i in roman_num:\n if i == \"M\":\n number += 1000\n if \"CM\" in roman_num:\n number -= 1000\n\n return number\n","repo_name":"Maelllm/hillel_python_pro","sub_path":"task_19/task_19_codewars.py","file_name":"task_19_codewars.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"40688055531","text":"import dataclasses\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom generative_models_pytorch.mnist import MNIST\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.fc1 = nn.Linear(784, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc3 = nn.Linear(256, 1)\n\n def forward(self, x):\n x = x.view(-1, 784)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n self.fc1 = nn.Linear(128, 512)\n self.fc2 = nn.Linear(512, 1024)\n self.fc3 = nn.Linear(1024, 784)\n\n def 
forward(self, x):\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n x = torch.tanh(x)\n x = x.view(-1, 1, 28, 28)\n return x\n\n\n@dataclasses.dataclass\nclass WassersteinGANTrainingOption:\n d_path: str\n g_path: str\n device: str\n n_critic: int = 5\n epochs: int = 10\n lr: float = 2e-4\n beta1: float = 0.5\n beta2: float = 0.999\n\n\nclass WassersteinGAN:\n def __init__(self):\n self.generator = Generator()\n self.discriminator = Discriminator()\n self.data = MNIST()\n\n def train(self, option: WassersteinGANTrainingOption):\n device = torch.device(option.device)\n self.generator.to(device)\n self.discriminator.to(device)\n loader = self.data.get_loader()\n g_optimizer = optim.Adam(\n self.generator.parameters(),\n lr=option.lr,\n betas=(option.beta1, option.beta2),\n )\n d_optimizer = optim.Adam(\n self.discriminator.parameters(),\n lr=option.lr,\n betas=(option.beta1, option.beta2),\n )\n\n for epoch in range(option.epochs):\n for idx, (X_real, _) in enumerate(loader):\n X_real = X_real.to(device)\n if idx % option.n_critic == 0:\n g_optimizer.zero_grad()\n noise = torch.rand(X_real.shape[0], 128, device=device)\n X_fake = self.generator(noise)\n y_fake = self.discriminator(X_fake)\n g_loss = -torch.mean(y_fake)\n g_loss.backward()\n g_optimizer.step()\n\n d_optimizer.zero_grad()\n noise = torch.rand(X_real.shape[0], 128, device=device)\n X_fake = self.generator(noise)\n X_fake = X_fake.detach()\n y_real = self.discriminator(X_real)\n y_fake = self.discriminator(X_fake)\n d_loss = -torch.mean(y_real) + torch.mean(y_fake)\n d_loss.backward()\n d_optimizer.step()\n torch.save(self.generator.state_dict(), option.g_path)\n torch.save(self.discriminator.state_dict(), option.d_path)\n","repo_name":"Catminusminus/generative-models-pytorch","sub_path":"src/generative_models_pytorch/wgan.py","file_name":"wgan.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"23366360220","text":"#Notes:\r\n# I assumed on both files that each one of the \"element type\" are seats \r\n# because I did not find references to that property on the XML file.\r\n\r\nimport sys\r\nimport json\r\nimport xml.etree.ElementTree as ET\r\n\r\n###############################################################################################\r\ndictionary_rows = {} # Dict that stores the following properties: SeatID, seatPrice, Availability, CabinClass and Element Type.\r\nid_prices = {} # Dict that stores the OfferItemID and their respective price\r\nprice_amount = [] # A List that stores the prices as a float in GBP\r\n###############################################################################################\r\n# Here I obtain the desired seatmap's path, which should be in the same folder as the script\r\n# The seadmap should be put as an argument in the terminal when you run the script (ie. 
python seatmap_parser.py seatmap2.xml)\r\npath = sys.argv[1]\r\n\r\nif path =='seatmap2.xml':\r\n #CODE FOR SEATMAP2:\r\n\r\n seatmap2 = ET.parse(path)\r\n root=seatmap2.getroot()\r\n\r\n ###############################################################################################\r\n \r\n dictionary_rows = {} # Dict that stores the following properties: SeatID, seatPrice, Availability and Element Type.\r\n id_prices = {} # Dict that stores the OfferItemID and their respective price\r\n price_amount = [] # A List that stores the prices as a float in GBP\r\n\r\n # Here I try to obtain the XML elements that contains the seat's prices and their IDs by using list comprehension\r\n prices = [child for child in root.iter() if child.tag == '{http://www.iata.org/IATA/EDIST/2017.2}ALaCarteOfferItem']\r\n\r\n # A loop that parses through the XML elements that contains the prices of the seats and\r\n # the attribute of each one, then appends every one of them to the price_amount list.\r\n for price in prices:\r\n for elements in price.iter():\r\n if elements.tag == '{http://www.iata.org/IATA/EDIST/2017.2}SimpleCurrencyPrice':\r\n price_amount.append(float(elements.text))\r\n\r\n # Here I complete the dictionary by putting the OfferItemID attribute as the key of each data value, and the\r\n # price as the value of that key.\r\n for i in range(len(price_amount)):\r\n id_prices[prices[i].attrib[\"OfferItemID\"]] = price_amount[i]\r\n\r\n ###############################################################################################\r\n #Positions:\r\n positions={}\r\n for child in root.iter():\r\n if child.tag == '{http://www.iata.org/IATA/EDIST/2017.2}Columns':\r\n if child.text==None:\r\n positions[child.attrib['Position']]='CENTER'\r\n else:\r\n positions[child.attrib['Position']]=child.text\r\n \r\n # Another variable using list comprenhension to obtain all of the rows of the seatmap, by comparing\r\n # the tagname of the XML elements to 'Row'\r\n rows_list = [child for child in root.iter() if child.tag == '{http://www.iata.org/IATA/EDIST/2017.2}Row'] \r\n\r\n # Main loop that goes through the rows obtained in rows_list, then parses along each one of the elements of those rows,\r\n # being the majority of those elements the seats, which I then obtain the data of the seatID, seatPrice (by using the\r\n # dictionary 'dictionary_rows') and their availability.\r\n # Then, I put them all on a dictionary organized by rows and their respective seats.\r\n for row in rows_list:\r\n seat_list = []\r\n for elements in row.iter():\r\n seat_dict = {}\r\n if elements.tag == '{http://www.iata.org/IATA/EDIST/2017.2}Number':\r\n row_number = elements.text\r\n elif elements.tag =='{http://www.iata.org/IATA/EDIST/2017.2}Seat':\r\n for child in elements.iter():\r\n seat_dict['Element Type']=\"Seat\"\r\n # In the \"seatemap2.xml\" I did not find any mention of the property \"Cabin class\".\r\n seat_dict['Cabin Class']='Not specified'\r\n\r\n if child.tag == '{http://www.iata.org/IATA/EDIST/2017.2}Column':\r\n columna = child.text\r\n seat_dict['seatID'] = str(row_number + columna)\r\n seat_dict['Position'] = positions[columna]\r\n elif child.tag == '{http://www.iata.org/IATA/EDIST/2017.2}OfferItemRefs':\r\n seat_dict['seatPrice'] = id_prices[child.text]\r\n seat_dict['Availability'] = True\r\n elif child.text == 'SD11' or child.text == 'SD19' or child.text=='SD22':\r\n seat_dict['Availability'] = False\r\n seat_dict['seatPrice'] = None\r\n \r\n \r\n seat_list.append(seat_dict)\r\n dictionary_rows['row_' + 
str(row_number)] = seat_list\r\n \r\n # Here I create the json file\r\n with open('seatmap2_parsed.json', 'w') as outfile:\r\n json.dump(dictionary_rows, outfile,indent = 4)\r\nelif path == 'seatmap1.xml':\r\n\r\n #CODE FOR 'SEATMAP1.XML:'\r\n\r\n seatmap1 = ET.parse(path)\r\n root = seatmap1.getroot()\r\n #Function that shifts the decimal place n times, where n is given as an argument to the function.\r\n def realprice(price, decimal):\r\n for i in range(abs(decimal)):\r\n\r\n if decimal>0:\r\n price /= 10\r\n else:\r\n price *= 10\r\n\r\n return float(price)\r\n\r\n # Another variable using list comprenhension to obtain all of the rows of the seatmap, by comparing\r\n # the tagname of the XML elements to 'RowInfo'.\r\n\r\n rows_list = [child for child in root.iter() if child.tag == '{http://www.opentravel.org/OTA/2003/05/common/}RowInfo']\r\n\r\n # Main loop that goes through the rows obtained in rows_list, then parses along each one of the elements of those rows,\r\n # being the majority of those elements the seats, which I then obtain the data of the seatID, seatPrice (by using the\r\n # dictionary 'dictionary_rows') and their availability.\r\n # Then, I put them all on a dictionary organized by rows and their respective seats.\r\n \r\n for child in rows_list:\r\n seat_list = []\r\n row_number2 = child.attrib['RowNumber']\r\n for child1 in child.iter():\r\n if child1.tag == '{http://www.opentravel.org/OTA/2003/05/common/}SeatInfo':\r\n seat_dict = {}\r\n for seats in child1.iter():\r\n if seats.tag == '{http://www.opentravel.org/OTA/2003/05/common/}Summary':\r\n seat_dict['Element type'] = 'Seat'\r\n seat_dict['Position'] = ''\r\n seat_dict['Cabin Class'] = child.attrib['CabinType']\r\n seat_dict['SeatID'] = seats.attrib['SeatNumber']\r\n availability = seats.attrib['AvailableInd']\r\n if availability == 'true':\r\n seat_dict['Available'] = True\r\n else:\r\n seat_dict['Available'] = False\r\n \r\n seat_dict['seatPrice'] = None\r\n elif seats.tag == '{http://www.opentravel.org/OTA/2003/05/common/}Features' and seats.text != 'Other_':\r\n seat_dict['Position'] = seats.text.upper()\r\n elif seats.tag == '{http://www.opentravel.org/OTA/2003/05/common/}Fee':\r\n decimalplace = int(seats.attrib['DecimalPlaces'])\r\n seat_dict['seatPrice'] = realprice(int(seats.attrib['Amount']),decimalplace)\r\n\r\n \r\n seat_list.append(seat_dict)\r\n \r\n\r\n dictionary_rows['row_' + str(row_number2)] = seat_list\r\n\r\n \r\n # Here I create the json file\r\n with open('seatmap1_parsed.json', 'w') as outfile:\r\n json.dump(dictionary_rows, outfile,indent = 4)\r\nelse:\r\n print(\"Wrong file path\")\r\n\r\n\r\n\r\n\r\n","repo_name":"lucaasmonzoon/Programming-Excercise_v2","sub_path":"APIEngineerTechnicalExercise_v2/seatmap_parser.py","file_name":"seatmap_parser.py","file_ext":"py","file_size_in_byte":8013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"29843568","text":"import flask\nimport logging\nfrom flask_pyoidc.flask_pyoidc import OIDCAuthentication\nfrom flask_pyoidc.provider_configuration import ProviderConfiguration, ClientMetadata\nfrom flask_pyoidc.user_session import UserSession\nfrom flask import Flask, jsonify\n\n\napp = Flask(__name__)\n\n\n# Our OIDC Provider\nISSUER = 'https://login.elixir-czech.org/oidc/'\n\n# This is portal-dev oidc client\nCLIENT = 'b649ab06-8633-4aaa-943e-fe97882c0039'\n\n\n# Enter the client secret for portal-dev client here!\nCLIENT_SECRET = 'CLIENT SECRET HERE'\n\n# Random name\nPROVIDER_NAME = 
'provider1'\n\n# We configurate our provider with the fields from above\nPROVIDER_CONFIG = ProviderConfiguration(issuer=ISSUER, client_metadata=ClientMetadata(CLIENT, CLIENT_SECRET))\n\n\n# Make sure, that this server name (portal-dev.denbi.de) is 127.0.0.1 in your /etc/localhost\n# If you want to start this on standard port 80, you'll have to run this flask app as root.\napp.config.update({'SERVER_NAME': 'portal-dev.denbi.de',\n 'SECRET_KEY': 'dev_kefgdsgfdfsgy', # CHANGE THIS\n 'SESSION_PERMANENT': True, # turn on flask session support\n 'PERMANENT_SESSION_LIFETIME': 2592000, # session time in seconds (30 days)\n 'DEBUG': True})\n\n\n# Create auth object with our defined provider and link it with flask app\nauth = OIDCAuthentication({PROVIDER_NAME : PROVIDER_CONFIG}, app)\n\n# When simply pointing the browser to http://portal-dev.denbi.de, directly return token contents on screen.\n# Route to ELIXIR AAI and authenticate if user is not logged in.\n@app.route('/')\n@auth.oidc_auth(PROVIDER_NAME)\ndef login1():\n user_session = UserSession(flask.session)\n return jsonify(access_token=user_session.access_token, id_token=user_session.id_token, userinfo=user_session.userinfo)\n\n# Redirect to elixir log-out page\n@app.route('/logout')\n@auth.oidc_logout\ndef logout():\n return \"You've been successfully logged out!\"\n\n# If something is messed up\n@auth.error_view\ndef error(error=None, error_description=None):\n return jsonify({'error': error, 'message': error_description})\n\n# Be careful when running in production, use Apache with HAProxy and SSL in front.\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n auth.init_app(app)\n app.run()\n\n","repo_name":"awalende/flask_elixiraai_example","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"3316192190","text":"#!/usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import Joy\nfrom crazyflie_driver.srv import UpdateParams\nfrom std_srvs.srv import Empty\n\nclass Controller():\n def __init__(self, use_controller, joy_topic):\n rospy.wait_for_service('update_params')\n rospy.loginfo(\"found update_params service\")\n self._update_params = rospy.ServiceProxy('update_params', UpdateParams)\n\n # mellenger controller\n rospy.set_param(\"stabilizer/controller\", 2)\n self._update_params([\"stabilizer/controller\"])\n rospy.set_param(\"stabilizer/estimator\", 2)\n self._update_params([\"stabilizer/estimator\"])\n\n ###XY\n #rospy.set_param(\"ctrlMel/kp_xy\", .4) #default 0.4\n #self._update_params([\"ctrlMel/kp_xy\"])\n #rospy.set_param(\"ctrlMel/kd_xy\", 0.2) #default 0.2\n #self._update_params([\"ctrlMel/kd_xy\"])\n #rospy.set_param(\"ctrlMel/ki_xy\", 0.0) #default 0.05\n #self._update_params([\"ctrlMel/ki_xy\"])\n ### Z\n #rospy.set_param(\"ctrlMel/kp_z\", 1.25) #default 1.25\n #self._update_params([\"ctrlMel/kp_z\"])\n #rospy.set_param(\"ctrlMel/kd_z\", 0.4) #default 0.4\n #self._update_params([\"ctrlMel/kd_z\"])\n #rospy.set_param(\"ctrlMel/ki_z\", 0.0) #default 0.05\n #self._update_params([\"ctrlMel/ki_z\"])\n\n # Attitude\n # 10 and 30 for P,D are good for shaking\n rospy.set_param(\"ctrlMel/kR_xy\", 30000) #Pdefault: 70000 30000\n self._update_params([\"ctrlMel/kR_xy\"])\n rospy.set_param(\"ctrlMel/kw_xy\", 40000) #Ddefualt: 20000 35000\n self._update_params([\"ctrlMel/kw_xy\"])\n 
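Every gain in this node is pushed with the same two-step pattern: `rospy.set_param` followed by a one-element `UpdateParams` service call. A small helper keeps the pair in sync; this is a sketch only, `set_firmware_param` is hypothetical and assumes the same `rospy` API and the service proxy this node already creates in `__init__`:

import rospy

def set_firmware_param(update_params, name, value):
    # Write the ROS parameter, then push just that key to the Crazyflie firmware.
    # `update_params` is the rospy.ServiceProxy('update_params', UpdateParams)
    # proxy built in Controller.__init__ above.
    rospy.set_param(name, value)
    update_params([name])

# Usage inside Controller.__init__ (illustrative):
#   set_firmware_param(self._update_params, "ctrlMel/kR_xy", 30000)
#   set_firmware_param(self._update_params, "ctrlMel/kw_xy", 40000)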
rospy.set_param(\"ctrlMel/ki_m_xy\", 0.0) #Idefualt: 0\n self._update_params([\"ctrlMel/ki_m_xy\"])\n\n rospy.set_param(\"ctrlMel/mass\", 0.04)\n self._update_params([\"ctrlMel/mass\"])\n\n #rospy.set_param(\"ctrlMel/massThrust\", 130000)\n #self._update_params([\"ctrlMel/massThrust\"])\n\n rospy.loginfo(\"waiting for emergency service\")\n rospy.wait_for_service('emergency')\n rospy.loginfo(\"found emergency service\")\n self._emergency = rospy.ServiceProxy('emergency', Empty)\n\n if use_controller:\n rospy.loginfo(\"waiting for land service\")\n rospy.wait_for_service('land')\n rospy.loginfo(\"found land service\")\n self._land = rospy.ServiceProxy('land', Empty)\n\n rospy.loginfo(\"waiting for takeoff service\")\n rospy.wait_for_service('takeoff')\n rospy.loginfo(\"found takeoff service\")\n self._takeoff = rospy.ServiceProxy('takeoff', Empty)\n else:\n self._land = None\n self._takeoff = None\n\n # subscribe to the joystick at the end to make sure that all required\n # services were found\n self._buttons = None\n rospy.Subscriber(joy_topic, Joy, self._joyChanged)\n\n def _joyChanged(self, data):\n for i in range(0, len(data.buttons)):\n if self._buttons == None or data.buttons[i] != self._buttons[i]:\n if i == 0 and data.buttons[i] == 1 and self._land != None:\n self._land()\n if i == 1 and data.buttons[i] == 1:\n self._emergency()\n if i == 2 and data.buttons[i] == 1 and self._takeoff != None:\n self._takeoff()\n if i == 4 and data.buttons[i] == 1:\n value = int(rospy.get_param(\"ring/headlightEnable\"))\n if value == 0:\n rospy.set_param(\"ring/headlightEnable\", 1)\n else:\n rospy.set_param(\"ring/headlightEnable\", 0)\n self._update_params([\"ring/headlightEnable\"])\n print(not value)\n\n self._buttons = data.buttons\n\nif __name__ == '__main__':\n rospy.init_node('crazyflie_demo_controller', anonymous=True)\n use_controller = rospy.get_param(\"~use_crazyflie_controller\", False)\n joy_topic = rospy.get_param(\"~joy_topic\", \"joy\")\n controller = Controller(use_controller, joy_topic)\n rospy.spin()\n","repo_name":"zhubiii/PogoDrone","sub_path":"pogo_drone/scripts/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"13866412545","text":"from django.db import models\n\n# Create your models here.\nfrom gestionarHorario.models import Horario\nimport unicodedata\n\n\ndef remove_accents(input_str):\n nfkd_form = unicodedata.normalize('NFKD', input_str)\n return u\"\".join([c for c in nfkd_form if not unicodedata.combining(c)])\n\n# Create your models here.\ndef substring_after(s, delim):\n return s.rpartition(delim)[-1]\n\ndef upload_location_archive(instance, filename):\n extension = substring_after(filename, '.')\n return 'archive/%s.%s' % (remove_accents(instance.horario.codigo), extension)\n\nclass EvidenciasxHorario(models.Model):\n ESTADOS = [\n ('0', 'Eliminado'),\n ('1', 'Activo'),\n ('2', 'Inactivo'),\n ]\n estado = models.CharField(max_length=2, choices=ESTADOS, default='1', null=True, blank=True)\n descripcion = models.CharField(max_length=300, default='', null=True, blank=True)\n concepto = models.CharField(max_length=20, default='', null=True, blank=True)\n horario = models.ForeignKey(Horario, on_delete=models.DO_NOTHING, null=False)\n archivo = models.FileField(null=True, blank=True, upload_to='archive/')\n fecha_creacion = 
models.DateTimeField(auto_now_add=True)","repo_name":"software3pucp/demosoftware3","sub_path":"gestionarEvidencias/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"13675202129","text":"from Wiki_Bot import getLogContents\nimport wikitextparser as wikiParse\nimport bs4\nimport datetime\nimport time\n\nheartRateLog_page = 'Heart Rate Log'\n\n\ndef get_hrLog_list():\n hrLogContents = getLogContents(heartRateLog_page, clearAfter=True)\n\n parsed_hrLog = bs4.BeautifulSoup(hrLogContents, 'lxml')\n hrLog_RawText = parsed_hrLog.page.text\n hrLog_wikiParsed = wikiParse.parse(hrLog_RawText)\n\n hrLog_RawList = hrLog_wikiParsed.lists()[0] # extract list as an array\n\n hrLog_ArrayList = hrLog_RawList.items\n\n return hrLog_ArrayList\n\n\ndef save_hrLogReading(hrLog_ArrayList):\n logArchive_path = './Log Archive/'\n dateNow = str(datetime.date.today())\n timeNow = str(time.strftime('%H_%M'))\n\n logFile_Name = logArchive_path + 'Heart Monitor Log_' + dateNow + timeNow + '.csv'\n\n logFile = open(logFile_Name, 'w')\n\n for entry in hrLog_ArrayList:\n logFile.write(entry + '\\n')\n\n logFile.close()\n\n return logFile_Name\n\n\nprint(save_hrLogReading(get_hrLog_list()))\n","repo_name":"SamVarney/lifewiki_mediawiki","sub_path":"heartRateMonitor_serverSide.py","file_name":"heartRateMonitor_serverSide.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"10647305764","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 13 13:16:40 2017\r\n\r\n@author: 王飞虎\r\n\"\"\"\r\nbackground_image_filename = 'bg.jpg'\r\n\r\nimport numpy as py\r\nimport math\r\nimport pygame\r\nimport matplotlib.pyplot as plt\r\nfrom pygame.locals import *\r\nfrom sys import exit\r\nx=0\r\ny=0\r\nf=[0]\r\nz=[0]\r\nv=float(input(\"请输入初始速度:\"))\r\nt=0.00001 \r\nangel=45\r\nvx=v*math.cos(math.radians(angel))\r\nvy=v*math.sin(math.radians(angel))\r\na=4*10**-5\r\nvw=-2.78\r\n\r\n\r\npygame.init()\r\n \r\nscreen = pygame.display.set_mode((500, 313), 0, 32)\r\npygame.display.set_caption(\"Canon Shell ATTACK\")\r\nbackground = pygame.image.load(background_image_filename).convert()\r\ncannon = pygame.draw.polygon(pygame.surface, (255,0,0), [(310,10),(305, 10),(305,20),(310,20),(307.5, 25)])\r\nwhile True:\r\n screen.blit(background,(0,0))\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n exit()\r\n \r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_UP:\r\n angel=angel+1\r\n elif event.key == pygame.K_DOWN:\r\n angel=angel-1\r\nwhile y>=0:\r\n v=py.sqrt(vx**2+vy**2)\r\n vx=vx-a*(vx-vw)*py.sqrt(v**2+vw**2-2*v*vw*vy/v)*t\r\n vy=vy-9.8*t-a*vy*py.sqrt(v**2+vw**2-2*v*vw*vy/v)*t\r\n x1=x\r\n y1=y\r\n x=x+vx*t\r\n y=y+vy*t\r\n b=-y1/y\r\n t=t+0.00001\r\n f.append(x)\r\n z.append(y)\r\n l=(x+b*x1)/(b+1)\r\n\r\n\r\nplt.figure(figsize=(10,5))\r\nplt.plot(f,z,label=\"$Canon Shell Trajectory$\",marker='.',color='red')\r\nplt.xlabel(\"X/m\")\r\nplt.ylabel(\"Y/m\")\r\nplt.title(\"Canon Shell Trajectory\")\r\nplt.ylim(0,10000)\r\nplt.legend()\r\nplt.show()\r\n","repo_name":"lopo70/Computational_Physics_N2015301020170","sub_path":"Exercise 05/pygame.py","file_name":"pygame.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"22735044040","text":"# Definition for singly-linked 
list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def sortList(self, head: ListNode) -> ListNode:\n item = head\n l = []\n while(item is not None):\n l.append(item.val)\n item = item.next\n l.sort()\n item = head\n for i in range(len(l)):\n item.val = l[i]\n item = item.next\n return head","repo_name":"littlewhiteJ/LeetcodeSolutions","sub_path":"148.py","file_name":"148.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"10981992792","text":"import pandas as pd \nfrom Parser import ShiftXParser, expVt\nimport argparse\nimport mdtraj as md \nimport numpy as np\nfrom os import listdir\nfrom os.path import join as jn\nfrom tqdm import tqdm\nimport pickle as pkl\n\nparser = argparse.ArgumentParser(description='Gets the table of aa temperature coefficients')\nparser.add_argument('f', type=str, nargs=2,\n help='Two folders with SHIFTX2 results')\nparser.add_argument('t', type=str, nargs=3,\n help='Two file containing trajectories and one file containing topology')\nparser.add_argument('e', type=str,\n help='Experimental data')\nparser.add_argument('o', type=str,\n help='output folder') \nparser.add_argument('-T', type=float, default=20,\n help='Difference of temperature during the computation')\nparser.add_argument('-s', action='store_true', help='Computes also standard deviation')\n\n\nargs = parser.parse_args()\n# N_CHUNKS = 200\n# rmsd = []\n# for chunk1, chunk2 in tqdm(zip(md.iterload(args.t[0], top=args.t[2], chunk=N_CHUNKS), md.iterload(args.t[1], top=args.t[2], chunk=N_CHUNKS))):\n# for i in range(N_CHUNKS):\n# rmsd.append(md.rmsd(chunk1, chunk2, frame=i)[i])\n# rmsd = np.array(rmsd)\n\n# np.save(jn(args.o, 'rmsd.npy'), rmsd)\n# weights = np.exp(-rmsd)/np.sum(np.exp(-rmsd))\n# np.save(jn(args.o, 'weights.npy'), weights)\n\nweights = np.load(jn(args.o, 'weights.npy'))\n\n# def get_frames(d):\n# n = len(listdir(d))\n# return [jn(d, 'frame_%s.pdb.cs'% i) for i in range(n-10000, n)]\n\n# ts30 = ShiftXParser(get_frames(args.f[0]), ['H'], range(253), avg=False)\n# pkl.dump(ts30, open(jn(args.o, 'ts30_reweight.spy'), 'wb'))\n# ts50 = ShiftXParser(get_frames(args.f[1]), ['H'], range(253), avg=False)\n# pkl.dump(ts50, open(jn(args.o, 'ts50_reweight.spy'), 'wb'))\nts30 = pkl.load(open(jn(args.o, 'ts30_reweight.spy'), 'rb'))\nts50 = pkl.load(open(jn(args.o, 'ts50_reweight.spy'), 'rb'))\ntcoff = []\nfor i in range(ts50.shifts.shape[0]):\n tcoff.append(np.sum(np.multiply(weights, ts50.shifts[i] - ts30.shifts[i])))\n\nif args.s:\n std = np.std((ts50.shifts-ts30.shifts)/args.T*10**3, axis=-1)\n \ntcoff = np.array(tcoff)/args.T*10**3\nnp.save(jn(args.o, 'tcoff.npy'), tcoff)\n\n# tcoff = np.load(jn(args.o, 'tcoff.npy'))\ndH = ts30.df.copy()\ndH['TCOFF'] = tcoff\nif args.s:\n dH['STD'] = std\n\nedh = np.genfromtxt(args.e, delimiter=',')\nedh[edh== 0] = np.nan\nindexes = np.where(~np.isnan(edh)==True)[0]\nedh = edh[~np.isnan(edh)]\ndh_exp = pd.DataFrame({'NUM': indexes, 'TCOFF': edh})\nexpVt(dh_exp, dH, jn(args.o, 'dH.png'), name='TCOFF', axisstring='Temp. 
coefficient', offset=0, inf=5, sup=-13)\n\nL=[40, 41, 71, 74, 122, 123, 161, 195]\nfor elt in L:\n line = dH.loc[dH['NUM'] == elt-1]\n if not args.s:\n print(line['RES'].values[0]+str(line['NUM'].values[0]+1),line['TCOFF'].values[0])\n else:\n print(line['RES'].values[0]+str(line['NUM'].values[0]+1),line['TCOFF'].values[0], line['STD'].values[0])\n\n","repo_name":"agheeraert/Shiftx_analysis","sub_path":"reweight_tcoff.py","file_name":"reweight_tcoff.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"29425593203","text":"from tkinter import *\nfrom tkinter.ttk import *\nimport sys\nfrom tkinter.scrolledtext import ScrolledText\nfrom tkinter import filedialog, messagebox, _setit\nimport os, warnings, pandas as pd\nimport threading\nimport multiprocessing\nfrom queue import Empty\nfrom PIL import Image, ImageTk\nimport webbrowser\nfrom opendatabim.report import Report\nfrom datetime import datetime, timedelta\nimport tkinter.font as tkFont\n\nwarnings.simplefilter(action='ignore', category=UserWarning)\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=pd.errors.DtypeWarning)\n\nclass PrintLogger(object):\n\n def __init__(self, textbox):\n self.textbox = textbox\n\n def write(self, text):\n self.textbox.configure(state=\"normal\")\n self.textbox.insert(\"end\", text)\n self.textbox.see(\"end\")\n self.textbox.configure(state=\"disabled\")\n\n def flush(self):\n pass\n\n\nclass ReportAPP(Tk):\n def __init__(self):\n\n Tk.__init__(self)\n\n try:\n self.base_path = sys._MEIPASS\n except Exception:\n self.base_path = os.path.abspath(\".\")\n\n icon = os.path.join(self.base_path, 'img', 'icon.png')\n icon = PhotoImage(file=icon)\n self.call('wm', 'iconphoto', self._w, icon)\n\n theme = os.path.join(self.base_path, \"theme\", \"sv.tcl\")\n self.call(\"source\", theme)\n self.call(\"set_theme\", \"light\")\n\n\n self.title('OpenDataBIM')\n\n self.setupGUI()\n self.setStyles()\n\n def setupGUI(self):\n\n self.setGeometry()\n\n self.resizable(False, False)\n\n mainFrame = Frame(self)\n mainFrame.pack(side=LEFT, fill=BOTH, expand=1)\n\n frameL = Frame(mainFrame)\n frameR = Frame(mainFrame)\n\n frameL.grid(row=0, column=0, padx=10, pady=10, ipadx=0, ipady=0, sticky=\"NW\")\n frameR.grid(row=0, column=1, padx=10, pady=8, ipadx=0, ipady=0, sticky=\"NE\")\n\n header = Label(frameL)\n\n header.grid(row=0, column=0, padx=10, sticky=\"NW\")\n headerText = Label(header)\n headerText.grid(row=0, column=0, padx=10, sticky=\"W\")\n\n\n Label(headerText,\n text=\"Checking BIM models\",\n style=\"Header1.TLabel\"\n ).grid(\n row=0,\n column=0,\n sticky=\"W\"\n )\n\n Label(headerText,\n text=\"Revit and IFC projects\",\n style=\"Header2.TLabel\"\n ).grid(\n row=1,\n column=0,\n sticky=\"W\"\n )\n\n\n Link = Label(headerText,\n text=\"Read more about the check too\",\n style=\"Link.TLabel\",\n cursor=\"hand2\"\n )\n Link.grid(\n row=2,\n column=0,\n sticky=\"W\"\n )\n Link.bind(\"\",\n lambda _: webbrowser.open_new(\"https://opendatabim.io/index.php/quality-of-revit-and-ifc-projects/\")\n )\n odbLogo = os.path.join(self.base_path, 'img', 'odb.png')\n odbLogo = Image.open(odbLogo)\n odbLogo = odbLogo.resize((int(612*0.19), int(422*0.19)))\n\n odbLogo = ImageTk.PhotoImage(odbLogo)\n\n headerLogo = Label(header, image=odbLogo, cursor=\"hand2\")\n headerLogo.image = odbLogo\n headerLogo.grid(\n row=0,\n column=1,\n sticky=\"W\")\n\n headerLogo.bind(\"\", 
lambda _: webbrowser.open_new(\"https://opendatabim.io/\"))\n\n\n Label(frameL, text=\"📁 Path to folder with Revit or IFC files\", style=\"Option.TLabel\").grid(\n row=1,\n column=0,\n padx=10,\n pady=(7,0),\n sticky=\"W\",\n\n )\n\n pathToFolder = Entry(frameL,style=\"Placeholder.TEntry\")\n pathToFolder.grid(\n row=2,\n column=0,\n padx=10,\n sticky=\"WE\"\n )\n\n Button(frameL, text=\"Select Folder\", width=15, command=self.get_pathToFolder, style=\"Button.TButton\").grid(\n row=2,\n column=1,\n sticky=\"W\"\n )\n\n\n Label(frameL, text=\"🗃 Check all the projects in the subfolders?\", style=\"Option.TLabel\").grid(\n row=3,\n column=0,\n padx=10,\n pady=(7, 0),\n sticky=\"W\",\n\n )\n varCheckSubfolders = StringVar()\n varCheckSubfolders.set(\"No\")\n checkSubfolders = OptionMenu(frameL, varCheckSubfolders, \"No\", \"Yes\", \"No\", style=\"Dropdown.TMenubutton\")\n checkSubfolders.grid(\n row=4,\n column=0,\n padx=10,\n sticky=\"WE\"\n )\n\n\n\n\n\n Label(frameL, text=\"📁 Results output folder\", style=\"Option.TLabel\").grid(\n row=5,\n column=0,\n padx=10,\n pady=(10, 2),\n sticky=\"W\"\n )\n pathOutputFolder = Entry(frameL, style=\"Placeholder.TEntry\")\n pathOutputFolder.grid(\n row=6,\n column=0,\n padx=10,\n pady=2,\n sticky=\"WE\"\n )\n Button(frameL, text=\"Select Folder\", width=15, style=\"Button.TButton\", command=self.get_pathOutputFolder).grid(\n row=6,\n column=1,\n sticky=\"W\"\n )\n\n Label(frameL, text=\"📁 Path to noBIM converter files folder\", style=\"Option.TLabel\").grid(\n row=7,\n column=0,\n padx=10,\n pady=(10, 2),\n sticky=\"W\"\n )\n\n pathConverterFolder = Entry(frameL, style=\"Placeholder.TEntry\")\n pathConverterFolder.grid(\n row=8,\n column=0,\n padx=10,\n sticky=\"WE\"\n )\n\n Button(frameL, text=\"Select Folder\", width=15, style=\"Button.TButton\", command=self.get_pathConverterFolder).grid(\n row=8,\n column=1,\n sticky=\"W\"\n )\n\n imgConverter = os.path.join(self.base_path, 'img', 'converter.png')\n imgConverter = Image.open(imgConverter)\n imgConverter = imgConverter .resize((int(2034*0.25), int(687*0.25)))\n imgConverter = ImageTk.PhotoImage(imgConverter)\n placeImgConverter = Label(frameL, image=imgConverter, cursor=\"hand2\")\n placeImgConverter.image = imgConverter\n placeImgConverter.grid(\n row=9,\n column=0,\n columnspan=2,\n sticky=\"W\",\n padx=10,\n pady=(10, 0),\n )\n\n placeImgConverter.bind(\"\", lambda _: webbrowser.open_new(\"https://opendatabim.io/\"))\n\n Label(frameR, text=\"📑 Excel spreadsheet with checking parameters\", style=\"Option.TLabel\").grid(\n row=1,\n column=0,\n padx=10,\n pady=(0, 2),\n sticky=\"W\"\n )\n\n pathFileExcel = Entry(frameR, style=\"Placeholder.TEntry\")\n pathFileExcel.grid(\n row=2,\n column=0,\n padx=10,\n sticky=\"WE\"\n )\n\n Button(frameR, text=\"Select File\", width=15, command=self.get_pathFileExcel, style=\"Button.TButton\").grid(\n row=2,\n column=1,\n sticky=\"W\"\n )\n\n Label(frameR, text=\"📁 Path to PDF_Sources folder\", style=\"Option.TLabel\").grid(\n row=3,\n column=0,\n padx=10,\n pady=(10, 2),\n sticky=\"W\"\n )\n pathPdfSources = Entry(frameR, font=tkFont.Font(family='Helvetica', size=36, weight='bold'))\n pathPdfSources.grid(\n row=4,\n column=0,\n padx=10,\n pady=2,\n sticky=\"WE\"\n )\n Button(frameR, text=\"Select Folder\", width=15, command=self.get_pathPdfSources, style=\"Button.TButton\").grid(\n row=4,\n column=1,\n sticky=\"W\"\n )\n\n Label(frameR, text=\"📘 What projects are checked?\", style=\"Option.TLabel\", width=55).grid(\n row=5,\n column=0,\n padx=10,\n pady=(10, 
2),\n sticky=\"W\"\n )\n varProjectType = StringVar()\n varProjectType.set(\"Revit\")\n projectType = OptionMenu(frameR, varProjectType, \"Revit\", \"Revit\", \"IFC\", style=\"Dropdown.TMenubutton\", command=self.set_groupingParam)\n projectType.grid(\n row=6,\n column=0,\n padx=10,\n sticky=\"WE\"\n )\n\n Label(frameR, text=\"Grouping Parameter\", style=\"Option.TLabel\", width=55).grid(\n row=7,\n column=0,\n padx=10,\n pady=(10, 2),\n sticky=\"W\"\n )\n\n varGroupingParam = StringVar()\n varGroupingParam.set(\"Category\")\n groupingParam = OptionMenu(frameR, varGroupingParam, \"Category\", \"Category\", \"Type Name\", style=\"Dropdown.TMenubutton\")\n groupingParam.grid(\n row=8,\n column=0,\n padx=10,\n sticky=\"WE\"\n )\n\n self.next_check = None\n Label(frameR, text=\"⏱ Run a check at the same time every 24 hours?\", style=\"Option.TLabel\", width=55).grid(\n row=9,\n column=0,\n padx=10,\n pady=(10, 2),\n sticky=\"W\"\n )\n\n varCheckEvery24Hours = StringVar()\n varCheckEvery24Hours.set(\"No\")\n checkEvery24Hours = OptionMenu(frameR, varCheckEvery24Hours, \"No\", \"Yes\", \"No\", style=\"Dropdown.TMenubutton\")\n checkEvery24Hours.grid(\n row=10,\n column=0,\n padx=10,\n sticky=\"WE\"\n )\n\n progressBar = Progressbar(frameR, orient=HORIZONTAL, mode='indeterminate', length=280)\n progressBar.grid(\n row=11,\n column=0,\n padx=10,\n pady=(10,0),\n sticky=\"WE\"\n )\n\n infoPlace = Label(frameR)\n infoPlace.grid(\n row=12,\n column=0,\n columnspan=2,\n padx=10,\n pady=(25, 0),\n sticky=\"W\"\n )\n\n\n imgODB = os.path.join(self.base_path, 'img', 'logo.png')\n\n imgODB = Image.open(imgODB)\n\n imgODB = ImageTk.PhotoImage(imgODB.resize((int(555*0.4), int(125*0.4))))\n\n placeImgODB = Label(infoPlace, image=imgODB, cursor=\"hand2\")\n placeImgODB.image = imgODB\n placeImgODB.grid(\n row=1,\n column=0,\n sticky=\"W\"\n )\n\n placeImgODB.bind(\"\", lambda _: webbrowser.open_new(\"https://opendatabim.io/\"))\n\n infoText = Label(infoPlace, text=\"Open application code\\nand parameterizable output\", style=\"Info.TLabel\", cursor = \"hand2\")\n infoText.grid(\n row=1,\n column=1,\n sticky=\"W\",\n padx=(10, 0)\n )\n infoText.bind(\"\", lambda _: webbrowser.open_new(\"https://github.com/OpenDataBIM/Checking-the-quality-of-Revit-and-IFC-projects\"))\n\n buttonStart = Button(frameR, text=\"Start\", width=15, command=lambda: self.startWork(\"button\"), style=\"Button.TButton\")\n buttonStart.grid(\n row=12,\n column=1,\n pady=(25,0),\n sticky=\"W\"\n )\n frameLog = Label(frameR)\n frameLog.grid(row=13, column=0, padx=10, sticky=\"W\")\n widgetLog = ScrolledText(frameLog, width=70, height=7, font=(\"Consolas\", \"10\", \"normal\"), )\n widgetLog.grid(column=0,\n row=0,\n sticky='W',\n padx=10,\n pady=10,\n )\n widgetLog.config(state=DISABLED)\n logger = PrintLogger(widgetLog)\n sys.stdout = logger\n sys.stderr = logger\n\n helpFrame = Frame(frameR)\n helpFrame.grid(row=15, column=0, padx=10, sticky=\"W\")\n email = Label(helpFrame, text = \"📧 Please send an email to info@opendatabim.io if you come across\\nany issues or errors\", cursor=\"hand2\", style=\"About.TLabel\")\n email.grid(\n row=0,\n column=0,\n columnspan=2,\n sticky=\"W\",\n pady=(5, 30)\n )\n\n email.bind(\"\",\n lambda _: webbrowser.open_new(\"mailto:info@opendatabim.io\")\n )\n\n self.pathToFolder = pathToFolder\n self.pathOutputFolder = pathOutputFolder\n self.pathConverterFolder = pathConverterFolder\n self.pathFileExcel = pathFileExcel\n self.pathPdfSources = pathPdfSources\n self.varProjectType = varProjectType\n 
self.varGroupingParam = varGroupingParam\n self.varCheckEvery24Hours = varCheckEvery24Hours\n self.varCheckSubfolders = varCheckSubfolders\n self.groupingParam = groupingParam\n self.progressBar = progressBar\n self.buttonStart = buttonStart\n\n return\n\n def set_groupingParam(self, e):\n if e == \"Revit\":\n self.varGroupingParam.set('Category')\n self.groupingParam['menu'].delete(0, 'end')\n\n for choice in ('Category', 'Type Name'):\n self.groupingParam['menu'].add_command(label=choice, command=_setit(self.varGroupingParam, choice))\n\n elif e == \"IFC\":\n self.varGroupingParam.set('IfcEntity')\n self.groupingParam['menu'].delete(0, 'end')\n\n for choice in ('IfcEntity', 'ObjectType'):\n self.groupingParam['menu'].add_command(label=choice, command=_setit(self.varGroupingParam, choice))\n\n return\n\n def get_pathToFolder(self):\n path = filedialog.askdirectory(\n initialdir=self.pathToFolder.get()\n )\n if path:\n path = os.path.normpath(path)\n self.pathToFolder.delete(0, END)\n self.pathToFolder.insert(0, path)\n\n if not self.pathPdfSources.get():\n self.pathPdfSources.insert(0, os.path.join(path, 'PDF_Sources'))\n if not self.pathOutputFolder.get():\n self.pathOutputFolder.insert(0, path)\n\n return\n\n def get_pathPdfSources(self):\n path = filedialog.askdirectory(initialdir=self.pathPdfSources.get())\n if path:\n path = os.path.normpath(path)\n self.pathPdfSources.delete(0, END)\n self.pathPdfSources.insert(0, path)\n\n return\n\n def get_pathFileExcel(self):\n path = filedialog.askopenfilename(defaultextension='.xlsx',\n filetypes=[(\"xlsx\", \"*.xlsx\"), (\"All files\", \"*.*\")])\n if path:\n path = os.path.normpath(path)\n self.pathFileExcel.delete(0, END)\n self.pathFileExcel.insert(0, path)\n\n return\n\n def get_pathOutputFolder(self):\n path = filedialog.askdirectory(initialdir=self.pathOutputFolder.get())\n if path:\n path = os.path.normpath(path)\n self.pathOutputFolder.delete(0, END)\n self.pathOutputFolder.insert(0, path)\n\n return\n\n def get_pathConverterFolder(self):\n path = filedialog.askdirectory(initialdir=self.pathConverterFolder.get())\n if path:\n path = os.path.normpath(path)\n self.pathConverterFolder.delete(0, END)\n self.pathConverterFolder.insert(0, path)\n return\n\n def setStyles(self):\n\n style = Style()\n\n\n style.configure('Header1.TLabel', font=(\"Poppins\", 24, \"bold\"), foreground=\"black\")\n style.configure('Header2.TLabel', font=(\"Poppins\", 12, \"normal\"))\n style.configure('Dropdown.TMenubutton', font=(\"Poppins\", 10, \"normal\"), anchor = 'w')\n style.configure('Option.TLabel', font=(\"Poppins\", 10, \"bold\"), padding=(0,3,0,0))\n style.configure('Placeholder.TEntry', background=\"gray\", font=(\"Poppins\", 20, \"normal\"))\n style.configure('Button.TButton', font=(\"Poppins\", 10, \"normal\"))\n style.configure('Link.TLabel', font=(\"Poppins\", 10, \"normal\", \"underline\"), foreground=\"blue\")\n style.configure('Info.TLabel', font=(\"Poppins\", 11, \"bold\"))\n style.configure('TProgressbar')\n style.configure('About.TLabel', font=(\"Poppins\", 10, \"normal\"))\n\n def getBestGeometry(self):\n\n ws = self.winfo_screenwidth()\n hs = self.winfo_screenheight()\n\n\n self.w = w = ws / 1.07\n self.h = h = hs / 1.55\n x = (ws / 2) - (w / 2)\n y = (hs / 2) - (h / 2)\n g = '%dx%d+%d+%d' % (w, h, x, y)\n\n return g\n\n def setGeometry(self):\n self.winsize = self.getBestGeometry()\n self.geometry(self.winsize)\n return\n\n def startWork(self, initiator):\n if not all([\n self.pathToFolder.get(),\n self.pathOutputFolder.get(),\n 
self.pathConverterFolder.get(),\n self.pathFileExcel.get(),\n self.pathPdfSources.get(),\n ]):\n messagebox.showerror(message=\"Please, select all parameters!\")\n return\n\n if self.next_check and initiator == \"button\":\n print(\"The previous check schedule has been canceled!\")\n self.after_cancel(self.next_check)\n self.next_check = None\n\n if self.varCheckEvery24Hours.get() == \"Yes\":\n now = datetime.now()\n next_check_time = now + timedelta(days=1)\n print(next_check_time.strftime(\"Next check at %Y-%m-%d %H:%M:%S\"))\n self.next_check = self.after(24*60*60*1000, lambda: self.startWork(\"schedule\"))\n\n self.buttonStart[\"state\"] = DISABLED\n self.Thread_ProgressBar = threading.Thread()\n self.Thread_ProgressBar.__init__(target=self.progressBar.start())\n self.Thread_ProgressBar.start()\n\n data = dict(\n pathToFolder=self.pathToFolder.get(),\n pathOutputFolder=self.pathOutputFolder.get(),\n pathConverterFolder=self.pathConverterFolder.get(),\n pathFileExcel=self.pathFileExcel.get(),\n pathPdfSources=self.pathPdfSources.get(),\n varProjectType=self.varProjectType.get(),\n varGroupingParam=self.varGroupingParam.get(),\n varCheckEvery24Hours=self.varCheckEvery24Hours.get(),\n varCheckSubfolders=self.varCheckSubfolders.get(),\n )\n\n self.queue = multiprocessing.Queue()\n proc = multiprocessing.Process(target=Report, args=(self.queue, data), daemon=True)\n proc.start()\n self.check()\n\n\n return\n\n def check(self):\n try:\n data = self.queue.get(block=False)\n except Empty:\n pass\n\n else:\n if data == 'done':\n self.progressBar.stop()\n self.buttonStart[\"state\"] = NORMAL\n return\n\n print(data)\n\n finally:\n self.after(1000, self.check)\n\ndef main():\n app = ReportAPP()\n\n app.mainloop()\n\n\nif __name__ == \"__main__\":\n multiprocessing.freeze_support()\n main()\n","repo_name":"OpenDataBIM/Checking-the-quality-of-Revit-and-IFC-projects","sub_path":"Files for building an EXE application with UI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20128,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"28"} +{"seq_id":"36639508776","text":"import requests\nfrom config import api_key\ncountry_code = 'US'\n\n\ndef kelvin_to_fahrenheit(temp):\n #(K − 273.15) × 9/5 + 32 = °F.\n return ((temp - 273.15) * 9/5 + 32)\n\ndef get_city(zip_code):\n zip_response = requests.get(f'http://api.openweathermap.org/geo/1.0/zip?zip={zip_code}%2CUS&appid={api_key}')\n if zip_response.status_code ==200:\n zip_data = zip_response.json()\n zip_code = zip_data['zip']\n city_name = zip_data['name']\n latitude = zip_data['lat']\n longitude = zip_data['lon']\n print(\"You entered: \" + zip_code)\n city_dict ={'lat': latitude, 'long': longitude, 'city' : city_name}\n return city_dict\n else:\n raise Exception(\"Invalid Zip Code\")#f\"Error {zip_response.status_code}: {zip_response.text}\"\n\n\ndef get_current_conditions(latitude, longitude):\n weather_response = requests.get(\n f'https://api.openweathermap.org/data/2.5/weather?lat={latitude}&lon={longitude}&appid={api_key}')\n if weather_response.status_code == 200:\n current_weather = weather_response.json()\n current_conditions = current_weather['weather'][0]['description']\n\n current_temp = current_weather['main']['temp']\n current_tempf = float(kelvin_to_fahrenheit(current_temp))\n\n temp_feelK = current_weather['main']['feels_like']\n temp_feel = float(kelvin_to_fahrenheit(temp_feelK))\n\n today_lowK = current_weather['main']['temp_min']\n today_low = 
float(kelvin_to_fahrenheit(today_lowK))\n\n today_highK = current_weather['main']['temp_max']\n today_high = float(kelvin_to_fahrenheit(today_highK))\n\n wind = current_weather['wind']['speed']\n\n conditions_dict = {'current_temp' : current_tempf, 'conditions' : current_conditions, 'current_feel' : temp_feel, 'low': today_low, 'high' : today_high, 'wind_speed' : wind}\n return conditions_dict\n else:\n raise Exception(f\"Error {weather_response.status_code}: {weather_response.text}\")\n\n\n","repo_name":"mrgoodwrench24/OpenWeatherMap_API","sub_path":"openweather_api.py","file_name":"openweather_api.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"20485362069","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\n\ndef add_to_history(result, image, time):\n data = pd.read_csv('./src/csv/history/history.csv')\n\n result_df = pd.DataFrame(result.items(), columns=[\"Parameter\", \"Value\"])\n result_df['Value'] = result_df['Value'].apply(lambda x: x[0][0])\n result_df = result_df.T\n result_df.columns = result_df.iloc[0]\n result_df = result_df[1:]\n result_df.insert(0, 'Times', time)\n\n # Concatenate the existing DataFrame with the new data DataFrame, excluding the \"time\" column from new_data\n updated_data = pd.concat([data, result_df], ignore_index=True)\n\n # Save the updated data back to the CSV file without index column\n updated_data.to_csv('./src/csv/history/history.csv', index=False)\n\ndef delete_data_history():\n # Load the CSV data (assuming you have a CSV file named 'data.csv' with headers)\n df = pd.read_csv('./src/csv/history/history.csv')\n\n df_header = df.iloc[:0]\n\n # Save the updated DataFrame back to the CSV file (excluding the index)\n df_header.to_csv('./src/csv/history/history.csv', index=False)\n \n# Function to display the history\ndef history():\n st.markdown(\"

    Riwayat Analisis
    \", unsafe_allow_html=True)\n data = pd.read_csv('./src/csv/history/history.csv')\n history_data = pd.DataFrame(data)\n st.dataframe(history_data)\n if st.button(\"Hapus Riwayat\"):\n delete_data_history()","repo_name":"shidiqfzr/urinalysis-app","sub_path":"src/pages/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"39323216945","text":"import os\nimport logging\nfrom ConfigParser import RawConfigParser as ConfigParser\n\nfrom twisted.internet import reactor\nfrom twisted.internet.protocol import Factory\n\nfrom myne2.world.worldprotocol import WorldProtocol\nfrom myne2.world.blockengine import BlockEngine\n\nclass World(Factory):\n \n protocol = WorldProtocol\n \n def __init__(self, base_path):\n assert os.path.isdir(base_path)\n self.base_path = base_path\n self.config_path = os.path.join(self.base_path, \"world.conf\")\n self.load_config()\n \n def load_config(self):\n self.config = ConfigParser()\n self.config.read(self.config_path)\n self.size = [\n self.config.getint(\"size\", \"x\"),\n self.config.getint(\"size\", \"y\"),\n self.config.getint(\"size\", \"z\"),\n ]\n self.spawn = [\n self.config.getint(\"spawn\", \"x\"),\n self.config.getint(\"spawn\", \"y\"),\n self.config.getint(\"spawn\", \"z\"),\n ]\n \n def start(self):\n logging.info(\"Starting world '%s'\" % self.base_path)\n self.engine = BlockEngine(self.base_path, self.size[0], self.size[1], self.size[2])\n self.engine.start_mmap()\n \n def stop(self):\n logging.info(\"Stopping world '%s'\" % self.base_path)\n self.engine.stop_mmap()\n del self.engine\n \n def listen(self):\n # Makes us listen on our configured port.\n reactor.listenTCP(self.config.getint(\"network\", \"port\"), self)","repo_name":"TheArchives/cloudBox","sub_path":"trunk/myne2/world/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"23618978122","text":"import torch\nfrom torch import nn as nn\nimport torch.nn.functional as F\nimport torch.nn.utils.prune as prune\nfrom basicsr.utils.registry import ARCH_REGISTRY\nimport numpy as np\nimport time\nimport random\n\n\ndef channel_shuffle(x, groups):\n\n batchsize, num_channels, height, width = x.data.size()\n channels_per_group = num_channels // groups\n # num_channels = groups * channels_per_group\n\n # grouping, 通道分组\n # b, num_channels, h, w =======> b, groups, channels_per_group, h, w\n x = x.view(batchsize, groups, channels_per_group, height, width)\n\n # channel shuffle, 通道洗牌\n x = torch.transpose(x, 1, 2).contiguous()\n # x.shape=(batchsize, channels_per_group, groups, height, width)\n # flatten\n x = x.view(batchsize, -1, height, width)\n\n return x\n\n\nclass HOAF_baseline(nn.Module):\n \"\"\"HOAF(High Order Activation Function) structure.\n\n Args:\n num_groups (int): number of groups to separate the channels into\n num_channels (int): number of channels expected in input\n \"\"\"\n __constants__ = ['num_groups', 'num_channels']\n num_groups: int\n num_channels: int\n\n def __init__(self, num_groups, num_channels, num_pow):\n super(HOAF_baseline, self).__init__()\n if num_channels % num_groups != 0:\n raise ValueError(\"num_channels need to be divisible by num_groups\")\n\n self.num_groups = num_groups\n self.num_channels = num_channels\n self.num_pow = num_pow\n self.ch_per_gp = self.num_channels // self.num_groups\n 
self.iter = 0\n\n self.out_channels = 0\n self.out_channels1 = self.ch_per_gp\n self.out_channels2 = self.ch_per_gp * (self.ch_per_gp + 1) // 2\n\n self.conv1 = nn.Conv2d(self.num_channels, self.out_channels2 * self.num_groups, 1, 1, 0, groups=self.num_groups)\n self.conv2 = nn.Conv2d(self.out_channels2 * self.num_groups, self.num_channels, 1, 1, 0, groups=self.num_groups)\n\n self.beta2 = nn.Parameter(torch.ones((1, self.num_channels, 1, 1)), requires_grad=True)\n\n # self.channelShuffle = nn.ChannelShuffle(self.ch_per_gp)\n # self.channelUnShuffle = nn.ChannelShuffle(self.num_groups)\n\n def forward(self, inp):\n # return HOAF_Function.apply(inp, self.num_groups, self.ch_per_gp, self.weight)\n # return HOAF_Function_without_weight.apply(inp, self.num_groups, self.ch_per_gp)\n self.iter += 1\n # B, C, H, W = inp.shape\n # output_c1 = torch.zeros([B, self.out_channels1 * self.num_groups])\n x = self.conv1(inp)\n x = self.conv2(x)\n\n return inp + self.beta2 * x\n\n\nclass HOAF_v3(nn.Module):\n \"\"\"HOAF(High Order Activation Function) structure.\n\n Args:\n num_groups (int): number of groups to separate the channels into\n num_channels (int): number of channels expected in input\n \"\"\"\n __constants__ = ['num_groups', 'num_channels']\n num_groups: int\n num_channels: int\n\n def __init__(self, num_groups, num_channels, num_pow):\n super(HOAF_v3, self).__init__()\n if num_channels % num_groups != 0:\n raise ValueError(\"num_channels need to be divisible by num_groups\")\n\n self.num_groups = num_groups\n self.num_channels = num_channels\n self.num_pow = num_pow\n self.ch_per_gp = self.num_channels // self.num_groups\n self.iter = 0\n\n self.out_channels = 0\n self.out_channels1 = self.ch_per_gp\n self.out_channels2 = self.ch_per_gp * (self.ch_per_gp + 1) // 2\n\n if 1 in self.num_pow:\n self.out_channels += self.out_channels1\n self.conv1 = nn.Conv2d(self.out_channels1 * self.num_groups, self.num_channels, kernel_size=1,\n groups=self.num_groups)\n if 2 in self.num_pow:\n self.out_channels += self.out_channels2\n self.conv2 = nn.Conv2d(self.out_channels2 * self.num_groups, self.num_channels, kernel_size=1,\n groups=self.num_groups)\n\n self.beta2 = nn.Parameter(torch.ones((1, self.num_channels, 1, 1)), requires_grad=True)\n\n # self.channelShuffle = nn.ChannelShuffle(self.ch_per_gp)\n # self.channelUnShuffle = nn.ChannelShuffle(self.num_groups)\n\n def forward(self, inp):\n # return HOAF_Function.apply(inp, self.num_groups, self.ch_per_gp, self.weight)\n # return HOAF_Function_without_weight.apply(inp, self.num_groups, self.ch_per_gp)\n self.iter += 1\n # B, C, H, W = inp.shape\n # output_c1 = torch.zeros([B, self.out_channels1 * self.num_groups])\n\n output = torch.zeros_like(inp)\n\n inp_c = torch.chunk(channel_shuffle(inp, self.ch_per_gp), self.ch_per_gp, dim=1)\n # output_c1 = []\n output_c2 = []\n\n for i in range(self.ch_per_gp):\n for j in range(i, self.ch_per_gp):\n output_c2.append(inp_c[i] * inp_c[j])\n\n if 1 in self.num_pow:\n output = output + inp\n if 2 in self.num_pow:\n output = output + self.conv2(channel_shuffle(torch.cat(output_c2, dim=1), self.num_groups)) * self.beta2\n\n return output\n\n\nif __name__ == \"__main__\":\n batchsize = 1\n channels = 32\n width = 1024\n _inp = torch.randn([batchsize, channels, width, width]).cuda()\n a = 0.1\n b = 0.2\n _target = a * _inp + b\n net_HOAF = HOAF_v3(channels // 4, channels, [1, 2]).cuda()\n # net_HOAF = HOAF_prune(channels // 4, channels, [1, 2]).cuda()\n # net_HOAF = HOAF_baseline(channels // 4, channels, [1, 2]).cuda()\n 
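One caveat about the timing loop below: `_loss_HOAF.backward()` is commented out, so no gradients are ever computed, `optimizer.step()` is a no-op, and the printed loss cannot decrease; as written the loop measures forward-pass time only. For reference, a minimal self-contained fit loop with the conventional step order (a hypothetical stand-in module on CPU, not the benchmark network above):

import torch
import torch.nn as nn

net = nn.Conv2d(4, 4, 1)                     # stand-in for net_HOAF
opt = torch.optim.Adam(net.parameters(), lr=1e-2)
cri = nn.MSELoss()
x = torch.randn(2, 4, 8, 8)
target = 0.1 * x + 0.2                       # same affine target as the script

for _ in range(200):
    opt.zero_grad()                          # clear stale gradients first
    loss = cri(net(x), target)
    loss.backward()                          # the call the script above skips
    opt.step()
print(f"final loss = {loss.item():.5f}")     # should approach 0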
net_GELU = nn.GELU().cuda()\n cri = nn.MSELoss()\n optimizer = torch.optim.Adam(net_HOAF.parameters(), lr=1e-2)\n print(\"start\")\n bg = time.time()\n for i in range(10000):\n _output_HOAF = net_HOAF(_inp)\n net_HOAF.zero_grad()\n _loss_HOAF = cri(_output_HOAF, _target)\n # _loss_HOAF.backward()\n optimizer.step()\n if i % 100 == 0:\n print(\"time cost = {:.2f}s\".format(time.time() - bg))\n print(\"i = {:3d}, loss = {:.4f}\".format(i, _loss_HOAF.item()))\n bg = time.time()\n\n\n\n","repo_name":"namazabi12/CV_Reproduction","sub_path":"NAFNET/basicsr/archs/HOAF_arch_v2.py","file_name":"HOAF_arch_v2.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"9387215529","text":"import linecache\r\nimport random\r\n\r\nimport torch\r\nimport os\r\nimport json\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom argparse import ArgumentParser\r\n\r\nclass StoryInferencedataset(Dataset):\r\n\r\n def __init__(self, decoder_tokenizer, encoder_tokenizer, file_path, max_triple_num, max_triple_length,\r\n max_decoder_input_length, max_target_length, stage, **kwargs):\r\n\r\n self.stage = stage\r\n self.kwargs = kwargs\r\n self.max_decoder_input_length = max_decoder_input_length\r\n self.max_target_length = max_target_length\r\n self.max_triple_num = max_triple_num\r\n self.max_triple_length = max_triple_length\r\n self.data_length = None\r\n\r\n self.encoder_tokenizer = encoder_tokenizer\r\n self.decoder_tokenizer = decoder_tokenizer\r\n self.decoder_bos = self.decoder_tokenizer.bos_token_id if self.decoder_tokenizer.bos_token_id is not None else self.decoder_tokenizer.cls_token_id\r\n self.decoder_pad = self.decoder_tokenizer.pad_token_id\r\n self.decoder_eos = self.decoder_tokenizer.eos_token_id if self.decoder_tokenizer.eos_token_id is not None else self.decoder_tokenizer.sep_token_id\r\n self.encoder_cls = self.encoder_tokenizer.bos_token_id if self.encoder_tokenizer.bos_token_id is not None else self.encoder_tokenizer.cls_token_id\r\n self.encoder_pad = self.encoder_tokenizer.pad_token_id\r\n self.encoder_eos = self.encoder_tokenizer.eos_token_id if self.encoder_tokenizer.eos_token_id is not None else self.encoder_tokenizer.sep_token_id\r\n\r\n self.file_path = file_path\r\n\r\n self.relation_type_list = self.kwargs['relation_type_list']\r\n self.relation_type_list = [int(x) for x in self.relation_type_list]\r\n\r\n self.prompt_template = [\r\n 'event that directly causes is',\r\n 'emotion that motivates is',\r\n 'location state that enables is',\r\n 'possess state that enables is',\r\n 'other attributes that enables is',\r\n 'event that directly cause is',\r\n 'emotion that caused by is',\r\n 'change in location that results in is',\r\n 'change of possession that results in is',\r\n 'other changes that results in is',\r\n ]\r\n\r\n self.prompt_template = [self.prompt_template[x] for x in self.relation_type_list]\r\n\r\n self.position_offset = 2\r\n\r\n def __len__(self):\r\n if self.data_length is None:\r\n with open(self.file_path, 'r', encoding='utf8') as f:\r\n self.data_length = len(f.readlines())\r\n return self.data_length\r\n\r\n def __getitem__(self, idx):\r\n index = idx + 1\r\n data = linecache.getline(self.file_path, index).strip()\r\n data = json.loads(data)\r\n\r\n source = data['story']\r\n target = data['target']\r\n inference_sentence = data['origin_sentence']\r\n\r\n inference_sentence = [inference_sentence[idx] for idx in range(len(inference_sentence)) if idx % 10 in 
self.relation_type_list]\r\n\r\n source_input_ids = []\r\n for idx, s in enumerate(source):\r\n source_input_ids.extend(self.decoder_tokenizer.convert_tokens_to_ids(self.decoder_tokenizer.tokenize(s)))\r\n source_input_ids.append(self.encoder_eos)\r\n\r\n inference_prompt = []\r\n for idx, sentence in enumerate(inference_sentence):\r\n template = self.prompt_template[idx % len(self.relation_type_list)]\r\n inference_story = source[idx // 10]\r\n inference_story = inference_story.strip()\r\n while not (inference_story[-1].isalpha() or inference_story[-1].isdigit()):\r\n inference_story = inference_story[:-1]\r\n inference_prompt.append(template.replace('', inference_story))\r\n inference_prompt_ids = self.list_convert_ids(self.decoder_tokenizer, inference_prompt)\r\n inference_sentence_ids = self.list_convert_ids(self.decoder_tokenizer, [' ' + x for x in inference_sentence])\r\n\r\n encoder_input_ids = source_input_ids\r\n encoder_input_mask = [1 for _ in range(len(encoder_input_ids))]\r\n\r\n decoder_input_ids = [[self.decoder_eos, self.decoder_bos] + x for x in inference_prompt_ids]\r\n decoder_input_mask = [[1 for _ in range(len(x))] for x in decoder_input_ids]\r\n\r\n decoder_target_ids = [x + [self.decoder_eos] for x in inference_sentence_ids]\r\n decoder_target_mask = [1 for _ in range(len(decoder_target_ids))]\r\n\r\n second_decoder_input_ids = [self.decoder_eos, self.decoder_bos]\r\n # second_decoder_input_ids = [self.decoder_bos, self.decoder_eos]\r\n second_decoder_input_mask = [1 for _ in range(len(second_decoder_input_ids))]\r\n\r\n second_decoder_target_ids = self.decoder_tokenizer.convert_tokens_to_ids(self.decoder_tokenizer.tokenize(target)) + [self.decoder_eos]\r\n second_decoder_target_mask = [1 for _ in range(len(second_decoder_target_ids))]\r\n\r\n return {\r\n 'encoder_input_ids': encoder_input_ids,\r\n 'encoder_input_mask': encoder_input_mask,\r\n 'decoder_input_ids': decoder_input_ids,\r\n 'decoder_input_mask': decoder_input_mask,\r\n 'second_decoder_input_ids': second_decoder_input_ids,\r\n 'second_decoder_input_mask': second_decoder_input_mask,\r\n 'decoder_target_ids': decoder_target_ids,\r\n 'decoder_target_mask': decoder_target_mask,\r\n 'second_decoder_target_ids': second_decoder_target_ids,\r\n 'second_decoder_target_mask': second_decoder_target_mask\r\n }\r\n\r\n\r\n def collact_fn(self, batch):\r\n '''\r\n\r\n Args:\r\n batch:\r\n\r\n Returns:\r\n\r\n '''\r\n\r\n encoder_input_ids = [x['encoder_input_ids'] for x in batch]\r\n decoder_input_ids = [x['decoder_input_ids'] for x in batch]\r\n second_decoder_input_ids = [x['second_decoder_input_ids'] for x in batch]\r\n decoder_target_ids = [x['decoder_target_ids'] for x in batch]\r\n second_decoder_target_ids = [x['second_decoder_target_ids'] for x in batch]\r\n\r\n decoder_current_length = [[len(x) for x in y] for y in decoder_input_ids]\r\n second_decoder_current_length = [len(x) for x in second_decoder_input_ids]\r\n\r\n decoder_input_with_target_ids = [[decoder_input_ids[batch_idx][sample_idx] + decoder_target_ids[batch_idx][sample_idx] for sample_idx in range(len(decoder_input_ids[batch_idx]))] for batch_idx in range(len(decoder_input_ids))]\r\n decoder_label_ids = [[([-100 for _ in range(len(decoder_input_ids[batch_idx][sample_idx]) - 1)] + decoder_target_ids[batch_idx][sample_idx] + [self.decoder_eos]) for sample_idx in range(len(decoder_input_ids[batch_idx]))] for batch_idx in range(len(decoder_input_ids))]\r\n second_decoder_input_with_target_ids = [second_decoder_input_ids[idx] + 
second_decoder_target_ids[idx] for idx in range(len(second_decoder_input_ids))]\r\n second_decoder_label_ids = [[self.decoder_bos] + second_decoder_target_ids[idx] + [self.decoder_eos] for idx in range(len(second_decoder_target_ids))]\r\n\r\n encoder_input_ids, encoder_input_mask = self.pad_token(encoder_input_ids, self.encoder_pad)\r\n decoder_input_ids, decoder_input_mask = self.pad_token(decoder_input_ids, self.decoder_pad)\r\n second_decoder_input_ids, second_decoder_input_mask = self.pad_token(second_decoder_input_ids, self.decoder_pad)\r\n decoder_input_with_target_ids, decoder_input_with_target_mask = self.pad_token(decoder_input_with_target_ids, self.decoder_pad)\r\n decoder_label_ids, _ = self.pad_token(decoder_label_ids, -100)\r\n second_decoder_input_with_target_ids, second_decoder_input_with_target_mask = self.pad_token(second_decoder_input_with_target_ids, self.decoder_pad)\r\n second_decoder_label_ids, _ = self.pad_token(second_decoder_label_ids, -100)\r\n decoder_target_ids, decoder_target_mask = self.pad_token(decoder_target_ids, self.decoder_pad)\r\n second_decoder_target_ids, second_decoder_target_mask = self.pad_token(second_decoder_target_ids, self.decoder_pad)\r\n\r\n return_dict = {}\r\n return_dict['encoder_input_ids'] = encoder_input_ids\r\n return_dict['encoder_input_mask'] = encoder_input_mask\r\n return_dict['decoder_current_length'] = torch.LongTensor(decoder_current_length)\r\n return_dict['second_decoder_current_length'] = torch.LongTensor(second_decoder_current_length)\r\n return_dict['decoder_label_ids'] = decoder_label_ids\r\n return_dict['second_decoder_label_ids'] = second_decoder_label_ids\r\n return_dict['decoder_target_ids'] = decoder_target_ids\r\n return_dict['second_decoder_target_ids'] = second_decoder_target_ids\r\n return_dict['decoder_target_mask'] = decoder_target_mask\r\n return_dict['second_decoder_target_mask'] = second_decoder_target_mask\r\n\r\n if self.stage == 'test':\r\n return_dict['decoder_input_ids'] = decoder_input_ids\r\n return_dict['decoder_input_mask'] = decoder_input_mask\r\n return_dict['second_decoder_input_ids'] = second_decoder_input_ids\r\n return_dict['second_decoder_input_mask'] = second_decoder_input_mask\r\n\r\n else:\r\n return_dict['decoder_input_ids'] = decoder_input_with_target_ids\r\n return_dict['decoder_input_mask'] = decoder_input_with_target_mask\r\n return_dict['second_decoder_input_ids'] = second_decoder_input_with_target_ids\r\n return_dict['second_decoder_input_mask'] = second_decoder_input_with_target_mask\r\n\r\n return return_dict\r\n\r\n def list_convert_ids(self, tokenizer, l):\r\n res = []\r\n for s in l:\r\n res.append(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s)))\r\n return res\r\n\r\n def convert_ids(self, tokenizer, s):\r\n return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s))\r\n\r\n def pad_token(self, id_list, pad_token, max_length=None, fix_max_length=False, suffix_truncate=True):\r\n if fix_max_length and max_length is None:\r\n raise ValueError('max_length can not be None if fix_max_length is True')\r\n\r\n if max_length is None:\r\n max_length = self.find_max_length(id_list)\r\n\r\n if not fix_max_length:\r\n max_length = min(self.find_max_length(id_list), max_length)\r\n\r\n if len(id_list[0]) != 0 and type(id_list[0][0]) == list:\r\n mask = []\r\n for i in range(len(id_list)):\r\n output = self.pad_token(id_list[i], pad_token, max_length=max_length, fix_max_length=True, suffix_truncate=suffix_truncate)\r\n id_list[i] = output[0]\r\n mask.append(output[1])\r\n return 
torch.stack(id_list), torch.stack(mask)\r\n else:\r\n for i in range(len(id_list)):\r\n if len(id_list[i]) <= max_length:\r\n id_list[i] += [pad_token] * (max_length - len(id_list[i]))\r\n else:\r\n if suffix_truncate:\r\n id_list[i] = id_list[i][:max_length]\r\n else:\r\n id_list[i] = id_list[i][-max_length:]\r\n\r\n id_tensor = torch.LongTensor(id_list)\r\n mask = torch.where(id_tensor == pad_token, 0, 1)\r\n return id_tensor, mask\r\n\r\n def find_max_length(self, id_list):\r\n ret = 0\r\n if len(id_list[0]) != 0 and type(id_list[0][0]) == list:\r\n for i in range(len(id_list)):\r\n ret = max(ret, self.find_max_length(id_list[i]))\r\n else:\r\n ret = max([len(x) for x in id_list])\r\n return ret\r\n\r\n def pad_batch(self, list_batch, max_list_length, pad_token=None):\r\n len_list = [len(x) for x in list_batch]\r\n max_batch_len = min([max(len_list), max_list_length])\r\n if max_batch_len == 0:\r\n max_batch_len = 1\r\n len_list = [min(x, max_batch_len) for x in len_list]\r\n for i in range(len(list_batch)):\r\n if len(list_batch[i]) > max_batch_len:\r\n list_batch[i] = list_batch[i][:max_batch_len]\r\n elif len(list_batch[i]) < max_batch_len:\r\n list_batch[i] += [[pad_token] for _ in range(max_batch_len - len(list_batch[i]))]\r\n\r\n\r\n batch_mask = torch.Tensor([[1 for _ in range(len_list[i])] + [0 for _ in range(max_batch_len - len_list[i])] for i in range(len(list_batch))])\r\n return list_batch, batch_mask\r\n\r\n def ground_true(self):\r\n res = []\r\n with open(self.file_path, 'r', encoding='utf8') as f:\r\n for line in f.readlines():\r\n data = json.loads(line)\r\n if hasattr(self.decoder_tokenizer, 'do_lower_case') and self.decoder_tokenizer.do_lower_case:\r\n res.append(data['target'].lower())\r\n else:\r\n res.append(data['target'])\r\n return res\r\n\r\n @staticmethod\r\n def add_dataset_specific_args(parser: ArgumentParser):\r\n parser.add_argument(\r\n \"--max_triple_num\",\r\n default=None,\r\n type=int,\r\n required=True,\r\n help=\"Max number of the triple\"\r\n )\r\n\r\n parser.add_argument(\r\n \"--max_triple_length\",\r\n default=None,\r\n type=int,\r\n required=True,\r\n help=\"Max_length of the triple\"\r\n )\r\n\r\n parser.add_argument(\r\n \"--max_decoder_input_length\",\r\n default=None,\r\n type=int,\r\n required=True,\r\n help=\"Max length of decoder input\"\r\n )\r\n\r\n parser.add_argument(\r\n \"--max_target_length\",\r\n default=None,\r\n type=int,\r\n required=True,\r\n help=\"Max length of target\"\r\n )\r\n\r\n parser.add_argument(\"--data_label\", type=str, help='threshold or hard', required=False, default=None)\r\n\r\n parser.add_argument(\"--relation_type_list\", nargs='+', default=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n\r\n return parser","repo_name":"ZhangYuS/CEG","sub_path":"scripts/data/StoryInferencedataset.py","file_name":"StoryInferencedataset.py","file_ext":"py","file_size_in_byte":14147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17271837037","text":"import requests\nfrom pathlib import Path\nimport shutil\nimport json\nfrom typing import Literal\n\nlanguage_map = {\n \"cn\": \"zh-cn\",\n \"cht\": \"zh-tw\",\n \"de\": \"de-de\",\n \"en\": \"en-us\",\n \"es\": \"es-es\",\n \"fr\": \"fr-fr\",\n \"id\": \"id-id\",\n \"jp\": \"ja-jp\",\n \"kr\": \"ko-kr\",\n \"pt\": \"pt-pt\",\n \"ru\": \"ru-ru\",\n \"th\": \"th-th\",\n \"vi\": \"vi-vn\"\n}\n\ndef get_data_and_update(lang: str, update_dict: dict, type: Literal[\"characters\", \"light_cones\"]):\n url = 
f\"https://raw.githubusercontent.com/Mar-7th/StarRailRes/master/index_new/{lang}/{type}.json\"\n api_lang = language_map[lang]\n res = requests.get(url)\n for id, meta in res.json().items():\n update_dict.setdefault(id, {})\n update_dict[id].setdefault(\"name_localization_map\", {})\n update_dict[id][\"icon_file_path\"] = meta[\"icon\"]\n update_dict[id][\"rank\"] = str(meta[\"rarity\"])\n update_dict[id][\"name_localization_map\"][api_lang] = meta[\"name\"]\n\ndef get_character_data() -> dict:\n result = {}\n for lang in language_map.keys():\n get_data_and_update(lang, result, type=\"characters\")\n return result\n\ndef get_light_cones_data() -> dict:\n result = {}\n for lang in language_map.keys():\n get_data_and_update(lang, result, type=\"light_cones\")\n return result\n\ndef get_data() -> dict:\n result = {}\n result[\"character\"] = get_character_data()\n result[\"light_cone\"] = get_light_cones_data()\n return result\n\ndef download_image(relative_url_path: str):\n base = \"https://raw.githubusercontent.com/Mar-7th/StarRailRes/master/\"\n if relative_url_path == \"\":\n return\n print(\"downloading: \", relative_url_path)\n url = f\"{base}{relative_url_path}\"\n res = requests.get(url)\n meta_folder = Path(\"./Assets/gacha_meta\")\n to = meta_folder / relative_url_path\n to.parent.mkdir(parents=True, exist_ok=True)\n print(\"saving to: \", to)\n with open(to, \"wb\") as f:\n f.write(res.content)\n\ndef main():\n assets_folder = Path(\"./Assets\")\n gacha_meta_folder = assets_folder / \"gacha_meta\"\n gacha_meta_file = gacha_meta_folder / \"gacha_meta.json\"\n if gacha_meta_folder.exists():\n shutil.rmtree(gacha_meta_folder)\n gacha_meta_folder.mkdir(parents=True)\n data = get_data()\n with open(gacha_meta_file, \"w\", encoding=\"utf-8\") as f:\n json.dump(data, f, ensure_ascii=False)\n for type, detail in data.items():\n for id, meta in detail.items():\n download_image(meta[\"icon_file_path\"])\n\nif __name__ == \"__main__\":\n main()","repo_name":"pizza-studio/HSRPizzaHelper","sub_path":"Script/update_gacha_meta.py","file_name":"update_gacha_meta.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"28"} +{"seq_id":"71695878476","text":"#!/usr/bin/python3\nimport os\nimport csv\n\n\ndef read_csc():\n path = os.path.dirname(os.path.realpath(__file__))\n radii_path = os.path.join(path, 'radii.csv')\n list = []\n with open(radii_path, encoding='utf-8') as f:\n radii_csv = csv.reader(f)\n headers = next(radii_csv)\n for row in radii_csv:\n list.append(row)\n\n return headers, list\n\n\ndef read_cov_rad(ele, unit='pm'):\n headers, list = read_csc()\n cov_rad = {}\n for e in list:\n cov_rad[e[0]] = e[6]\n rad = cov_rad[str(ele)]\n if rad == 'no data':\n return rad\n else:\n rad = float(rad)\n if unit.startswith('a'):\n rad = rad/100\n elif unit == 'nm':\n rad = rad/1000\n return rad\n\n\n\n\n\n\n\n\n\n\n\ndef test():\n read_cov_rad('15')\n\n#test()\n","repo_name":"cccccsf/Masterarbeit","sub_path":"Data/Cov_Rad.py","file_name":"Cov_Rad.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"25665634860","text":"# Retomemos un ejemplo que desarrollamos anteriormente modificándolo un poco\n# para ver como podemos acceder tanto a las llaves y los valores asociados a ellas a traves\n# de algunos for.\n\nlenguajes_favoritos: dict = {\n 'Jonathan': ['Python', 'C++'],\n 'Karen': ['Ruby'],\n 'Arnold': ['JAVA', 
'VisualStudio'],\n 'Jenny': ['Pearl', 'Python', 'SQL'],\n}\n\nfor nombre in lenguajes_favoritos.keys():\n print('\\n' + '¡El lenguaje favorito de ' + nombre + ' es:')\n for lenguaje in lenguajes_favoritos[nombre]: # Se pone en corchetes porque estamos llamando al iterador que es una lista\n print(lenguaje)\n\n","repo_name":"JonathanDM-93/PythonCrashCourse","sub_path":"PythonExercices/Diccionarios/lenguajes_fav.py","file_name":"lenguajes_fav.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"21460960592","text":"#!/usr/bin/python3\n\ndef Say_Name():\n \"\"\"defines the function of the module\"\"\"\n from datetime import datetime as dt\n\n try:\n name = input(\"Enter your name here: \")\n birth_year = int(input(\"Enter your birth year(ex: 1999) here: \"))\n today = dt.now()\n today_year = today.year\n age = int(today_year) - birth_year\n print(f\"The length of your name is: {len(name)}\\nAnd you are {age} years old.\")\n except Exception as e:\n print(f\"Try again...\\n{e}\")\n finally:\n print(\"Code execution completed...\")\n\nif __name__ == \"__main__\":\n Say_Name()\nprint(\"\\nCode developed by Masino\")\n","repo_name":"JohnsonMasino/More_Python","sub_path":"More_OOP/file16.py","file_name":"file16.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"6904945573","text":"__metaclass__ = type\n\n__all__ = [\n 'DirectoryMailBox',\n 'IMailBox',\n 'MailBoxError',\n 'POP3MailBox',\n 'TestMailBox',\n ]\n\nimport os\nimport poplib\nimport socket\nimport threading\n\nimport scandir\nfrom zope.interface import (\n implementer,\n Interface,\n )\n\nfrom lp.services.mail import stub\n\n# XXX wgrant 2015-09-21: A Python 2.7 security update set the line\n# length limit to 2048 bytes, which real POP3 servers often exceed for\n# various reasons. http://bugs.python.org/issue23906\npoplib._MAXLINE = 10000000\n\n\nclass MailBoxError(Exception):\n \"\"\"Indicates that some went wrong while interacting with the mail box.\"\"\"\n\n\nclass IMailBox(Interface):\n def open():\n \"\"\"Opens the mail box.\n\n Raises MailBoxError if the mail box can't be opened.\n\n This method has to be called before any operations on the mail\n box is performed.\n \"\"\"\n\n def items():\n \"\"\"Returns all the ids and mails in the mail box.\n\n Returns an iterable of (id, mail) tuples.\n\n Raises MailBoxError if there's some error while returning the mails.\n \"\"\"\n\n def delete(id):\n \"\"\"Deletes the mail with the given id.\n\n Raises MailBoxError if the mail couldn't be deleted.\n \"\"\"\n\n def close():\n \"\"\"Closes the mailbox.\"\"\"\n\n\n@implementer(IMailBox)\nclass TestMailBox:\n \"\"\"Mail box used for testing.\n\n It operates on stub.test_emails.\n \"\"\"\n\n def __init__(self):\n self._lock = threading.Lock()\n\n def open(self):\n \"\"\"See IMailBox.\"\"\"\n if not self._lock.acquire(False):\n raise MailBoxError(\"The mail box is already open.\")\n\n def items(self):\n \"\"\"See IMailBox.\"\"\"\n id = 0\n # Loop over a copy of test_emails to avoid infinite loops.\n for item in list(stub.test_emails):\n if item is not None:\n from_addr, to_addr, raw_mail = item\n yield id, raw_mail\n id += 1\n\n def delete(self, id):\n \"\"\"See IMailBox.\"\"\"\n if id not in [valid_id for valid_id, mail in self.items()]:\n raise MailBoxError(\"No such id: %s\" % id)\n\n # Mark it as deleted. 
We can't really delete it yet, since the\n # ids need to be preserved.\n stub.test_emails[id] = None\n\n def close(self):\n \"\"\"See IMailBox.\"\"\"\n # Clean up test_emails\n stub.test_emails = [item for item in stub.test_emails\n if item is not None]\n self._lock.release()\n\n\n@implementer(IMailBox)\nclass POP3MailBox:\n \"\"\"Mail box which talks to a POP3 server.\"\"\"\n\n def __init__(self, host, user, password, ssl=False):\n self._host = host\n self._user = user\n self._password = password\n self._ssl = ssl\n\n def open(self):\n \"\"\"See IMailBox.\"\"\"\n try:\n if self._ssl:\n popbox = poplib.POP3_SSL(self._host)\n else:\n popbox = poplib.POP3(self._host)\n except socket.error as e:\n raise MailBoxError(str(e))\n try:\n popbox.user(self._user)\n popbox.pass_(self._password)\n except poplib.error_proto as e:\n popbox.quit()\n raise MailBoxError(str(e))\n self._popbox = popbox\n\n def items(self):\n \"\"\"See IMailBox.\"\"\"\n popbox = self._popbox\n try:\n count, size = popbox.stat()\n except poplib.error_proto as e:\n # This means we lost the connection.\n raise MailBoxError(str(e))\n\n for msg_id in range(1, count + 1):\n response, msg_lines, size = popbox.retr(msg_id)\n yield (msg_id, '\\n'.join(msg_lines))\n\n def delete(self, id):\n \"\"\"See IMailBox.\"\"\"\n try:\n self._popbox.dele(id)\n except poplib.error_proto as e:\n raise MailBoxError(str(e))\n\n def close(self):\n \"\"\"See IMailBox.\"\"\"\n self._popbox.quit()\n\n\n@implementer(IMailBox)\nclass DirectoryMailBox:\n \"\"\"Mail box which reads files from a directory.\"\"\"\n\n def __init__(self, directory):\n self.mail_dir = os.path.abspath(directory)\n\n def open(self):\n \"\"\"See IMailBox.\"\"\"\n # No-op.\n\n def items(self):\n \"\"\"See IMailBox.\"\"\"\n for entry in scandir.scandir(self.mail_dir):\n if entry.is_file():\n with open(entry.path) as mail_file:\n yield (entry.path, mail_file.read())\n\n def delete(self, id):\n \"\"\"See IMailBox.\"\"\"\n if not os.path.isfile(id):\n raise MailBoxError(\"No such id: %s\" % id)\n if not os.path.abspath(id).startswith(self.mail_dir):\n raise MailBoxError(\"No such id: %s\" % id)\n os.remove(id)\n\n def close(self):\n \"\"\"See IMailBox.\"\"\"\n # No-op.\n","repo_name":"pombredanne/launchpad-3","sub_path":"lib/lp/services/mail/mailbox.py","file_name":"mailbox.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"11808500837","text":"import logging\nimport sys\nlogging.basicConfig(stream=sys.stdout, filemode='a', level=logging.INFO)\n\nimport w2v_modeling_v2 as w2v\nimport similarity_functions_v2 as aux\nimport automation_v2 as aut\n\nimport visualization as vis\nimport argparse\nimport pickle\nimport os\n\n\"\"\" Web- application for Berliner-Zeitung:\n V 2.0. \n\n consists of 5 objects/modules plus a main function to generates recommendations by similarity. \n\n Units: \n 1. preprocessing.py (Module)\n 2. w2v_modeling_v2.py (Object)\n 3. similarity_functions_v2.py (Object)\n 4. visualization.py (Object)\n 5. automation.py (Object) automate the model uploading procedure. 
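\n\n A typical invocation (illustrative values only; the flag names mirror the argparse definitions in main() below, and the four --fit integers are embedding size, window size, min count, and workers):\n\n python main2.py -S -M 300 5 1 4 -P -D https://www.apiblzapp.ml\n 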
\"\"\"\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Berliner- Zeitung recommendation engine\")\n parser.add_argument(\"-A\", \"--automate\", help=\"automate server by time\", action=\"store_true\")\n parser.add_argument(\"-D\", \"--server_name\", help=\"initiate domain name\", nargs='+', type=str)\n parser.add_argument(\"-M\", \"--fit\", help=\"train the model\", nargs='+', type=int)\n parser.add_argument(\"-P\", \"--predict\", help=\"make a prediction\", action=\"store_true\")\n parser.add_argument(\"-R\", \"--report\", help=\"create visual report\", action=\"store_true\")\n parser.add_argument(\"-S\", \"--set\", help=\"set workspace directories\", action=\"store_true\")\n parser.add_argument(\"-V\", \"--visualization\", help=\"show visual report\", action=\"store_true\")\n\n args = parser.parse_args()\n\n # Workspace server_name\n if args.server_name:\n server_url = args.server_name[0]\n else:\n server_url = \"https://www.apiblzapp.ml\"\n\n logging.info(\"Server name is set to: {0}\".format(server_url))\n\n # Workspace settings: creating directories \"-S\"\n\n workspace_path = os.getcwd()\n path_data = workspace_path + \"/data/\"\n path_data_output = workspace_path + \"/data/output/\"\n path_data_output_models = workspace_path + \"/data/output/models/\"\n path_data_output_prediction = path_data_output + \"prediction/\"\n\n if args.set:\n\n if not os.path.exists(path_data):\n os.mkdir(path_data)\n\n if not os.path.exists(path_data_output):\n os.mkdir(path_data_output)\n\n if not os.path.exists(path_data_output_models):\n os.mkdir(path_data_output_models)\n\n if not os.path.exists(path_data_output_prediction):\n os.mkdir(path_data_output_prediction)\n\n model = w2v.W2V(models_directory=path_data_output_models)\n\n # Modeling (w2v model) \"-M\"\n # create a new model with parameters: embedding size, window size, min count, workers.\n\n if args.fit:\n model.fit(args.fit[0], args.fit[1], args.fit[2], args.fit[3])\n else:\n model.load_model()\n\n # Similarity \"-P\"\n # instantiate similarity object from an existing model.\n sim = aux.Similarity(model.model)\n sim.create_test_df_sample()\n sim.add_average_vector()\n\n if args.predict:\n logging.info(\"creating a prediction: \")\n\n # pickling\n pickle.dump(sim.predict(k=6), open(path_data_output_prediction + 'model.pkl', 'wb'))\n\n # Visualization \"-V\"\n if args.visualization:\n visualizer = vis.Visualization(model.model)\n\n # Report \"-R\"\n if args.report:\n # 1\n visualizer.plot_pca()\n # 2\n visualizer.plot_tsne()\n # 3\n visualizer.plot_keys_cluster()\n # 4\n visualizer.tsne_3d_plot()\n # 5\n visualizer.plot_average_vectors(sim.df)\n # 6\n visualizer.plot_relative_clusters()\n\n visualizer.plot_all_figures()\n\n ############\n\n if args.automate:\n automation = aut.AutoServer(server_url, model, sim, path_data_output_prediction)\n automation.automate(t=1200, s=50)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cohitai/BLZ","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"30661371053","text":"import random, logging\nfrom blueprints import db\nfrom flask_restful import fields\n\nclass Transactionevent(db.Model):\n\n __tablename__ = \"TransaksiEvent\"\n tr_id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n event_id = db.Column(db.Integer, nullable=False)\n user_id = db.Column(db.Integer, nullable =False)\n event_name = db.Column(db.String(100), 
nullable =False)\n user_name = db.Column(db.String(100), nullable =False)\n price = db.Column(db.Integer, nullable =False)\n quantity = db.Column(db.Integer, nullable =False)\n band_id = db.Column(db.Integer, nullable =False)\n bandName = db.Column(db.String(100), nullable =False)\n\n response_field = {\n 'tr_id': fields.Integer,\n 'event_id': fields.Integer,\n 'user_id': fields.Integer,\n 'event_name': fields.String,\n 'user_name': fields.String,\n 'price': fields.Integer,\n 'quantity': fields.Integer,\n 'band_id': fields.Integer,\n 'bandName': fields.String\n }\n\n public_response_field = {\n 'event_name': fields.String,\n 'user_name': fields.String,\n 'price': fields.Integer,\n 'quantity': fields.Integer,\n 'bandName': fields.String\n }\n\n def __init__(self, tr_id, event_id, user_id, event_name, user_name, price, quantity, band_id, bandName):\n self.tr_id = tr_id\n self.event_id = event_id\n self.user_id = user_id\n self.event_name = event_name\n self.user_name = user_name\n self.price = price\n self.quantity = quantity\n self.band_id = band_id\n self.bandName = bandName\n \n def __repr__(self):\n return '<Transactionevent %r>' % self.tr_id\n ","repo_name":"FahmiSyahrulah/commerce","sub_path":"blueprints/eventtrans/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"25103018670","text":"import re\nimport urllib.request\nimport json\nimport numpy as np\nimport pandas as pd\nimport csv\nimport nltk\nfrom nltk.stem import SnowballStemmer\nfrom nltk.corpus import stopwords\nfrom nltk import FreqDist\nfrom nltk import classify\nfrom nltk import NaiveBayesClassifier\nimport random\n\nstopwords = stopwords.words(\"english\")\n\nclass Classifier():\n def __init__(self):\n self.classifier = None\n self.df = None\n\n with open('config.json') as config_file:\n data = json.load(config_file)\n \n self.wassa = data[\"WASSA\"]\n self.archive = data[\"archive\"]\n self.corporate = data[\"corporate\"]\n\n self.global_dataset = np.array([])\n self.global_processed_model = {}\n\n self.init_wassa()\n self.init_archive()\n self.init_corporate()\n self.train_model()\n\n \n def clearify_wassa_post(self, post):\n data = list(map(lambda x: x.rstrip(), post.split('\\t')))\n return { \"text\": data[1], \"emotion\": data[2] }\n\n def get_wassa_dataset(self, url):\n file = urllib.request.urlopen(url)\n posts = list(map(lambda x : x.decode(\"utf-8\"), file.readlines()))\n return list(map(self.clearify_wassa_post, posts))\n\n def init_wassa(self):\n for key in self.wassa:\n for dataset_version in self.wassa[key]:\n local_dataset = self.get_wassa_dataset(self.wassa[key][dataset_version])\n self.global_dataset = np.concatenate((self.global_dataset, np.array(local_dataset)))\n \n def clearify_archive_comment(self, comment):\n clear_comment = comment.rstrip().split(';')\n return { \"text\" : clear_comment[0], \"emotion\": clear_comment[1] }\n\n def get_archive_dataset(self, url):\n with open(url) as file:\n data = file.readlines()\n return list(map(self.clearify_archive_comment, data))\n \n def init_archive(self):\n for key in self.archive:\n for dataset_version in self.archive[key]:\n local_dataset = self.get_archive_dataset(self.archive[key][dataset_version])\n self.global_dataset = np.concatenate((self.global_dataset, np.array(local_dataset)))\n \n def clearify_corporate(self, text):\n return { \"text\" : text, \"emotion\" : \"corporate\" }\n\n def init_corporate(self):\n with open(self.corporate, encoding = 
\"ISO-8859-1\") as csvfile:\n corporate_reader = csv.DictReader(csvfile, delimiter=',')\n reviews = [row['text'] for row in corporate_reader]\n local_dataset = list(map(self.clearify_corporate, reviews))\n self.global_dataset = np.concatenate((self.global_dataset, np.array(local_dataset)))\n \n def remove_noise(self, tokens, stop_words = ()):\n stemmer = SnowballStemmer(\"english\")\n cleaned_tokens = []\n for token in tokens:\n if len(token) > 0 and not re.search(r'[^0-9a-zA-Z]+', token) and token.lower() not in stop_words:\n cleaned_tokens.append(stemmer.stem(token))\n return cleaned_tokens\n\n def get_all_words(self, cleaned_tokens_list):\n for tokens in cleaned_tokens_list:\n for token in tokens:\n yield token\n\n def get_tokens_for_model(self, cleaned_tokens_list):\n for tokens in cleaned_tokens_list:\n yield dict([token, True] for token in tokens)\n \n def process_model(self):\n self.df = pd.DataFrame(list(self.global_dataset))\n\n emotions = self.df['emotion'].drop_duplicates().tolist()\n\n for em in emotions:\n dataset = self.df[self.df['emotion'] == em]['text'].astype('str').to_numpy()\n \n tokens = [nltk.word_tokenize(text) for text in dataset]\n cleaned_tokens = [self.remove_noise(token, stopwords) for token in tokens]\n \n self.global_processed_model[em] = [(text_dict, em) for text_dict in self.get_tokens_for_model(cleaned_tokens)]\n\n def train_model(self):\n self.process_model()\n\n dataset_for_model = []\n for key in self.global_processed_model.keys():\n dataset_for_model += self.global_processed_model[key]\n \n # random.shuffle(dataset_for_model)\n\n # text_count = (self.df.shape[0] * 80) // 100\n\n # train_data = dataset_for_model[:text_count]\n # test_data = dataset_for_model[text_count:]\n\n # print(\"train data:\", len(train_data))\n # print(\"test data:\", len(test_data))\n\n self.classifier = NaiveBayesClassifier.train(dataset_for_model)\n\n # print(\"Accuracy is:\", classify.accuracy(self.classifier, test_data))\n # print(self.classifier.show_most_informative_features(10))\n \n def classify_message(self, text):\n tokens = self.remove_noise(nltk.tokenize.word_tokenize(text))\n return self.classifier.classify(dict([token, True] for token in tokens))","repo_name":"almaszaurbekov/emmessage","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"20080866770","text":"import mmcv\nimport numpy as np\nimport torch\n\nfrom mmdet.core import INSTANCE_OFFSET, bbox2roi, multiclass_nms\nfrom mmdet.core.visualization import imshow_det_bboxes\nfrom ..builder import DETECTORS, build_head\nfrom ..roi_heads.mask_heads.fcn_mask_head import _do_paste_mask\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass TwoStagePanopticSegmentor(TwoStageDetector):\n \"\"\"Base class of Two-stage Panoptic Segmentor.\n\n As well as the components in TwoStageDetector, Panoptic Segmentor has extra\n semantic_head and panoptic_fusion_head.\n \"\"\"\n\n def __init__(\n self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None,\n # for panoptic segmentation\n semantic_head=None,\n panoptic_fusion_head=None):\n super(TwoStagePanopticSegmentor,\n self).__init__(backbone, neck, rpn_head, roi_head, train_cfg,\n test_cfg, pretrained, init_cfg)\n if semantic_head is not None:\n self.semantic_head = build_head(semantic_head)\n if panoptic_fusion_head is not 
None:\n panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None\n panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()\n panoptic_fusion_head_.update(test_cfg=panoptic_cfg)\n self.panoptic_fusion_head = build_head(panoptic_fusion_head_)\n\n self.num_things_classes = self.panoptic_fusion_head.\\\n num_things_classes\n self.num_stuff_classes = self.panoptic_fusion_head.\\\n num_stuff_classes\n self.num_classes = self.panoptic_fusion_head.num_classes\n\n @property\n def with_semantic_head(self):\n return hasattr(self,\n 'semantic_head') and self.semantic_head is not None\n\n @property\n def with_panoptic_fusion_head(self):\n return hasattr(self, 'panoptic_fusion_heads') and \\\n self.panoptic_fusion_head is not None\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/get_flops.py`\n \"\"\"\n raise NotImplementedError(\n f'`forward_dummy` is not implemented in {self.__class__.__name__}')\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n gt_semantic_seg=None,\n proposals=None,\n **kwargs):\n x = self.extract_feat(img)\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n losses.update(roi_losses)\n\n semantic_loss = self.semantic_head.forward_train(x, gt_semantic_seg)\n losses.update(semantic_loss)\n\n return losses\n\n def simple_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False):\n \"\"\"Simple test for mask head without augmentation.\"\"\"\n img_shapes = tuple(meta['ori_shape']\n for meta in img_metas) if rescale else tuple(\n meta['pad_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n masks = []\n for img_shape in img_shapes:\n out_shape = (0, self.roi_head.bbox_head.num_classes) \\\n + img_shape[:2]\n masks.append(det_bboxes[0].new_zeros(out_shape))\n mask_pred = det_bboxes[0].new_zeros((0, 80, 28, 28))\n mask_results = dict(\n masks=masks, mask_pred=mask_pred, mask_feats=None)\n return mask_results\n\n _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))]\n if rescale:\n if not isinstance(scale_factors[0], float):\n scale_factors = [\n det_bboxes[0].new_tensor(scale_factor)\n for scale_factor in scale_factors\n ]\n _bboxes = [\n _bboxes[i] * scale_factors[i] for i in range(len(_bboxes))\n ]\n\n mask_rois = bbox2roi(_bboxes)\n mask_results = self.roi_head._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n # split batch mask prediction back to each image\n num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]\n mask_preds = mask_pred.split(num_mask_roi_per_img, 0)\n\n # resize the mask_preds to (K, H, W)\n masks = []\n for i in range(len(_bboxes)):\n det_bbox = det_bboxes[i][:, :4]\n det_label = det_labels[i]\n\n mask_pred = mask_preds[i].sigmoid()\n\n box_inds = torch.arange(mask_pred.shape[0])\n mask_pred = mask_pred[box_inds, det_label][:, None]\n\n img_h, img_w, _ = img_shapes[i]\n mask_pred, _ = _do_paste_mask(\n 
mask_pred, det_bbox, img_h, img_w, skip_empty=False)\n masks.append(mask_pred)\n\n mask_results['masks'] = masks\n\n return mask_results\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without Augmentation.\"\"\"\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n bboxes, scores = self.roi_head.simple_test_bboxes(\n x, img_metas, proposal_list, None, rescale=rescale)\n\n pan_cfg = self.test_cfg.panoptic\n # class-wise predictions\n det_bboxes = []\n det_labels = []\n for bboxe, score in zip(bboxes, scores):\n det_bbox, det_label = multiclass_nms(bboxe, score,\n pan_cfg.score_thr,\n pan_cfg.nms,\n pan_cfg.max_per_img)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n\n mask_results = self.simple_test_mask(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n masks = mask_results['masks']\n\n seg_preds = self.semantic_head.simple_test(x, img_metas, rescale)\n\n results = []\n for i in range(len(det_bboxes)):\n pan_results = self.panoptic_fusion_head.simple_test(\n det_bboxes[i], det_labels[i], masks[i], seg_preds[i])\n pan_results = pan_results.int().detach().cpu().numpy()\n result = dict(pan_results=pan_results)\n results.append(result)\n return results\n\n def show_result(self,\n img,\n result,\n score_thr=0.3,\n bbox_color=(72, 101, 241),\n text_color=(72, 101, 241),\n mask_color=None,\n thickness=2,\n font_size=13,\n win_name='',\n show=False,\n wait_time=0,\n out_file=None):\n \"\"\"Draw `result` over `img`.\n\n Args:\n img (str or Tensor): The image to be displayed.\n result (dict): The results.\n\n score_thr (float, optional): Minimum score of bboxes to be shown.\n Default: 0.3.\n bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.\n The tuple of color should be in BGR order. Default: 'green'.\n text_color (str or tuple(int) or :obj:`Color`):Color of texts.\n The tuple of color should be in BGR order. Default: 'green'.\n mask_color (None or str or tuple(int) or :obj:`Color`):\n Color of masks. The tuple of color should be in BGR order.\n Default: None.\n thickness (int): Thickness of lines. Default: 2.\n font_size (int): Font size of texts. Default: 13.\n win_name (str): The window name. 
Default: ''.\n wait_time (float): Value of waitKey param.\n Default: 0.\n show (bool): Whether to show the image.\n Default: False.\n out_file (str or None): The filename to write the image.\n Default: None.\n\n Returns:\n img (Tensor): Only if not `show` or `out_file`.\n \"\"\"\n img = mmcv.imread(img)\n img = img.copy()\n pan_results = result['pan_results']\n # keep objects ahead\n ids = np.unique(pan_results)[::-1]\n legal_indices = ids != self.num_classes # for VOID label\n ids = ids[legal_indices]\n labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)\n segms = (pan_results[None] == ids[:, None, None])\n\n # if out_file specified, do not show image in window\n if out_file is not None:\n show = False\n # draw bounding boxes\n img = imshow_det_bboxes(\n img,\n segms=segms,\n labels=labels,\n class_names=self.CLASSES,\n bbox_color=bbox_color,\n text_color=text_color,\n mask_color=mask_color,\n thickness=thickness,\n font_size=font_size,\n win_name=win_name,\n show=show,\n wait_time=wait_time,\n out_file=out_file)\n\n if not (show or out_file):\n return img\n","repo_name":"fudan-zvg/SETR","sub_path":"hlg-detection/mmdet/models/detectors/panoptic_two_stage_segmentor.py","file_name":"panoptic_two_stage_segmentor.py","file_ext":"py","file_size_in_byte":10552,"program_lang":"python","lang":"en","doc_type":"code","stars":975,"dataset":"github-code","pt":"28"} +{"seq_id":"15023825557","text":"import threading\nimport socket\nimport sys\nimport time\n\n# Create a UDP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nstate_host = ''\nstate_port = 8891 # 8890\naddress = (state_host, state_port)\nsock.bind(address)\n\n# Create socket to send commands and receive their response\ntello_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ntello_host = '192.168.10.1'\ntello_port = 8888 # 8889\ntello_address = (tello_host, tello_port)\n\nyaw_response = []\nyaw = 0\noppo_yaw = 0\n\n\n# # # GETTER # # #\n\ndef get_yaw_response():\n return int(yaw_response[-1])\n\n\n# # # UPLINK AND DOWNLINK # # #\n\ndef downlink():\n while True:\n data, server = sock.recvfrom(2046)\n if data == b'ok':\n print(\"ok\")\n elif data == b'error':\n print(\"error\")\n else:\n yaw_response.append(parser(data.decode(encoding=\"utf-8\")))\n\n\ndef uplink(cmd):\n # print(\"cmd\", cmd)\n try:\n if 'end' in cmd:\n print('ending')\n tello_sock.close()\n tello_sock.shutdown(1)\n pass\n cmd = cmd.encode(encoding=\"utf-8\")\n tello_sock.sendto(cmd, tello_address)\n except KeyboardInterrupt:\n print('\\nKeyboardInterrupt\\n')\n cmd = \"land\".encode(encoding=\"utf-8\")\n tello_sock.sendto(cmd, tello_address)\n tello_sock.close()\n tello_sock.shutdown(1)\n\n\n# # # PATH FOLLOWING\n\ndef straight(straight_yaw, flight_time):\n drone_speed = 100\n delay = 1\n is_deviating = False\n\n start_time = time.time()\n\n while (time.time() - start_time) < float(flight_time): # if the drone flies at 1 m/s then this works\n # if float(flight_time) - (time.time() - start_time) < 2:\n # print()\n # drone_speed = 40\n\n newest_yaw_response = get_yaw_response() # is redefined after each loop\n newest_yaw_response = int(newest_yaw_response) # for some reason it doesn't work in one line..\n\n if newest_yaw_response == int(straight_yaw): # no deviation\n pass\n else: # if there is deviation...\n print(\"is deviating: \", newest_yaw_response, int(straight_yaw))\n is_deviating = True # <-- this is set to true\n\n if is_deviating:\n from_yaw = newest_yaw_response + 180\n to_yaw = int(straight_yaw) + 180\n\n if from_yaw < 
to_yaw: # checking if the drone should turn cw or ccw\n diff = to_yaw - from_yaw\n if diff < 180:\n yaw_per_sec = abs(diff / delay) # these lines of code calculate\n else: # the angle between the current drone\n yaw_per_sec = -abs((360-diff) / delay) # yaw and the correct drone yaw\n else:\n diff = from_yaw - to_yaw # the code also takes the overflowing\n if diff < 180: # yaw values from -180 to 180 into account\n yaw_per_sec = -abs(diff / delay) # so that the shortest angle is always found\n else:\n yaw_per_sec = abs((360 - diff) / delay)\n\n if yaw_per_sec < -100:\n yaw_per_sec = -100\n elif yaw_per_sec > 100:\n yaw_per_sec = 100\n\n is_deviating = False\n else:\n yaw_per_sec = 0\n\n rc_string = \"rc 0 \" + str(drone_speed) + \" 0 \" + str(yaw_per_sec)\n uplink(rc_string)\n time.sleep(delay)\n\n\n# # # COMMANDS # # #\n\ndef base_commands(cmd):\n global yaw_response, yaw, oppo_yaw\n\n if cmd == \"init\":\n print(cmd)\n downlink_thread = threading.Thread(target=downlink)\n downlink_thread.start()\n uplink(\"command\")\n time.sleep(2)\n uplink(\"rc 0 0 0 0\")\n time.sleep(2)\n uplink(\"takeoff\")\n time.sleep(5)\n\n elif cmd == \"rc0\":\n print(cmd)\n uplink(\"rc 0 0 0 0\")\n\n elif cmd == \"stop\":\n print(cmd)\n uplink(cmd)\n\n elif cmd == \"land\":\n print(cmd)\n uplink(cmd)\n\n elif cmd == \"getyaw\":\n print(cmd)\n if len(yaw_response) != 0: # if array is empty ... this is due to no tello states coming in\n yaw = yaw_response[-1]\n else:\n yaw_response.append(0)\n print(\"yaw has been set to 0 - use this for test purposes\")\n\n elif cmd == \"getoppoyaw\":\n print(cmd)\n if yaw < 0:\n oppo_yaw = 180 + yaw\n elif yaw > 0:\n oppo_yaw = yaw - 180\n else:\n oppo_yaw = 179\n\n\ndef param_commands(cmd, value):\n global yaw, oppo_yaw\n if cmd == \"rotate\":\n if int(float(value)) < 0:\n pass\n print(\"ccw \", abs(int(float(value))))\n uplink(\"ccw \" + str(abs(int(float(value)))))\n elif int(float(value)) > 0:\n pass\n print(\"cw \", abs(int(float(value))))\n uplink(\"cw \" + str(abs(int(float(value)))))\n else:\n pass # don't rotate\n\n if cmd == \"wait\":\n print(cmd, value)\n time.sleep(int(value))\n\n if cmd == \"straight\":\n print(cmd, value[0], value[1])\n if value[0] == \"yaw\":\n straight(yaw, flight_time=value[1])\n elif value[0] == \"oppoyaw\":\n straight(oppo_yaw, flight_time=value[1])\n else:\n straight(int(value[0]), flight_time=value[1]) # if it already has a value\n\n if cmd == \"turn\":\n print(cmd, value[0], value[1])\n start_time = time.time()\n cmd = \"rc 0 50 0 \" + str(value[0])\n while (time.time() - start_time) < float(value[1]):\n uplink(cmd)\n\n\ndef parser(cmd_str):\n d = {}\n delim1 = \";\"\n delim2 = \":\"\n element = \"\"\n print(cmd_str)\n for _, v in enumerate(cmd_str):\n if v is not delim1:\n element += v\n else:\n arr = element.split(delim2)\n if len(arr) == 1:\n base_commands(arr[0])\n elif len(arr) == 2:\n if arr[0] == \"yaw\": # downlink() needs to parse yaw\n return arr[1]\n param_commands(arr[0], arr[1])\n elif len(arr) == 3:\n param_commands(arr[0], [arr[1], arr[2]])\n element = \"\"","repo_name":"CE-CF/P2-B214","sub_path":"hive/dmsRoutingModule/flightpackage/relayboxrouting.py","file_name":"relayboxrouting.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"70357301194","text":"from .plot_signals import time_to_merger, chirp_mass\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import ticker, cm, 
colors\n\n\nplt.rcParams.update({\n \"text.usetex\": True,\n \"font.family\": \"Serif\"\n})\n\nmass_grid = np.geomspace(1, 1e7, num=200)\nfreq_grid = np.geomspace(1e-5, 1e2, num=200)\n\nF, M = np.meshgrid(freq_grid, mass_grid)\n\nM_CHIRP = chirp_mass(M/2, M/2)\n\nT = time_to_merger(F, M_CHIRP)\n\ntimes = {\n 1: 'second',\n 60: 'minute',\n 3600: 'hour',\n 3600*24: 'day',\n 3600*24*30: 'month',\n 3600*24*365.24: 'year',\n 3600*24*365.24*10: '10 years',\n}\n\nplt.contourf(F, M, T, levels=list(times.keys()), cmap=cm.PuBu_r, norm=colors.LogNorm())\n\n# plt.axvline(x=0.05, c='black')\n# plt.axvline(x=2, c='black')\nplt.fill_betweenx([mass_grid[0], mass_grid[-1]], 0.05, 2, alpha=.2, color='red')\n\nmchirp_170817 = 1.188\nm_170817 = 1.188 * 2**(6/5)\nm_190521 = 150\nplt.axhline(m_170817, c='black')\nplt.text(2e-5, m_170817*1.3, 'GW170817')\nplt.axhline(m_190521, c='black')\nplt.text(2e-5, m_190521*1.3, 'GW190521')\nplt.text(1e-1, 4e6, 'decihertz')\n\nplt.xscale('log')\nplt.yscale('log')\nplt.xlabel('Frequency [Hz]')\nplt.ylabel('Total binary mass [$M_\\odot$]')\nplt.colorbar(format=ticker.FixedFormatter(list(times.values())), label='Time to merger')\nplt.savefig('time_to_merger.pdf')","repo_name":"jacopok/gw-landscape","sub_path":"gw_landscape/time_to_merger.py","file_name":"time_to_merger.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"16172347956","text":"import argparse\nimport base64\nimport logging\n\nxor = b'1f903053jlfajsklj39019013ut098e77xhlajklqpozufoghi642098cbmdakandqiox536898jiqjpe6092smmkeut02906'\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Snake Usermodule Log Decryptor')\n parser.add_argument('-p', '--log-path', required=True, help='Provide path to the log file.')\n parser.add_argument('-o', '--output', required=True, help='Provide output path for decrypted log file.')\n parser.add_argument('-l', '--log', dest='logLevel', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Set logging level', default='INFO')\n args = parser.parse_args()\n\n logging.basicConfig(level=getattr(logging, args.logLevel))\n logging.info('Reading in log file from %s', args.log_path)\n logging.info('Output path set to %s', args.output)\n\n with open(args.output, 'wb') as output_file:\n with open(args.log_path) as file:\n for line in file:\n decoded = base64.b64decode(line)\n decrypted_line = bytes(a ^ b for a, b in zip(decoded, xor))\n output_file.write(decrypted_line + b'\\n')\n\n\n \n \n","repo_name":"center-for-threat-informed-defense/adversary_emulation_library","sub_path":"turla/Resources/control_server/handlers/snake/decrypt_logs.py","file_name":"decrypt_logs.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":1363,"dataset":"github-code","pt":"28"} +{"seq_id":"70103353674","text":"#!/usr/bin/env python3\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport sys\n\nif len(sys.argv) == 1:\n print(\"\"\"Usage: \n head -n -1 sim_result.txt > tmp_results.txt; \n ./plotResults.py tmp_results.txt <>\n where n is the milestone number you want to start plotting at. For example,\nif the optimizer has gone through 5,000 generations and created 1000 lines of output, then you would use an n of 500 to look at the second half of the milestone outputs. 
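If n is omitted, the script defaults to plotting only the last 20% of the milestone lines (sliceStart falls back to int(len(lines) * 0.8) in the code below).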
\n The first line just trims off the last line of the results file, since it may not be a complete line and therefore it would be unparseable.\n The file sim_result.txt is the file that ga_main writes as its --output-file argument. \n Once the optimization has completed, you can run this script on the whole output file, of course. \"\"\")\n sys.exit(1)\n\nclass Flight:\n def __init__(self, flightLine):\n dats = [float(x) for x in flightLine.split(',')]\n self.generation = dats[0];\n self.collideFrame = dats[1];\n self.fitness = dats[2];\n self.distance_to_go_x = dats[3]\n self.distance_to_go_y = dats[4]\n flat_remaining = np.array(dats[5:])\n smoosh_remaining = np.reshape(flat_remaining, (-1, 5))\n self.controllerInput = smoosh_remaining[:,0]\n self.xPos = smoosh_remaining[:,1]\n self.yPos = smoosh_remaining[:,2]\n self.zPos = smoosh_remaining[:,3]\n self.baseSpeed = smoosh_remaining[:,4]\n \n\ninData = open(sys.argv[1], \"r\")\nlines = [x for x in inData]\nsliceStart = int(sys.argv[2]) if len(sys.argv) > 2 else (int(len(lines)* 0.8)) \nflights = [Flight(x) for x in lines[sliceStart:]]\nprint(\"Loaded flight data.\")\ngenerations = [f.generation for f in flights]\nfitnesses = [f.fitness for f in flights]\ngrid_space = 0.04\ngridspec = {\"left\" : grid_space, \"right\" : 1 - grid_space, \"top\" : 1 - grid_space, \"bottom\" : grid_space}\nfig,((axInp, axX, axPos), (axVel, axY, axFrames)) = \\\n plt.subplots(nrows=2, ncols=3, gridspec_kw = gridspec)\n\nminX = 1e10\nmaxX = -1e10\nminY = 1e10\nmaxY = -1e10\nmaxlen = 0\nfor flight in flights:\n fmaxX = np.max(flight.xPos)\n fminX = np.min(flight.xPos)\n fmaxY = np.max(flight.yPos)\n fminY = np.min(flight.yPos)\n minX = min(fminX, minX)\n maxX = max(fmaxX, maxX)\n minY = min(fminY, minY)\n maxY = max(fmaxY, maxY)\n maxlen = max(maxlen, len(flight.xPos))\nprint(\"Maxima & minima calculated.\")\nAX_MAXVAL=maxlen + 5\nminX -= 0.04 * (maxX - minX)\nmaxX += 0.04 * (maxX - minX)\nminY -= 0.04 * (maxY - minY)\nmaxY += 0.04 * (maxY - minY)\nprevFlight = flights[0]\nif False:\n for flight in flights:\n def plotInfo(ax, fn, xlim, ylim):\n ax.cla()\n ax.plot(fn(flight), '.', color='blue', markersize=5)\n ax.plot(fn(prevFlight), '.', color='red', markersize=1)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n plotInfo(axInp, lambda x : x.controllerInput, \\\n (0, AX_MAXVAL), (-72,72))\n \n plotInfo(axVel, lambda x : x.baseSpeed, \\\n (0, AX_MAXVAL), (0, 5.1))\n \n plotInfo(axX, lambda x : x.xPos, \\\n (0, AX_MAXVAL), (minX, maxX))\n \n plotInfo(axY, lambda x : x.yPos, \\\n (0, AX_MAXVAL), (minY, maxY))\n \n \n axPos.cla()\n axPos.plot(flight.xPos, flight.yPos, '.', color='blue', markersize = 5)\n axPos.plot(prevFlight.xPos, prevFlight.yPos, '.', color='red', markersize = 1)\n axPos.set_xlim((minX, maxX))\n axPos.set_ylim((minY, maxY))\n prevFlight = flight\n plt.pause(0.1)\n\naxInp.cla()\naxX.cla()\naxVel.cla()\naxY.cla()\nfinalXVals = []\nfinalYVals = []\nfinalFramenos = []\nfrom scipy.ndimage import gaussian_filter1d, median_filter\nfilt = lambda x: gaussian_filter1d(x, 3)\nfor flight in flights:\n dispMode = '.'\n if(flight == flights[-1]):\n markersize = 5\n markercolor = 'blue'\n else: \n markersize = 0.5\n markercolor = 'red'\n axInp.plot(flight.controllerInput, dispMode, markersize = markersize, color=markercolor)\n axVel.plot(flight.baseSpeed, dispMode, markersize = markersize, color=markercolor)\n axX.plot(flight.xPos, dispMode, markersize = markersize, color=markercolor)\n axY.plot(flight.yPos, dispMode, markersize = markersize, color=markercolor)\n 
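# Earlier flights are drawn as small red dots and the most recent flight as larger blue dots, so the optimizer's progress reads as a red cloud converging on the blue trace.\n 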
axPos.plot(flight.xPos, flight.yPos, dispMode, markersize = markersize, color=markercolor)\n finalXVals.append(flight.xPos[-1])\n finalYVals.append(flight.yPos[-1])\n finalFramenos.append(len(flight.xPos)-1)\n \naxInp.plot(filt(flights[-1].controllerInput), '-', color='green')\n\naxX.plot(finalFramenos, finalXVals, '-')\naxY.plot(finalFramenos, finalYVals, '-')\n\naxInp.set_xlim((0, AX_MAXVAL))\naxInp.set_ylim((-73, 73))\naxInp.set_xlabel(\"Frame\")\naxInp.set_ylabel(\"Controller position\")\naxVel.set_xlim((0, AX_MAXVAL))\naxVel.set_ylim((0, 5.1))\naxVel.set_xlabel(\"Frame\")\naxVel.set_ylabel(\"Base speed\")\naxX.set_xlim((0, AX_MAXVAL))\naxX.set_ylim((minX, maxX))\naxX.set_xlabel(\"frame\")\naxX.set_ylabel(\"x position\")\naxY.set_xlim((0, AX_MAXVAL))\naxY.set_ylim((minY, maxY))\naxY.set_xlabel(\"frame\")\naxY.set_ylabel(\"y position\")\naxPos.set_xlabel(\"x position\")\naxPos.set_ylabel(\"y position\")\naxPos.set_xlim((minX, maxX))\naxPos.set_ylim((minY, maxY))\naxFrames.plot(generations, fitnesses, \".\")\naxFrames.set_xlabel(\"Generation number\")\naxFrames.set_ylabel(\"Fitness score\")\nplt.show()\n","repo_name":"malleoz/ttydPlaneSim","sub_path":"plotResults.py","file_name":"plotResults.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"28"} +{"seq_id":"150285460","text":"__author__ = 'xi'\nimport socket,time\n\nclass Get:\n\n url=\"\"\n path=\"\"\n def __init__(self,url,path):\n self.url=url\n self.path=path\n\n def submit(self):\n host=self.url\n oldtime=time.time()\n try:\n se=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n se.connect((host,80))\n se.send(\"GET \"+self.path+\"/ HTTP/1.1\\n\")\n se.send(\"Accept:text/html,application/xhtml+xml,*/*;q=0.8\\n\")\n se.send(\"Accept-Language:zh-CN,zh;q=0.8,en;q=0.6\\n\")\n se.send(\"User-Agent: Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3\\r\\n\")\n se.send(\"Cache-Control:max-age=0\\n\")\n se.send(\"Connection:keep-alive\\n\")\n se.send(\"Host:\"+host+\"\\r\\n\")\n se.send(\"user-agent: Googlebot\\n\\n\")\n res=se.recv(1024).split(\"\\n\")[0]\n return res,(time.time()-oldtime)\n except Exception as e:\n return e,time.time()-oldtime\n","repo_name":"mgchbot/httploadtest","sub_path":"Get.py","file_name":"Get.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"13394968607","text":"import numpy as np\nfrom sktime.datasets import *\nimport pandas as pd\nimport os\nimport time\nimport matplotlib.pyplot as plt\nimport warnings\nfrom sklearn import preprocessing, svm\nfrom sklearn.feature_selection import SelectPercentile, chi2, VarianceThreshold\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import *\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score, plot_confusion_matrix, \\\n ConfusionMatrixDisplay\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef read_datasets(test_filepath, train_filepath):\n test_absolute_path = os.path.abspath(test_filepath)\n train_absolute_path = os.path.abspath(train_filepath)\n testDF_x, testDF_y = load_from_tsfile_to_dataframe(test_absolute_path, replace_missing_vals_with='NaN')\n trainDF_x, trainDF_y = load_from_tsfile_to_dataframe(train_absolute_path, replace_missing_vals_with='NaN')\n defrag_testDF_x, defrag_testDF_y = testDF_x.copy(), testDF_y.copy() # increase performance\n 
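# Working on copies gives pandas defragmented DataFrames, which helps avoid the PerformanceWarning that the column-by-column inserts in add_criterias would otherwise trigger.\n 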
defrag_trainDF_x, defrag_trainDF_y = trainDF_x.copy(), trainDF_y.copy()\n    return defrag_testDF_x, defrag_testDF_y, defrag_trainDF_x, defrag_trainDF_y\n\n# vector of criteria\ndef add_criterias(df):\n    criteria = [np.max, np.min, np.mean, np.median, np.std]\n    # create a dataframe\n    data = [pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()]\n    # apply every criterion to each column\n    for col, _ in df.items():  # iteritems() was removed in pandas 2.0\n        for i in range(len(criteria)):\n            data[i][col] = df[col].apply(criteria[i])\n    return data\n\n\ndef select_cols(df_x, df_y, percentile, dataset_selection):\n    if dataset_selection == \"uwave\":\n        # select the columns whose variance is above the configured threshold; the default of 0 keeps every feature whose values are not all identical\n        selection = VarianceThreshold()\n        #selection = SelectPercentile(chi2, percentile=percentile)\n    else:\n        # select the best features as scored by the chi2 evaluation function\n        selection = SelectPercentile(chi2, percentile=percentile)\n    selection.fit(df_x, df_y)\n    return selection.get_support(indices=True)\n\n\ndef select_df(train_df_x, train_df_y, test_df_x, percentile, dataset_selection):\n    criteria = [np.max, np.min, np.mean, np.median, np.std]\n    selected = []\n    train_df_crit_x = add_criterias(train_df_x.copy())\n    print(\"Train_df_crit_x\")\n    print(train_df_crit_x)\n    for idx in range(len(criteria)):\n        tmp = select_cols(train_df_crit_x[idx], train_df_y, percentile, dataset_selection)\n        selected.append(tmp)\n    # keep all distinct columns\n    selected = np.unique(selected)\n    print(selected)\n    print(len(selected))\n    train_df_x_sel = train_df_x.copy().iloc[:, selected]\n    test_df_x_sel = test_df_x.copy().iloc[:, selected]\n    # add_criterias for initial train and test\n    df_train = add_criterias(train_df_x_sel)\n    df_test = add_criterias(test_df_x_sel)\n    return df_train, df_test, selected\n\n\ndef run_algorithm(train_df_x, train_df_y, test_df_x, test_df_y, percentile, dataset_selection):\n    names = ['max', 'min', 'mean', 'median', 'std']\n    #select_df\n    df_train, df_test, selected = select_df(train_df_x, train_df_y, test_df_x, percentile, dataset_selection)\n    print(\"df_train: {}\".format(df_train))\n    print(\"df_test: {}\".format(df_test))\n    print(\"df_selected: {}\".format(selected))\n    keys = [list(map(lambda x: str(x) + names[i], selected)) for i in range(5)]\n    print(\"keys: {}\".format(keys))\n    for i in range(5):\n        df_train[i].columns = keys[i]\n        df_test[i].columns = keys[i]\n    train_final_x = pd.concat(df_train, axis=1)\n    test_final_x = pd.concat(df_test, axis=1)\n    print(\"train_final_x: {}\".format(train_final_x))\n    print(\"test_final_x: {}\".format(test_final_x))\n    random_forest(test_df_y, test_final_x, train_df_y, train_final_x, dataset_selection)\n    svm_alg(test_df_y, test_final_x, train_df_y, train_final_x, dataset_selection)\n    gradient_boosted_trees(test_df_y, test_final_x, train_df_y, train_final_x, dataset_selection)\n\n\ndef random_forest(test_df_y, test_final_x, train_df_y, train_final_x, dataset_selection):\n    # candidate values for the hyperparameters\n    parameters = {\n        'n_estimators': [50, 100, 150, 200, 250], # number of estimators used\n        'max_features': ['auto', 'sqrt', 'log2'], # algorithm used for feature evaluation\n        'max_depth': [1, 2, 5, 9, 10] # depth of the classification trees\n    }\n    # choice of classifier\n    rfc = RandomForestClassifier()\n    print(\"rfc: {}\".format(rfc))\n    # search for the hyperparameter combination with the best accuracy for the chosen classifier\n    clf = 
GridSearchCV(rfc, parameters)\n    clf.fit(train_final_x, train_df_y)\n    predicted = clf.predict(test_final_x)\n    print(\"predicted: {}\".format(predicted))\n    # the result tables are built from these two prints\n    print(clf.best_params_)\n    print(classification_report(test_df_y, predicted))\n    ConfusionMatrixDisplay.from_predictions(test_df_y, predicted)\n    plt.show()\n\n\ndef svm_alg(test_df_y, test_final_x, train_df_y, train_final_x, dataset_selection):\n    if dataset_selection == \"pems\":\n        parameters = {\n            'kernel': ('linear', 'rbf', 'poly', 'sigmoid'), # kernel type\n            'C': [0.1, 1, 10, 50, 100, 500, 1000], # regularization parameter\n            'gamma': [0.1, 0.5, 1, 5, 10, 100, 1000] # scaling factor\n        }\n    elif dataset_selection == \"uwave\":\n        parameters = {\n            'kernel': ('linear', 'rbf', 'sigmoid'),\n            'C': [0.01, 0.05, 0.1, 0.5, 1, 10, 50, 100, 500, 1000, 5000],\n            'gamma': [0.1, 0.5, 1, 5, 10, 100, 1000, 5000]\n        }\n    else:\n        parameters = {\n        }\n    svc = svm.SVC()\n    clf = GridSearchCV(svc, parameters)\n    clf.fit(train_final_x, train_df_y)\n    predicted = clf.predict(test_final_x)\n    print(clf.best_params_)\n    print(classification_report(test_df_y, predicted))\n    ConfusionMatrixDisplay.from_predictions(test_df_y, predicted)\n    plt.show()\n\n\ndef gradient_boosted_trees(test_df_y, test_final_x, train_df_y, train_final_x, dataset_selection):\n    parameters = {\n        'n_estimators': [50, 100, 150, 200, 250], # number of estimators used\n        'max_depth': [1, 2, 5, 9, 10], # maximum tree depth\n    }\n    model = GradientBoostingClassifier()\n    clf = GridSearchCV(model, parameters, verbose=2)\n    clf.fit(train_final_x, train_df_y)\n    predicted = clf.predict(test_final_x)\n    print(clf.best_params_)\n    print(classification_report(test_df_y, predicted))\n    ConfusionMatrixDisplay.from_predictions(test_df_y, predicted)\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    warnings.filterwarnings('ignore')\n    start_time = time.time_ns()\n    #pems_testDF_x, pems_testDF_y, pems_trainDF_x, pems_trainDF_y = read_datasets(\"PEMS/PEMS-SF_TEST.ts\",\n                                                                                #\"PEMS/PEMS-SF_TRAIN.ts\")\n\n    # print(pems_testDF_x)\n    # print(pems_testDF_y)\n    # print(pems_trainDF_x)\n    # print(pems_trainDF_y)\n    uwave_testDF_x, uwave_testDF_y, uwave_trainDF_x, uwave_trainDF_y = read_datasets(\n        \"UWaveGesture/UWaveGestureLibrary_TEST.ts\", \"UWaveGesture/UWaveGestureLibrary_TRAIN.ts\")\n    #run_algorithm(pems_trainDF_x, pems_trainDF_y, pems_testDF_x, pems_testDF_y, 1, \"pems\")\n    run_algorithm(uwave_trainDF_x, uwave_trainDF_y, uwave_testDF_x, uwave_testDF_y, 1, \"uwave\")\n    end_time = time.time_ns()\n    print(\"Elapsed time: {} ms\".format((end_time - start_time) / 1000000))\n","repo_name":"andutzu999/Machine-Learning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"40866938009","text":"import numpy as np\n\n'''A = np.array([3,29,82])\nprint(A)\nprint(A.T)\nprint(np.array_equal(A,A.T))\nE = np.array([[3],[29],[82]])\nprint(E)\nprint(E.T)\nprint(A.T == E.T)\n\nF = np.zeros((3,2))\nH = np.eye(3)\nI = np.empty((3,2))\nJ = np.empty((0,9))\nM = np.random.uniform(size = (3,2))\nprint(F ,'\\n', H ,'\\n', I ,'\\n', J ,'\\n', M)'''\n\nA = np.array([[1,2],[3,4]])\ndet = np.linalg.det(A)\nprint(f\"{det}\")\n\nprint(A + 
[[1],[1]])","repo_name":"yonguk98/Seoultech_Oss_21_2","sub_path":"midterm_test/test.linearalg.py","file_name":"test.linearalg.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"5949166888","text":"n = int(input())\nlist_ = []\n\nfor i in range(0, n):\n arr = []\n a, b = list([int(i) for i in input().split()])\n arr.append(a)\n arr.append(b)\n list_.append(arr)\n\ndef uscln(a, b):\n if (b == 0):\n return a;\n return uscln(b, a % b);\n\ndef RutGon(list_):\n for i in list_:\n x = uscln(i[0], i[1])\n i[0] = i[0] // x\n i[1] = i[1] // x\n if i[1] == 1:\n print(i[0])\n else:\n print(i[0], i[1])\nRutGon(list_)","repo_name":"ttknguyen/CS114.L22.KHCL","sub_path":"Assignment/19520209/Week 1.2/Problem 5.py","file_name":"Problem 5.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"17493406798","text":"from mysql_connector import *\n\n\n\ndef cancel_train(user_id,train_id,class_type):\n \n mycursor = mydb.cursor()\n ticket_number=input(\"\\nEnter ticket number : \")\n query=\"select count(*) from bookings where train_id=\"+str(train_id)+\" and class_type=\"+\"'\"+class_type+\"'\"+\" and user_id='\"+str(user_id)+\"'\"+\" and ticket_number=\"+ticket_number\n mycursor.execute(query)\n myresult = mycursor.fetchone()[0]\n if myresult==0:\n print(\"\\nWrong Train Number - Program aborted\\n\")\n return\n query=\"delete from bookings where train_id=\"+str(train_id)+\" and class_type=\"+\"'\"+class_type+\"'\"+\" and user_id='\"+str(user_id)+\"'\"+\" and ticket_number=\"+ticket_number\n \n\n\n sql=\"update train set seats=seats+1 where id=\"+str(train_id)+\" and class_type=\"+\"'\"+class_type+\"'\"\n mycursor.execute(sql) \n mydb.commit()\n\n \n mycursor.execute(query)\n print(\"\\nYour booking has been cancelled successfully!!!\\n\")\n mydb.commit()\n ","repo_name":"vigneshr1603/Train-Booking-Application","sub_path":"cancel_train.py","file_name":"cancel_train.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"34395992283","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nFilter annotation table to keep only desired transcripts from provided transcript list\n\"\"\"\nimport csv\nimport sys\nimport argparse\n\ntranscript_colname = \"Transcript\"\nNA_char = \".\"\ninvalid_chars = [\"UNKNOWN\", NA_char]\ndef main(**kwargs):\n \"\"\"\n Main control function for the script\n \"\"\"\n input_file = kwargs.pop('input_file', None)\n output_file = kwargs.pop('output_file', None)\n transcripts_file = kwargs.pop('transcripts_file', \"transcripts.txt\")\n\n # read all transcripts\n with open(transcripts_file) as f:\n transcripts = set([ t.strip() for t in f.readlines() ])\n\n transcripts.add(NA_char)\n\n # open input/output filehandles\n if input_file:\n fin = open(input_file)\n else:\n fin = sys.stdin\n\n if output_file:\n fout = open(output_file, \"w\")\n else:\n fout = sys.stdout\n\n # start tsv parsing\n reader = csv.DictReader(fin, delimiter = '\\t')\n fieldnames = [f for f in reader.fieldnames]\n writer = csv.DictWriter(fout, delimiter = '\\t', fieldnames = fieldnames)\n writer.writeheader()\n\n for row in reader:\n if row[transcript_colname] in transcripts:\n writer.writerow(row)\n\n fout.close()\n fin.close()\n\n\ndef parse():\n \"\"\"\n Parses script args\n \"\"\"\n parser = 
argparse.ArgumentParser(description='Filters the annotation table to output only annotations that match desired transcripts. Keeps entries with NA or empty values')\n parser.add_argument(\"-i\", default = None, dest = 'input_file', help=\"Input file\")\n parser.add_argument(\"-o\", default = None, dest = 'output_file', help=\"Output file\")\n parser.add_argument(\"-t\", default = \"transcripts.txt\", dest = 'transcripts_file', help=\"Transcripts file\")\n args = parser.parse_args()\n main(**vars(args))\n\nif __name__ == '__main__':\n parse()\n","repo_name":"NYU-Molecular-Pathology/NGS580-nf","sub_path":"bin/filter-annotations-transcripts.py","file_name":"filter-annotations-transcripts.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"28"} +{"seq_id":"382626313","text":"from rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView, RetrieveUpdateDestroyAPIView, ListCreateAPIView, CreateAPIView\nfrom . import serializers, models\nfrom accounts.views import MyPagination\n\n\nclass ListNewsCategoryCreateView(ListCreateAPIView):\n pagination_class = MyPagination\n queryset = models.NewsCategory.objects.all()\n serializer_class = serializers.NewsCategorySerializer\n\n\nclass ListNewsCategoryCreatem2View(ListCreateAPIView):\n queryset = models.NewsCategory.objects.all()\n serializer_class = serializers.NewsCategorySerializer\n\n\nclass NewsCategoryUpdateDetailDestroyView(RetrieveUpdateDestroyAPIView):\n queryset = models.NewsCategory.objects.all()\n serializer_class = serializers.NewsCategorySerializer\n\n\nclass ListNewsView(ListAPIView):\n pagination_class = MyPagination\n queryset = models.News.objects.all()\n serializer_class = serializers.NewsSerializer\n\n\nclass LastNewsListView(ListAPIView):\n queryset = models.News.objects.all()[:3]\n serializer_class = serializers.NewsSerializer\n\n\nclass CreateNewsView(CreateAPIView):\n queryset = models.News.objects.all()\n serializer_class = serializers.NewsCreateSerializer\n\n\nclass NewsUpdateDetailDestroyView(APIView):\n def get(self, request, pk):\n news = models.News.objects.filter(id=pk)\n if news:\n serializer = serializers.NewsSerializer(news.first(), context={'request': request})\n return Response(serializer.data, status=200)\n return Response(status=404)\n\n def put(self, request, pk):\n news = models.News.objects.filter(id=pk)\n if news:\n serializer = serializers.NewsCreateSerializer(data=request.data, instance=news.first())\n if serializer.is_valid():\n serializer.save()\n return Response(status=200)\n return Response(serializer.errors, status=400)\n return Response(status=404)\n\n def delete(self, request, pk):\n news = models.News.objects.filter(id=pk)\n if news:\n news = news.first()\n news.delete()\n return Response(status=204)\n return Response(status=404)\n\n\nclass SendUserAgent(APIView):\n def get(self, request):\n user_agent = request.META['HTTP_USER_AGENT']\n return Response({'user_agent': user_agent}, status=200)\n","repo_name":"amirsalmanii/dena","sub_path":"backend/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"9889839426","text":"\"\"\"Add Status Create SEP\n\nRevision ID: e4f676c1b384\nRevises: d3dbb32db3e7\nCreate Date: 2022-02-03 09:55:42.866410\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision 
identifiers, used by Alembic.\nrevision = 'e4f676c1b384'\ndown_revision = 'd3dbb32db3e7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('sep_request', sa.Column('status_created_sep', sa.String(length=250), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('sep_request', 'status_created_sep')\n    # ### end Alembic commands ###\n","repo_name":"teddytkz/flask-rest-bpjs-rsmh","sub_path":"migrations/versions/e4f676c1b384_add_status_create_sep.py","file_name":"e4f676c1b384_add_status_create_sep.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17692975283","text":"import xml.etree.ElementTree as ET\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ntree = ET.parse('HY202103_D08_(0,2)_LION1_DCM_LMZC.xml')\nroot = tree.getroot()\n\ndef spfl(a): # define the spfl function\n    sp = a.text.split(',') # split on ',' and collect the values\n    fl = list(map(float, sp)) # convert the collected values to floats and store them in a list\n    return fl # return fl\n\n# I-V Graph\nfor data in root.iter('Voltage'):\n    v = spfl(data) # store the values inside 'Voltage' in v via spfl\n    # print(f'{data.tag}: {data.text}')\n    # a = data.text.split(',')\n    # b = list(map(float,a))\n\nfor data in root.iter('Current'):\n    i = list(map(abs, spfl(data))) # apply spfl to the 'Current' values and store their absolute values in a list\n    # print(f'{data.tag}: {data.text}')\n    # c = data.text.split(',')\n    # d = list(map(float,c))\n    # d = list(map(abs,d))\n\nplt.figure(figsize = (10,5))\n# plt.subplots(constrained_layout = True)\nplt.subplot(2,2,4)\nplt.title(\"IV analysis\")\nplt.plot(v, i, 'bo--', label = 'I-V curve')\nplt.xlabel(\"Voltage [V]\")\nplt.ylabel(\"Current [A]\")\nplt.legend(loc = 'best')\nplt.yscale('log')\n\n# plt.ylim([1e-11, 8e-10])\n\n# Raw Spectrum\nwvl = []\nitst = []\nfor data in root.iter('L'):\n    L1 = data.text.split(',')\n    L2 = list(map(float, L1))\n    wvl.append(L2)\nfor data in root.iter('IL'):\n    IL1 = data.text.split(',')\n    IL2 = list(map(float, IL1))\n    itst.append(IL2)\n\nlgds = []\nfor data in root.iter(\"WavelengthSweep\"):\n    lgds.append(data.get(\"DCBias\"))\n\nplt.subplot(2, 2, 1)\nfor n in range(len(wvl)):\n    plt.title(\"Transmission spectra-as measured\")\n    plt.xlabel(\"Wavelength [nm]\")\n    plt.ylabel(\"Measured transmission [dB]\")\n    plt.rc(\"legend\", fontsize = 7)\n    if n == 6:\n        plt.plot(wvl[6], itst[6], label = 'DCBias = REF')\n    else:\n        plt.plot(wvl[n], itst[n], label = f'DCBias = {lgds[n]}V')\n    plt.legend(loc = 'best', ncol = 3)\n\n# Fitting\nplt.subplot(2, 2, 2)\nfor n in range(len(wvl)):\n    if n == 6:\n        plt.plot(wvl[n], itst[n], label=\"REF\")\n    else:\n        continue\n# for i in range(4, 7):\ndp1 = np.polyfit(wvl[6], itst[6], 4)\nf1 = np.poly1d(dp1)\nplt.plot(wvl[6], f1(wvl[6]), 'r--', label = 'fit')\nplt.legend(loc='lower right')\nplt.xlabel('Wavelength[nm]')\nplt.ylabel('Transmissions[dB]')\nplt.title('Transmission spectra - fitted')\n# def polyfit(x, y, degree):\n#     results = {}\n#     coeffs = np.polyfit(x, y, degree)\n#     results['polynomial'] = coeffs.tolist()\n#     p = np.poly1d(coeffs)\n#     yhat = p(x)\n#     ybar = np.sum(y)/len(y)\n#     ssreg = np.sum((yhat-ybar)**2)\n#     sstot = np.sum((y - ybar)**2)\n#     results['determination'] = ssreg / sstot\n#     return results\n\nplt.savefig(\"PE2_TW02_Pic.png\",dpi = 300, bbox_inches = 
'tight')\nplt.show()\n","repo_name":"hiry0/PE02_heeryeong","sub_path":"PE2_L05_2020042060_장희령/graph2.py","file_name":"graph2.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"31514975305","text":"from sklearn.datasets import load_breast_cancer\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\n\nfrom ngboost import NGBClassifier\nfrom ngboost.distns import k_categorical\n\nif __name__ == \"__main__\":\n    # An example where the base learner is also searched over (this is how you would vary tree depth):\n\n    X, Y = load_breast_cancer(return_X_y=True)\n    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n    b1 = DecisionTreeRegressor(criterion=\"friedman_mse\", max_depth=2)\n    b2 = DecisionTreeRegressor(criterion=\"friedman_mse\", max_depth=4)\n    b3 = Ridge(alpha=0.0)\n\n    param_grid = {\n        \"n_estimators\": [20, 50],\n        \"minibatch_frac\": [1.0, 0.5],\n        \"Base\": [b1, b2],\n    }\n\n    ngb = NGBClassifier(natural_gradient=True, verbose=False, Dist=k_categorical(2))\n\n    grid_search = GridSearchCV(ngb, param_grid=param_grid, cv=5)\n    grid_search.fit(X_train, Y_train)\n    print(grid_search.best_params_)\n","repo_name":"stanfordmlgroup/ngboost","sub_path":"examples/sklearn_cv.py","file_name":"sklearn_cv.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":1533,"dataset":"github-code","pt":"28"} +{"seq_id":"30046270276","text":"import sqlite3\r\nimport pandas as pd\r\n\r\n\r\ndef view_database():\r\n    \"\"\"\r\n    This function displays a dataframe of the database.\r\n    \"\"\"\r\n\r\n    db_name = 'news_archive.db'\r\n\r\n    try:\r\n        # Establish a connection with database db_name\r\n        connection = sqlite3.connect(db_name)\r\n\r\n        # Make query, read and display database\r\n        view = \"SELECT * FROM news_archive;\"\r\n        df = pd.read_sql_query(view, connection)\r\n        print(df)\r\n\r\n        connection.close()\r\n\r\n    except sqlite3.Error as error:\r\n        print('Failed to display sqlite database/table', error)\r\n\r\n\r\nview_database()\r\n\r\n","repo_name":"tuobaar/spiegel_crawler","sub_path":"view_database.py","file_name":"view_database.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"33888814388","text":"from django.contrib import admin\n\n\nclass BaseAdmin(admin.ModelAdmin):\n    actions = ('soft_delete', 'reactivate')\n\n    @admin.action(description='Deactivate')\n    def soft_delete(self, request, queryset):\n        for item in queryset:\n            item.soft_delete = True\n            item.save()\n        return None\n\n    @admin.action(description='Reactivate')\n    def reactivate(self, request, queryset):\n        for item in queryset:\n            item.soft_delete = False\n            item.save()\n        return None\n","repo_name":"saraeygh/trello-team-collaboration","sub_path":"core/admin/base_admin.py","file_name":"base_admin.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"16041864042","text":"\n'''\n    This is a class to represent a solid frame (geometry) with dipoles inside of it.\n    It can be used by itself to see what that frame's magnetic signature looks like.\n    It can also be used with other frames to, perhaps, simulate a sensor in the real world. 
\n\n'''\n\n\nimport numpy as np\nfrom icecream import ic\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport ppigrf\n\n\nimport dipole\nimport geometry as geo\n\n\n\n\nclass MagSignature:\n\n\n #init the magnetic signature\n def __init__(self,sensorpos):\n self.dipolelist = [] #initialise the list of dipoles for this magnetic signature\n self.wirelist = []\n self.set_SensorPosition(sensorpos) #set the sensors position in this frame\n self.set_TranslationMatrix() #initialise the transformation matrix\n self.initdefaultBe()#initialise the default earth magnetic field for testing\n \n\n #method to load a spread sheet (csv format) of dipoles. this includes position, \n def loadFromCSV(self,filename):\n pass\n\n #add a dipole to the signature\n def newDipole(self,dipole):\n self.dipolelist.append(dipole)\n \n #add a wire to the signature\n def newWire(self,wire):\n wire.setSensorPos(geo.Transformation(self.aTb,self.sensorpos))#setting the sensor's position in the signature's coords\n self.wirelist.append(wire)\n \n #set the sensor vector to all dipoles\n def set_Sensor_Vector(self):\n for i in self.dipolelist:\n i.set_sensorVector(geo.Transformation(self.aTb,self.sensorpos-i.get_Dipole_pos()))\n\n \n\n \n #set the translation matrix \n def set_TranslationMatrix(self,x=0,y=0,z=0,thetax=0,thetay=0,thetaz=0):\n self.aTb=geo.TranslationMatrix(thetax,thetay,thetaz,x,y,z)\n #ic(self.aTb)\n\n #get the scalar output from the sensor and the totalfield.\n def resB(self,amp=30):\n phresB=0\n self.c1,self.c2,self.c3=0,0,0\n self.c4,self.c5,self.c6,self.c7,self.c8,self.c9=0,0,0,0,0,0\n self.TFperm=np.array([0]).reshape(1,1)\n self.TFind=np.array([0]).reshape(1,1)\n self.TFwire=np.array([0]).reshape(1,1)\n self.TF=np.array([0]).reshape(1,1)\n self.resultantB=np.array([0,0,0]).reshape(3,1)\n\n if(len(self.dipolelist)>0):# only calculate dipoles if there are some\n for i in self.dipolelist:\n #ic(i.get_vB())\n phresB=phresB+i.get_vB()\n i.get_C()\n self.c1=self.c1+i.C1\n self.c2=self.c2+i.C2\n self.c3=self.c3+i.C3\n self.c4=self.c4+i.C4\n self.c5=self.c5+i.C5\n self.c6=self.c6+i.C6\n self.c7=self.c7+i.C7\n self.c8=self.c8+i.C8\n self.c9=self.c9+i.C9\n\n #ic(phresB+self.Be.reshape(3,1))\n self.resultantB=phresB+self.Be.reshape(3,1)\n self.res_sB=np.sqrt(self.resultantB.reshape(1,3).dot(self.resultantB))\n self.TFperm=self.c1*self.Be[0]/self.sBe+self.c2*self.Be[1]/self.sBe+self.c3*self.Be[2]/self.sBe\n self.TFind=self.c4*self.Be[0]**2/self.sBe+self.c5*self.Be[0]*self.Be[1]/self.sBe+self.c6*self.Be[0]*self.Be[1]/self.sBe+self.c7*self.Be[1]**2/self.sBe+self.c8*self.Be[1]*self.Be[2]/self.sBe+self.c9*self.Be[2]**2/self.sBe\n self.TFind=self.TFind.reshape(1,1)\n if(len(self.wirelist)>0):# only calculate wires if there are some\n self.updateWire(amp)\n self.TFwire=np.linalg.norm( self.Bw.reshape(1,3).dot(self.Be)/self.sBe).reshape(1,1)#.reshape(1,3).dot(self.Be)/self.sBe\n\n #ic(self.TFwire)\n #ic(self.TFperm)\n #ic(self.TFind)\n self.TF=self.TFperm[0,0]+self.TFind[0,0]+self.TFwire[0,0]# sum all parts of the TotalField (TFpermanent,TFinduced,TFelectric,TFeddycurrents,(TFgeomagnetic,TFoceanswell,TFionosphere,TFgeology))\n #ic(self.TF)\n #ic(self.resultantB.reshape(1,3).dot(self.Be)/self.sBe)\n #ic(self.TF/(self.resultantB.reshape(1,3).dot(self.Be)/self.sBe))\n return self.resultantB\n\n #The sensor position in a (aTb) coords\n def set_SensorPosition(self,sensorpos):\n \n self.sensorpos=np.array(sensorpos).reshape(3,1)\n \n def setBe(self,Be):\n self.Be=Be\n self.sBe= 
np.sqrt(self.Be.reshape(1,3).dot(self.Be))\n\n    def updateWire(self,amp):\n        self.Bw=np.array([0,0,0]).reshape(3,1)\n        fieldFromWires=0\n        for i in self.wirelist:\n            i.setAmp(amp)\n            i.setSensorPos(self.sensorpos)\n            fieldFromWires=fieldFromWires+i.getField()\n        self.Bw=fieldFromWires\n\n    # set the magnetic field of the earth for the equations\n    def initdefaultBe(self,lon=-75.552067,lat=45.406838,h=0.0,date = datetime(2023, 10, 20)):\n        Be,Bn,Bu=ppigrf.igrf(lon,lat,h,date)\n        self.Be=np.concatenate((Be,Bn,Bu),axis=0)*1e-9\n        self.sBe = np.sqrt(self.Be.reshape(1,3).dot(self.Be))\n","repo_name":"Revixa1/Simple_Sim","sub_path":"tools/magnetic/magsignature.py","file_name":"magsignature.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
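The MagSignature.resB method in the record above sums per-dipole field vectors coming from the imported dipole module, which is not included in this record. For illustration only, here is a minimal numpy sketch of the standard point-dipole field that such a module would typically evaluate; the function name, argument order, and SI units are assumptions rather than the repository's actual API.

import numpy as np

MU0 = 4e-7 * np.pi  # vacuum permeability [T*m/A]

def point_dipole_field(m, r):
    """Flux density B [T] of a point dipole with moment m [A*m^2],
    evaluated at displacement r [m] from the dipole to the sensor."""
    m = np.asarray(m, dtype=float)
    r = np.asarray(r, dtype=float)
    d = np.linalg.norm(r)   # dipole-to-sensor distance
    rhat = r / d            # unit vector along r
    # B = mu0/(4*pi) * (3*rhat*(m . rhat) - m) / |r|^3
    return MU0 / (4.0 * np.pi) * (3.0 * rhat * np.dot(m, rhat) - m) / d**3

A field of this form, summed over all dipoles and added to the IGRF background field, is what resB assembles from dipole.get_vB() and self.Be.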
Calendar API\n now = datetime.datetime.now().astimezone(pytz.timezone('Europe/Warsaw')).isoformat()\n\n n = 5\n\n try:\n if len(args) > 0:\n n = int(args) //2\n assert n > 0\n except AssertionError:\n return \"LIST 0 EVENTS?\"\n except:\n pass\n\n # try:\n\n events_result = service.events().list(calendarId='smart.assistant.python@gmail.com', timeMin=now,\n maxResults=n, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n if not events:\n print('No upcoming events found.')\n\n print('Upcoming', len(events), 'events')\n for event in events:\n start = datetime.datetime.fromisoformat(event['start'].get('dateTime', event['start'].get('date')))\n print(start, event['summary'])\n\n\ndef main():\n # prepare_service()\n\n add_event(\"08.06.2021 from 22:00 to 23:00 name spanko\")\n\n list_events(\"2\")\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"RysiekPrecikowski/smart_assistant","sub_path":"my_calendar.py","file_name":"my_calendar.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"26730514401","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport os.path\nimport toml\n\nparser = argparse.ArgumentParser(description='text to image with diffuser toolkit')\nparser.add_argument(\n '-c', '--config', metavar='PATH', type=str, default='./diffusable.toml',\n help='toml file to load task parameters from (default: \"./diffusable.toml\")')\nparser.add_argument(\n '-t', '--tasks', action='append', default=[],\n help='tasks from configuration file to execute')\nparser.add_argument(\n '-n', '--name', type=str,\n help='base file name to use for generated images')\nparser.add_argument(\n '-m', '--model', type=str, default='runwayml/stable-diffusion-v1-5',\n help='diffusion model to use for inference (default: \"runwayml/stable-diffusion-v1-5\")')\nparser.add_argument(\n '-p', '--num_outputs', type=int, default=1,\n help='number of images to generate per prompt (default: 1)')\nparser.add_argument(\n '-i', '--num_inference_steps', type=int, default=50,\n help='number of denoising steps, higher increases quality (default: 50)')\nparser.add_argument(\n '-W', '--width', type=int, default=512,\n help='width of generated image (default: 512)')\nparser.add_argument(\n '-H', '--height', type=int, default=512,\n help='height of generated image (default: 512)')\nparser.add_argument(\n '-s', '--seed', type=int, default=0,\n help='seed to use for generator (default: random)')\nparser.add_argument(\n '-g', '--guidance_scale', type=int, default=7,\n help='how closely to link images to prompt, higher can reduce image quality (default: 7)')\nparser.add_argument(\n '-r', '--models_dir', type=str, default='~/src/huggingface.co/',\n help='root directory containing huggingface models (default: \"~/src/huggingface.co\")')\nparser.add_argument(\n '-d', '--download_models', action='store_true', default=False,\n help='allow automatic downloading of diffuser models (default: False)')\nparser.add_argument(\n '-o', '--output_dir', type=str, default='./output/',\n help='directory to write image output (default: \"./output/\")')\nparser.add_argument(\n '-a', '--all_tasks', action='store_true',\n help='run all tasks from the configuration file')\nparser.add_argument(\n '-l', '--list_tasks', action='store_true',\n help='list all tasks from the configuration file')\nparser.add_argument(\n '--dump', action='store_true',\n help='dump configuration and 
exit')\nparser.add_argument(\n '--disable_trigger', action='store_true',\n help='do not prepend model triggers to prompts')\nparser.add_argument(\n '-j', '--repeat', type=int, default=1,\n help='repeat the specified task this number of times')\nparser.add_argument(\n '-x', '--negative_prompts', action='append',\n help='prompts to negate from the generated image')\nparser.add_argument(\n 'prompts', metavar='PROMPT', nargs='*',\n help='prompt to generate images from')\nFLAGS = parser.parse_args()\n\n# voodoo magic to find explicitly defined flags\nFLAGS_SENTINEL = list()\nFLAGS_SENTINEL_NS = argparse.Namespace(**{ key: FLAGS_SENTINEL for key in vars(FLAGS) })\nparser.parse_args(namespace=FLAGS_SENTINEL_NS)\nEXPLICIT_FLAGS = vars(FLAGS_SENTINEL_NS).items()\n\nCONFIG_SKIP_FLAGS = ('config', 'tasks', 'dump', 'all_tasks', 'repeat', 'list_tasks', 'prompts')\nCONFIG = {'DEFAULT': {}}\nCONFIG_TASKS = []\n\nif FLAGS.config:\n if os.path.exists(FLAGS.config):\n print('[*] loading configuration from', FLAGS.config)\n CONFIG = toml.load(FLAGS.config)\n for task in CONFIG:\n if task == 'DEFAULT': \n continue\n CONFIG_TASKS.append(task)\n\n\ndef normalize_config(config, random_seed=False):\n if not config.get('seed') or random_seed:\n config['seed'] = int.from_bytes(os.urandom(2), 'big')\n\n model_triggers = {}\n if 'model_triggers' in CONFIG['DEFAULT']:\n model_triggers = CONFIG['DEFAULT']['model_triggers']\n del config['model_triggers']\n trigger = model_triggers.get(config['model'])\n if trigger and not config['disable_trigger']:\n print('[*] prepending model trigger to prompts:', trigger)\n config['prompts'] = [trigger + ' ' + prompt for prompt in config['prompts']]\n del config['disable_trigger']\n\n\ndef task_config(task):\n config = {}\n config.update(CONFIG['DEFAULT'])\n if task not in CONFIG:\n print('[!] task not found in configuration file:', task)\n return config\n config.update(CONFIG[task])\n config['name'] = task\n\n # calculate which flags were set explicitly and override config options\n for key, value in EXPLICIT_FLAGS:\n if key in CONFIG_SKIP_FLAGS:\n continue\n if value is not FLAGS_SENTINEL:\n config[key] = value\n elif key not in config:\n config[key] = getattr(FLAGS, key)\n\n return config\n\n\ndef task_config_from_flags(prompt):\n config = {}\n config.update(CONFIG['DEFAULT'])\n for key, value in vars(FLAGS).items():\n if key in CONFIG_SKIP_FLAGS:\n continue\n config[key] = value\n config['prompts'] = [prompt]\n return config\n\n\ndef choose_image_path(root, basename):\n image_name = None\n i = 0\n while True:\n output_file = '%s.%d.png' % (basename, i)\n output_path = os.path.expanduser(os.path.join(root, output_file))\n if not os.path.exists(output_path):\n return output_path\n i += 1\n\n\ndef invoke_task(config):\n if not config.get('prompts'):\n print('[!] prompt must be defined in config or on command line, not running pipeline')\n return\n\n if not config.get('name'):\n print('[!] 
--name must be specified in config or on command line, not running pipeline')\n return\n\n local_files_only = False\n if not config.get('download_models'):\n model_path = os.path.expanduser(os.path.join(config['models_dir'], config['model']))\n local_files_only = True\n else:\n print('[*] will attempt to download models from huggingface')\n model_path = config['model']\n if 'download_models' in config:\n del config['download_models']\n\n if FLAGS.dump:\n print(config)\n return\n\n print('[*] using generator seed:', config['seed'])\n\n print('[*] preparing diffusion pipeline from', model_path)\n\n import torch\n from torch import autocast\n from diffusers.models import AutoencoderKL\n from diffusers import StableDiffusionPipeline\n\n pipe = StableDiffusionPipeline.from_pretrained(model_path, local_files_only=local_files_only)\n\n def dummy(images, **kwargs):\n return images, False\n\n pipe.safety_checker = dummy\n\n print('[*] executing diffusion pipeline with prompt:', config['prompts'])\n print('[*] images will be written to', config['output_dir'], 'with base name', config['name'])\n\n if config.get('negative_prompts'):\n print('[*] executing with negative prompts:', config['negative_prompts'])\n #print('[*] will generate', FLAGS.num_outputs, 'images per prompt')\n\n generator = torch.Generator().manual_seed(config['seed'])\n\n # Reference pipeline parameters here:\n # https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L467\n output = pipe(\n prompt=config['prompts'],\n negative_prompt=config['negative_prompts'],\n num_images_per_prompt=config['num_outputs'],\n num_inference_steps=config['num_inference_steps'],\n width=config['width'],\n height=config['height'],\n generator=generator,\n guidance_scale=config['guidance_scale'],\n )\n os.makedirs(FLAGS.output_dir, exist_ok=True)\n\n for image in output.images:\n output_path = choose_image_path(config['output_dir'], config['name'])\n print('[*] writing generated image to', output_path)\n image.save(output_path)\n log_path = output_path + '.nfo'\n with open(log_path, 'w') as f:\n f.write(str(config))\n\n\ndef run():\n tasks = FLAGS.tasks\n if FLAGS.all_tasks:\n tasks = CONFIG_TASKS\n\n if FLAGS.list_tasks:\n print('[*] listing available tasks:')\n print()\n for task in CONFIG_TASKS:\n print(task)\n print()\n return\n\n if not FLAGS.prompts and not tasks:\n print('[!] at least one prompt or one config/task must be provided')\n return\n\n if FLAGS.prompts and tasks:\n print('[!] must provide EITHER prompt arguments OR config/tasks')\n return\n\n if len(tasks) > 1 and FLAGS.name:\n print('[!] 
flag --name cannot be used with multiple tasks from config')\n return\n\n for j in range(FLAGS.repeat):\n for task in tasks:\n print('[*] loaded task from configuration file:', task)\n repeat = CONFIG[task].get('repeat', 1)\n for i in range(repeat):\n config = task_config(task)\n normalize_config(config, i > 0 or j > 0)\n invoke_task(config)\n\n for prompt in FLAGS.prompts:\n print('[*] loaded task from command line flags')\n config = task_config_from_flags(prompt)\n normalize_config(config, j > 0)\n invoke_task(config)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"khimaros/diffusable","sub_path":"diffusable.py","file_name":"diffusable.py","file_ext":"py","file_size_in_byte":9377,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"28"} +{"seq_id":"32812486208","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTask 2: the height of the Pile, h(t; L)\n\n\"\"\"\n\n\nimport json\nfrom multiprocessing import Pool\nimport Pile\n\n\nL = (4, 8, 16, 32, 64, 128, 256, 512)\noslo = tuple(map(Pile.Pile, L))\n\ndef f(p):\n heights = []\n avas = []\n d = {}\n while not p.crossover:\n heights.append(p.get_height())\n avas.append(p.ava_size)\n p.simulate()\n d[\"crossover time\"] = p.grains\n for i in range(10**6):\n p.simulate()\n heights.append(p.get_height())\n avas.append(p.ava_size)\n d[\"heights\"] = heights\n d[\"avalance size\"] = avas\n path = \".\\\\data for {}\".format(p.length)\n with open(path, 'w') as datafile:\n json.dump(d, datafile)\n\n\n\nif __name__ == \"__main__\":\n # Task 2a:\n with Pool(8) as pool:\n pool.map(f, oslo)\n","repo_name":"Parlin-Galanodel/Complexity-Networks","sub_path":"Complexity/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"10151121812","text":"\"\"\"\nThis module is responsible for containing anything\nrelated to Teams of students.\n\"\"\"\nclass Team:\n \"\"\"\n This implementation of Team allows us to use the for loop syntax to\n iterate over the team member list.\n\n Builds on top of teams_container.py\n \"\"\"\n\n def __init__(self, team_name, team_members):\n \"\"\"\n Initializes a team with the given name and members\n :param team_name: a string\n :param team_members: a list\n \"\"\"\n self.name = team_name\n self.members = team_members\n\n def __str__(self):\n team_list = \"\"\n for person in self.members:\n team_list += person + \" \"\n return f\"Team: {self.name}\\nMembers: {team_list}\"\n\n def __len__(self):\n \"\"\"\n Returns the number of people in the team\n :return:\n \"\"\"\n return len(self.members)\n\n def __contains__(self, item):\n \"\"\"\n Implements the container protocol. This checks if the given\n item is in the object or not.\n :param item: a string (team members name)\n :return: True if found, False otherwise.\n \"\"\"\n found = False\n for person in self.members:\n if item == person:\n found = True\n break\n return found\n\n def __iter__(self):\n \"\"\"\n Implements the iter protocol. This returns an iterator over the\n team member list allowing us to use this object as part of a for\n loop.\n :return: An Iterator.\n \"\"\"\n return iter(self.members)\n\n\ndef main():\n x_men = Team(\"X-Men\", [\"Professor Xavier\", \"Cyclops\", \"Storm\",\n \"Jean\"])\n print(x_men)\n\n # sized protocol in action\n print(f\"Number of members: {len(x_men)}\")\n\n #Container protocol in action\n print(f\"Is Cyclops part of the X-men? 
{'Cyclops' in x_men}\")\n\n # iter protocol in action\n print(\"The X-Men:\")\n for person in x_men:\n print(person)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"livictor888/COMP3522_OOP2","sub_path":"Lecture7/team_iterable.py","file_name":"team_iterable.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"35713317646","text":"################################################################################\n# NAM Groningen 2017 Model: DeepNL/Utrecht University/Seismology\n#\n# Thomas Cullison - t.a.cullison@uu.nl\n################################################################################\n\nimport numpy as np\n\nmyprops = np.ones((3, 1450, 1198, 601))\nmy_xdata = np.array([2.074625e+05, 5.000000e+01, 1.450000e+03])\nmy_ydata = np.array([5.559625e+05, 5.000000e+01, 1.198000e+03])\nmy_zdata = np.array([ 0., 10., 601.])\n\nprint('myprops.shape:',myprops.shape)\nprint('my_xdata:',my_xdata)\nprint('my_ydata:',my_ydata)\nprint('my_zdata:',my_zdata)\n\nxmin_clip = 0.3\nxmax_clip = 0.65\nix_min = int(xmin_clip*my_xdata[2] + 0.5)\nif ix_min < 0: \n ix_min = int(0)\nix_max = int(xmax_clip*my_xdata[2] + 1.5)\nif my_xdata[2] < ix_max:\n ix_max = int(my_xdata[2])\nc_nx = int(ix_max - ix_min + 1)\nmy_xdata[0] += ix_min*my_xdata[1]\nmy_xdata[2] = c_nx\n\nymin_clip = 0.2\nymax_clip = 1.0\niy_min = int(ymin_clip*my_ydata[2] + 0.5)\nif iy_min < 0: \n iy_min = int(0)\niy_max = int(ymax_clip*my_ydata[2] + 1.5)\nif my_ydata[2] < iy_max:\n iy_max = int(my_ydata[2])\nc_ny = int(iy_max - iy_min + 1)\nmy_ydata[0] += iy_min*my_ydata[1]\nmy_ydata[2] = c_ny\n\nmyprops = np.copy(myprops[:,ix_min:ix_max,iy_min:iy_max,:])\n\nprint('clip myprops.shape:',myprops.shape)\nprint('clip my_xdata:',my_xdata)\nprint('clip my_ydata:',my_ydata)\nprint('clip my_zdata:',my_zdata)\n","repo_name":"code-cullison/gnam","sub_path":"misc/test_clip.py","file_name":"test_clip.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"19601242376","text":"from BinarySearchTree import BinarySearchTree\r\n\r\nbst = BinarySearchTree()\r\n\r\nkeys_list = [ \r\n [7, 6, 5, 4, 3, 2, 1, 8, 12, 10, 9, 11, 14, 13, 15],\r\n [8, 4, 5, 6, 7, 3, 2, 1, 12, 10, 9, 11, 14, 13, 15],\r\n [8, 4, 2, 3, 1, 6, 5, 7, 12, 10, 9, 11, 14, 13, 15],\r\n [8, 4, 2, 3, 1, 6, 5, 7, 12, 10, 9, 11, 14, 13, 8.5]\r\n]\r\n\r\nfor keys in keys_list:\r\n for key in keys:\r\n bst.insert(key)\r\n\r\nprint(\"Tree:\")\r\nbst.printTree()\r\n\r\nroot_node = bst._root\r\n\r\nprint(\"Equilibrio de nodos del nodo raíz:\", bst.nodeBalance(root_node))\r\nprint(\"Equilibrio de nivel del nodo raíz:\", bst.levelBalance(root_node))\r\nprint(\"Nodos desequilibrados (by=1):\", bst.unbalancedNodes(1))\r\nprint(\"Nodos desequilibrados (by=2):\", bst.unbalancedNodes(2))\r\n\r\n\r\n","repo_name":"Alicia809/IS-310_ALGORITMOS","sub_path":"Unidad 2/Cap 8/8.4/BinarySearchTreeClient.py","file_name":"BinarySearchTreeClient.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"19284042949","text":"#verifica se um arquivo existe\r\ndef existe_arquivo(arquivo):\r\n import os\r\n if os.path.exists(arquivo):\r\n return True\r\n else:\r\n return False\r\n \r\n\r\n#tranforma o arquivo em lista\r\ndef obter_base_dados(arquivo):\r\n base_dados = []\r\n if existe_arquivo(arquivo):\r\n arq = open(arquivo, 
\"r\")\r\n for linha in arq:\r\n linha = linha.split(\";\")\r\n base_dados.append(linha)\r\n return base_dados\r\n else:\r\n print(f\"O arquivo {arquivo} não pode ser aberto.\")\r\n\r\n \r\n\r\n#apresenta o submenu de usuários\r\ndef sub_usuários(usuários):\r\n opção = 0\r\n while opção != \"6\":\r\n print(\"\\n---------------------------\\n\"\r\n \" Submenu de Usuários:\\n\"\r\n \"---------------------------\\n\"\r\n \"1. Listar todos\\n\"\r\n \"2. Listar elemento \\n\"\r\n \"3. Incluir\\n\"\r\n \"4. Alterar\\n\"\r\n \"5. Excluir\\n\"\r\n \"6. Voltar\\n\")\r\n opção = input(\"Digite a opção desejada: \")\r\n while opção not in \"123456\":\r\n opção = input(\"Opção inválida! Digite a opção desejada de 1 à 6: \")\r\n \r\n\r\n #lista todos os dados dos usuários cadastrados \r\n if opção == \"1\":\r\n contagem = 1 \r\n for pessoa in range(len(usuários)):\r\n print(f\"\\n -- Usuário {contagem} --\\n\")\r\n print(f\"CPF: {usuários[pessoa][0]}\\n\"\r\n f\"Nome: {usuários[pessoa][1]}\\n\"\r\n f\"Rua: {usuários[pessoa][2]}\\n\"\r\n f\"Número: {usuários[pessoa][3]}\\n\"\r\n f\"CEP: {usuários[pessoa][4]}\")\r\n print(\"E-mail(s):\")\r\n emails = usuários[pessoa][5].split(\" \")\r\n for endereço in emails:\r\n print(f\"\\t{endereço}\")\r\n print(\"Telefone(s):\")\r\n telefones = usuários[pessoa][6].split(\" \")\r\n for telefone in telefones:\r\n print(f\"\\t{telefone}\")\r\n print(f\"Data de nascimento: {usuários[pessoa][7]}\\n\"\r\n f\"Profissão: {usuários[pessoa][8]}\")\r\n contagem += 1\r\n if len(usuários) == 0:\r\n print(\"Não existem usuários cadastrados\")\r\n \r\n \r\n #lista os dados de um usuários específico buscado pelo cpf \r\n if opção == \"2\":\r\n pessoa = input(\"\\nDigite o CPF: \").strip()\r\n busca = False\r\n for cadastro in range(len(usuários)):\r\n if pessoa in usuários[cadastro][0]:\r\n print(f\"\\nNome: {usuários[cadastro][1]}\")\r\n print(f\"Rua: {usuários[cadastro][2]}\")\r\n print(f\"Número: {usuários[cadastro][3]}\")\r\n print(f\"CEP: {usuários[cadastro][4]}\")\r\n print(\"E-mail(s):\")\r\n emails = usuários[cadastro][5].split(\" \")\r\n for endereço in emails:\r\n print(f\"\\t{endereço}\")\r\n print(\"Telefone(s):\")\r\n telefones = usuários[cadastro][6].split(\" \")\r\n for telefone in telefones:\r\n print(f\"\\t{telefone}\")\r\n print(f\"Data de nascimento: {usuários[cadastro][7]}\")\r\n print(f\"Profissão: {usuários[cadastro][8]}\")\r\n busca = True\r\n if not busca:\r\n print(\"CPF não cadastrado.\")\r\n \r\n \r\n #inclui dados de um novo usuário\r\n if opção == \"3\":\r\n arq_usuários = open(\"cadastro_usuarios.txt\", \"a\")\r\n lista_usuários = usuários\r\n busca = False\r\n num_cpf = input(\"CPF (somente números): \").strip()\r\n while len(num_cpf) != 11 or num_cpf.isdigit() == False :\r\n num_cpf = input(\"CPF inválido. Digite o CPF (somente números): \").strip()\r\n for cadastro in range(len(lista_usuários)): #verifica se o e cpf já existe no arquivo\r\n if num_cpf in lista_usuários[cadastro][0]:\r\n busca = True\r\n print(\"CPF já cadastrado.\") \r\n if not busca:\r\n usuário = []\r\n usuário.append(num_cpf)\r\n arq_usuários.write(f\"{num_cpf};\")\r\n usuário.append(input(\"Nome: \"))\r\n arq_usuários.write(f\"{usuário[1]};\")\r\n usuário.append(input(\"Rua: \"))\r\n arq_usuários.write(f\"{usuário[2]};\")\r\n usuário.append(input(\"Número: \"))\r\n arq_usuários.write(f\"{usuário[3]};\")\r\n num_cep = input(\"CEP (somente números): \").strip()\r\n while len(num_cep) != 8 or num_cep.isdigit() == False:\r\n num_cep = input(\"CEP inválido. 
Digite o CEP (somente números): \").strip()\r\n usuário.append(num_cep)\r\n arq_usuários.write(f\"{usuário[4]};\")\r\n lista_emails = []\r\n resp = \"S\"\r\n while resp in \"S\":\r\n end_email = input(\"E-Mail: \")\r\n if end_email in lista_emails:\r\n print(\"E-Mail já cadastrado\")\r\n else:\r\n lista_emails.append(end_email)\r\n arq_usuários.write(f\"{end_email} \")\r\n resp = input(\"Cadastrar outro e-mail? [S/N] \").strip().upper()\r\n usuário.append(lista_emails)\r\n arq_usuários.write(\";\")\r\n lista_telefones = []\r\n resp = \"S\"\r\n while resp in \"S\":\r\n num_telefone = input(\"Telefone: \")\r\n if num_telefone in lista_telefones:\r\n print(\"Telefone já cadastrado.\")\r\n else:\r\n lista_telefones.append(num_telefone)\r\n arq_usuários.write(f\"{num_telefone} \")\r\n resp = input(\"Cadastrar outro telefone? [S/N] \").strip().upper()\r\n usuário.append(lista_telefones)\r\n usuário.append(input(\"Data de nascimento: \"))\r\n arq_usuários.write(f\";{usuário[7]};\")\r\n usuário.append(input(\"Profissão: \"))\r\n arq_usuários.write(f\"{usuário[8]};\")\r\n arq_usuários.write('\\n')\r\n arq_usuários.close()\r\n lista_usuários.append(usuário)\r\n print(\"-- Usuário cadastrado com sucesso --\")\r\n usuários = obter_base_dados(\"cadastro_usuarios.txt\")\r\n\r\n \r\n\r\n #altera os dados de um usuário já cadastrado, busca pelo cpf\r\n if opção == \"4\":\r\n pessoa = input(\"Digite o CPF: \").strip()\r\n busca = False\r\n for cadastro in range(len(usuários)): #faz as alterações e salva na lista\r\n if pessoa in usuários[cadastro][0]:\r\n print(f\"\\nUsuário: {usuários[cadastro][1]}\\n\"\r\n \"-- Digite apenas os dados que serão alterados: \")\r\n nome = input(\"Nome: \").strip()\r\n if len(nome) > 0:\r\n usuários[cadastro][1] = nome\r\n rua = input(\"Rua: \").strip()\r\n if len(rua) > 0:\r\n usuários[cadastro][2] = rua\r\n número = input(\"Número: \").strip()\r\n if len(número) > 0:\r\n usuários[cadastro][3] = número\r\n CEP = input(\"CEP: \")\r\n if len(CEP) > 0:\r\n while len(CEP) != 8 or CEP.isdigit() == False:\r\n CEP = input(\"CEP inválido. 
Digite o CEP (somente números): \").strip()\r\n usuários[cadastro][4] = CEP\r\n email = input(\"E-mail: \").strip()\r\n if email not in usuários[cadastro][5]: \r\n if len(email) != 0:\r\n usuários[cadastro][5] += (email + \" \")\r\n else:\r\n print(\"E-mail já cadastrado.\")\r\n telefone = input(\"Telefone: \")\r\n if telefone not in usuários[cadastro][6]:\r\n if len(telefone) != 0:\r\n usuários[cadastro][6] += (telefone + \" \")\r\n else:\r\n print(\"Telefone já cadastrado.\")\r\n data_nascimento = input(\"Data de nascimento: \")\r\n if len(data_nascimento) > 0:\r\n usuários[cadastro][7] = data_nascimento\r\n profissão = input(\"Profissão: \")\r\n if len(profissão) > 0:\r\n usuários[cadastro][8] = profissão\r\n print(\"Dados alterados com sucesso!\")\r\n busca = True\r\n\r\n arq_usuários = open(\"cadastro_usuarios.txt\", \"w\") #subscreve todos os registros no arquivo\r\n for registro in range(len(usuários)):\r\n arq_usuários.write(f\"{usuários[registro][0]};\")\r\n arq_usuários.write(f\"{usuários[registro][1]};\")\r\n arq_usuários.write(f\"{usuários[registro][2]};\")\r\n arq_usuários.write(f\"{usuários[registro][3]};\")\r\n arq_usuários.write(f\"{usuários[registro][4]};\")\r\n arq_usuários.write(f\"{usuários[registro][5]};\")\r\n arq_usuários.write(f\"{usuários[registro][6]};\")\r\n arq_usuários.write(f\"{usuários[registro][7]};\")\r\n arq_usuários.write(f\"{usuários[registro][8]};\")\r\n arq_usuários.write(\"\\n\")\r\n arq_usuários.close()\r\n \r\n if not busca:\r\n print(\"CPF não cadastrado.\")\r\n usuários = obter_base_dados(\"cadastro_usuarios.txt\")\r\n\r\n \r\n #exclui os dados de um usuário cadastrado, busca pelo cpf \r\n if opção == \"5\":\r\n pessoa = input(\"Digite o CPF: \").strip()\r\n busca = False\r\n for cadastro in range(len(usuários)):\r\n if pessoa == usuários[cadastro][0]:\r\n print(f\"\\nExcluir usuário: {usuários[cadastro][1]}\")\r\n resposta = input(\"Confirmar? [S/N]: \").upper().strip()\r\n while resposta not in \"SN\":\r\n resposta = input(\"Resposta inválida. Responda S ou N: \").upper().strip()\r\n if resposta == \"S\":\r\n print(f\"Usuário -- {usuários[cadastro][1]} -- excluído com sucesso.\")\r\n del usuários[cadastro]\r\n busca = True\r\n break\r\n else:\r\n busca = True\r\n \r\n arq_usuários = open(\"cadastro_usuarios.txt\", \"w\")\r\n for registro in range(len(usuários)):\r\n arq_usuários.write(f\"{usuários[registro][0]};\")\r\n arq_usuários.write(f\"{usuários[registro][1]};\")\r\n arq_usuários.write(f\"{usuários[registro][2]};\")\r\n arq_usuários.write(f\"{usuários[registro][3]};\")\r\n arq_usuários.write(f\"{usuários[registro][4]};\")\r\n arq_usuários.write(f\"{usuários[registro][5]};\")\r\n arq_usuários.write(f\"{usuários[registro][6]};\")\r\n arq_usuários.write(f\"{usuários[registro][7]};\")\r\n arq_usuários.write(f\"{usuários[registro][8]};\")\r\n arq_usuários.write(\"\\n\")\r\n arq_usuários.close()\r\n usuários = obter_base_dados(\"cadastro_usuarios.txt\")\r\n \r\n if not busca:\r\n print(\"CPF não cadastrado.\")\r\n \r\n \r\n\r\n#apresenta o submenu de livros\r\ndef sub_livros(livros):\r\n opção = 0\r\n while opção != \"6\":\r\n print(\"\\n---------------------------\\n\"\r\n \" Submenu de Livros:\\n\"\r\n \"---------------------------\\n\"\r\n \"1. Listar todos\\n\"\r\n \"2. Listar elemento \\n\"\r\n \"3. Incluir\\n\"\r\n \"4. Alterar\\n\"\r\n \"5. Excluir\\n\"\r\n \"6. Voltar\\n\")\r\n opção = input(\"Digite a opção desejada: \")\r\n while opção not in \"123456\":\r\n opção = input(\"Opção inválida! 
Digite a opção desejada de 1 à 6: \")\r\n \r\n\r\n #lista todos os dados dos livros cadastrados \r\n if opção == \"1\":\r\n contagem = 1\r\n for livro in range(len(livros)):\r\n print(f\"\\n -- Livro {contagem} --\\n\"\r\n f\"ISBN: {livros[livro][0]}\\n\"\r\n f\"Título: {livros[livro][1]}\\n\"\r\n f\"Gênero: {livros[livro][2]}\\n\"\r\n \"Autor(es):\")\r\n autores = livros[livro][3].split(\" \")\r\n for autor in autores:\r\n print(f\"\\t{autor}\")\r\n print(f\"Número de páginas: {livros[livro][4]}\")\r\n contagem += 1\r\n if len(livros) == 0:\r\n print(\"Não existem livros cadastrados.\")\r\n\r\n\r\n #lista os dados de um livro específico, busca pelo isbn \r\n if opção == \"2\":\r\n isbn= input(\"\\nDigite o ISBN do livro que deseja listar: \")\r\n busca = False\r\n for cadastro in range(len(livros)):\r\n if isbn in livros[cadastro][0]:\r\n print(f\"Título: {livros[cadastro][1]}\")\r\n print(f\"Gênero: {livros[cadastro][2]}\")\r\n print(f\"Autores: \")\r\n autores = livros[livro][3].split(\" \")\r\n for autor in autores:\r\n print(f\"\\t{autor}\")\r\n print(f\"Número de páginas: {livros[cadastro][4]}\")\r\n busca = True\r\n if not busca:\r\n print(\"ISBN não cadastrado.\")\r\n \r\n \r\n #inclui um novo livro no cadastro\r\n if opção == \"3\":\r\n arq_livros = open(\"cadastro_livros.txt\", \"a\")\r\n busca = False\r\n isbn = input(\"\\nISBN do livro a ser cadastrado: \").strip()\r\n while len(isbn) != 13 or isbn.isdigit() == False:\r\n isbn = input(\"ISBN inválido. Digite o ISBN do Livro (somente números): \").strip()\r\n if len(livros) != 0:\r\n for cadastro in range(len(livros)):#verifica se o isbn já existe no arquivo\r\n if isbn in livros[cadastro][0]:\r\n busca = True\r\n print('ISBN já cadastrado')\r\n if not busca or len(livros) == 0:\r\n livro = []\r\n livro.append(isbn)\r\n arq_livros.write(f\"{isbn};\")\r\n livro.append(str(input(\"Insira o Título do livro: \")))\r\n arq_livros.write(f\"{livro[1]};\")\r\n livro.append(str(input(\"Insira o Gênero do livro: \")))\r\n arq_livros.write(f\"{livro[2]};\")\r\n list_autores = [ ]\r\n escolha = \"S\"\r\n while escolha in \"S\":\r\n autor = input(\"Insira o Autor do livro: \")\r\n list_autores.append(autor)\r\n arq_livros.write(f\"{autor} \")\r\n escolha = input(\"Este livro possui mais de um autor? [S/N] \").strip().upper()\r\n livro.append(list_autores)\r\n arq_livros.write(\";\")\r\n livro.append(input(\"Número de páginas: \"))\r\n arq_livros.write(f\"{livro[4]};\")\r\n arq_livros.write(\"\\n\")\r\n arq_livros.close()\r\n livros.append(livro)\r\n print(\"\\nLivro Adicionado com Sucesso!\")\r\n livros = obter_base_dados(\"cadastro_livros.txt\")\r\n \r\n\r\n #altera os dados de um livro já cadastrado, busca pelo isbn\r\n if opção == \"4\":\r\n isbn = input(\"Digite o ISBN do livro: \").strip()\r\n busca = False\r\n for cadastro in range(len(livros)):\r\n if isbn in livros[cadastro][0]:\r\n print(f\"\\nLivro: {livros[cadastro][1]}\\n\"\r\n \"-- Digite apenas os dados que serão alterados: \")\r\n titulo = input(\"Título do livro: \").strip()\r\n if len(titulo) > 0:\r\n livros[cadastro][1] = titulo\r\n genero = input(\"Gênero do livro: \").strip()\r\n if len(genero) > 0:\r\n livros[cadastro][2] = genero\r\n autor = input(\"Autor(es) do livro: \")\r\n if autor not in livros[cadastro][3]: \r\n livros[cadastro][3] += (autor + \" \")\r\n else:\r\n print(\"Este autor já está registrado\")\r\n num_pag = input(\"Número de páginas do livro: \")\r\n if len(num_pag) > 0:\r\n livros[cadastro][4] = num_pag\r\n print(\"\\n-- Dados alterados com sucesso! 
--\")\r\n busca = True\r\n\r\n arq_livros = open(\"cadastro_livros.txt\", \"w\")#subscreve todos os registros do arquivo\r\n for registro in range(len(livros)):\r\n arq_livros.write(f\"{livros[registro][0]};\")\r\n arq_livros.write(f\"{livros[registro][1]};\")\r\n arq_livros.write(f\"{livros[registro][2]};\")\r\n arq_livros.write(f\"{livros[registro][3]};\")\r\n arq_livros.write(f\"{livros[registro][4]};\")\r\n arq_livros.write(\"\\n\")\r\n arq_livros.close()\r\n \r\n if not busca:\r\n print(\"ISBN não cadastrado.\")\r\n livros = obter_base_dados(\"cadastro_livros.txt\")\r\n \r\n #exclui os dados de um livro cadastrado, busca pelo isbn\r\n if opção == \"5\":\r\n livro = input(\"Digite o ISBN: \").strip()\r\n busca = False\r\n for exclude in range(len(livros)):\r\n if livro in livros[exclude][0]:\r\n print(f\"\\nExcluir Livro: {livros[exclude][1]}\")\r\n busca = True\r\n resposta = input(\"Confirmar? [S/N]: \").upper().strip()\r\n while resposta not in \"SN\":\r\n resposta = input(\"Resposta inválida. Responda S ou N: \").upper().strip()\r\n if resposta == \"S\":\r\n print(f\"Livro -- {livros[exclude][1]} -- excluído com sucesso.\")\r\n del livros[exclude]\r\n break\r\n \r\n arq = open(\"cadastro_livros.txt\", \"w\")\r\n for exc in range(len(livros)):\r\n arq.write(f\"{livros[exc][0]};\")\r\n arq.write(f\"{livros[exc][1]};\")\r\n arq.write(f\"{livros[exc][2]};\")\r\n arq.write(f\"{livros[exc][3]};\")\r\n arq.write(f\"{livros[exc][4]};\")\r\n arq.write(\"\\n\")\r\n arq.close()\r\n livros = obter_base_dados(\"cadastro_livros.txt\")\r\n \r\n if not busca:\r\n print(\"ISBN não cadastrado.\")\r\n \r\n \r\n#apresenta o submenu de empréstimos\r\ndef sub_empréstimos(usuários, livros, empréstimos):\r\n print(\"\\n---------------------------\\n\"\r\n \" Submenu de Empréstimos:\\n\"\r\n \"---------------------------\\n\"\r\n \"1. Listar todos\\n\"\r\n \"2. Listar elemento \\n\"\r\n \"3. Incluir\\n\"\r\n \"4. Alterar\\n\"\r\n \"5. Excluir\\n\"\r\n \"6. Voltar\\n\")\r\n opção = input(\"Digite a opção desejada: \")\r\n while opção not in \"123456\":\r\n opção = input(\"Opção inválida! 
Digite a opção desejada de 1 à 6: \")\r\n\r\n\r\n #lista todos os empréstimos cadastrados\r\n if opção == \"1\":\r\n contagem = 1\r\n for registro in range(len(empréstimos)):\r\n print(f\"\\n -- Empréstimo {contagem} --\\n\")\r\n print(f\"CPF: {empréstimos[registro][0]}\")\r\n print(f\"ISBN: {empréstimos[registro][1]}\")\r\n print(f\"Data de Retirada: {empréstimos[registro][2]}\")\r\n print(f\"Data de Devolução: {empréstimos[registro][3]}\")\r\n print(f\"Valor Diário da Multa por atraso: R${float(empréstimos[registro][4]):.2f}\")\r\n contagem += 1\r\n if len(empréstimos) == 0:\r\n print(\"Não existem empréstimos cadastrados.\")\r\n \r\n\r\n #lista os dados de um empréstimo específico, busca pelo cpf, isbn e data de retirada \r\n if opção == \"2\":\r\n list_emprest = ()\r\n cpf_emprest = input(\"Digite o CPF da pessoa que realizou o empréstimo: \")\r\n isbn_list = input(\"Digite o ISBN do livro que foi emprestado: \")\r\n ret_list = input(\"Digite a data de retirada do empréstimo(xx/xx/xxxx): \")\r\n list_emprest = (cpf_emprest, isbn_list, ret_list)\r\n if list_emprest in empréstimos:\r\n print(\"\\nAqui estão as informações que você solicitou:\\n\")\r\n print(\"CPF:\", list_emprest[0])\r\n print(f\"ISBN: {list_emprest[1]}\\n\"\r\n f\"Data de Retirada: {list_emprest[2]}\\n\"\r\n f\"Data de Devolução: {empréstimos[list_emprest][0]}\\n\"\r\n f\"Valor Diário da Multa por atraso: {empréstimos[list_emprest][1]}\")\r\n else:\r\n print(\"Este empréstimo não está nos registros de empréstimos.\")\r\n\r\n\r\n #inclui os dados de um novo empréstimo \r\n if opção == \"3\":\r\n arq_empréstimos = open(\"cadastro_emprestimos.txt\", \"a\")\r\n empréstimo = ()\r\n busca = False\r\n cpf = input(\"Digite o CPF (somente números): \").strip()\r\n while not busca:\r\n for cadastro in range(len(usuários)):\r\n if len(cpf) != 11 or cpf.isdigit() == False:\r\n cpf = input(\"CPF inválido. Digite o CPF (somente números): \")\r\n elif cpf in usuários[cadastro][0]:\r\n busca = True\r\n print(f\"Usuário: -- {usuários[cadastro][1]} --\")\r\n if not busca:\r\n cpf = input(\"CPF não cadastrado. Digite o CPF (somente números): \") \r\n isbn = input(\"Digite o ISBN do livro: \")\r\n busca = False\r\n while not busca:\r\n for cadastro in range(len(livros)):\r\n if len(isbn) != 13 or isbn.isdigit() == False:\r\n isbn = input(\"ISBN inválido. Digite o ISBN: \")\r\n elif isbn in livros[cadastro][0]:\r\n busca = True\r\n print(f\"Livro: -- {livros[cadastro][1]} --\")\r\n if not busca:\r\n isbn = input(\"ISBN não cadastrado. 
Digite o ISBN: \") \r\n data_retirada = input(\"Data de retirada[xx/xx/xxxx]: \")\r\n empréstimo = (cpf, isbn, data_retirada)\r\n for info in range(len(empréstimo)):\r\n arq_empréstimos.write(f\"{empréstimo[info]};\")\r\n lista_empréstimos = [ ]\r\n lista_empréstimos.append(empréstimo)\r\n lista_empréstimos.append(input(\"Data de devolução[xx/xx/xxxx]: \"))\r\n arq_empréstimos.write(f\"{lista_empréstimos[1]};\")\r\n lista_empréstimos.append(float(input(\"Valor diário da Multa por Atraso: R$\")))\r\n arq_empréstimos.write(f\"{lista_empréstimos[2]};\")\r\n arq_empréstimos.write(\"\\n\")\r\n arq_empréstimos.close()\r\n print(\"-- Empréstimo cadastrado com sucesso --\")\r\n empréstimos = obter_base_dados(\"cadastro_emprestimos.txt\")\r\n\r\n\r\n #altera os dados de um empréstimo já cadastrado, busca por cpf, isbn e data de retirada \r\n if opção == \"4\":\r\n alter_emprest =()\r\n cpf_alter = input(\"Digite o CPF da pessoa que realizou o Empréstimo: \")\r\n isbn_alter = input(\"Digite o ISBN do livro referente ao empréstimo: \")\r\n data_alter = input(\"Digite a Data em que o livro foi retirado(xx/xx/xxxx): \")\r\n alter_emprest = (cpf_alter, isbn_alter, data_alter)\r\n busca = False\r\n infos_alterar = ()\r\n for cadastro in range(len(empréstimos)):\r\n infos_alterar = (empréstimos[cadastro][0], empréstimos[cadastro][1], empréstimos[cadastro][2])\r\n if infos_alterar == alter_emprest:\r\n data_dev = input(\"Digite a nova data de devolução se deseja alterá-la: \")\r\n if len(data_dev) > 0:\r\n empréstimos[cadastro][3] = data_dev\r\n valor_mul = input(\"Digite o novo valor da multa de atraso caso deseja alterá-la: R$\")\r\n if len(valor_mul) > 0:\r\n empréstimos[cadastro][4] = float(valor_mul)\r\n print(\"\\n-- Dados alterados com sucesso! --\")\r\n busca = True\r\n\r\n arq_empréstimos = open(\"cadastro_emprestimos.txt\", \"w\")#subscreve todos os registros do arquivo\r\n for registro in range(len(empréstimos)):\r\n arq_empréstimos.write(f\"{empréstimos[registro][0]};\")\r\n arq_empréstimos.write(f\"{empréstimos[registro][1]};\")\r\n arq_empréstimos.write(f\"{empréstimos[registro][2]};\")\r\n arq_empréstimos.write(f\"{empréstimos[registro][3]};\")\r\n arq_empréstimos.write(f\"{empréstimos[registro][4]};\")\r\n arq_empréstimos.write(\"\\n\")\r\n arq_empréstimos.close()\r\n \r\n if not busca:\r\n print(\"Empréstimo não cadastrado\")\r\n empréstimos = obter_base_dados(\"cadastro_emprestimos.txt\")\r\n \r\n\r\n #exclui os dados de um empréstimo cadastrado, busca por cpf, isbn e data de retirada \r\n if opção == \"5\":\r\n emprest_del=()\r\n cpf_del = input(\"Digite o CPF da pessoa que realizou empréstimo que deseja deletar da lista: \")\r\n isbn_del = input(\"Digite o ISBN do livro referente à esse empréstimo: \")\r\n data_del = input(\"Digite a data de retirada referente à esse empréstimo: \")\r\n emprest_del = (cpf_del, isbn_del, data_del)\r\n busca = False\r\n for cadastro in range(len(empréstimos)):\r\n infos_excluir = ()\r\n infos_excluir = (empréstimos[cadastro][0], empréstimos[cadastro][1], empréstimos[cadastro][2])\r\n if emprest_del == infos_excluir:\r\n print(f\"\\nExcluir Empréstimo: {emprest_del}\")\r\n \r\n resposta = input(\"Confirmar? [S/N]: \").upper().strip()\r\n while resposta not in \"SN\":\r\n resposta = input(\"Resposta inválida. 
Responda S ou N: \").upper().strip()\r\n if resposta == \"S\":\r\n print(\"Empréstimo excluído com sucesso.\")\r\n del empréstimos[cadastro]\r\n busca = True\r\n break\r\n \r\n archive = open(\"cadastro_emprestimos.txt\", \"w\")\r\n for exc in range(len(empréstimos)):\r\n archive.write(f\"{empréstimos[exc][0]};\")\r\n archive.write(f\"{empréstimos[exc][1]};\")\r\n archive.write(f\"{empréstimos[exc][2]};\")\r\n archive.write(f\"{empréstimos[exc][3]};\")\r\n archive.write(f\"{empréstimos[exc][4]};\")\r\n archive.write(\"\\n\")\r\n archive.close()\r\n emprestimos = obter_base_dados(\"cadastro_emprestimos.txt\")\r\n \r\n if not busca:\r\n print(\"Empréstimo não cadastrado.\")\r\n \r\n \r\n\r\n#retorna um relatório dos usuários com com mais de x anos de idade, x é fornecido pelo usuário\r\ndef usuários_idade(usuários, idade): \r\n from datetime import datetime\r\n quantidade = 0\r\n usuários_idade_maior = []\r\n arq_usuários_idade = open(\"relatorio_usuarios_idade.txt\", \"w\")\r\n print(\"\\nSalvando informações em arquivo...\")\r\n arq_usuários_idade.write(f\"Registro de usuários com mais de {idade} anos: \\n\\n\")\r\n for pessoa in range(len(usuários)):\r\n idade_usuário = datetime.now().year - int(usuários[pessoa][7][-4:])\r\n if idade_usuário > idade:\r\n print(f\"\\nCPF: {usuários[pessoa][0]}\\n\"\r\n f\"Nome: {usuários[pessoa][1]}\\n\"\r\n f\"Rua: {usuários[pessoa][2]}\\n\"\r\n f\"Número: {usuários[pessoa][3]}\\n\"\r\n f\"CEP: {usuários[pessoa][4]}\\n\"\r\n \"E-mail(s):\")\r\n emails = usuários[pessoa][5].split(\" \")\r\n for end in emails:\r\n print(f\"\\t{end}\")\r\n print(\"Telefone(s): \")\r\n telefones = usuários[pessoa][6].split(\" \")\r\n for tel in telefones:\r\n print(f\"\\t{tel}\")\r\n print(f\"Data de nascimento: {usuários[pessoa][7]}\\n\"\r\n f\"Profissão: {usuários[pessoa][8]}\")\r\n print(\"*\" * 50)\r\n quantidade += 1\r\n #salvando no arquivo\r\n if existe_arquivo(\"relatorio_usuarios_idade.txt\"):\r\n arq_usuários_idade.write(f\"\\nCPF: {usuários[pessoa][0]}\\n\")\r\n arq_usuários_idade.write(f\"Nome: {usuários[pessoa][1]}\\n\")\r\n arq_usuários_idade.write(f\"Rua: {usuários[pessoa][2]}\\n\")\r\n arq_usuários_idade.write(f\"Número: {usuários[pessoa][3]}\\n\")\r\n arq_usuários_idade.write(f\"CEP: {usuários[pessoa][4]}\\n\")\r\n arq_usuários_idade.write(\"E-mail(s): \\n\")\r\n emails = usuários[pessoa][5].split(\" \")\r\n for end in emails:\r\n arq_usuários_idade.write(f\"\\t{end}\\n\")\r\n arq_usuários_idade.write(\"Telefone(s): \\n\")\r\n telefones = usuários[pessoa][6].split(\" \")\r\n for tel in telefones:\r\n arq_usuários_idade.write(f\"\\t{tel}\\n\")\r\n arq_usuários_idade.write(f\"Data de nascimento: {usuários[pessoa][7]}\\n\")\r\n arq_usuários_idade.write(f\"Profissão: {usuários[pessoa][8]}\\n\")\r\n arq_usuários_idade.write('*'*50) \r\n \r\n else:\r\n print(\"O arquivo 'relatorios_usuarios_idade.txt' não pode ser aberto.\")\r\n print(f\"\\nQuantidade de usuários com idade maior que {idade} anos: {quantidade}.\")\r\n if quantidade == 0:\r\n print(f\"Não existem usuários cadastrados com mais de {idade} anos.\")\r\n arq_usuários_idade.write(f\"Não existem usuários cadastrados com mais de {idade} anos.\")\r\n arq_usuários_idade.close()\r\n\r\n \r\n\r\n#lista os livros com mais de x autores, x é fornecido pelo usuário\r\ndef autores_livro(livros, quantidade):\r\n arq_quant_autores = open(\"relatorio_livros_por_quantidade_autores.txt\", \"w\")\r\n arq_quant_autores.write(f\"Relatório de livros com {quantidade} ou mais autores: \\n\\n\")\r\n print(\"Salvando informações 
no arquivo...\")\r\n busca = False\r\n for livro in range(len(livros)):\r\n autores = livros[livro][3].split(\" \")\r\n quant_autores = len(autores) - 1\r\n if quant_autores >= quantidade:\r\n print(f\"\\nISBN: {livros[livro][0]}\\n\"\r\n f\"Título: {livros[livro][1]}\\n\"\r\n f\"Gênero: {livros[livro][2]}\\n\"\r\n \"Autor(es):\")\r\n for nome in autores:\r\n print(f\"\\t{nome}\")\r\n print(f\"Número de páginas: {livros[livro][4]}\")\r\n print(\"*\" * 50)\r\n busca = True\r\n\r\n if existe_arquivo(\"relatorio_livros_por_quantidade_autores.txt\"):\r\n arq_quant_autores.write(f\"\\nISBN: {livros[livro][0]}\\n\")\r\n arq_quant_autores.write(f\"Título: {livros[livro][1]}\\n\")\r\n arq_quant_autores.write(f\"Gênero: {livros[livro][2]}\\n\")\r\n arq_quant_autores.write(\"Autor(es): \\n\")\r\n for nome in autores:\r\n arq_quant_autores.write(f\"\\t{nome}\\n\")\r\n arq_quant_autores.write(f\"Número de páginas: {livros[livro][4]}\\n\")\r\n arq_quant_autores.write(\"*\" * 50)\r\n arq_quant_autores.close() \r\n if not busca:\r\n print(f\"Não existem livros cadastrados com mais que {quantidade} autores.\")\r\n\r\n\r\n#lista os empréstimos que devem ser devolvidos entre as datas x e y, x e y fornecidos pelo usuário\r\ndef dados_empréstimos(usuários, livros, empréstimos, data_inicial, data_final):\r\n arq_emprest_dat = open(\"relatorio_emprestimos_entre_datas.txt\", \"w\")\r\n arq_emprest_dat.write(f\"Relatório de empréstimos com data de entrega entre {data_inicial} e {data_final}: \\n\\n\")\r\n print(\"Salvando informações no arquivo...\")\r\n busca = False\r\n from datetime import datetime\r\n data_inicial_ = datetime.strptime(data_inicial, '%d/%m/%Y').date()\r\n data_final_ = datetime.strptime(data_final, '%d/%m/%Y').date()\r\n for empréstimo in range(len(empréstimos)):\r\n data = datetime.strptime(empréstimos[empréstimo][3], '%d/%m/%Y').date()\r\n if data > data_inicial_ and data <= data_final_:\r\n busca = True\r\n cpf = empréstimos[empréstimo][0]\r\n print(f\"\\nCPF: {cpf}\")\r\n arq_emprest_dat.write(f\"\\nCPF: {cpf}\\n\")\r\n for info in range(len(usuários)):\r\n if cpf == usuários[info][0]:\r\n print(f\"Nome: {usuários[info][1]}\")\r\n arq_emprest_dat.write(f\"Nome: {usuários[info][1]}\\n\")\r\n isbn = empréstimos[empréstimo][1]\r\n print(f\"ISBN: {isbn}\")\r\n arq_emprest_dat.write(f\"ISBN: {isbn}\\n\")\r\n for info in range(len(livros)):\r\n if isbn == livros[info][0]:\r\n print(f\"Título: {livros[info][1]}\")\r\n arq_emprest_dat.write(f\"Título: {livros[info][1]}\\n\")\r\n data_retirada = empréstimos[empréstimo][2]\r\n print(f\"Data de retirada: {data_retirada}\")\r\n arq_emprest_dat.write(f\"Data de retirada: {data_retirada}\\n\")\r\n print(f\"Data de devolução: {empréstimos[empréstimo][3]}\")\r\n arq_emprest_dat.write(f\"Data de devolução: {empréstimos[empréstimo][3]}\\n\")\r\n print(f\"Valor diário da multa por atraso: R${float(empréstimos[empréstimo][4]):.2f}\")\r\n arq_emprest_dat.write(f\"Valor diário da multa por atraso: R${float(empréstimos[empréstimo][4]):.2f}\\n\")\r\n arq_emprest_dat.write(\"*\"*50)\r\n\r\n if not busca:\r\n print(f\"Não existem empréstimos com data de entrega entre {data_inicial} e {data_final}.\")\r\n arq_emprest_dat.write(f\"Não existem empréstimos com data de entrega entre {data_inicial} e {data_final}.\")\r\n \r\n arq_emprest_dat.close()\r\n \r\n \r\n#submenu de opções de relatórios\r\ndef sub_relatórios(usuários, livros, empréstimos):\r\n print(\"\\n---------------------------\\n\"\r\n \" Submenu de Relatórios:\\n\"\r\n \"---------------------------\\n\"\r\n 
\"1. Listar os usuários com mais de X anos de idade\\n\"\r\n \"2. Listar os livros que tenham mais do que X autores\\n\"\r\n \"3. Listar dados de empréstimos\\n\"\r\n \"4. Voltar\\n\")\r\n opção = input(\"Digite a opção desejada: \")\r\n while opção not in \"1234\":\r\n opção = input(\"Opção inválida! Digite a opção desejada de 1 à 4: \")\r\n \r\n if opção == \"1\":\r\n idade = int(input(\"\\nLista de todos os usuários com idade maior que (digite a idade desejada): \"))\r\n usuários_idade(usuários, idade)\r\n \r\n\r\n if opção == \"2\":\r\n quantidade = int(input(\"\\nLista de todos os livros que possuem a quantidade de autores maior que (digite a quantidade desejada): \"))\r\n autores_livro(livros, quantidade)\r\n\r\n if opção == \"3\":\r\n print(\"Digite as datas inicial e final para ver os empréstimos que devem ser devolvidos entre tais datas:\")\r\n data_inicial = input(\"Data inicial [xx/xx/xxxx]: \")\r\n data_final = input(\"Data final [xx/xx/xxxx]: \")\r\n dados_empréstimos(usuários, livros, empréstimos, data_inicial, data_final)\r\n\r\n\r\n \r\n \r\n###################programa principal######################################################################\r\n#cria o arquivo e o transforma em lista\r\nopen(\"cadastro_usuarios.txt\", \"a\") \r\nusuários = obter_base_dados(\"cadastro_usuarios.txt\")\r\nopen(\"cadastro_livros.txt\", \"a\")\r\nlivros = obter_base_dados(\"cadastro_livros.txt\")\r\nopen(\"cadastro_emprestimos.txt\", \"a\")\r\nempréstimos = obter_base_dados(\"cadastro_emprestimos.txt\")\r\n\r\nopção = 0\r\nwhile opção != \"5\":\r\n print(\"\\n---------------------------\\n\"\r\n \" Menu de opções:\\n\"\r\n \"---------------------------\\n\"\r\n \"1. Submenu de Usuários\\n\"\r\n \"2. Submenu de Livros\\n\"\r\n \"3. Submenu de Empréstimos\\n\"\r\n \"4. Submenu de Relatórios\\n\"\r\n \"5. Sair\\n\")\r\n opção = input(\"Digite a opção desejada: \")\r\n while opção not in \"12345\":\r\n opção = input(\"Opção inválida! 
Digite a opção desejada de 1 à 5: \")\r\n \r\n if opção == \"1\":\r\n sub_usuários(usuários)\r\n \r\n if opção == \"2\":\r\n sub_livros(livros)\r\n \r\n if opção == \"3\":\r\n sub_empréstimos(usuários, livros, empréstimos)\r\n \r\n if opção == \"4\":\r\n sub_relatórios(usuários, livros, empréstimos)\r\n \r\n if opção == \"5\":\r\n print(\"\\n --- ENCERRANDO ---\")\r\n \r\n\r\n\r\n\r\n","repo_name":"repersch/Biblioteca_pyhton_arquivos","sub_path":"biblioteca parte 2.py","file_name":"biblioteca parte 2.py","file_ext":"py","file_size_in_byte":37659,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"45123155457","text":"import re\nfile = open('konstruktikon.xml')\n\n#I create dictionaries to check whether they coincide with konstruktikon\nroles = [\"Actant\", \"Action\", \"Activity\", \"Addressee\", \"Agent\", \"Associated\", \"Beneficiary\",\n \"Cause\", \"Causee\", \"Causer\", \"Circumstance\", \"Condition\", \"Direction\", \"Distance\",\n \"Evaluation\", \"Event\", \"Experiencer\", \"Function\", \"Goal\", \"Goer\", \"Instrument\", \"Limit\",\n \"Location\", \"Manner\", \"Material\", \"Measure\", \"Motivation\", \"Participant\", \"Path\", \"Parameter\",\n \"Patient\", \"Phenomenon\", \"Property\", \"Purpose\", \"Protagonist\", \"Quantity\", \"Recipient\", \"Result\",\n \"Set\", \"Situation\", \"Source\", \"Speaker\", \"Standard\", \"State\", \"Theme\", \"Topic\", \"Undergoer\"]\n\nphrases = ['NP', 'VP', 'AP', 'AdvP', 'PP', 'NumP', 'XP', 'BareCl', 'IndirCl', 'Cl', 'S', 'DiscC',\n 'DirSpeech', 'NP-Nom', 'NP-Gen', 'NP-Acc', 'NP-Dat', 'NP-Ins', 'NP-Loc', 'NP-Nom.Plur',\n 'NP-Gen.Plur', 'NP-Acc.Plur', 'NP-Dat.Plur', 'NP-Ins.Plur', 'NP-Loc.Plur', 'NP-Nom.Sing',\n 'NP-Gen.Sing', 'NP-Acc.Sing', 'NP-Dat.Sing', 'NP-Ins.Sing', 'NP-Loc.Sing', 'NP.Sing', 'NP.Plur',\n 'VP-Inf', 'VP-Pres', 'VP-Past', 'VP-Fut', 'VP-Bare', 'VP-Imper', 'VP-Imp', 'VP-Perf', 'VP-Part',\n 'VP-Pass', 'VP-Act', 'VP-Short', 'VP-Conv', 'VP-1Plur.Pres', 'VP-1Plur.Past', 'VP-1Plur.Fut',\n 'VP-2Plur.Pres', 'VP-2Plur.Past', 'VP-2Plur.Fut', 'VP-3Plur.Pres', 'VP-3Plur.Past', 'VP-3Plur.Fut',\n 'AP-Nom', 'AP-Gen', 'AP-Acc', 'AP-Dat', 'AP-Ins', 'AP-Loc', 'AP-Nom.Plur', 'AP-Gen.Plur', 'AP-Acc.Plur',\n 'AP-Dat.Plur', 'AP-Ins.Plur', 'AP-Loc.Plur', 'AP-Nom.Sing', 'AP-Gen.Sing', 'AP-Acc.Sing', 'AP-Dat.Sing',\n 'AP-Ins.Sing', 'AP-Loc.Sing', 'AP.Sing', 'AP.Plur', 'AP.Cmp', 'NUM', 'PART', 'PRON']\n\nstructures = ['nsubj', 'obj', 'iobj', 'csubj', 'ccomp', 'xcomp',\n 'obl', 'advmod', 'cop', 'mark', 'nmod', 'nummod',\n 'conj', 'cc', 'dep', 'root', 'amod', 'det', 'case', 'parataxis']\n\n# at this step I do a parser to read an xml. file line by line and then extract the tags, that we need\ndef parser(text):\n n = 0\n blocks = []\n sense_block = False\n def_block = False\n find_konst_name = False\n find_struct = False\n block_dict = {}\n for line in text:\n n += 1\n #print(n)\n if not sense_block:\n if line.startswith(' '):\n sense_block = False\n blocks.append(block_dict)\n block_dict = {}\n elif line.startswith(' '):\n def_block = True\n else:\n if line.startswith(' '):\n def_block = False\n return blocks\n\n#1. 
I look at the name of construction and\n# check that each token in the name is the same\n# as the \"name\" in the Construction elements, internal\n#if not, than we create a file with fail constructions\n\ndef check_konst_names(sense_str, konst_list):\n fail = False\n for token in sense_str.split('_'):\n token = token.strip('?!):,')\n token = re.sub('\\(', '', token)\n if token not in konst_list:\n fail = True\n return fail\n\n#2. I look at the definitions and check that each role coincides\n# with the existing roles in the dictionary\n# if there is a role, that do not exist in the dictionary, than it is stored\n# in a new file with fail constructions\ndef check_definitions(def_names, roles=roles):\n fail = False\n for token in def_names:\n token = token.strip('.,?!:')\n if token not in roles:\n fail = True\n return fail\n\n#3. I look at the name of the constructions and check that each phrase or POS coincides\n# with the existing phrases in the dictionary\n# if there is a phrase or POS, that do not exist in the dictionary, than it is stored\n# in a new file with fail constructions\ndef check_tokens(sense_str, phrases=phrases):\n fail = False\n eng = 'qwertyuiopasdfghjklzxcvbnm'\n for token in sense_str.split('_'):\n token = token.strip('?!):,')\n token = re.sub('\\(', '', token)\n if len(token) > 0 and token[0].lower() in eng:\n if token not in phrases:\n fail = True\n return fail\n\n#4. I look at the structure and check that each relation coincides\n# with the existing relations in the dictionary\n# if there is a relation, that do not exist in the dictionary, than it is stored\n# in a new file with fail constructions\ndef check_sctructs(struct_list, structures_proper_list=structures):\n fail = False\n eng = 'qwertyuiopasdfghjklzxcvbnm'\n for structure in struct_list:\n for token in structure.split():\n if token.startswith('['):\n\n if len(token) > 1 and token[1].lower() in eng:\n if token[1:] not in structures_proper_list:\n fail = True\n return fail\n\n\nfile1 = open('konst_check.txt', 'w')\nfile2 = open('def_check.txt', 'w')\nfile3 = open('sense_id_check.txt', 'w')\nfile4 = open('structure_check.txt', 'w')\nfor block in parser(file):\n if check_konst_names(block['sense'], block['konst']):\n file1.write(block['sense'] + '\\n')\n if check_definitions(block['def_names']):\n file2.write(block['sense'] + '\\n')\n if check_tokens(block['sense']):\n file3.write(block['sense'] + '\\n')\n if check_sctructs(block['structure']):\n file4.write(block['sense'] + '\\n')\nfile1.close()\nfile2.close()\nfile3.close()\nfile4.close()\n\nfile.close()\n","repo_name":"kristinabagdasaryan/2017-osnov-programm","sub_path":"project/project_x.py","file_name":"project_x.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"29346461391","text":"from django.shortcuts import render\nfrom xml.dom import minidom\nfrom django.core.files.storage import FileSystemStorage\nfrom django.http import FileResponse\nimport sys\nfrom . 
import controlador\nimport json\nimport requests\n\n# from web.controladorxml import readxml\n\n\nrutaAbs = \"\"\ndef filtroFecha(request):\n fecha = {}\n \n if request.method == 'GET':\n listfechas = []\n direccion = \"http://127.0.0.1:5000/mostrar\"\n response = requests.get(direccion)\n data = response.json()\n obj = json.loads(data)\n \n for estadistica in obj:\n try:\n for x in range(0,100):\n key = obj[estadistica]['ESTADISTICA'][x]['FECHA']\n print(key)\n listfechas.append(key)\n except IndexError:\n pass\n fecha['fecha'] = listfechas\n \n return render(request, 'fecha.html', context={\"fecha\": fecha})\n\n\ndef filtroCodigo(request):\n codigo = {}\n if request.method == 'GET':\n listcodigo = []\n direccion = \"http://127.0.0.1:5000/mostrarCodigo\"\n response = requests.get(direccion)\n data = response.json()\n \n # print(data)\n for x in data['codigo']:\n listcodigo.append(x)\n\n codigo['codigo'] = listcodigo\n\n return render(request, 'codigo.html', codigo)\n\n\n\n# Create your views here.\ndef index(request):\n context = {}\n if request.method == 'POST':\n uploaded_file = request.FILES['document']\n ruta = uploaded_file.name\n fs = FileSystemStorage()\n name = fs.save(ruta, uploaded_file)\n \n # obtengo la ruta \n rutaAbs = \"C:\\\\Users\\\\compu\\\\Desktop\\\\IPC2 - 2.0\\\\PROYECTO3\\\\Frontend\\\\media\\\\\"\n rutaAbs = rutaAbs + ruta\n \n textArea = \"\"\n with open(rutaAbs, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n posicion = 1\n # print(lineas)\n for file in lineas:\n file.rstrip('\\n')\n textArea = textArea + file\n # print(file)\n context['url'] = textArea \n print(rutaAbs)\n\n controlador.xmlToJson(rutaAbs)\n\n\n return render(request, 'index.html', context)\n\n\ndef enviar(request):\n if request.method == \"POST\":\n url = 'http://127.0.0.1:5000/enviar'\n ruta = \"C:\\\\Users\\\\compu\\\\Desktop\\\\IPC2 - 2.0\\\\PROYECTO3\\\\Frontend\\\\media\\\\data.json\"\n # data = json.loads(ruta)\n with open(ruta, 'r') as j:\n contents = json.loads(j.read())\n # print(contents)\n response = requests.post(url, json=contents)\n print(\"Informacion enviada al api :D\")\n return render(request, 'index.html')\n\n\ndef documentacion(request):\n url = \"C:\\\\Users\\\\compu\\\\Desktop\\\\IPC2 - 2.0\\\\PROYECTO3\\\\Documentacion\\\\documentacionProyecto3.pdf\"\n response = FileResponse(open(url, 'rb'), content_type='application/pdf')\n return response\n\n\ndef consulta(request):\n context1 = {}\n if request.method == \"GET\":\n url = ruta = \"C:\\\\Users\\\\compu\\\\Desktop\\\\IPC2 - 2.0\\\\PROYECTO3\\\\Frontend\\\\media\\\\estadistica.xml\"\n textAreaEstadistica = \"\"\n with open(url, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n\n for file in lineas:\n textAreaEstadistica = textAreaEstadistica + file \n print(textAreaEstadistica)\n context1['url2'] = textAreaEstadistica\n\n return render(request, 'index.html', context1) ","repo_name":"kevcalderon/IPC2_Proyecto3_201902714","sub_path":"Frontend/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"28490511629","text":"import numpy as np\nfrom Settings import arguments\nfrom Settings import constants\nfrom Settings import card_settings\nfrom Game.CardTools import CardTools\nfrom Game.CardStringConverter import CardStringConverter\nfrom Game.BetSizing import BetSizing\nfrom Settings import arguments\nfrom Tree.TreeTools import Node\nfrom Tree.TreeTools import Params\nfrom 
Tree.StrategyFilling import StrategyFilling\nfrom copy import deepcopy\n\ncard_tools = CardTools()\ncard_to_string = CardStringConverter()\n\n\nclass PokerTreeBuilder(object):\n def __init__(self):\n self.bet_sizing = None\n\n def set_bet_sizing(self,bet_sizing):\n self.bet_sizing = bet_sizing\n\n\n def get_children_nodes_transition_call(self,parent_node):\n chance_node = Node(parent_node.street,parent_node.bets,\\\n constants.players.chance, parent_node.board)\n chance_node.set_board_string(parent_node.board_string)\n chance_node.set_node_type(constants.node_types.chance_node)\n return chance_node\n\n def get_children_nodes_chance_node(self,parent_node):\n assert (parent_node.current_player == constants.players.chance), \"player is not chance\"\n\n if self.limit_to_street:\n return []\n next_boards = card_tools.get_second_round_boards()\n next_boards_count = next_boards.shape[0]\n\n subtree_height = -1\n children = []\n #print (\"next_boards: \",next_boards_count)\n for i in range(next_boards_count):\n next_board = int(next_boards[i])\n #print (\"next bo : \", next_board)\n next_board_string = card_to_string.card_to_string(next_board)\n # Creating children:\n child = Node(parent_node.street+1,parent_node.bets,\\\n constants.players.P1, next_board)\n child.set_node_type(constants.node_types.inner_node)\n child.set_parent_node(parent_node)\n child.set_board_string(next_boards_count)\n children.append(child)\n #print (\"out\")\n return children\n\n\n def get_children_player_node(self,parent_node):\n assert (parent_node.current_player != constants.players.chance),\\\n \"parent node is a chance node\"\n children = []\n\n # Fold Action:\n fold_node = Node(parent_node.street,parent_node.bets, \\\n 3 - parent_node.current_player, parent_node.board)\n fold_node.set_node_type(constants.node_types.terminal_fold)\n fold_node.convert_to_terminal_node()\n #fold_node.current_player = 3- parent_node.current_player\n fold_node.set_board_string(parent_node.board_string)\n children.append(fold_node)\n #print (\"fold\")\n\n # Check Action:\n # If the current player is player 1 and the bets are equal:\n if (parent_node.current_player == constants.players.P1 and \\\n (parent_node.bets[0] == parent_node.bets[1])):\n check_node = Node(parent_node.street,parent_node.bets,\\\n 3-parent_node.current_player,parent_node.board)\n check_node.set_node_type(constants.node_types.check)\n check_node.set_board_string(deepcopy(parent_node.bets))\n children.append(check_node)\n #print(\"actions\")\n # Transition Call\n # if the current player is player 2 and, the bets are equal and\n # the bets are equal or the bets are different and the maximla bet is\n # smaller that the stack:\n elif parent_node.street == 1 and\\\n ((parent_node.current_player == constants.players.P2 and \\\n parent_node.bets[0] == parent_node.bets[1]) or \\\n (parent_node.bets[0] != parent_node.bets[1] and \\\n max(parent_node.bets) < arguments.stack)):\n # Max Bets:\n max_bets = np.array(deepcopy(parent_node.bets))\n max_bets.fill(max(parent_node.bets))\n chance_node = Node(parent_node.street,max_bets,\\\n constants.players.chance,parent_node.board)\n chance_node.set_board_string(parent_node.board_string)\n chance_node.set_node_type(constants.node_types.chance_node)\n children.append(chance_node)\n else:\n # Max Bets:\n max_bets = np.array(deepcopy(parent_node.bets))\n max_bets.fill(max(parent_node.bets))\n terminal_call_node = Node(parent_node.street,max_bets,\\\n 3-parent_node.current_player,parent_node.board)\n 
terminal_call_node.set_node_type(constants.node_types.terminal_call)\n terminal_call_node.convert_to_terminal_node()\n terminal_call_node.set_board_string(parent_node.board_string)\n\n children.append(terminal_call_node)\n\n # Possible bet actions:\n possible_bets = self.bet_sizing.get_possible_bets(parent_node)\n\n #print (\"possible: \",possible_bets)\n if len(possible_bets) != 0:\n assert (possible_bets.shape[1] == 2), \"bad size of possible bets\"\n #print (\"enter here possibl bets\")\n for i in range(possible_bets.shape[0]):\n child = Node(parent_node.street,possible_bets[i],\\\n 3-parent_node.current_player,parent_node.board)\n child.set_board_string(parent_node.board_string)\n child.set_parent_node(parent_node)\n children.append(child)\n return children\n\n\n\n def get_children_nodes(self,parent_node):\n call_is_transit = parent_node.current_player == constants.players.P2 and\\\n parent_node.bets[0] == parent_node.bets[1] and\\\n parent_node.street < constants.streets_count\n\n chance_node = parent_node.current_player == constants.players.chance\n #print(\"chance \", chance_node)\n if parent_node.terminal:\n #print (\"terminal\")\n return []\n elif chance_node:\n #print(\"chnance: \")\n return self.get_children_nodes_chance_node(parent_node)\n else:\n #print (\"children player node\")\n return self.get_children_player_node(parent_node)\n\n assert(False), \"Problems\"\n\n\n def build_tree_dfs(self,current_node):\n # add bets:\n current_node.fill_additional_attributes(current_node)\n\n # getting children and adding them:\n children = self.get_children_nodes(current_node)\n #print (\"children ret: \",children)\n\n current_node.set_children(children)\n #print(\"current: \", current_node.node_type)\n depth = 0\n #print (\"get here depth: \",depth)\n current_node.actions = [0]*len(children)\n #print (\"len chil \",len(children) )\n for i in range(len(children)):\n children[i].parent = current_node\n #print (\"i: \",i)\n self.build_tree_dfs(children[i])\n #print (\"gets here\")\n depth = max(depth, children[i].depth)\n if i ==0:\n current_node.actions[i] = constants.actions.fold\n elif i == 1:\n current_node.actions[i] = constants.actions.call\n else:\n current_node.actions[i] = max(children[i].bets)\n current_node.depth = depth +1\n return current_node\n\n\n def build_tree(self,params):\n # initialize root\n root = Node(params.street,params.bets,\\\n params.current_player,params.board)\n\n #print (\"params \",params.bet_sizing)\n #print(\"bet: \", params.bet_sizing is None)\n\n if params.bet_sizing == 0:\n self.set_bet_sizing(BetSizing(arguments.bet_sizing))\n #params.set_bet_sizing(BetSizing(arguments.bet_sizing))\n\n #print (self.bet_sizing)\n assert (self.bet_sizing), \"no bet sizing\"\n #self.bet_sizing = params.bet_sizing\n self.limit_to_street = params.limit_to_street\n\n # recursively build the tree\n self.build_tree_dfs(root)\n #print (\"here we get\")\n\n strategy_filling = StrategyFilling()\n strategy_filling.fill_uniform(root)\n return root\n\n# Passing parameters:\nparams= Params(1,[200,200],constants.players.P2,4,False)\n#print(\"bet: \",params.bet_sizing is None)\n\np1 = PokerTreeBuilder()\nroot = p1.build_tree(params)\n\nroot.strategy\nroot.children[1].strategy\n##### CHECK BET SIZING!!!\n\n","repo_name":"chelo26/ReinforcementLearning_project","sub_path":"Python_Implementation/IIG/Tree/TreeBuilder.py","file_name":"TreeBuilder.py","file_ext":"py","file_size_in_byte":8404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} 
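The `TreeBuilder` record above ends by handing the finished root to `StrategyFilling.fill_uniform`, which seeds every decision node with an equal-probability strategy over its actions before any solving starts (in the full implementation this is presumably done per private hand; the sketch below collapses that to a single probability vector). Here is a minimal self-contained sketch of that seeding step — `ToyNode` and this `fill_uniform` are illustrative stand-ins, not the repository's actual `Tree` API:

```python
# Minimal sketch: uniform strategy seeding over a game tree.
# ToyNode and fill_uniform are hypothetical stand-ins for the repo's Tree classes.
import numpy as np


class ToyNode:
    def __init__(self, children=None):
        self.children = children if children is not None else []
        self.strategy = None  # leaves keep None; decision nodes get a vector


def fill_uniform(node):
    """Recursively give each decision node equal mass on every action."""
    if node.children:
        n = len(node.children)
        node.strategy = np.full(n, 1.0 / n)
        for child in node.children:
            fill_uniform(child)


root = ToyNode([ToyNode(), ToyNode([ToyNode(), ToyNode()])])
fill_uniform(root)
print(root.strategy)              # [0.5 0.5]
print(root.children[1].strategy)  # [0.5 0.5]
```

A uniform start is the standard initialization for counterfactual-regret-style solvers, which need a valid probability distribution at every node from the very first iteration.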
+{"seq_id":"72633719754","text":"from dataclasses import dataclass\n\n\n@dataclass\nclass MaxHeap:\n size: int = 0\n capacity: int = 0\n heap: list = None\n\n def __init__(self, capacity: int = 0):\n self.capacity = capacity\n self.heap = [None] * self.capacity\n\n # Insert item into the heap and restores the heap property\n # Returns true if successful, false if there is no room in the heap\n # \"item\" can be any primitive or object that can be compared with other items using the < operator\n def enqueue(self, item) -> bool:\n if self.is_full():\n return False\n self.heap = self.heap + [item]\n self.heap = self.build_heap(self.heap)\n return True\n\n # Returns max without changing the heap, returns None if the heap is empty\n def peek(self):\n if self.is_empty():\n return None\n return self.heap[0]\n\n # Returns max and removes it from the heap and restores the heap property\n def dequeue(self):\n if self.is_empty():\n return None\n max = self.heap[0]\n self.heap[0] = self.heap[-1]\n self.heap = self.heap[:-1]\n self.max_heapify(self.heap, 0)\n self.size -= 1\n return max\n\n # Returns a list of contents of the heap in the order it is stored internal to the heap.\n def contents(self) -> list:\n return self.heap\n\n # Discards all items in the current heap and builds a heap from the items in alist using the bottom-up construction method.\n # If the capacity of the current heap is less than the number of items in alist, the capacity of the heap will be increased to accommodate the items in alist\n def build_heap(self, alist: list) -> list:\n if self.capacity < len(alist):\n self.capacity = len(alist)\n self.size = len(alist)\n self.heap = alist\n for i in range(len(self.heap) // 2, -1, -1):\n self.max_heapify(self.heap, i)\n return self.heap\n\n # Swap two elements in the heap\n def swap(self, i: int, j: int) -> None:\n self.heap[i], self.heap[j] = self.heap[j], self.heap[i]\n\n # Process for finding the largest\n def max_heapify(self, alist: list, index: int):\n largest = index\n left = (2 * index) + 1\n right = (2 * index) + 2\n if left < len(alist) and alist[left] > alist[largest]:\n largest = left\n if right < len(alist) and alist[right] > alist[largest]:\n largest = right\n if largest != index:\n self.swap(index, largest)\n self.max_heapify(alist, largest)\n\n # Returns true if the heap is empty. Otherwise, returns false.\n def is_empty(self) -> bool:\n return self.get_size() == 0\n\n # Returns true if the heap is full. 
Otherwise, returns false.\n def is_full(self) -> bool:\n return self.get_size() == self.get_capacity()\n\n # Returns the capacity of the heap\n def get_capacity(self) -> int:\n return self.capacity\n\n # Returns the number of actual (non-None) elements in the heap\n def get_size(self) -> int:\n return self.size\n\n # Get the index of the left child of the element at index i\n def get_left_child_index(self, i) -> int:\n return (2 * i) + 1\n\n # Get the index of the right child of the element at index i\n def get_right_child_index(self, i) -> int:\n return (2 * i) + 2\n\n # Where the parameter i is an index in the heap and perc_down moves the element stored at that location to its proper place in the heap rearranging elements as it goes.\n def perc_down(self, i) -> None:\n if len(self.heap) < i:\n return\n else:\n while (i * 2) <= self.size:\n j = max(self.get_left_child_index(i), self.get_right_child_index(i))\n if self.heap[i] < self.heap[j]:\n self.swap(i, j)\n i = j\n\n # Where the parameter i is an index in the heap and perc_up moves the element stored at that location to its proper place in the heap rearranging elements as it goes.\n def perc_up(self, i) -> None:\n if len(self.heap) < i:\n return\n else:\n while (\n i // 2 > 0\n and i // 2 < len(self.heap)\n and self.heap[i] > self.heap[i // 2]\n ):\n self.swap(i, i // 2)\n i = i // 2\n\n # Perform heap sort on input alist in ascending order. This method will discard the current contents of the heap, build a new heap using the items in alist, then mutate alist to put the items in ascending order\n def heap_sort_ascending(self, alist) -> None:\n self.heap = self.build_heap(alist)\n self.heap = self.heap.sort()\n","repo_name":"secmancer/csc202-fall-2023","sub_path":"Labs/Lab4/lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"22503507753","text":"import os\nimport h5py\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom barbell2.converters.dcm2npy import Dicom2Numpy\nfrom barbell2.converters.tag2npy import Tag2Numpy\nfrom barbell2.utils import update_labels, is_dicom_file\n\n\ndef get_dcm_pixels(f):\n d2n = Dicom2Numpy(f)\n d2n.set_normalize_enabled(True)\n return d2n.execute()\n\n\ndef get_tag_pixels_for_dcm(f, shape):\n if f.endswith('.dcm'):\n f_tag = f[:-4] + '.tag'\n else:\n f_tag = f + '.tag'\n if not os.path.isfile(f_tag):\n return None\n t2n = Tag2Numpy(f_tag, shape)\n return t2n.execute()\n\n\ndef has_correct_labels(pixels):\n labels = np.unique(pixels)\n print(labels)\n if 0 in labels and 1 in labels and 5 in labels and 7 in labels:\n return True\n return False\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input_dir', help='Input directory')\n parser.add_argument('--output_file', help='Output file', default='output.h5')\n parser.add_argument('--rows', help='Nr. of rows in images', type=int, default=512)\n parser.add_argument('--cols', help='Nr. 
of columns in images', type=int, default=512)\n args = parser.parse_args()\n with h5py.File(args.output_file, 'w') as h5f:\n count = 1\n for f in os.listdir(args.input_dir):\n f_path = os.path.join(args.input_dir, f)\n if is_dicom_file(f_path):\n dcm_pixels = get_dcm_pixels(f_path)\n tag_pixels = get_tag_pixels_for_dcm(f_path, (args.rows, args.cols))\n if tag_pixels is None:\n print('Could not find TAG file for DICOM, skipping...')\n continue\n if has_correct_labels(tag_pixels):\n tag_pixels = update_labels(tag_pixels)\n idd = ':04d'.format(count)\n group = h5f.create_group(idd)\n group.create_dataset('image', data=dcm_pixels)\n group.create_dataset('labels', data=tag_pixels)\n print(f'{idd}: added {f_path}')\n count += 1\n else:\n # print(f'[ERROR] missing labels for {f_path}')\n pass\n print(f'Created HDF5 based on {count} patients')\n\n\ndef check_labels():\n data_dir = '/Users/Ralph/data/scalpel/raw/gkroft-colorectal-t4-1'\n # data_dir = '/Users/Ralph/data/scalpel/raw/l3-cohorts-1/SURG-PANC'\n for f in os.listdir(data_dir):\n print(f)\n # if f.endswith('.dcm') and not f.startswith('._'):\n # d2n = Dicom2Numpy(os.path.join(data_dir, f))\n # pixels = d2n.execute()\n # plt.imshow(pixels, cmap='gray')\n # plt.savefig('/Users/Ralph/Desktop/files/{}.png'.format(f))\n if f.endswith('.tag') and not f.startswith('._'):\n t2n = Tag2Numpy(os.path.join(data_dir, f), shape=(512, 512))\n pixels = t2n.execute()\n labels = np.unique(pixels)\n print(labels)\n pixels_masked = pixels.copy()\n pixels_masked[pixels_masked != 1] = 0\n pixels_masked[pixels_masked == 1] = 1\n plt.imshow(pixels_masked)\n plt.savefig('/Users/Ralph/Desktop/{}.png'.format(f))\n break\n\n\n\nif __name__ == '__main__':\n from barbell2.common import setup_args_env\n setup_args_env([\n '--input_dir=/Users/Ralph/data/scalpel/raw/gkroft-colorectal-t4-1',\n '--output_file=/Users/Ralph/Desktop/gkroft-colorectal-t4-1.h5',\n '--rows=512',\n '--cols=512',\n ])\n # main()\n check_labels()\n","repo_name":"rbrecheisen/barbell2","sub_path":"barbell2/bodycomp/createh5.py","file_name":"createh5.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"711139789","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\n\ndef initialize_parameters(layers):\n i = 0\n w_l = []\n b_l = []\n for l in layers:\n if i == 0:\n w = np.random.rand(l,layers[i])\n else:\n w = np.random.rand(l,layers[i-1])\n b = np.random.rand(1,l)\n w_l.append(w)\n b_l.append(b)\n i += 1\n return w_l,b_l\n\ndef initialize_problem(num_classes):\n classes = []\n\n return classes\n\ndef sigmoid_function(z):\n return 1 / (1 + np.exp(-z))\n\ndef sigmoid_derivative(z):\n return sigmoid_function(z) * (1 - sigmoid_function(z))\n\ndef backprop_deriv(x,a,d,layers):\n # Creamos una matriz de derivadas, donde cada vector corresponde a las derivadas para cada capa\n dCw, dCb = [0 for a in range(len(layers))], [0 for a in range(len(layers))]\n for l in reversed(range(len(layers))):\n d_a = []\n if l == 0:\n for k in range(len(x)):\n d_a.append(((x[k] * d[l]).T)[0])\n d_a = np.array(d_a)\n dCw[l] = d_a\n else:\n for k in range(len(a[l-1])):\n d_a.append(((a[l-1][k] * d[l]).T)[0])\n d_a = np.array(d_a)\n dCw[l] = d_a\n dCb[l] = d[l]\n return dCw, dCb\n\ndef gradient_descent(W,b,dCw,dCb,layers,alpha = 0.01,momentum = False):\n # Recorremos cada uno de los vectores de pesos de las capas\n for l in range(len(layers)):\n W[l] -= alpha * dCw[l].T\n b[l] 
-= alpha * dCb[l].T\n return W,b\n\ndef make_tags(X,Y):\n tags = []\n i = 0\n while i < len(X):\n tag = (X[i],Y[i])\n tags.append(tag)\n i += 1\n return tags\n\ndef one_hot(Y):\n new_Y = []\n for y in Y:\n if y == 0:\n new_Y.append([1,0,0])\n elif y == 1:\n new_Y.append([0,1,0])\n else:\n new_Y.append([0,0,1])\n new_Y = np.array(new_Y)\n return new_Y\n\ndef classify(Y):\n i = 0\n c_y = []\n for y in Y:\n for j in y:\n if j >= 0.5:\n break\n i += 1\n c_y.append(i)\n i = 0\n return c_y\n\ndef split_data(X,Y):\n X_tr = np.concatenate((X[:40],X[50:90],X[100:140]))\n Y_tr = np.concatenate((Y[:40],Y[50:90],Y[100:140]))\n X_t = np.concatenate((X[40:50],X[90:100],X[140:]))\n Y_t = np.concatenate((Y[40:50],Y[90:100],Y[140:]))\n print(X_t)\n print(X_tr)\n print(Y_t)\n print(Y_tr)\n return X,Y,X_t,Y_t\n\ndef main():\n iter = 3500\n i , j = 0 , 0\n layers = [3,10,3]\n d = [0 for a in range(len(layers))]\n Z = []\n a = []\n # Inicializamos los datos para trabajar\n iris = datasets.load_iris()\n X = iris.data[:, :layers[0]]\n Y = iris.target\n Y = one_hot(Y)\n X,Y,X_t,Y_t = split_data(X,Y)\n c = []\n tags = make_tags(X,Y)\n # Inicializamos los pesos por cada capa\n W,b = initialize_parameters(layers)\n # Comenzamos el entrenamiento\n for e in range(iter):\n j = 0\n c.clear()\n for x in X:\n inputs = x\n i = 0\n for l in range(len(layers)):\n # Propagación hacia adelante\n z = W[l].dot(inputs) + b[l]\n Z.append(z)\n inputs = sigmoid_function(z)[0]\n a.append(inputs)\n final_output = inputs\n c.append(final_output)\n # Calculamos el error delta de la última capa de la red\n # Derivada de la función de costo con respecto a la activación de la última capa\n d[-1] = ((final_output - Y[j]) * sigmoid_derivative(Z[-1])).T\n j += 1\n for l in reversed(range(len(layers)-1)):\n d[l] = (W[l+1].T.dot(d[l+1]) * sigmoid_derivative(Z[l]).T)\n # Calculamos las derivadas finales\n dCw,dCb = backprop_deriv(x,a,d,layers)\n W,b = gradient_descent(W,b,dCw,dCb,layers)\n Z.clear()\n a.clear()\n print('Epoca ', e)\n print(final_output)\n for o in c:\n print(o)\n # Ahora va la fase de prueba\n print(\"prueba\")\n for x in X_t:\n inputs = x\n for l in range(len(layers)):\n # Propagación hacia adelante\n z = W[l].dot(inputs) + b[l]\n inputs = sigmoid_function(z)[0]\n final_output = inputs\n print(x,final_output)\nmain()\n","repo_name":"carlos-ochoa/NeuralNetworks","sub_path":"MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"387788335","text":"alien_0 = {\r\n\t'x_position': 0, \r\n\t'y_position': 25, \r\n\t'speed': 'medium',\t\r\n\t}\r\n\t\r\nprint(\"Original x-position: \" + str(alien_0['x_position']))\r\n\r\n# Escrevendo ao par de chave-valor, que a chave 'speed' agora \r\n# aponta para o valor fast\r\nalien_0['speed'] = 'fast'\r\n\r\n# Move o alienígena para a direita\r\n# Determina a distância que o alienígena deve se deslocar de acordo com\r\n# sua velocidade atual\r\n\r\nif alien_0['speed'] == 'slow':\r\n\tx_increment = 1\r\nelif alien_0['speed'] == 'medium':\r\n\tx_increment = 2\r\nelif alien_0['speed'] == 'fast':\r\n\tx_increment = 3\r\n\r\n# A nova posição é a posição antiga somada ao incremento\r\n\t\r\nalien_0['x_position'] = alien_0['x_position'] + x_increment\r\n\r\nprint(\"New x-position: \" + 
str(alien_0['x_position']))\r\n","repo_name":"davicosta12/python_work","sub_path":"Part_01/Cap_06/Exemplo_Dicionário.py","file_name":"Exemplo_Dicionário.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6412637748","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 15 11:45:46 2014\n\n@author: nhpnp3\n\"\"\"\n\nimport numpy as np\nimport gsh_hex_tri_L0_16 as gsh\nimport functions as rr\nimport h5py\nimport time\n\n\ndef euler_to_gsh(el, H, ns, set_id, step, wrt_file):\n\n start = time.time()\n\n f = h5py.File(\"data.hdf5\", 'a')\n dset_name = 'euler_%s%s_s%s' % (ns, set_id, step)\n euler = f.get(dset_name)[...]\n euler = euler.swapaxes(1, 2)\n\n euler_GSH = np.zeros([ns, H, el**3], dtype='complex128')\n\n for sn in range(ns):\n\n tmp = gsh.gsh_eval(euler[sn, :, :], np.arange(H))\n euler_GSH[sn, :, :] = tmp.T\n\n dset_name = 'euler_GSH_%s%s_s%s.npy' % (ns, set_id, step)\n f.create_dataset(dset_name, data=euler_GSH)\n\n f.close()\n\n end = time.time()\n timeE = np.round((end - start), 3)\n\n msg = \"Conversion from Euler angles to GSH coefficients completed:\"\\\n \" %s seconds\" % timeE\n rr.WP(msg, wrt_file)\n","repo_name":"npaulson/MKS-Experimentation","sub_path":"fip_collab/2016_05_03_strain_stress_study/euler_to_gsh.py","file_name":"euler_to_gsh.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"11981553751","text":"import numpy\n\n#import quaternion\nfrom pyquaternion import Quaternion\n\nfrom copy import copy\n\nfrom imucapture.global_data import *\nfrom imucapture.data import Data\n\n\nclass Transform():\n def __init__(self):\n pass\n\n\n\n\n def filter(self, data, nsamp):\n filtered = numpy.zeros_like(data)\n for i in range(3):\n filtered[:, i] = numpy.convolve(data[:, i], numpy.ones((nsamp,))/nsamp, mode='same')\n return filtered\n\n\n\n\n \n def get_orientation_integrate(self, data, calib, imu, filter_num_samples, initwindow=0.5):\n return self.get_orientation_madgwick(data, calib, imu, filter_num_samples, initwindow=initwindow, beta=0.0)\n\n\n\n def get_orientation_dsf(self, data, calib, imu, filter_num_samples, accdynmag=200.0,\n initial_gravity = None, initial_window = 0.5):\n # Dynamic snap free Kalman filter for sensor fusion\n # x is the state: [theta, bias, adyn, jerk]^T\n # where theta are the Euler angles\n\n # GET ACC IN MPS2\n acc = data.imu_data[imu, Data.ACCEL_INDEX, :, :].transpose()\n\n # GET GYRO IN RADIANS PER SEC\n gyro = data.imu_data[imu, Data.GYRO_INDEX, :, :].transpose()\n\n # FILTER DATA\n acc = self.filter(acc, filter_num_samples)\n gyro = self.filter(gyro, filter_num_samples)\n\n # GET TIME IN MS\n #time = numpy.array(data.imu_data['timestamps'])\n\n # CONVERT TIME TO SECONDS\n #time = time / 1000.0\n\n # COMPUTE TIME ARRAY IN SECONDS, ASSUME GLOBAL VALUE IS CORRECT\n time = numpy.array(numpy.arange(0, data.num_samples*Global_data.SECONDS_PER_SAMPLE, Global_data.SECONDS_PER_SAMPLE))\n\n\n # Get the initial orientation of the chip in this data set\n if initial_gravity is None:\n initial_gravity = numpy.mean(acc[time < initial_window, :], axis=0)\n\n bias_gyro = numpy.mean(calib.still_gyro, axis=0)\n\n # GET NOISE COVARIANCES\n Qgyro = numpy.cov(calib.still_gyro, rowvar=False)\n Qacc = numpy.cov(calib.still_accel, rowvar=False)\n\n # BIAS NOISE COVARIANCE (ASSUMING LOW DRIFT)\n Qbias = 1e-10 * Qacc\n\n Qdyn = accdynmag * stack_matrices([[Qacc, 
numpy.zeros((3, 3))],\n [numpy.zeros((3, 3)), Qacc]])\n\n xkm1 = numpy.zeros((12,))\n dt = numpy.diff(time)\n dt = numpy.insert(dt, 0, dt[0:0])\n dt1 = dt[0]\n\n # MAKE 12 X 12 MATRIX\n Pkm1 = stack_matrices([\n [(Qgyro + Qbias)*dt1**2, -Qbias*dt1, numpy.zeros((3, 6))],\n [-Qbias*dt1, Qbias, numpy.zeros((3, 6))],\n [numpy.zeros((6, 3)), numpy.zeros((6, 3)), Qdyn]])\n\n gyro_unbiased = gyro - bias_gyro\n\n eulerEKF = []\n aD = []\n err = []\n PkEKF = []\n xkEKF = []\n Rk = Qacc\n\n for dt1, omegak, accel in zip(dt, gyro_unbiased, acc):\n Fk, xkM, Bk = self._system_dynamics(xkm1, omegak, dt1)\n\n Qk = stack_matrices([\n [Bk.dot(Qgyro + Qbias).dot(Bk.T)*dt1**2, -Bk.dot(Qbias)*dt1, numpy.zeros((3, 6))],\n [-Qbias.dot(Bk.T)*dt1, Qbias, numpy.zeros((3, 6))],\n [numpy.zeros((6,6)), Qdyn]])\n\n PkM = Fk.dot(Pkm1).dot(Fk.T) + Qk\n hk, Jh = self._observation_dynamics(xkM, initial_gravity)\n\n Hk = Jh\n\n Sk = Hk.dot(PkM).dot(Hk.T) + Rk\n Kk = PkM.dot(Hk.T).dot(numpy.linalg.pinv(Sk))\n xk = xkM + Kk.dot(accel - hk)\n Pk = (numpy.eye(12) - Kk.dot(Hk)).dot(PkM)\n\n QT = eul2rotm(xk[:3])\n\n eulerEKF.append(xk[:3])\n aD.append(QT.T.dot(xk[6:9]))\n err.append(accel - ((QT.dot(initial_gravity) + xk[6:9])))\n\n PkEKF.append(Pk)\n xkEKF.append(xk)\n\n Pkm1 = Pk\n xkm1 = xk\n\n orient_sensor = numpy.pad(numpy.array(eulerEKF), ((1, 0), (0, 0)), mode='edge')\n accdyn_sensor = numpy.pad(numpy.array(aD), ((1, 0), (0, 0)), mode='edge')\n\n accdyn_world = numpy.empty([3,0])\n orient_world = numpy.empty([3,0])\n for chiprpy, dynamacceleration in zip(orient_sensor, accdyn_sensor):\n rotation_chip = eul2rotm(chiprpy)\n rotation = rotation_chip.dot(calib.imu_bases[imu])\n rotation_matrix_world = calib.imu_bases[imu].T.dot(rotation)\n eul = rotm2eul(rotation_matrix_world)\n orient_world = numpy.append(orient_world, [[eul[0]], [eul[1]], [eul[2]]], 1)\n\n rot = rotation.T.dot(dynamacceleration)\n accdyn_world = numpy.append(accdyn_world, [[rot[0]], [rot[1]], [rot[2]]], 1)\n\n print(accdyn_world.shape)\n print(orient_world.shape)\n return (accdyn_world, orient_world)\n\n\n\n def get_orientation_madgwick(self, data, calib, imu, filter_num_samples, initwindow=0.5, beta=2.86):\n\n # GET ACC IN MPS2\n acc = data.imu_data[imu, Data.ACCEL_INDEX, :, :].transpose()\n\n # GET GYRO IN RADIANS PER SEC\n gyro = data.imu_data[imu, Data.GYRO_INDEX, :, :].transpose()\n\n # FILTER DATA\n acc = self.filter(acc, filter_num_samples)\n gyro = self.filter(gyro, filter_num_samples)\n\n # CONVERT ACCEL DATA TO GS\n acc = acc / 9.81\n\n # GET TIME IN MS\n #time = numpy.array(data.imu_data['timestamps'])\n # CONVERT TIME TO SECONDS\n #time = time / 1000.0\n\n # COMPUTE TIME ARRAY IN SECONDS, ASSUME GLOBAL VALUE IS CORRECT\n time = numpy.array(numpy.arange(0, data.num_samples*Global_data.SECONDS_PER_SAMPLE, Global_data.SECONDS_PER_SAMPLE))\n\n # rotation from chip to world is just the conjugate of the world to chip rotation\n qchip2world = Quaternion(matrix=calib.imu_bases[imu])\n # qworld2chip = Quaternion(matrix=calib.imu_bases[imu])\n\n qorient = [0] * time.size\n\n sampfreq = 1.0/numpy.mean(numpy.diff(time))\n\n dt = 1.0 / sampfreq\n\n qorient[0] = qchip2world.conjugate\n\n for i, gyro1 in enumerate(gyro[1:, :], start=1):\n qprev = qorient[i-1]\n\n acc1 = acc[i, :]\n acc1 = acc1 / numpy.linalg.norm(acc1)\n\n # QUATERNION ANGULAR CHANGE FROM THE GRYO\n qdotgyro = 0.5 * (qprev * Quaternion(0, gyro1[0], gyro1[1], gyro1[2]))\n\n\n if beta > 0:\n # GRADIENT DESCENT ALGORITHM CORRECTIVE STEP\n qp = qprev.elements\n\n F = numpy.array([2*(qp[1]*qp[3] 
- qp[0]*qp[2]) - acc1[0],\n 2*(qp[0]*qp[1] + qp[2]*qp[3]) - acc1[1],\n 2*(0.5 - qp[1]**2 - qp[2]**2) - acc1[2]])\n J = numpy.array([[-2*qp[2], 2*qp[3], -2*qp[0], 2*qp[1]],\n [2*qp[1], 2*qp[0], 2*qp[3], 2*qp[2]],\n [0, -4*qp[1], -4*qp[2], 0]])\n\n step = numpy.dot(J.T, F)\n step = step / numpy.linalg.norm(step)\n\n step = Quaternion(*step)\n\n qdot = qdotgyro - (numpy.deg2rad(beta) * step)\n else:\n qdot = qdotgyro\n\n qorient[i] = qprev + (qdot * dt)\n\n qorient[i] = qorient[i].normalised\n\n\n # GET THE GRAVITY VECTOR\n # GRAVITY IS +Z\n\n gvec = [(q.conjugate * Quaternion(0, 0, 0, 1) * q).elements[1:] for q in qorient]\n\n accdyn_sensor = acc - gvec\n\n # ATTEMPT TO CONVERT FROM CHIP COORDINATES TO WORLD\n # qorient IS THE QUATERNION THAT SPECIFIES THE CURRENT ORIENTATION OF THE CHIP, RELATIVE TO ITS INITIAL ORIENTATION\n # qworld2chip IS THE QUATERNION THAT ROTATES FROM THE WORLD FRAME TO THE INITIAL CHIP ORIENTATION\n\n qorient_world = [qchip2world.conjugate * q1.conjugate for q1 in qorient]\n\n orient_world_rotm = [q1.rotation_matrix for q1 in qorient_world]\n\n orient_world = numpy.array([rotm2eul(R1) for R1 in orient_world_rotm]).transpose()\n\n\n # ROTATE ACCDYN INTO THE WORLD COORDINATE SYSTEM\n\n qaccdyn_world = [(qchip2world.conjugate * Quaternion(0, a1[0], a1[1], a1[2]) * qchip2world) for a1 in accdyn_sensor]\n\n\n accdyn_world = numpy.array([q.elements[1:] for q in qaccdyn_world]).transpose()\n\n # CONVERT ACCEL DATA BACK TO MPS2\n accdyn_world *= 9.81\n\n\n return (accdyn_world, orient_world)\n\n\n\n\n\n\n def _system_dynamics(self, xk, omegak, dt):\n phi, theta, psi = xk[:3]\n biask = xk[3:6]\n\n sPh = numpy.sin(phi)\n cPh = numpy.cos(phi)\n tTh = numpy.tan(theta)\n scTh = 1 / numpy.cos(theta)\n\n Bk = numpy.array([[1, sPh*tTh, cPh*tTh ],\n [0, cPh, -sPh ],\n [0, sPh*scTh, cPh*scTh]])\n\n # partial diffs\n Bk_phi = numpy.array([[0, cPh*tTh, -sPh*tTh ],\n [0, -sPh, -cPh ],\n [0, cPh*scTh, -sPh*scTh]])\n\n Bk_theta = numpy.array([[0, sPh*scTh**2, cPh*scTh**2 ],\n [0, 0, 0 ],\n [0, sPh*scTh*tTh, cPh*scTh*tTh]])\n\n Bk_psi = numpy.zeros((3, 3))\n\n unbiased_omegak = omegak - biask\n unbiased_omegak = unbiased_omegak[:, numpy.newaxis]\n\n Bkomega = numpy.hstack((numpy.dot(Bk_phi, unbiased_omegak),\n numpy.dot(Bk_theta, unbiased_omegak),\n numpy.dot(Bk_psi, unbiased_omegak)))\n\n jerk = xk[9:]\n\n Fk = stack_matrices([\n [numpy.eye(3) + Bkomega*dt, -Bk*dt, numpy.zeros((3, 6))],\n [numpy.zeros((3, 3)), numpy.eye(3), numpy.zeros((3, 6))],\n [numpy.zeros((3, 6)), numpy.eye(3), numpy.eye(3)*dt],\n [numpy.zeros((3, 6)), numpy.zeros((3, 3)), numpy.eye(3)]])\n\n xkp1 = numpy.hstack((xk[:3] + numpy.dot(Bk, unbiased_omegak).squeeze()*dt, xk[3:6],\n xk[6:9] + jerk*dt, xk[9:]))\n\n return Fk, xkp1, Bk\n\n\n def _observation_dynamics(self, xk, gN):\n \"\"\"gN = gravity in inertial coordinate system (3x1)\"\"\"\n phi, theta, psi = xk[:3]\n\n # rotation matrices\n Rz_yaw = numpy.array([[numpy.cos(psi), numpy.sin(psi), 0],\n [-numpy.sin(psi), numpy.cos(psi), 0],\n [0, 0, 1]])\n Ry_pitch = numpy.array([[numpy.cos(theta), 0, -numpy.sin(theta)],\n [0, 1, 0],\n [numpy.sin(theta), 0, numpy.cos(theta)]])\n Txrx_roll = numpy.array([[1, 0, 0],\n [0, numpy.cos(phi), numpy.sin(phi)],\n [0, -numpy.sin(phi), numpy.cos(phi)]])\n\n # rates/derivatives\n Rz_yaw_rate = numpy.array([[-numpy.sin(psi), numpy.cos(psi), 0],\n [-numpy.cos(psi), -numpy.sin(psi), 0],\n [0, 0, 0]])\n Ry_pitch_rate = numpy.array([[-numpy.sin(theta), 0, -numpy.cos(theta)],\n [0, 0, 0],\n [numpy.cos(theta), 0, -numpy.sin(theta)]])\n 
Txrx_roll_rate = numpy.array([[0, 0, 0],\n [0, -numpy.sin(phi), numpy.cos(phi)],\n [0, -numpy.cos(phi), -numpy.sin(phi)]])\n\n QT = Txrx_roll.dot(Ry_pitch).dot(Rz_yaw)\n QT_roll = Txrx_roll_rate.dot(Ry_pitch).dot(Rz_yaw)\n QT_pitch = Txrx_roll.dot(Ry_pitch_rate).dot(Rz_yaw)\n QT_yaw = Txrx_roll.dot(Ry_pitch).dot(Rz_yaw_rate)\n\n Jh = numpy.vstack((QT_roll.dot(gN), QT_pitch.dot(gN), QT_yaw.dot(gN), numpy.zeros((3, 3)),\n numpy.eye(3), numpy.zeros((3, 3)))).T\n hk = QT.dot(gN) + xk[6:9]\n\n return hk, Jh\n\n\ndef stack_matrices(M):\n m = []\n for row in M:\n m.append(numpy.hstack(tuple(row)))\n return numpy.vstack(tuple(m))\n\n\n\n\n\ndef eul2rotm(x):\n (phi, theta, psi) = x\n\n Rz_yaw = numpy.array([[numpy.cos(psi), numpy.sin(psi), 0 ],\n [-numpy.sin(psi), numpy.cos(psi), 0 ],\n [0, 0, 1 ]])\n Ry_pitch = numpy.array([[numpy.cos(theta), 0, -numpy.sin(theta)],\n [0, 1, 0 ],\n [numpy.sin(theta), 0, numpy.cos(theta) ]])\n Txrx_roll = numpy.array([[1, 0, 0 ],\n [0, numpy.cos(phi), numpy.sin(phi) ],\n [0, -numpy.sin(phi), numpy.cos(phi) ]])\n\n return Txrx_roll.dot(Ry_pitch.dot(Rz_yaw))\n\n\ndef rotm2eul(rotm, singularity=0.001):\n phi = numpy.arctan2(rotm[1, 2], rotm[2, 2])\n theta = -numpy.arcsin(rotm[0, 2])\n psi = numpy.arctan2(rotm[0, 1], rotm[0, 0])\n return (phi, theta, psi)\n\n\n\n","repo_name":"DaveBuckingham/fish","sub_path":"imucapture/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":13411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"18818640676","text":"from django.http import HttpResponse\nfrom django.template import loader\nfrom rest_framework import viewsets\n\nfrom .serializers import YTVideoSerializer\nfrom .models import YTVideoStat, YTVideoTag\n\ndef HomeView(request):\n template = loader.get_template('index.html')\n context = {}\n return HttpResponse(template.render(context, request))\n\nclass YTVideoStatList(viewsets.ModelViewSet):\n serializer_class = YTVideoSerializer\n \n def get_queryset(self):\n queryset = YTVideoStat.objects.all()\n tag = self.request.query_params.get('tag')\n if tag is not None:\n matchedTags = YTVideoTag.objects.filter(tag__icontains=tag).values_list('videoId', flat=True)\n queryset = queryset.filter(videoId__in=list(matchedTags))\n\n sortByPerformance = self.request.query_params.get('sortByPerformance')\n if sortByPerformance is not None:\n order = \"-videoPerformance\"\n if sortByPerformance == \"asc\":\n order = \"videoPerformance\"\n queryset = queryset.order_by(order)\n return queryset\n","repo_name":"astinaam/videotracker","sub_path":"ytvideo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"21756368316","text":"import socketio\nfrom resources.helpers import LOGGER\n\nasync_mode = 'threading'\n\n# Starting Socket server for streaming\nsocket_server = socketio.Server(logger=True, async_mode=async_mode)\n\n\n@socket_server.event\ndef update(msg):\n \"\"\"\n Function to emit the message about the put operation.\n \"\"\" \n try:\n socket_server.emit('Update', msg)\n except Exception as error:\n LOGGER.error(\"Error while emitting message. 
{}\".format(error))\n\n\n@socket_server.event\ndef connect(sid, test):\n \"\"\"\n The function to establish a connection.\n \"\"\"\n try:\n socket_server.emit('Connected', {'data': 'Connected'}, room=sid)\n except socketio.exceptions.ConnectionError as error:\n LOGGER.error(\"Connection Failed {}\".format(error))\n\n\n@socket_server.event\ndef disconnect(sid):\n \"\"\"\n The function is called when a client disconnects\n \"\"\"\n LOGGER.info('Client Disconnected')\n","repo_name":"nileshbhadana/KeyValueStore","sub_path":"resources/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"34171253462","text":"#!python.exe\nimport cgi\nimport sys\nimport requests\nfrom send_message import send_hello_message\n\n# import email_validator\n# import db_defaults\n# import hello_message\nsys.path.append('..\\\\py_scripts')\nfrom email_validator import construct_confirmation, decrypt_uid\nfrom db_defaults import get_auth_data\nsys.path.append('..\\\\public')\n\n# http://our_site.innopolis.ru/public/confirm.py?id=123&chat=123&email=i.ivanov&key=securekey\n\nprint('Content-type:text/html\\r\\n\\r\\n')\n\ndef _get_printable_info():\n \"\"\"\n Построение ответа сервера на запрос.\n :return: Ответ сервера на запрос\n \"\"\"\n form = cgi.FieldStorage()\n user_id = form.getvalue('id')\n if not user_id:\n return 'User ID is required'\n user_id = decrypt_uid(user_id)\n chat_id = form.getvalue('chat')\n if not chat_id:\n return 'Chat ID is required'\n chat_id = decrypt_uid(chat_id)\n # import smth\n email = form.getvalue('email')\n if not email:\n return 'Email is required'\n key = form.getvalue('key')\n if not key:\n return 'Key is required'\n\n resp = requests.get('http://127.0.0.1:8000/db/users/' + email + '/')\n if resp.status_code != 200:\n return 'User not found'\n\n our_user = resp.json()\n # Сверка ключа из ссылки с хэш-ключом в базе данных\n if our_user['activationKey'] == 'None' or our_user['activationKey'] != construct_confirmation(user_id, email, key):\n return 'Key is incorrect'\n\n resp = requests.put('http://127.0.0.1:8000/db/users/' + str(our_user['id']) + '/',\n data={'email': email, 'userID': user_id, 'chatID': chat_id, 'activationKey': 'None'}, auth=get_auth_data())\n if resp.status_code != 200:\n return 'DB authentication failed! Please, try again later.'\n print('User was successfully added! Check your chat with bot to start conversation.')\n send_hello_message(int(user_id), int(chat_id))\n\n\n# Построение ответа\nprint(_get_printable_info())\n","repo_name":"modbrin/Helpdesk-Bot-Complete-Server","sub_path":"htdocs/public/confirm.py","file_name":"confirm.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"6527008477","text":"\"\"\"\n[987. 
Vertical Order Traversal of a Binary Tree](https://leetcode.com/problems/vertical-order-traversal-of-a-binary-tree/)\nGiven a binary tree, return the vertical order traversal of its nodes' values.\n\nFor each node at position (X, Y), its left and right children respectively will be at positions (X-1, Y-1) and (X+1, Y-1).\n\nRunning a vertical line from X = -infinity to X = +infinity, whenever the vertical line touches some nodes, we report\nthe values of the nodes in order from top to bottom (decreasing Y coordinates).\n\nIf two nodes have the same position, then the value of the node that is reported first is the value that is smaller.\n\nReturn a list of non-empty reports in order of X coordinate. Every report will have a list of values of nodes.\n\nExample 1:\nInput: [3,9,20,null,null,15,7]\nOutput: [[9],[3,15],[20],[7]]\nExplanation:\nWithout loss of generality, we can assume the root node is at position (0, 0):\nThen, the node with value 9 occurs at position (-1, -1);\nThe nodes with values 3 and 15 occur at positions (0, 0) and (0, -2);\nThe node with value 20 occurs at position (1, -1);\nThe node with value 7 occurs at position (2, -2).\n\nExample 2:\nInput: [1,2,3,4,5,6,7]\nOutput: [[4],[2],[1,5,6],[3],[7]]\nExplanation:\nThe node with value 5 and the node with value 6 have the same position according to the given scheme.\nHowever, in the report \"[1,5,6]\", the node value of 5 comes first since 5 is smaller than 6.\n\n\nNote:\nThe tree will have between 1 and 1000 nodes.\nEach node's value will be between 0 and 1000.\n\"\"\"\n\n\n# Solutions\n\nfrom collections import defaultdict\nfrom typing import List # required for the List[List[int]] annotation below\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n \"\"\"\n Time Complexity: O(n log n), due to sorting each column plus the recursion\n Space Complexity: O(n)\n \"\"\"\n\n def __init__(self):\n # Constants for the iteration\n self.max_level = float(\"-inf\")\n self.min_level = float(\"inf\")\n\n def verticalTraversal(self, root: TreeNode) -> List[List[int]]:\n res_dict = defaultdict(list)\n\n # x is the horizontal position (column); y is the depth\n def dfs(root, x, y):\n # Base Case\n if not root:\n return\n\n # Setting min and max value for the iteration\n self.min_level = min(x, self.min_level)\n self.max_level = max(x, self.max_level)\n\n # Forming dictionary with keys as x-level\n res_dict[x].append((y, root.val))\n\n # Recursion\n dfs(root.left, x - 1, y + 1)\n dfs(root.right, x + 1, y + 1)\n\n dfs(root, 0, 0)\n\n res = []\n\n # Iterating over each column, leftmost to rightmost\n for ind in range(self.min_level, self.max_level + 1):\n # Sort each column by (y, value) so nodes sharing a position are\n # reported in increasing order of value\n res.append([v for k, v in sorted(res_dict[ind])])\n\n return res\n\n\n# Runtime: 28 ms, faster than 96.01% of Python3 online submissions\n# Memory Usage: 14 MB, less than 57.15% of Python3 online submissions\n","repo_name":"ramanaditya/data-structure-and-algorithms","sub_path":"leetcode/tree/vertical-order-traversal-of-a-binary-tree.py","file_name":"vertical-order-traversal-of-a-binary-tree.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"28"} +{"seq_id":"6424568060","text":"''' PID (Proportional-Integral-Derivative) Controller\n\nUsed to keep speed close to a set speed.\n\nMarko Rasetina\nJuly 31, 2019\n'''\n\n\nclass PIDController:\n def 
__init__(self, Kp, Ki, Kd):\n self.Kp = Kp\n self.Ki = Ki\n self.Kd = Kd\n self.speed = 0.\n self.errors = [0] * 29\n\n def set_speed(self, speed):\n self.speed = speed\n\n def update(self, measurement):\n self.errors.append(self.speed - measurement)\n\n output = (self.Kp * self.errors[-1] +\n self.Ki * sum(self.errors) +\n self.Kd * self.errors[-2])\n\n if (output > -0.3) and (output < 0.0):\n output = 0.0\n\n self.errors.pop(0)\n\n return output\n","repo_name":"RKal-El/Diplomski","sub_path":"Code/PID_Controller.py","file_name":"PID_Controller.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"512055577","text":"import os, sys, json\nimport numpy as np\nimport tensorflow as tf\nsys.path.append('../datahub/') \n\nfrom chi_lib.ProgressBar import ProgressBar\nfrom chi_lib.library import *\n\nfrom blocks.Block import Block\nfrom model.graph.graph_utils import Graph\n\n\nclass HeuristicGraphAdjMat(Block):\n\tdef __init__(self, json_reader_block, name=None):\n\t\tBlock.__init__(self, [json_reader_block], name=name)\n\t\tself.json_reader_block = json_reader_block\n\t\tself.num_edge = 6\n\t\t\n\tdef implement(self):\n\t\t_, samples = self.json_reader_block.get()['json_samples']\n\t\tsamples = list(samples)\n\t\t\n\t\toutputs_list = []\n\t\tlogTracker.log('Building heuristic graph for ' + str(len(samples)) + ' samples')\n\t\tprogress = ProgressBar('Building', len(samples))\n\t\tfor sample in samples:\n\t\t\toutputs_list.append(get_heuristic_graph_adj_mat(sample))\n\t\t\tprogress.increase()\n\t\tprogress.done()\n\t\tadj_mats = tf.placeholder(dtype=tf.float32, shape=[None, None, self.num_edge, None], name='heuristic_adj_mat')\n\n\t\treturn {\n\t\t\t'adj_mats' : (adj_mats, outputs_list)\n\t\t}\n\t\t\n\ndef get_heuristic_graph_adj_mat(sample):\n\tsample = convert_to_old_format(sample)\n\tg = Graph(sample, None)\n\treturn g.adj\n\n\ndef convert_to_old_format(standard_format_sample):\n\tdef get_x_y_w_h(loc):\n\t\txs = [p[0] for p in loc]\n\t\tys = [p[1] for p in loc]\n\t\tmin_x, min_y, max_x, max_y = min(xs), min(ys), max(xs), max(ys)\n\t\treturn min_x, min_y, max_x - min_x, max_y - min_y\n\n\tres = {}\n\tfor i, tl in enumerate(standard_format_sample):\n\t\tlabel = 'None'\n\t\tif 'label_info' in tl:\n\t\t\tlabel = tl['label_info']['formal_key']\n\n\t\tres['text_line' + str(i)] = {\n\t\t\t\"value\": tl['text'], \n\t\t\t\"location\": get_x_y_w_h(tl['location']), \n\t\t\t\"label\": label\n\t\t}\n\treturn res\n","repo_name":"datvo06/GNN_explainer","sub_path":"from_blockflow/blockflow/blocks/features/HeuristicGraphAdjMat.py","file_name":"HeuristicGraphAdjMat.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"33164913705","text":"def diabetes():\r\n\tcount=0\r\n\tfor i in symp:\r\n\t\tif i=='4':\r\n\t\t\tcount+=1\r\n\t\tif i=='12':\r\n\t\t\tcount+=1\r\n\t\tif i=='15':\r\n\t\t\tcount+=1\r\n\t\telse:\r\n\t\t\tcontinue\r\n\tif count>=2:\r\n\t\tprint(\"Patient has DIABETES!\")\r\n\t\tprint(\"Medication:\\n Statin\\n Insulin\\n Exercising and maintaining diet\")\r\ndiabetes()\r\n\r\ndef pneumonia():\r\n\tcount=0\r\n\tfor i in symp:\r\n\t\tif i=='1':\r\n\t\t\tcount+=1\r\n\t\tif i=='4':\r\n\t\t\tcount+=1\r\n\t\tif i=='9':\r\n\t\t\tcount+=1\r\n\t\tif i=='10':\r\n\t\t\tcount+=1\r\n\t\tif i=='5':\r\n\t\t\tcount+=1\r\n\t\telse:\r\n\t\t\tcontinue\r\n\tif count>=4:\r\n\t\tprint(\"Patient has been affected with 
PNEUMONIA!\")\r\n\t\tprint(\"Medication:\\n Antibiotics\\n Fluoroquinolones\\n Tetracyclines\")\r\n","repo_name":"DEEKSHACodeaim/SpaceJam2021-Amateur-Coders","sub_path":"diabetes_and_pneumonia.py","file_name":"diabetes_and_pneumonia.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"1550661807","text":"#!/usr/bin/python3\nimport cv2\nimport numpy as np\nimport RPi.GPIO as GPIO\nimport wiringpi\nimport cut\nimport time\nimport sys\nimport io\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\n#Initialize camera\ncamera = PiCamera()\ncamera.resolution = (320, 240)\ncamera.color_effects = (128, 128)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=(320, 240))\n\n\n# -----------------------------------------------------------\n\ndef button_pressed():\n global run\n if not run:\n run = True\n else:\n run = False\n\n\n\ndef setup_gpios():\n # Setup GPIOs\n wiringpi.wiringPiSetupGpio()\n\n wiringpi.pinMode(LED_GPIO, wiringpi.GPIO.OUTPUT)\n wiringpi.digitalWrite(LED_GPIO, wiringpi.GPIO.OUTPUT)\n\n wiringpi.pinMode(MOTOR_SPL_EN_GPIO, wiringpi.GPIO.OUTPUT)\n\n wiringpi.pinMode(MOTOR_DIR_GPIO, wiringpi.GPIO.OUTPUT)\n wiringpi.pinMode(MOTOR_DISABLE_GPIO, wiringpi.GPIO.OUTPUT)\n\n wiringpi.pinMode(MOTOR_PWM_GPIO, wiringpi.GPIO.PWM_OUTPUT)\n wiringpi.pinMode(SERVO_PWM_GPIO, wiringpi.GPIO.PWM_OUTPUT)\n\n wiringpi.pwmSetMode(wiringpi.GPIO.PWM_MODE_MS)\n wiringpi.pwmSetClock(2)\n wiringpi.pwmSetRange(2000)\n\n wiringpi.pinMode(SONIC_ECHO_GPIO, wiringpi.GPIO.INPUT)\n wiringpi.pinMode(SONIC_TRIG_GPIO, wiringpi.GPIO.OUTPUT)\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(SW_GPIO, GPIO.IN)\n GPIO.add_event_detect(SW_GPIO, GPIO.FALLING, button_pressed, 200)\n\n# -----------------------------------------------------------\n\n\n# Configuration of basic constant\nMIN_ANGLE = 70\nMAX_ANGLE = 130\nCENTER = MIN_ANGLE + (MAX_ANGLE - MIN_ANGLE) / 2\nSPEED = 600\n# Motor supply enable\nMOTOR_SPL_EN_GPIO = 10\n# DC motor PWM GPIO\nMOTOR_PWM_GPIO = 12\n# DC motor direction GPIO\nMOTOR_DIR_GPIO = 6\n# DC motor disable GPIO\nMOTOR_DISABLE_GPIO = 19\n# Servo motor PWM GPIO\nSERVO_PWM_GPIO = 13\n\n\nSONIC_ECHO_GPIO = 24\nSONIC_TRIG_GPIO = 23\n\nLED_GPIO = 4\nSW_GPIO = 26\n\ndef setup_gpios():\n # Setup GPIOs\n wiringpi.wiringPiSetupGpio()\n\n wiringpi.pinMode(LED_GPIO, wiringpi.GPIO.OUTPUT)\n wiringpi.digitalWrite(LED_GPIO, wiringpi.GPIO.OUTPUT)\n\n wiringpi.pinMode(MOTOR_SPL_EN_GPIO, wiringpi.GPIO.OUTPUT)\n\n wiringpi.pinMode(MOTOR_DIR_GPIO, wiringpi.GPIO.OUTPUT)\n wiringpi.pinMode(MOTOR_DISABLE_GPIO, wiringpi.GPIO.OUTPUT)\n\n wiringpi.pinMode(MOTOR_PWM_GPIO, wiringpi.GPIO.PWM_OUTPUT)\n wiringpi.pinMode(SERVO_PWM_GPIO, wiringpi.GPIO.PWM_OUTPUT)\n\n wiringpi.pwmSetMode(wiringpi.GPIO.PWM_MODE_MS)\n wiringpi.pwmSetClock(2)\n wiringpi.pwmSetRange(2000)\n\n wiringpi.pinMode(SONIC_ECHO_GPIO, wiringpi.GPIO.INPUT)\n wiringpi.pinMode(SONIC_TRIG_GPIO, wiringpi.GPIO.OUTPUT)\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(SW_GPIO, GPIO.IN)\n GPIO.add_event_detect(SW_GPIO, GPIO.FALLING, button_pressed, 200)\n\nsetup_gpios() \n# -----------------------------------------------------------\n\n# Center wheels\nwiringpi.pwmWrite(SERVO_PWM_GPIO, int(CENTER))\n\n\n# -----------------------------------------------------------\n\nKp = 5 # 1000\nKi = 0 # 100\nKd = 0 # 10000\n# offset = cut.find_lines_and_center()[1]\nintegral = 0\nlastError = 0\nderivative = 0\nmeasuring_time = False\n\n# 
-----------------------------------------------------------\n\n# setup_gpios()\n\n# -----------------------------------------------------------\n\n# Setup motor\nwiringpi.pwmWrite(MOTOR_PWM_GPIO, 0)\nwiringpi.digitalWrite(MOTOR_SPL_EN_GPIO, 1)\nwiringpi.digitalWrite(MOTOR_DISABLE_GPIO, 0)\n\n# -----------------------------------------------------------\n\nwiringpi.digitalWrite(MOTOR_DIR_GPIO, 0) # forward\n\n# Center wheels\nwiringpi.pwmWrite(SERVO_PWM_GPIO, int(CENTER))\nprint(CENTER)\n# -----------------------------------------------------------\n\nKp = 1 # 1000\nKi = 0 # 100\nKd = 0 # 10000\nintegral = 0\nlastError = 0\nderivative = 0\nmeasuring_time = False\nfirst_time = True\n\n# -----------------------------------------------------------\n\nwiringpi.digitalWrite(MOTOR_DIR_GPIO, 0) # forward\nwiringpi.pwmWrite(MOTOR_PWM_GPIO, int(SPEED))\n\ntime.sleep(0.2)\n\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n img = frame.array\n # if LightValue < 5:\n # if not measuring_time:\n # start_time = time.time()\n # measuring_time = True\n # elapsed_time = time.time() - start_time\n #\n # else:\n # measuring_time = False\n #\n # if elapsed_time >= 3: # if lost line, find it\n # while LightValue <= 5:\n # final_turn = CENTER + 15\n # wiringpi.pwmWrite(SERVO_PWM_GPIO, final_turn)\n # wiringpi.pwmWrite(MOTOR_PWM_GPIO, SPEED / 8)\n\n rawCapture.truncate(0)\n error, st, ll, rl, _ = cut.find_lines_and_center(img, 120, 170)\n integral = integral + error\n derivative = error - lastError\n Turn = Kp * error + Ki * integral + Kd * derivative\n # Turn /= 100 # ????\n final_turn = CENTER + Turn\n if final_turn < MIN_ANGLE:\n final_turn = MIN_ANGLE\n elif final_turn > MAX_ANGLE:\n final_turn = MAX_ANGLE\n #print(\"running\")\n wiringpi.pwmWrite(SERVO_PWM_GPIO, int(final_turn))\n print(final_turn)\n # time.sleep(0.01)\n lastError = error\n key = cv2.waitKey(1)\n if key == ord(\"q\"):\n print(\"Quitting\")\n wiringpi.pwmWrite(MOTOR_PWM_GPIO, 0)\n wiringpi.pwmWrite(SERVO_PWM_GPIO, int(CENTER))\n break\n","repo_name":"simonmandlik/eForce1","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"43106336107","text":"from openpyxl import Workbook, load_workbook\nfrom time import strptime\n\nseasonal_trends = False # False for monthly trends\n\noutput_dir = 'C:/Users/hatzv/Documents/Geography/RSTs/python/Analysis/Results/'\n\n# Prepare the worksheets for the NCEP output file\nif seasonal_trends:\n output_excel_filename_NCEP = output_dir + 'Seasonal_trends_RSTs_NCEP_1979-2016.xlsx'\nelse:\n output_excel_filename_NCEP = output_dir + 'Monthly_trends_RSTs_NCEP_1979-2016.xlsx'\nwb_NCEP_trends = Workbook()\nws_NCEP_NO_RSTs = wb_NCEP_trends.get_sheet_by_name(\"Sheet\")\nws_NCEP_NO_RSTs.title = \"NO_RSTs\"\nws_NCEP_East = wb_NCEP_trends.create_sheet(\"East\")\nws_NCEP_West = wb_NCEP_trends.create_sheet(\"West\")\nws_NCEP_Central = wb_NCEP_trends.create_sheet(\"Central\")\nws_NCEP_All = wb_NCEP_trends.create_sheet(\"All RSTs\")\n\n# Prepare the worksheets for the ERA output file\nif seasonal_trends:\n output_excel_filename_ERA = output_dir + 'Seasonal_trends_RSTs_ERA_1979-2016.xlsx'\nelse:\n output_excel_filename_ERA = output_dir + 'Monthly_trends_RSTs_ERA_1979-2016.xlsx'\nwb_ERA_trends = Workbook()\nws_ERA_NO_RSTs = wb_ERA_trends.get_sheet_by_name(\"Sheet\")\nws_ERA_NO_RSTs.title = \"NO_RSTs\"\nws_ERA_East = 
wb_ERA_trends.create_sheet(\"East\")\nws_ERA_West = wb_ERA_trends.create_sheet(\"West\")\nws_ERA_Central = wb_ERA_trends.create_sheet(\"Central\")\nws_ERA_All = wb_ERA_trends.create_sheet(\"All RSTs\")\n\n# Prepare the worksheets for the ERA_2_5 output file\nif seasonal_trends:\n output_excel_filename_ERA_2_5 = output_dir + 'Seasonal_trends_RSTs_ERA_2_5_1979-2016.xlsx'\nelse:\n output_excel_filename_ERA_2_5 = output_dir + 'Monthly_trends_RSTs_ERA_2_5_1979-2016.xlsx'\nwb_ERA_2_5_trends = Workbook()\nws_ERA_2_5_NO_RSTs = wb_ERA_2_5_trends.get_sheet_by_name(\"Sheet\")\nws_ERA_2_5_NO_RSTs.title = \"NO_RSTs\"\nws_ERA_2_5_East = wb_ERA_2_5_trends.create_sheet(\"East\")\nws_ERA_2_5_West = wb_ERA_2_5_trends.create_sheet(\"West\")\nws_ERA_2_5_Central = wb_ERA_2_5_trends.create_sheet(\"Central\")\nws_ERA_2_5_All = wb_ERA_2_5_trends.create_sheet(\"All RSTs\")\n\n# Prepare the output tables for all created sheets\ntable_NCEP_NO_RSTs = ws_NCEP_NO_RSTs['B2':'AM13']\ntable_NCEP_East = ws_NCEP_East['B2':'AM13']\ntable_NCEP_West = ws_NCEP_West['B2':'AM13']\ntable_NCEP_Central = ws_NCEP_Central['B2':'AM13']\ntable_NCEP_All = ws_NCEP_All['B2':'AM13']\n\ntable_ERA_NO_RSTs = ws_ERA_NO_RSTs['B2':'AM13']\ntable_ERA_East = ws_ERA_East['B2':'AM13']\ntable_ERA_West = ws_ERA_West['B2':'AM13']\ntable_ERA_Central = ws_ERA_Central['B2':'AM13']\ntable_ERA_All = ws_ERA_All['B2':'AM13']\n\ntable_ERA_2_5_NO_RSTs = ws_ERA_2_5_NO_RSTs['B2':'AM13']\ntable_ERA_2_5_East = ws_ERA_2_5_East['B2':'AM13']\ntable_ERA_2_5_West = ws_ERA_2_5_West['B2':'AM13']\ntable_ERA_2_5_Central = ws_ERA_2_5_Central['B2':'AM13']\ntable_ERA_2_5_All = ws_ERA_2_5_All['B2':'AM13']\n\n# Prepare the worksheets for the input classifications\nexcel_filename_NCEP = 'C:/Users/hatzv/Documents/Geography/RSTs/python/Analysis/Results/RST_classification_NCEP_1979-2016.xlsx'\nexcel_filename_ERA = 'C:/Users/hatzv/Documents/Geography/RSTs/python/Analysis/Results/RST_classification_ERA_1979-2016.xlsx'\nexcel_filename_ERA_2_5 = 'C:/Users/hatzv/Documents/Geography/RSTs/python/Analysis/Results/RST_classification_ERA_2.5_1979-2016.xlsx'\n\nwb_NCEP = load_workbook(excel_filename_NCEP, read_only=True)\nws_NCEP = wb_NCEP.active\ntable_NCEP = ws_NCEP['A2':'AM367']\n\nwb_ERA = load_workbook(excel_filename_ERA, read_only=True)\nws_ERA = wb_ERA.active\ntable_ERA = ws_ERA['A2':'AM367']\n\nwb_ERA_2_5 = load_workbook(excel_filename_ERA_2_5, read_only=True)\nws_ERA_2_5 = wb_ERA_2_5.active\ntable_ERA_2_5 = ws_ERA_2_5['A2':'AM367']\n\nfor row in range(1, ws_NCEP.max_row-1):\n for col in range(1, ws_NCEP.max_column-1):\n any_NCEP_RST = True # A flag for marking if an RST found or not\n any_ERA_RST = True # A flag for marking if an RST found or not\n any_ERA_2_5_RST = True # A flag for marking if an RST found or not\n\n # Find the output row\n current_month = strptime(table_NCEP[row][0].value, '%b').tm_mon\n if seasonal_trends:\n if current_month == 12 or current_month <=2: # DJF\n current_row = 0\n elif current_month >= 3 and current_month <=5: # MAM\n current_row = 1\n elif current_month >= 6 and current_month <= 8: # JJA\n current_row = 2\n else: # SON\n current_row = 3\n else:\n current_row = current_month - 1\n\n NCEP_value = table_NCEP[row][col].value\n ERA_value = table_ERA[row][col].value\n ERA_2_5_value = table_ERA_2_5[row][col].value\n\n if NCEP_value == \"No RST\":\n output_NCEP_table = table_NCEP_NO_RSTs\n any_NCEP_RST = False\n elif NCEP_value == \"East\":\n output_NCEP_table = table_NCEP_East\n elif NCEP_value == \"Central\":\n output_NCEP_table = table_NCEP_Central\n elif 
NCEP_value == \"West\":\n output_NCEP_table = table_NCEP_West\n\n if ERA_value == \"No RST\":\n output_ERA_table = table_ERA_NO_RSTs\n any_ERA_RST = False\n elif ERA_value == \"East\":\n output_ERA_table = table_ERA_East\n elif ERA_value == \"Central\":\n output_ERA_table = table_ERA_Central\n elif ERA_value == \"West\":\n output_ERA_table = table_ERA_West\n\n if ERA_2_5_value == \"No RST\":\n output_ERA_2_5_table = table_ERA_2_5_NO_RSTs\n any_ERA_2_5_RST = False\n elif ERA_2_5_value == \"East\":\n output_ERA_2_5_table = table_ERA_2_5_East\n elif ERA_2_5_value == \"Central\":\n output_ERA_2_5_table = table_ERA_2_5_Central\n elif ERA_2_5_value == \"West\":\n output_ERA_2_5_table = table_ERA_2_5_West\n\n # TODO: Refactor this entire ugly thing.\n if NCEP_value is not None: # Make sure not to read the None value in a non-leap year\n current_output_value_NCEP = output_NCEP_table[current_row][col-1].value\n if current_output_value_NCEP is not None:\n output_NCEP_table[current_row][col - 1].value = current_output_value_NCEP + 1\n else:\n output_NCEP_table[current_row][col - 1].value = 1\n\n if any_NCEP_RST:\n output_NCEP_table = table_NCEP_All\n current_output_value_NCEP = output_NCEP_table[current_row][col-1].value\n if current_output_value_NCEP is not None:\n output_NCEP_table[current_row][col - 1].value = current_output_value_NCEP + 1\n else:\n output_NCEP_table[current_row][col - 1].value = 1\n\n if ERA_value is not None: # Make sure not to read the None value in a non-leap year\n current_output_value_ERA = output_ERA_table[current_row][col - 1].value\n if current_output_value_ERA is not None:\n output_ERA_table[current_row][col - 1].value = current_output_value_ERA + 1\n else:\n output_ERA_table[current_row][col - 1].value = 1\n\n if any_ERA_RST:\n output_ERA_table = table_ERA_All\n current_output_value_ERA = output_ERA_table[current_row][col-1].value\n if current_output_value_ERA is not None:\n output_ERA_table[current_row][col - 1].value = current_output_value_ERA + 1\n else:\n output_ERA_table[current_row][col - 1].value = 1\n\n if ERA_2_5_value is not None: # Make sure not to read the None value in a non-leap year\n current_output_value_ERA_2_5 = output_ERA_2_5_table[current_row][col - 1].value\n if current_output_value_ERA_2_5 is not None:\n output_ERA_2_5_table[current_row][col - 1].value = current_output_value_ERA_2_5 + 1\n else:\n output_ERA_2_5_table[current_row][col - 1].value = 1\n\n if any_ERA_2_5_RST:\n output_ERA_2_5_table = table_ERA_2_5_All\n current_output_value_ERA_2_5 = output_ERA_2_5_table[current_row][col-1].value\n if current_output_value_ERA_2_5 is not None:\n output_ERA_2_5_table[current_row][col - 1].value = current_output_value_ERA_2_5 + 1\n else:\n output_ERA_2_5_table[current_row][col - 1].value = 1\n\nwb_NCEP_trends.save(output_excel_filename_NCEP)\nwb_ERA_trends.save(output_excel_filename_ERA)\nwb_ERA_2_5_trends.save(output_excel_filename_ERA_2_5)\n","repo_name":"hatzvika/RSTs","sub_path":"python/Analysis/Trends for RSTs.py","file_name":"Trends for RSTs.py","file_ext":"py","file_size_in_byte":8503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"22158929345","text":"tableData = [['apples', 'oranges','cherries', 'banana'],\n ['Alice', 'Bob', 'Carol', 'David'],\n ['dogs', 'cats', 'moose', 'goose']]\n\ndef printTable(tableData):\n sizes = []\n for row in tableData:\n bigger = 0\n for column in row:\n if len(column) > bigger:\n bigger = len(column)\n sizes.append(bigger)\n\n for i in 
range(len(tableData[0])):\n for j in range(len(tableData)):\n if j < len(tableData) - 1:\n ending = ''\n else:\n ending = '\\n'\n print(tableData[j][i].rjust(sizes[j] + 1), end=ending)\n\nprintTable(tableData)\n","repo_name":"penta8/ABSWP","sub_path":"6_chapter/tablePrinter.py","file_name":"tablePrinter.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"6695181977","text":"import requests\nfrom lxml import etree\n\ncookie_str = 'PHPSESSID=9gjffvcc82tvlla3ditlbar5bc; Hm_lvt_52c0281453695a300fbc7c3cbdeb4551=1591661182; Hm_lpvt_52c0281453695a300fbc7c3cbdeb4551=1591661222'\n# 字典生成式 生成 cookie字段\ncookies_dict = {cookie_line.split('=')[0]: cookie_line.split('=')[1] for cookie_line in cookie_str.split('; ')}\nheaders = {}\nheaders['referer'] = \"https://www.cxyxiaowu.com/suanfa-2/suanfa\"\n# headers[':path'] = \"/suanfa-2/suanfa/page/2\"\n# headers[':authority'] = \"www.cxyxiaowu.com\"\n# headers[':method'] = \"GET\"\n# headers[':scheme'] = \"scheme\"\nheaders['accept'] = \"*/*\"\nheaders['x-requested-with'] = \"XMLHttpRequest\"\nheaders[\n 'user-agent'] = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\"\nresponse = requests.get(\"https://www.cxyxiaowu.com/suanfa-2/suanfa/page/2\", headers=headers, cookies=cookies_dict)\nprint(response.content.decode())\nitem = {}\nhtml = etree.HTML(response.content.decode())\ndiv_lines = html.xpath('//*[@class=\"row posts-wrapper\"]/div')\nfor line in div_lines:\n item['title'] = line.xpath('.//article/div[2]/header/h2/a/text()')\n item['image_src'] = line.xpath('.//article/div[1]/div[1]/a/img/@data-src')\n\n print(item)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"xiaoshandegithub/spider_project","sub_path":"suanfatujie/suanfatujie/suanfa_reuqest.py","file_name":"suanfa_reuqest.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"20663475033","text":"nodes = ('A', 'B', 'C', 'D', 'E', 'F')\n\ndistances = {\n 'A': {'C': 2, 'B': 5},\n 'B': {'A': 5, 'D': 8, 'C': 7},\n 'C': {'A': 2, 'E': 8, 'D': 4, 'B': 7},\n 'D': {'B': 8, 'C': 4, 'E': 6, 'F': 4},\n 'E': {'F': 3, 'D': 6, 'C': 8},\n 'F': {'D': 4, 'E': 3}\n}\n\ndef find_route(current, end):\n unvisited = {node: float('inf') for node in nodes}\n current_distance = 0\n unvisited[current] = current_distance\n visited, parents = {}, {}\n while unvisited:\n min_vertex = min(unvisited, key=unvisited.get)\n for neighbour, distance in distances[current].items():\n if neighbour not in unvisited: \n continue\n new_distance = current_distance + distance\n if unvisited[neighbour] is float('inf') or unvisited[neighbour] > new_distance:\n unvisited[neighbour] = new_distance\n parents[neighbour] = min_vertex\n visited[current] = current_distance\n unvisited.pop(min_vertex)\n if min_vertex == end: \n break\n candidates = [node for node in unvisited.items() if node[1]]\n current, current_distance = min(candidates, key=lambda x: x[1])\n return parents, visited\n\ndef generate_path(parents, start, end):\n path = [end]\n while True:\n key = parents[path[0]]\n path.insert(0, key)\n if key == start:\n break\n return ' → '.join(path)\n\nstart, end = 'A', 'F'\nparents, visited = find_route(start, end)\npath = generate_path(parents, start, end)\nprint(f'Menor caminho: 
{path}')","repo_name":"the-akira/PythonExperimentos","sub_path":"Algoritmos/Dijkstra/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"28"} +{"seq_id":"70533337994","text":"\nimport h5py\nimport json\nimport math\nimport numpy as np\nimport os\nimport sys\nfrom tqdm import tqdm\n\nif __name__ == '__main__':\n SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))\n sys.path.insert(0, SRC_DIR)\n\nfrom directory import VCDB_DIR as DATASET_DIR\n\nVIDEO_DIR = os.path.join(DATASET_DIR, 'videos')\nFRAME_FEATURE_DIR = os.path.join(DATASET_DIR, 'frame-features')\nRMAC_FEATURE_DIR = os.path.join(FRAME_FEATURE_DIR, 'rmac')\nVIDEO_METADATA_DIR = os.path.join(DATASET_DIR, 'video-metadata')\nANNOTATION_DIR = os.path.join(DATASET_DIR, 'annotation')\n\n\ndef _convert_time_to_seconds(time_str):\n \"\"\" converts time in format hh:mm:ss to time in seconds \"\"\"\n hours, minutes, seconds = np.array(time_str.split(':'), dtype=int)\n return hours * 3600 + minutes * 60 + seconds\n\n\ndef _update_video_extension(video_id, downloaded_video_ids, downloaded_videos):\n \"\"\" update the video extension to the extension of the downloaded videos \"\"\"\n idx = downloaded_video_ids.index(video_id)\n return downloaded_videos[idx]\n\n\ndef _read_raw_annotation_file(annotation_file):\n \"\"\" read all annotations files and update them \"\"\"\n with open(annotation_file, 'r') as f:\n annotations = f.readlines()\n\n downloaded_videos = os.listdir(VIDEO_DIR)\n downloaded_video_ids = ['.'.join(video.split('.')[:-1]) for video in downloaded_videos]\n\n annotations = [annotation.strip().split(',') for annotation in annotations]\n anchor_videos, positive_videos, anchor_intervals, positive_intervals = [], [], [], []\n category = (os.path.split(annotation_file)[-1])[:-4]\n for annotation in annotations:\n anchor_video, positive_video = str(annotation[0]), str(annotation[1])\n anchor_video = '.'.join([category, anchor_video])\n positive_video = '.'.join([category, positive_video])\n\n # update video extension\n anchor_video = _update_video_extension(video_id=anchor_video[:-4], downloaded_video_ids=downloaded_video_ids,\n downloaded_videos=downloaded_videos)\n positive_video = _update_video_extension(video_id=positive_video[:-4], downloaded_video_ids=downloaded_video_ids,\n downloaded_videos=downloaded_videos)\n\n # convert video time string to seconds\n anchor_start, anchor_end = _convert_time_to_seconds(annotation[2]), _convert_time_to_seconds(annotation[3])\n positive_start, positive_end = _convert_time_to_seconds(annotation[4]), _convert_time_to_seconds(annotation[5])\n\n anchor_videos.append(anchor_video)\n positive_videos.append(positive_video)\n anchor_intervals.append([int(anchor_start), int(anchor_end)])\n positive_intervals.append([int(positive_start), int(positive_end)])\n return anchor_videos, positive_videos, anchor_intervals, positive_intervals\n\n\ndef _read_raw_annotations():\n \"\"\" read all raw annotations files and update the values \"\"\"\n raw_annotations_files = sorted(os.listdir(ANNOTATION_DIR))\n raw_annotations_files = [os.path.join(ANNOTATION_DIR, file) for file in raw_annotations_files]\n all_anchor_videos, all_positive_videos, all_anchor_intervals, all_positive_intervals = [], [], [], []\n\n for annotation_file in raw_annotations_files:\n anchor_videos, positive_videos, anchor_intervals, positive_intervals = \\\n 
_read_raw_annotation_file(annotation_file=annotation_file)\n all_anchor_videos += anchor_videos\n all_positive_videos += positive_videos\n all_anchor_intervals += anchor_intervals\n all_positive_intervals += positive_intervals\n\n all_anchor_videos = np.array(all_anchor_videos, dtype=str)\n all_positive_videos = np.array(all_positive_videos, dtype=str)\n all_anchor_intervals = np.array(all_anchor_intervals, dtype=int)\n all_positive_intervals = np.array(all_positive_intervals, dtype=int)\n return all_anchor_videos, all_positive_videos, all_anchor_intervals, all_positive_intervals\n\n\ndef get_overlap_annotations():\n \"\"\"\n process each overlap annotation by creating a json dictionary of the overlaps between video pairs, because each\n video pair can have multiple overlapping intervals\n annotation[video1.video2] = {\n 'anchor-intervals': [[a-start1, a-end1], ... [a-startn, a-endn]]\n 'positive-intervals': [[p-start1, p-end1], ... [p-startn, p-endn]]\n }\n \"\"\"\n print('INFO: retrieving vcdb overlap annotations...')\n overlap_annotations = {}\n\n def update_annotation_dict(video, other_video, interval, other_interval):\n annotation_key = '.'.join([video, other_video])\n if annotation_key in overlap_annotations.keys():\n overlap_annotations[annotation_key]['anchor-intervals'].append(interval)\n overlap_annotations[annotation_key]['positive-intervals'].append(other_interval)\n else:\n overlap_annotations[annotation_key] = {}\n overlap_annotations[annotation_key]['anchor-intervals'] = [interval]\n overlap_annotations[annotation_key]['positive-intervals'] = [other_interval]\n\n all_anchor_videos, all_positive_videos, all_anchor_intervals, all_positive_intervals = _read_raw_annotations()\n num_copied = len(all_anchor_videos)\n for i in range(num_copied):\n anchor_video = all_anchor_videos[i]\n positive_video = all_positive_videos[i]\n if anchor_video == positive_video:\n continue\n\n anchor_interval = all_anchor_intervals[i].tolist()\n positive_interval = all_positive_intervals[i].tolist()\n\n update_annotation_dict(video=anchor_video, other_video=positive_video,\n interval=anchor_interval, other_interval=positive_interval)\n update_annotation_dict(video=positive_video, other_video=anchor_video,\n interval=positive_interval, other_interval=anchor_interval)\n\n def get_updated_video_intervals(video_name, intervals):\n \"\"\" Update the overlap intervals from seconds to indices of the sampled frames.\n\n 
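A worked example (illustrative numbers, not from the original docstring): with\n fps = 25, frame_sample_period = max(round(25), 1) = 25, so an overlap of\n [10, 12] seconds maps to the sampled-frame interval [10, 12], since\n floor(10 * 25) // 25 == 10 and ceil(ceil(12 * 25) / 25) == 12 (the end is\n additionally capped at num_sampled_frames).\n 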
\"\"\"\n metadata_file = os.path.join(VIDEO_METADATA_DIR, video_name + '.json')\n with open(metadata_file, 'r') as f:\n video_metadata = json.load(f)\n fps = video_metadata['fps']\n frame_sample_period = max(round(fps), 1) # handle the case where round() returns 0\n num_frames = video_metadata['n-frames']\n num_sampled_frames = int(math.ceil(num_frames / frame_sample_period))\n\n # check that the sampled frame code is correct\n video_rmac_file = os.path.join(RMAC_FEATURE_DIR, video_name + '.hdf5')\n with h5py.File(video_rmac_file, 'r') as f:\n assert num_sampled_frames == len(f['rmac-features'])\n\n updated_intervals = []\n for interval in intervals:\n start, end = interval\n start_frame = int(math.floor(start * fps))\n end_frame = int(math.ceil(end * fps))\n\n updated_start = start_frame // frame_sample_period\n assert updated_start < num_sampled_frames\n updated_end = min(int(math.ceil(end_frame / frame_sample_period)), num_sampled_frames)\n updated_interval = [updated_start, updated_end]\n updated_intervals.append(updated_interval)\n return updated_intervals\n\n # update video intervals according to the overlaps sampled during rmac feature creation.\n for key, annotation in tqdm(overlap_annotations.items()):\n annotation_details = key.split('.')\n assert len(annotation_details) == 6, '3 parts for each video'\n anchor_video = '.'.join(annotation_details[:3])\n positive_video = '.'.join(annotation_details[3:])\n annotation['anchor-intervals'] = get_updated_video_intervals(video_name=anchor_video,\n intervals=annotation['anchor-intervals'])\n annotation['positive-intervals'] = get_updated_video_intervals(video_name=positive_video,\n intervals=annotation['positive-intervals'])\n return overlap_annotations\n","repo_name":"Kennard123661/VRAG","sub_path":"src/data/datasets/vcdb.py","file_name":"vcdb.py","file_ext":"py","file_size_in_byte":8153,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"15436775367","text":"# from twilio.rest import TwilioRestClient\nimport requests\n\n# account_sid = \" \"\n# auth_token = \" \"\n#\n# ourNumber = \" \"\nrequestParams = {\n \"method\": \"getQuote\",\n \"key\": \"457653\",\n \"format\": \"json\",\n \"lang\": \"en\"\n}\nurl = \"http://api.forismatic.com/api/1.0/\"\n\nrequestToApi = requests.post(url, params=requestParams) # Requests the qoute from the API\njson = requestToApi.json() # This grabs the data from the response from API\nprint(json)\nfinishedQuote = json['quoteText'] + \" -\" + json['quoteAuthor'] # The finished quote!\n\nprint(finishedQuote)","repo_name":"philiphyx/Scraper","sub_path":"Quote-Generator.py","file_name":"Quote-Generator.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"11643264734","text":"#!/usr/bin/env python\nimport tensorflow as tf\ntry:\n import bequick\nexcept ImportError:\n import sys\n import os\n sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))\nfrom bequick.tf_utils import random_uniform_matrix\ntry:\n from .tb_parser import Parser\nexcept (ValueError, SystemError) as e:\n from tb_parser import Parser\n\ntf.set_random_seed(1234)\n\n\ndef initialize_word_embeddings(session, form_emb, indices, matrix):\n _indices = [tf.to_int32(i) for i in indices]\n session.run(tf.scatter_update(form_emb, _indices, matrix))\n\n\ndef unpack_inputs(inputs):\n form = [_[0] for _ in inputs]\n pos = [_[1] for _ in inputs]\n deprel = [_[2] for _ 
in inputs]\n return form, pos, deprel\n\n\nclass Network(object):\n def __init__(self, form_size, form_dim, pos_size, pos_dim, deprel_size, deprel_dim, hidden_dim, output_dim,\n dropout, l2):\n self.form_size = form_size\n self.form_dim = form_dim\n self.pos_size = pos_size\n self.pos_dim = pos_dim\n self.deprel_size = deprel_size\n self.deprel_dim = deprel_dim\n self.hidden_dim = hidden_dim\n self.output_dim = output_dim\n\n # DIMENSIONS\n self.form_features_dim = len(Parser.FORM_NAMES) * self.form_dim\n self.pos_features_dim = len(Parser.POS_NAMES) * self.pos_dim\n self.deprel_features_dim = len(Parser.DEPREL_NAMES) * self.deprel_dim\n self.input_dim = self.form_features_dim + self.pos_features_dim + self.deprel_features_dim\n\n # CONFIG\n self.dropout = dropout\n self.l2 = l2\n\n # INPUT\n self.form = tf.placeholder(tf.int32, shape=(None, len(Parser.FORM_NAMES), ), name=\"form_i\")\n self.pos = tf.placeholder(tf.int32, shape=(None, len(Parser.POS_NAMES), ), name=\"pos_i\")\n self.deprel = tf.placeholder(tf.int32, shape=(None, len(Parser.DEPREL_NAMES), ), name=\"deprel_i\")\n\n\nclass Classifier(Network):\n def __init__(self, form_size, form_dim, pos_size, pos_dim, deprel_size, deprel_dim, hidden_dim, output_dim,\n dropout, l2):\n super(Classifier, self).__init__(form_size, form_dim, pos_size, pos_dim, deprel_size, deprel_dim, hidden_dim,\n output_dim, dropout, l2)\n self.output = tf.placeholder(tf.int32, shape=(None, ), name=\"y_o\")\n\n # EMBEDDING in CPU\n with tf.device(\"/cpu:0\"), tf.name_scope(\"embedding\"):\n self.form_emb = tf.Variable(random_uniform_matrix(self.form_size, self.form_dim), name=\"form_emb\")\n self.pos_emb = tf.Variable(random_uniform_matrix(self.pos_size, self.pos_dim), name=\"pos_emb\")\n self.deprel_emb = tf.Variable(random_uniform_matrix(self.deprel_size, self.deprel_dim), name=\"deprel_emb\")\n inputs = tf.concat(1, [\n tf.reshape(tf.nn.embedding_lookup(self.form_emb, self.form), [-1, self.form_features_dim]),\n tf.reshape(tf.nn.embedding_lookup(self.pos_emb, self.pos), [-1, self.pos_features_dim]),\n tf.reshape(tf.nn.embedding_lookup(self.deprel_emb, self.deprel), [-1, self.deprel_features_dim])\n ])\n\n # PARAMS\n self.W0 = tf.Variable(random_uniform_matrix(self.input_dim, self.hidden_dim), name=\"W0\")\n self.b0 = tf.Variable(tf.zeros([self.hidden_dim]), name=\"b0\")\n self.W1 = tf.Variable(random_uniform_matrix(self.hidden_dim, self.output_dim), name=\"W1\")\n self.b1 = tf.Variable(tf.zeros([self.output_dim]), name=\"b1\")\n\n # PREDICTION\n hidden_layer = tf.nn.relu(tf.add(tf.matmul(inputs, self.W0), self.b0))\n self.prediction = tf.add(tf.matmul(hidden_layer, self.W1), self.b1)\n\n # LOSS\n regularizer = tf.nn.l2_loss(self.W0) + tf.nn.l2_loss(self.b0) + tf.nn.l2_loss(self.W1) + tf.nn.l2_loss(self.b1)\n if self.dropout > 0:\n hidden_layer2 = tf.nn.dropout(hidden_layer, self.dropout)\n else:\n hidden_layer2 = hidden_layer\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n tf.add(tf.matmul(hidden_layer2, self.W1), self.b1), self.output)) + l2 * regularizer\n self.optimization = tf.train.AdagradOptimizer(learning_rate=0.1).minimize(self.loss)\n\n def train(self, session, inputs, outputs):\n form, pos, deprel = inputs\n _, cost = session.run([self.optimization, self.loss],\n feed_dict={self.form: form, self.pos: pos, self.deprel: deprel, self.output: outputs})\n return cost\n\n def classify(self, session, inputs):\n form, pos, deprel = inputs\n prediction = session.run(self.prediction, feed_dict={self.form: form, self.pos: pos, 
self.deprel: deprel})\n return prediction\n\n\nclass DeepQNetwork(Network):\n def __init__(self, form_size, form_dim, pos_size, pos_dim, deprel_size, deprel_dim, hidden_dim, output_dim, dropout,\n l2):\n super(DeepQNetwork, self).__init__(form_size, form_dim, pos_size, pos_dim, deprel_size, deprel_dim, hidden_dim,\n output_dim, dropout, l2)\n self.output = tf.placeholder(tf.float32, shape=(None, ), name=\"y_o\")\n\n # EMBEDDING in CPU\n with tf.device(\"/cpu:0\"), tf.name_scope(\"embedding\"):\n self.form_emb = tf.Variable(random_uniform_matrix(self.form_size, self.form_dim), name=\"form_emb\")\n self.pos_emb = tf.Variable(random_uniform_matrix(self.pos_size, self.pos_dim), name=\"pos_emb\")\n self.deprel_emb = tf.Variable(random_uniform_matrix(self.deprel_size, self.deprel_dim), name=\"deprel_emb\")\n inputs = tf.concat(1, [\n tf.reshape(tf.nn.embedding_lookup(self.form_emb, self.form), [-1, self.form_features_dim]),\n tf.reshape(tf.nn.embedding_lookup(self.pos_emb, self.pos), [-1, self.pos_features_dim]),\n tf.reshape(tf.nn.embedding_lookup(self.deprel_emb, self.deprel), [-1, self.deprel_features_dim])\n ])\n\n # according to minh et al (2015), target network is initialized as zero\n self.tgt_form_emb = tf.Variable(tf.zeros((self.form_size, self.form_dim)), name=\"tgt_form_emb\")\n self.tgt_pos_emb = tf.Variable(tf.zeros((self.pos_size, self.pos_dim)), name=\"tgt_pos_emb\")\n self.tgt_deprel_emb = tf.Variable(tf.zeros((self.deprel_size, self.deprel_dim)), name=\"tgt_deprel_emb\")\n tgt_inputs = tf.concat(1, [\n tf.reshape(tf.nn.embedding_lookup(self.tgt_form_emb, self.form), [-1, self.form_features_dim]),\n tf.reshape(tf.nn.embedding_lookup(self.tgt_pos_emb, self.pos), [-1, self.pos_features_dim]),\n tf.reshape(tf.nn.embedding_lookup(self.tgt_deprel_emb, self.deprel), [-1, self.deprel_features_dim])\n ])\n\n # PARAMS\n self.W0 = tf.Variable(random_uniform_matrix(self.input_dim, self.hidden_dim), name=\"W0\")\n self.b0 = tf.Variable(tf.zeros([self.hidden_dim]), name=\"b0\")\n self.W1 = tf.Variable(random_uniform_matrix(self.hidden_dim, self.output_dim), name=\"W1\")\n self.b1 = tf.Variable(tf.zeros([self.output_dim]), name=\"b1\")\n\n self.tgt_W0 = tf.Variable(tf.zeros((self.input_dim, self.hidden_dim)), name=\"tgt_W0\")\n self.tgt_b0 = tf.Variable(tf.zeros([self.hidden_dim]), name=\"tgt_b0\")\n self.tgt_W1 = tf.Variable(tf.zeros((self.hidden_dim, self.output_dim)), name=\"tgt_W1\")\n self.tgt_b1 = tf.Variable(tf.zeros([self.output_dim]), name=\"tgt_b1\")\n\n # PARAM SYNC\n self.update_op = [\n self.tgt_form_emb.assign(self.form_emb),\n self.tgt_pos_emb.assign(self.pos_emb),\n self.tgt_deprel_emb.assign(self.deprel_emb),\n self.tgt_W0.assign(self.W0),\n self.tgt_b0.assign(self.b0),\n self.tgt_W1.assign(self.W1),\n self.tgt_b1.assign(self.b1)\n ]\n\n # MLP\n hidden_layer = tf.nn.relu(tf.add(tf.matmul(inputs, self.W0), self.b0))\n self.q_function = tf.add(tf.matmul(hidden_layer, self.W1), self.b1)\n\n tgt_hidden_layer = tf.nn.relu(tf.add(tf.matmul(tgt_inputs, self.tgt_W0), self.tgt_b0))\n self.tgt_q_function = tf.add(tf.matmul(tgt_hidden_layer, self.tgt_W1), self.tgt_b1)\n\n # LOSS\n if self.dropout > 0:\n hidden_layer2 = tf.nn.dropout(hidden_layer, self.dropout)\n else:\n hidden_layer2 = hidden_layer\n self.action = tf.placeholder(tf.int32, shape=(None, ), name=\"action\")\n actions_one_hot = tf.one_hot(self.action, self.output_dim, 1.0, 0.0, name='action_one_hot')\n predicted_q = tf.reduce_sum(tf.add(tf.matmul(hidden_layer2, self.W1), self.b1) * actions_one_hot,\n reduction_indices=1)\n 
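# Editor's note (added comment, not in the original source): the one-hot mask\n # keeps only Q(s, a) for the action actually taken in each sample, so the\n # squared TD loss below compares one scalar per transition against the\n # bootstrapped target passed in via self.output.\n 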
regularizer = tf.nn.l2_loss(self.W0) + tf.nn.l2_loss(self.b0) + tf.nn.l2_loss(self.W1) + tf.nn.l2_loss(self.b1)\n\n self.loss = tf.reduce_mean(tf.square(predicted_q - self.output)) + l2 * regularizer\n # self.optimization = tf.train.RMSPropOptimizer(learning_rate=0.00025, momentum=0.95).minimize(self.loss)\n self.optimization = tf.train.AdamOptimizer().minimize(self.loss)\n\n def train(self, session, inputs, action, outputs):\n form, pos, deprel = inputs\n _, cost = session.run([self.optimization, self.loss], feed_dict={\n self.form: form, self.pos: pos, self.deprel: deprel, self.action: action, self.output: outputs})\n return cost\n\n def policy(self, session, inputs):\n form, pos, deprel = inputs\n return session.run(self.q_function, feed_dict={self.form: form, self.pos: pos, self.deprel: deprel})\n\n def target_policy(self, session, inputs):\n form, pos, deprel = inputs\n return session.run(self.tgt_q_function, feed_dict={self.form: form, self.pos: pos, self.deprel: deprel})\n\n def update_target(self, session):\n session.run(self.update_op)\n\n def classify(self, session, inputs):\n return self.policy(session, inputs)\n","repo_name":"Oneplus/bequick","sub_path":"chen2014/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9981,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"28"} +{"seq_id":"28342722948","text":"\n# From https://www.tensorflow.org/tutorials/load_data/images\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\ndef load_data_sets(data_dir, img_height, img_width, batch_size):\n train_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir + \"/train\",\n label_mode=\"categorical\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n val_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir + \"/val\",\n label_mode=\"categorical\",\n\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n return train_ds, val_ds\n\ndata_augmentation = keras.Sequential(\n [\n layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n layers.experimental.preprocessing.RandomRotation(0.1),\n ]\n)\n\ndef load_model(model_name, num_classes):\n if model_name == \"xception\":\n base_model = keras.applications.Xception(\n weights=\"imagenet\", # Load weights pre-trained on ImageNet.\n input_shape=(300, 300, 3),\n include_top=False,\n ) # Do not include the ImageNet classifier at the top.\n\n # Freeze the base_model\n base_model.trainable = False\n\n # Create new model on top\n inputs = keras.Input(shape=(300, 300, 3))\n # x = data_augmentation(inputs) # Apply random data augmentation\n\n # Pre-trained Xception weights requires that input be normalized\n # from (0, 255) to a range (-1., +1.), the normalization layer\n # does the following, outputs = (inputs - mean) / sqrt(var)\n norm_layer = keras.layers.experimental.preprocessing.Normalization()\n mean = np.array([127.5] * 3)\n var = mean ** 2\n # Scale inputs to [-1, +1]\n #x = norm_layer(x)\n x = norm_layer(inputs)\n norm_layer.set_weights([mean, var])\n\n # The base model contains batchnorm layers. 
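(Editorial addition: BatchNorm\n # layers track moving mean/variance statistics that must stay frozen here.)\n # 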
We want to keep them in inference mode\n # when we unfreeze the base model for fine-tuning, so we make sure that the\n # base_model is running in inference mode here.\n x = base_model(x, training=False)\n x = keras.layers.GlobalAveragePooling2D()(x)\n x = keras.layers.Dropout(0.2)(x) # Regularize with dropout\n outputs = keras.layers.Dense(num_classes)(x)\n model = keras.Model(inputs, outputs)\n return model\n else:\n raise ValueError(f\"Unsupported model name {model_name}\")\n\n","repo_name":"mengyujackson121/Monkey_Species_Transfer_Learning","sub_path":"keras_transfer_learn.py","file_name":"keras_transfer_learn.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"2592969556","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pylab as plt\n\ndef mean_finder(provideData, user):\n \n '''Accepts a user from the identifier class (UserProfile) and returns means of the same applicant for live births accross years'''\n \n means = []\n early_years = list(range(2007,2011))\n late_years = list(range(2011, 2015))\n main_var1 = user.main_variable_early()\n main_var2 = user.main_variable_late()\n \n for year in early_years:\n year_data = provideData[year]\n temp = year_data[main_var1]\n means.append(temp.astype(float).mean())\n\n for year in late_years:\n year_data = provideData[year]\n temp = year_data[main_var2]\n means.append(temp.astype(float).mean())\n\n means = pd.Series(means, name = user.categorizer()[1]+ ' ' + user.age_grouper()[1] )\n return means\n\n\ndef clinic_finder(clinic_name, user, provideData):\n early_years = list(range(2007,2011)) \n late_years = list(range(2011, 2015))\n main_var1 = user.main_variable_early()\n main_var2 = user.main_variable_late()\n clinic_data = []\n for year in early_years:\n year_data = provideData[year]\n temp_row = year_data.loc[year_data['PrevClinName1'] == clinic_name]\n if temp_row.shape[0] == 0:\n clinic_data.append(np.nan)\n \n elif temp_row.shape[0] == 1:\n clinic_data.append(temp_row[main_var1])\n \n else: \n temp_row = temp_row.loc[temp_row['ClinStateCode'] == user.state]\n clinic_data.append(temp_row[main_var1])\n \n for year in late_years:\n year_data = provideData[year]\n temp_row = year_data.loc[year_data['PrevClinName1'] == clinic_name]\n \n if temp_row.shape[0] == 0:\n clinic_data.append(np.nan)\n elif temp_row.shape[0] == 1:\n clinic_data.append(temp_row[main_var2])\n \n else: \n temp_row = temp_row.loc[temp_row['ClinStateCode'] == user.state]\n clinic_data.append(temp_row[main_var2])\n \n \n clinic_data = pd.Series(clinic_data, name = user.categorizer()[1]+ ' ' + user.age_grouper()[1] )\n return clinic_data\n\n\ndef plotter(chosen_clinic_name, user, provideData): \n fig = plt.figure(figsize= (16,10))\n fig.subplots_adjust(bottom=0.15)\n\n plt.ylabel('Average live births per 100', fontsize = 20)\n plt.xlabel('Time', fontsize = 20)\n plt.title('Average live birth over time for ' + mean_finder(provideData, user).name, fontsize = 20)\n plt.plot(mean_finder(provideData, user), 'o-', linewidth = 4, label = 'national average', markersize = 10, color = 'pink')\n plt.xlim(-1,mean_finder(provideData, user).size)\n plt.ylim(-5,105)\n plt.xticks(range(mean_finder(provideData, user).size), list(map(str,range(2007, 2015))), fontsize = 15 )\n plt.yticks(fontsize = 15)\n plt.plot([mean_finder(provideData, user).size-1],clinic_finder(chosen_clinic_name, user, provideData).iloc[-1], '*', markersize= 30, \n color 
='yellow', label='_nolegend_' )\n plt.annotate('Latest data for ' + chosen_clinic_name, \n xy= (mean_finder(provideData, user).size-1, clinic_finder(chosen_clinic_name, user, provideData).iloc[-1]), \n xytext=(mean_finder(provideData, user).size-4, clinic_finder(chosen_clinic_name, user, provideData).iloc[-1]+10), \n arrowprops=dict(facecolor=[0,.4,.7], shrink=0.05, width = 1, headwidth = 5),color= [0,.4,.7])\n plt.plot(clinic_finder(chosen_clinic_name, user, provideData), 'o-', color= [0,.4,.7], \n label = chosen_clinic_name) \n plt.legend(loc = 2)\n if pd.isnull(clinic_finder(chosen_clinic_name, user, provideData)).any() :\n fig.text(0.03,.05,'Information about '+ chosen_clinic_name + ' is missing for some years', color =[0,.4,.7] , fontsize = 15 )\n plt.show()","repo_name":"ds-ga-1007/final_project","sub_path":"IVF_Analysis/NationalComparissonTime.py","file_name":"NationalComparissonTime.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"657409644","text":"import os\nimport logging\nimport numpy as np\nimport time\nimport typing\n\nfrom smac.configspace import Configuration\nfrom smac.epm.rf_with_instances import RandomForestWithInstances\nfrom smac.initial_design.initial_design import InitialDesign\nfrom smac.intensification.abstract_racer import AbstractRacer\nfrom smac.optimizer import pSMAC\nfrom smac.optimizer.acquisition import AbstractAcquisitionFunction\nfrom smac.optimizer.random_configuration_chooser import ChooserNoCoolDown, RandomConfigurationChooser\nfrom smac.optimizer.ei_optimization import AcquisitionFunctionMaximizer\nfrom smac.optimizer.epm_configuration_chooser import EPMChooser\nfrom smac.runhistory.runhistory import RunHistory\nfrom smac.runhistory.runhistory2epm import AbstractRunHistory2EPM\nfrom smac.scenario.scenario import Scenario\nfrom smac.stats.stats import Stats\nfrom smac.tae.execute_ta_run import FirstRunCrashedException\nfrom smac.utils.io.traj_logging import TrajLogger\nfrom smac.utils.validate import Validator\n\n__author__ = \"Aaron Klein, Marius Lindauer, Matthias Feurer\"\n__copyright__ = \"Copyright 2015, ML4AAD\"\n__license__ = \"3-clause BSD\"\n\n\nclass SMBO(object):\n \"\"\"Interface that contains the main Bayesian optimization loop\n\n Attributes\n ----------\n logger\n incumbent\n scenario\n config_space\n stats\n initial_design\n runhistory\n intensifier\n num_run\n rng\n initial_design_configs\n epm_chooser\n \"\"\"\n\n def __init__(self,\n scenario: Scenario,\n stats: Stats,\n initial_design: InitialDesign,\n runhistory: RunHistory,\n runhistory2epm: AbstractRunHistory2EPM,\n intensifier: AbstractRacer,\n num_run: int,\n model: RandomForestWithInstances,\n acq_optimizer: AcquisitionFunctionMaximizer,\n acquisition_func: AbstractAcquisitionFunction,\n rng: np.random.RandomState,\n restore_incumbent: Configuration = None,\n random_configuration_chooser: typing.Union[RandomConfigurationChooser] = ChooserNoCoolDown(2.0),\n predict_x_best: bool = True,\n min_samples_model: int = 1):\n \"\"\"\n Interface that contains the main Bayesian optimization loop\n\n Parameters\n ----------\n scenario: smac.scenario.scenario.Scenario\n Scenario object\n stats: Stats\n statistics object with configuration budgets\n initial_design: InitialDesign\n initial sampling design\n runhistory: RunHistory\n runhistory with all runs so far\n runhistory2epm : AbstractRunHistory2EPM\n Object that implements the AbstractRunHistory2EPM to convert runhistory\n data 
into EPM data\n intensifier: Intensifier\n intensification of new challengers against incumbent configuration\n (probably with some kind of racing on the instances)\n num_run: int\n id of this run (used for pSMAC)\n model: RandomForestWithInstances\n empirical performance model (right now, we support only RandomForestWithInstances)\n acq_optimizer: AcquisitionFunctionMaximizer\n Optimizer of acquisition function.\n acquisition_func: AcquisitionFunction\n Object that implements the AbstractAcquisitionFunction (i.e., infill criterion for acq_optimizer)\n restore_incumbent: Configuration\n incumbent to be used from the start. ONLY used to restore states.\n rng: np.random.RandomState\n Random number generator\n random_configuration_chooser\n Chooser for random configuration -- one of\n * ChooserNoCoolDown(modulus)\n * ChooserLinearCoolDown(start_modulus, modulus_increment, end_modulus)\n predict_x_best: bool\n Choose x_best for computing the acquisition function via the model instead of via the observations.\n min_samples_model: int\n Minimum number of samples to build a model\n \"\"\"\n\n self.logger = logging.getLogger(\n self.__module__ + \".\" + self.__class__.__name__)\n self.incumbent = restore_incumbent\n\n self.scenario = scenario\n self.config_space = scenario.cs # type: ignore[attr-defined] # noqa F821\n self.stats = stats\n self.initial_design = initial_design\n self.runhistory = runhistory\n self.intensifier = intensifier\n self.num_run = num_run\n self.rng = rng\n\n self.initial_design_configs = [] # type: typing.List[Configuration]\n\n # initialize the chooser to get configurations from the EPM\n self.epm_chooser = EPMChooser(scenario=scenario,\n stats=stats,\n runhistory=runhistory,\n runhistory2epm=runhistory2epm,\n model=model,\n acq_optimizer=acq_optimizer,\n acquisition_func=acquisition_func,\n rng=rng,\n restore_incumbent=restore_incumbent,\n random_configuration_chooser=random_configuration_chooser,\n predict_x_best=predict_x_best,\n min_samples_model=min_samples_model)\n\n def start(self, output_dir: str = \"\", output_seed: int = 0) -> None:\n \"\"\"Starts the Bayesian Optimization loop.\n Detects whether the optimization is restored from a previous state.\n \"\"\"\n self.stats.start_timing()\n\n # Initialization, depends on input\n if self.stats.ta_runs == 0 and self.incumbent is None:\n self.logger.info('Running initial design')\n # Intensifier initialization\n self.initial_design_configs = self.initial_design.select_configurations(output_dir=output_dir, output_seed=output_seed)\n\n # to be on the safe side, never return an empty list of initial configs\n if not self.initial_design_configs:\n self.initial_design_configs = [self.config_space.get_default_configuration()]\n\n elif self.stats.ta_runs > 0 and self.incumbent is None:\n raise ValueError(\"According to stats there have been runs performed, \"\n \"but the optimizer cannot detect an incumbent. Did \"\n \"you set the incumbent (e.g. after restoring state)?\")\n elif self.stats.ta_runs == 0 and self.incumbent is not None:\n raise ValueError(\"An incumbent is specified, but there are no runs \"\n \"recorded in the Stats-object. If you're restoring \"\n \"a state, please provide the Stats-object.\")\n else:\n # Restoring state!\n self.logger.info(\"State Restored! 
Starting optimization with \"\n \"incumbent %s\", self.incumbent)\n self.logger.info(\"State restored with following budget:\")\n self.stats.print_stats()\n\n def run(self, output_dir=\"\", output_seed=0) -> Configuration:\n \"\"\"Runs the Bayesian optimization loop\n\n Returns\n -------\n incumbent: Configuration\n The best found configuration\n \"\"\"\n self.start(output_dir=output_dir, output_seed=output_seed)\n\n # Main BO loop\n while True:\n if self.scenario.shared_model: # type: ignore[attr-defined] # noqa F821\n pSMAC.read(run_history=self.runhistory,\n output_dirs=self.scenario.input_psmac_dirs, # type: ignore[attr-defined] # noqa F821\n configuration_space=self.config_space,\n logger=self.logger)\n\n start_time = time.time()\n\n # sample next configuration for intensification\n # Initial design runs are also included in the BO loop now.\n challenger, new_challenger = self.intensifier.get_next_challenger(\n challengers=self.initial_design_configs,\n chooser=self.epm_chooser,\n run_history=self.runhistory,\n repeat_configs=self.intensifier.repeat_configs,\n output_dir=output_dir,\n output_seed=output_seed\n )\n\n end = time.time()\n \n # append the per-iteration configuration-picking time; a context manager makes sure the file is closed\n with open(f'{output_dir}/smac_pick_time_{output_seed}.csv', 'a') as f:\n f.write(str(end - start_time) + \"\\n\")\n \n # remove config from initial design challengers to not repeat it again\n self.initial_design_configs = [c for c in self.initial_design_configs if c != challenger]\n\n # update timebound only if a 'new' configuration is sampled as the challenger\n if self.intensifier.num_run == 0:\n time_spent = time.time() - start_time\n time_left = self._get_timebound_for_intensification(time_spent, update=False)\n self.logger.debug('New intensification time bound: %f', time_left)\n else:\n old_time_left = time_left\n time_spent = time_spent + (time.time() - start_time)\n time_left = self._get_timebound_for_intensification(time_spent, update=True)\n self.logger.debug('Updated intensification time bound from %f to %f', old_time_left, time_left)\n\n if challenger:\n\n try:\n self.incumbent, inc_perf = self.intensifier.eval_challenger(\n challenger=challenger,\n incumbent=self.incumbent,\n run_history=self.runhistory,\n time_bound=max(self.intensifier._min_time, time_left))\n\n except FirstRunCrashedException:\n if self.scenario.abort_on_first_run_crash: # type: ignore[attr-defined] # noqa F821\n raise\n if self.scenario.shared_model: # type: ignore[attr-defined] # noqa F821\n assert self.scenario.output_dir_for_this_run is not None # please mypy\n pSMAC.write(run_history=self.runhistory,\n output_directory=self.scenario.output_dir_for_this_run, # type: ignore[attr-defined] # noqa F821\n logger=self.logger)\n\n self.logger.debug(\"Remaining budget: %f (wallclock), %f (ta costs), %f (target runs)\" % (\n self.stats.get_remaing_time_budget(),\n self.stats.get_remaining_ta_budget(),\n self.stats.get_remaining_ta_runs()))\n\n if self.stats.is_budget_exhausted():\n break\n\n # print stats at the end of each intensification iteration\n if self.intensifier.iteration_done:\n self.stats.print_stats(debug_out=True)\n\n return self.incumbent\n\n def validate(self,\n config_mode: typing.Union[str, typing.List[Configuration]] = 'inc',\n instance_mode: typing.Union[str, typing.List[str]] = 'train+test',\n repetitions: int = 1,\n use_epm: bool = False,\n n_jobs: int = -1,\n backend: str = 'threading') -> RunHistory:\n \"\"\"Create validator-object and run validation, using\n scenario-information, runhistory from smbo and tae_runner from intensify\n\n Parameters\n 
----------\n config_mode: str or list\n string or directly a list of Configuration\n str from [def, inc, def+inc, wallclock_time, cpu_time, all]\n time evaluates at cpu- or wallclock-timesteps of:\n [max_time/2^0, max_time/2^1, max_time/2^3, ..., default]\n with max_time being the highest recorded time\n instance_mode: string\n what instances to use for validation, from [train, test, train+test]\n repetitions: int\n number of repetitions in nondeterministic algorithms (in\n deterministic will be fixed to 1)\n use_epm: bool\n whether to use an EPM instead of evaluating all runs with the TAE\n n_jobs: int\n number of parallel processes used by joblib\n\n Returns\n -------\n runhistory: RunHistory\n runhistory containing all specified runs\n \"\"\"\n if isinstance(config_mode, str):\n assert self.scenario.output_dir_for_this_run is not None # Please mypy\n traj_fn = os.path.join(self.scenario.output_dir_for_this_run, \"traj_aclib2.json\")\n trajectory = (\n TrajLogger.read_traj_aclib_format(fn=traj_fn, cs=self.config_space)\n ) # type: typing.Optional[typing.List[typing.Dict[str, typing.Union[float, int, Configuration]]]]\n else:\n trajectory = None\n if self.scenario.output_dir_for_this_run:\n new_rh_path = os.path.join(self.scenario.output_dir_for_this_run, \"validated_runhistory.json\") # type: typing.Optional[str] # noqa E501\n else:\n new_rh_path = None\n\n validator = Validator(self.scenario, trajectory, self.rng)\n if use_epm:\n new_rh = validator.validate_epm(config_mode=config_mode,\n instance_mode=instance_mode,\n repetitions=repetitions,\n runhistory=self.runhistory,\n output_fn=new_rh_path)\n else:\n new_rh = validator.validate(config_mode, instance_mode, repetitions,\n n_jobs, backend, self.runhistory,\n self.intensifier.tae_runner,\n output_fn=new_rh_path)\n return new_rh\n\n def _get_timebound_for_intensification(self, time_spent: float, update: bool) -> float:\n \"\"\"Calculate time left for intensify from the time spent on\n choosing challengers using the fraction of time intended for\n intensification (which is specified in\n scenario.intensification_percentage).\n\n Parameters\n ----------\n time_spent : float\n\n update : bool\n Only used to check in the unit tests how this function was called\n\n Returns\n -------\n time_left : float\n \"\"\"\n frac_intensify = self.scenario.intensification_percentage # type: ignore[attr-defined] # noqa F821\n if frac_intensify <= 0 or frac_intensify >= 1:\n raise ValueError(\"The value for intensification_percentage-\"\n \"option must lie in (0,1), instead: %.2f\" %\n frac_intensify)\n total_time = time_spent / (1 - frac_intensify)\n time_left = frac_intensify * total_time\n self.logger.debug(\"Total time: %.4f, time spent on choosing next \"\n \"configurations: %.4f (%.2f), time left for \"\n \"intensification: %.4f (%.2f)\" %\n (total_time, time_spent, (1 - frac_intensify), time_left, frac_intensify))\n return time_left\n","repo_name":"AutoFP/Auto-FP","sub_path":"auto_fp/Exp_flexible_time_with_test/SMAC3/smac/optimizer/smbo.py","file_name":"smbo.py","file_ext":"py","file_size_in_byte":15362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"9981891310","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .networks import BaseNetwork\nfrom .networks import get_nonspade_norm_layer\nfrom .networks import MySeparableBilinearDownsample as BilinearDownsample\nimport torch.nn.utils.spectral_norm as spectral_norm\nimport torch as th\nfrom math import 
pi\nfrom math import log2\nimport time\nimport math\n\n\nclass CoordFillGenerator(BaseNetwork):\n @staticmethod\n def modify_commandline_options(parser, is_train):\n parser.set_defaults(norm_G='instanceaffine')\n parser.set_defaults(lr_instance=True)\n parser.set_defaults(no_instance_dist=True)\n parser.set_defaults(hr_coor=\"cosine\")\n return parser\n\n def __init__(self, opt, hr_stream=None, lr_stream=None, fast=False):\n super(CoordFillGenerator, self).__init__()\n if lr_stream is None or hr_stream is None:\n lr_stream = dict()\n hr_stream = dict()\n self.num_inputs = opt.label_nc + (1 if opt.contain_dontcare_label else 0) + (0 if (opt.no_instance_edge & opt.no_instance_dist) else 1)\n self.lr_instance = opt.lr_instance\n self.learned_ds_factor = opt.learned_ds_factor #(S2 in sec. 3.2)\n self.gpu_ids = opt.gpu_ids\n\n self.downsampling = opt.crop_size // opt.ds_scale\n\n self.highres_stream = PixelQueryNet(self.downsampling, num_inputs=self.num_inputs,\n num_outputs=opt.output_nc, width=opt.hr_width,\n depth=opt.hr_depth,\n no_one_hot=opt.no_one_hot, lr_instance=opt.lr_instance,\n **hr_stream)\n\n num_params = self.highres_stream.num_params\n self.lowres_stream = ParaGenNet(num_params, scale_injection=opt.scale_injection)\n\n def use_gpu(self):\n return len(self.gpu_ids) > 0\n\n def get_lowres(self, im):\n \"\"\"Creates a lowres version of the input.\"\"\"\n device = self.use_gpu()\n if(self.learned_ds_factor != self.downsampling):\n myds = BilinearDownsample(int(self.downsampling//self.learned_ds_factor), self.num_inputs,device)\n return myds(im)\n else:\n return im\n\n def forward(self, highres):\n lowres = self.get_lowres(highres)\n lr_features = self.lowres_stream(lowres)\n output = self.highres_stream(highres, lr_features)\n return output, lr_features#, lowres\n\n\ndef _get_coords(bs, h, w, device, ds):\n \"\"\"Creates the position encoding for the pixel-wise MLPs\"\"\"\n x = th.arange(0, w).float()\n y = th.arange(0, h).float()\n scale = 7 / 8\n x_cos = th.remainder(x, ds).float() / ds\n x_sin = th.remainder(x, ds).float() / ds\n y_cos = th.remainder(y, ds).float() / ds\n y_sin = th.remainder(y, ds).float() / ds\n # normalize each axis by its own maximum so every channel spans [0, scale]\n x_cos = x_cos / (max(x_cos) / scale)\n x_sin = x_sin / (max(x_sin) / scale)\n y_cos = y_cos / (max(y_cos) / scale)\n y_sin = y_sin / (max(y_sin) / scale)\n xcos = th.cos((2 * pi * x_cos).float())\n xsin = th.sin((2 * pi * x_sin).float())\n ycos = th.cos((2 * pi * y_cos).float())\n ysin = th.sin((2 * pi * y_sin).float())\n xcos = xcos.view(1, 1, 1, w).repeat(bs, 1, h, 1)\n xsin = xsin.view(1, 1, 1, w).repeat(bs, 1, h, 1)\n ycos = ycos.view(1, 1, h, 1).repeat(bs, 1, 1, w)\n ysin = ysin.view(1, 1, h, 1).repeat(bs, 1, 1, w)\n coords = th.cat([xcos, xsin, ycos, ysin], 1).to(device)\n\n return coords.to(device)\n\n\n\ndef spectral_norm(module, mode=True):\n if mode:\n return nn.utils.spectral_norm(module)\n\n return module\n\n\nclass ParaGenNet(th.nn.Module):\n \"\"\"Convolutional LR stream to estimate the pixel-wise MLPs parameters\"\"\"\n def __init__(self, num_out, scale_injection=False):\n super(ParaGenNet, self).__init__()\n\n self.num_out = num_out\n self.scale_injection = scale_injection\n\n ngf = 64\n if self.scale_injection:\n self.out_para = nn.Sequential(\n th.nn.Linear(ngf * 8 + 1, self.num_out)\n )\n else:\n self.out_para = nn.Sequential(\n th.nn.Linear(ngf * 8, self.num_out)\n )\n\n def forward(self, model, x, x_hr):\n structure = model(x)\n if self.scale_injection:\n scale = (torch.ones(x_hr.size(0), 1, 1, 1) * (structure.size(3) / x_hr.size(3))) \\\n 
.to(structure.device)\n scale = scale.repeat(1, structure.size(2), structure.size(3), 1)\n structure = torch.cat([structure.permute(0, 2, 3, 1), scale], dim=-1)\n para = self.out_para(structure).permute(0, 3, 1, 2)\n else:\n para = self.out_para(structure.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n return para\n\n def mask_predict(self, model, x, x_hr, mask):\n structure = model(x)\n\n if self.scale_injection:\n scale = (torch.ones(x_hr.size(0), 1, 1, 1) * (structure.size(3) / x_hr.size(3))) \\\n .to(structure.device)\n scale = scale.repeat(1, structure.size(2), structure.size(3), 1)\n structure = torch.cat([structure.permute(0, 2, 3, 1), scale], dim=-1)\n else:\n structure = structure.permute(0, 2, 3, 1)\n\n bs, h, w, c = structure.size()\n k = mask.size(2) // h\n mask = mask.unfold(2, k, k).unfold(3, k, k)\n mask = mask.permute(0, 2, 3, 4, 5, 1).contiguous().view(\n bs, h, w, int(k * k))\n lr_mask = torch.mean(mask, dim=-1).view(h * w)\n structure = structure.view(bs, h * w, c)\n index = torch.nonzero(1 - lr_mask).squeeze(1)\n structure = structure[:, index, :]\n para = self.out_para(structure).permute(0, 2, 1)\n return para, mask\n\n\nclass PixelQueryNet(th.nn.Module):\n \"\"\"Adaptive pixel-wise MLPs\"\"\"\n def __init__(self, downsampling,\n num_inputs=13, num_outputs=3, width=64, depth=5, coordinates=\"cosine\",\n no_one_hot=False, lr_instance=False):\n super(PixelQueryNet, self).__init__()\n\n self.lr_instance = lr_instance\n self.downsampling = downsampling\n self.num_inputs = num_inputs - (1 if self.lr_instance else 0)\n self.num_outputs = num_outputs\n self.width = width\n self.depth = depth\n self.coordinates = coordinates\n self.xy_coords = None\n self.no_one_hot = no_one_hot\n self.channels = []\n self._set_channels()\n\n self.num_params = 0\n self.splits = {}\n self._set_num_params()\n\n @property # for backward compatibility\n def ds(self):\n return self.downsampling\n\n def _set_channels(self):\n \"\"\"Compute and store the hr-stream layer dimensions.\"\"\"\n in_ch = self.num_inputs\n in_ch = in_ch + int(4)\n self.channels = [in_ch]\n for _ in range(self.depth - 1): # intermediate layers -> constant size\n self.channels.append(self.width)\n # output layer\n self.channels.append(self.num_outputs)\n\n def _set_num_params(self):\n nparams = 0\n self.splits = {\n \"biases\": [],\n \"weights\": [],\n }\n\n # # go over input/output channels for each layer\n idx = 0\n for layer, nci in enumerate(self.channels[:-1]):\n nco = self.channels[layer + 1]\n nparams = nparams + nco # FC biases\n self.splits[\"biases\"].append((idx, idx + nco))\n idx = idx + nco\n\n nparams = nparams + nci * nco # FC weights\n self.splits[\"weights\"].append((idx, idx + nco * nci))\n idx = idx + nco * nci\n\n self.num_params = nparams\n\n def _get_weight_indices(self, idx):\n return self.splits[\"weights\"][idx]\n\n def _get_bias_indices(self, idx):\n return self.splits[\"biases\"][idx]\n\n def forward(self, highres, lr_params):\n assert lr_params.shape[1] == self.num_params, \"incorrect input params\"\n\n if self.lr_instance:\n highres = highres[:, :-1, :, :]\n\n # Fetch sizes\n bs, _, h, w = highres.shape\n bs, _, h_lr, w_lr = lr_params.shape\n k = h // h_lr\n\n self.xy_coords = _get_coords(1, h, w, highres.device, h // h_lr)\n\n highres = torch.repeat_interleave(self.xy_coords, repeats=bs, dim=0)\n\n # Split input in tiles of size kxk according to the NN interp factor (the total downsampling factor),\n # with channels last (for matmul)\n # all pixels within a tile of kxk are processed by the same MLPs 
parameters\n nci = highres.shape[1]\n\n tiles = highres.unfold(2, k, k).unfold(3, k, k)\n tiles = tiles.permute(0, 2, 3, 4, 5, 1).contiguous().view(\n bs, h_lr, w_lr, int(k * k), nci)\n out = tiles\n num_layers = len(self.channels) - 1\n\n for idx, nci in enumerate(self.channels[:-1]):\n nco = self.channels[idx + 1]\n\n # Select params in lowres buffer\n bstart, bstop = self._get_bias_indices(idx)\n wstart, wstop = self._get_weight_indices(idx)\n\n w_ = lr_params[:, wstart:wstop]\n b_ = lr_params[:, bstart:bstop]\n\n w_ = w_.permute(0, 2, 3, 1).view(bs, h_lr, w_lr, nci, nco)\n b_ = b_.permute(0, 2, 3, 1).view(bs, h_lr, w_lr, 1, nco)\n\n out = th.matmul(out, w_) + b_\n\n # Apply leaky ReLU non-linearity in all but the last layer, and tanh in the last\n # out = th.nn.functional.leaky_relu(out, 0.01)\n if idx < num_layers - 1:\n out = th.nn.functional.leaky_relu(out, 0.01)\n else:\n out = torch.tanh(out)\n #\n # reorder the tiles in their correct position, and put channels first\n out = out.view(bs, h_lr, w_lr, k, k, self.num_outputs).permute(\n 0, 5, 1, 3, 2, 4)\n out = out.contiguous().view(bs, self.num_outputs, h, w)\n\n return out\n\n def mask_predict(self, highres, lr_params, hr_mask, lr_mask):\n assert lr_params.shape[1] == self.num_params, \"incorrect input params\"\n\n if self.lr_instance:\n highres = highres[:, :-1, :, :]\n\n bs, _, h, w = highres.shape\n bs, h_lr, w_lr, _ = lr_mask.shape\n k = h // h_lr\n\n self.xy_coords = _get_coords(1, h, w, highres.device, h // h_lr)\n pe = torch.repeat_interleave(self.xy_coords, repeats=bs, dim=0)\n # Split input in tiles of size kxk according to the NN interp factor (the total downsampling factor),\n # with channels last (for matmul)\n # all pixels within a tile of kxk are processed by the same MLPs parameters\n nci = pe.shape[1]\n # bs, 5 rgbxy, h//k=h_lr, w//k=w_lr, k, k\n tiles = pe.unfold(2, k, k).unfold(3, k, k)\n tiles = tiles.permute(0, 2, 3, 4, 5, 1).contiguous().view(\n bs, h_lr, w_lr, int(k * k), nci)\n\n mask = torch.mean(lr_mask, dim=-1).view(h_lr * w_lr)\n index = torch.nonzero(1 - mask).squeeze(1)\n out = tiles\n num_layers = len(self.channels) - 1\n\n out = out.view(bs, h_lr * w_lr, int(k * k), nci)[:, index, :, :]\n num = out.size(1)\n\n for idx, nci in enumerate(self.channels[:-1]):\n nco = self.channels[idx + 1]\n\n # Select params in lowres buffer\n bstart, bstop = self._get_bias_indices(idx)\n wstart, wstop = self._get_weight_indices(idx)\n\n w_ = lr_params[:, wstart:wstop]\n b_ = lr_params[:, bstart:bstop]\n\n w_ = w_.permute(0, 2, 1).view(bs, num, nci, nco)\n b_ = b_.permute(0, 2, 1).view(bs, num, 1, nco)\n\n out = th.matmul(out, w_) + b_\n\n # Apply leaky ReLU non-linearity in all but the last layer, and tanh in the last\n if idx < num_layers - 1:\n out = th.nn.functional.leaky_relu(out, 0.01)\n else:\n out = torch.tanh(out)\n\n highres = highres.unfold(2, k, k).unfold(3, k, k)\n highres = highres.permute(0, 2, 3, 4, 5, 1).contiguous().view(\n bs, h_lr, w_lr, int(k * k), 3).view(bs, h_lr * w_lr, int(k * k), 3)\n\n highres[:, index, :, :] = out\n out = highres.view(bs, h_lr, w_lr, k, k, self.num_outputs).permute(\n 0, 5, 1, 3, 2, 4)\n out = out.contiguous().view(bs, self.num_outputs, h, w)\n\n return out","repo_name":"NiFangBaAGe/CoordFill","sub_path":"models/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":12294,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"28"} +{"seq_id":"20442257506","text":"\"\"\"i wrote this bot just to block nitro emojis from someone 
named elperson. fuck you elperson. stop spamming\"\"\"\nimport lightbulb, hikari\nimport time, re\nimport ujson as json\n\nplugin = lightbulb.Plugin(\"elpipi\")\n\n@plugin.listener(hikari.MessageCreateEvent)\nasync def block_elperson(event: hikari.MessageCreateEvent) -> None:\n if event.author.id != 717449386040361002:\n return\n message = event.message.content\n message = re.sub(r\"<a:[a-zA-Z_-]*:[0-9]*>\", \"\", message) # strip animated custom emojis (<a:name:id>); pattern reconstructed by analogy with the static one below\n message = re.sub(r\"<:[a-zA-Z_-]*:[0-9]*>\", \"\", message) # strip static custom emojis (<:name:id>)\n message = message.replace(\" \", \"\")\n print(\"message content: %r\" % message)\n if message != \"\": # something besides emojis and whitespace is left, so keep the message\n return\n await event.message.delete()\n\ndef load(bot):\n bot.add_plugin(plugin)\n\ndef unload(bot):\n bot.remove_plugin(plugin)","repo_name":"howlagon/elwiwibot","sub_path":"plugins/elpipi.py","file_name":"elpipi.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"35497619911","text":"__doc__ = \"\"\"\nThis module contains the supporting classes for the Two Step Analysis user agent\nalgorithm that is used as the primary way to match user agents with the Java API\nfor the WURFL.\n\nA description of the way the following source is intended to work can be found\nwithin the source for the original Java API implementation here:\nhttp://sourceforge.net/projects/wurfl/files/WURFL Java API/\n\nThe original Java code is GPLd and Copyright (c) WURFL-Pro srl\n\"\"\"\n\n__author__ = \"Armand Lynch \"\n__copyright__ = \"Copyright 2011, Armand Lynch\"\n__license__ = \"LGPL\"\n__url__ = \"http://celljam.net/\"\n__version__ = \"1.2.1\"\n\nimport re\n\nfrom pywurfl.algorithms.wurfl.utils import (first_semi_colon, first_slash,\n first_space, is_mobile_browser,\n second_slash, third_space)\nfrom pywurfl.algorithms.wurfl.utils import indexof_or_length as iol\nfrom pywurfl.algorithms.wurfl import normalizers\nfrom pywurfl.algorithms.wurfl.strategies import ld_match, ris_match\n\n\nclass AbstractMatcher(object):\n user_agent_map = {}\n\n def __init__(self, normalizer=normalizers.generic):\n self.normalizer = normalizer\n self.known_user_agents = set()\n\n def add(self, user_agent, wurfl_id):\n self.known_user_agents.add(user_agent)\n self.user_agent_map[user_agent] = wurfl_id\n\n @property\n def user_agents(self):\n return sorted(self.known_user_agents)\n\n def can_handle(self, user_agent):\n raise NotImplementedError\n\n def __call__(self, user_agent):\n normalized_user_agent = self.normalizer(user_agent)\n devid = self.conclusive_match(normalized_user_agent)\n if not devid or devid == u\"generic\":\n devid = self.recovery_match(normalized_user_agent)\n if not devid or devid == u\"generic\":\n devid = self.catch_all_recovery_match(user_agent)\n return devid\n\n def conclusive_match(self, user_agent):\n match = self.find_matching_ua(user_agent)\n #print \"%s -> conclusive_match -> %s\" % (user_agent, match)\n devid = self.user_agent_map.get(match, u\"generic\")\n return devid\n\n def find_matching_ua(self, user_agent):\n tolerance = first_slash(user_agent)\n match = self.ris_matcher(user_agent, tolerance)\n #print \"AbstractMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n def recovery_match(self, user_agent):\n return u\"generic\"\n\n recovery_map = (\n # Openwave\n (u\"UP.Browser/7.2\", u\"opwv_v72_generic\"),\n (u\"UP.Browser/7\", u\"opwv_v7_generic\"),\n (u\"UP.Browser/6.2\", u\"opwv_v62_generic\"),\n (u\"UP.Browser/6\", u\"opwv_v6_generic\"),\n (u\"UP.Browser/5\", u\"upgui_generic\"),\n 
(u\"UP.Browser/4\", u\"uptext_generic\"),\n (u\"UP.Browser/3\", u\"uptext_generic\"),\n\n # Series 60\n (u\"Series60\", u\"nokia_generic_series60\"),\n\n # Access/Net Front\n (u\"NetFront/3.0\", u\"generic_netfront_ver3\"),\n (u\"ACS-NF/3.0\", u\"generic_netfront_ver3\"),\n (u\"NetFront/3.1\", u\"generic_netfront_ver3_1\"),\n (u\"ACS-NF/3.1\", u\"generic_netfront_ver3_1\"),\n (u\"NetFront/3.2\", u\"generic_netfront_ver3_2\"),\n (u\"ACS-NF/3.2\", u\"generic_netfront_ver3_2\"),\n (u\"NetFront/3.3\", u\"generic_netfront_ver3_3\"),\n (u\"ACS-NF/3.3\", u\"generic_netfront_ver3_3\"),\n (u\"NetFront/3.4\", u\"generic_netfront_ver3_4\"),\n (u\"NetFront/3.5\", u\"generic_netfront_ver3_5\"),\n (u\"NetFront/4.0\", u\"generic_netfront_ver4\"),\n (u\"NetFront/4.1\", u\"generic_netfront_ver4_1\"),\n\n # Windows CE\n (u\"Windows CE\", u\"generic_ms_mobile_browser_ver1\"),\n\n # web browsers?\n (u\"Mozilla/4.0\", u\"generic_web_browser\"),\n (u\"Mozilla/5.0\", u\"generic_web_browser\"),\n (u\"Mozilla/6.0\", u\"generic_web_browser\"),\n\n # Generic XHTML\n (u\"Mozilla/\", u\"generic_xhtml\"),\n (u\"ObigoInternetBrowser/Q03C\", u\"generic_xhtml\"),\n (u\"AU-MIC/2\", u\"generic_xhtml\"),\n (u\"AU-MIC-\", u\"generic_xhtml\"),\n (u\"AU-OBIGO/\", u\"generic_xhtml\"),\n (u\"Obigo/Q03\", u\"generic_xhtml\"),\n (u\"Obigo/Q04\", u\"generic_xhtml\"),\n (u\"ObigoInternetBrowser/2\", u\"generic_xhtml\"),\n (u\"Teleca Q03B1\", u\"generic_xhtml\"),\n\n # Opera Mini\n (u\"Opera Mini/1\", u\"browser_opera_mini_release1\"),\n (u\"Opera Mini/2\", u\"browser_opera_mini_release2\"),\n (u\"Opera Mini/3\", u\"browser_opera_mini_release3\"),\n (u\"Opera Mini/4\", u\"browser_opera_mini_release4\"),\n (u\"Opera Mini/5\", u\"browser_opera_mini_release5\"),\n\n # DoCoMo\n (u\"DoCoMo\", u\"docomo_generic_jap_ver1\"),\n (u\"KDDI\", u\"docomo_generic_jap_ver1\"))\n\n def catch_all_recovery_match(self, user_agent):\n\n match = u\"generic\"\n for partial_agent, wdevice in self.recovery_map:\n if partial_agent in user_agent:\n match = wdevice\n break\n return match\n\n def ris_matcher(self, user_agent, tolerance):\n return ris_match(self.user_agents, user_agent, tolerance)\n\n def ld_matcher(self, user_agent, tolerance):\n return ld_match(self.user_agents, user_agent, tolerance)\n\n\nclass AlcatelMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"Alcatel\") or\n user_agent.startswith(u\"ALCATEL\"))\n\n\nclass AndroidMatcher(AbstractMatcher):\n\n androids = {}\n androids[u\"\"] = u\"generic_android\"\n androids[u\"1_5\"] = u\"generic_android_ver1_5\"\n androids[u\"1_6\"] = u\"generic_android_ver1_6\"\n androids[u\"2_0\"] = u\"generic_android_ver2\"\n androids[u\"2_1\"] = u\"generic_android_ver2_1\"\n androids[u\"2_2\"] = u\"generic_android_ver2_2\"\n\n android_os_re = re.compile(r\".*Android[\\s/](\\d)\\.(\\d)\")\n\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"Mozilla\") and u\"Android\" in user_agent\n\n def find_matching_ua(self, user_agent):\n tolerance = iol(user_agent, u\" \",\n start_index=iol(user_agent, u\"Android\"))\n match = self.ris_matcher(user_agent, tolerance)\n #print \"AndroidMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n def recovery_match(self, user_agent):\n if u\"Froyo\" in user_agent:\n return u\"generic_android_ver2_2\"\n return self.androids.get(self.android_os_version(user_agent),\n u\"generic_android\")\n\n def android_os_version(self, user_agent):\n match = self.android_os_re.match(user_agent)\n if match:\n return 
u\"%s_%s\" % (match.group(1), match.group(2))\n\n\nclass AOLMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return not is_mobile_browser(user_agent) and u\"AOL\" in user_agent\n\n\nclass AppleMatcher(AbstractMatcher):\n APPLE_LD_TOLERANCE = 5\n\n def can_handle(self, user_agent):\n return (u\"iPhone\" in user_agent or u\"iPod\" in user_agent or u\"iPad\" in\n user_agent)\n\n def find_matching_ua(self, user_agent):\n if user_agent.startswith(u\"Apple\"):\n tolerance = third_space(user_agent)\n else:\n tolerance = first_semi_colon(user_agent)\n match = self.ris_matcher(user_agent, tolerance)\n #print \"AppleMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n def recovery_match(self, user_agent):\n if u\"iPad\" in user_agent:\n return \"apple_ipad_ver1\"\n if u\"iPod\" in user_agent:\n return u\"apple_ipod_touch_ver1\"\n return u\"apple_iphone_ver1\"\n\n\nclass BenQMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"BENQ\") or user_agent.startswith(u\"BenQ\")\n\n\nclass BlackberryMatcher(AbstractMatcher):\n blackberries = {}\n blackberries[u\"2.\"] = u\"blackberry_generic_ver2\"\n blackberries[u\"3.2\"] = u\"blackberry_generic_ver3_sub2\"\n blackberries[u\"3.3\"] = u\"blackberry_generic_ver3_sub30\"\n blackberries[u\"3.5\"] = u\"blackberry_generic_ver3_sub50\"\n blackberries[u\"3.6\"] = u\"blackberry_generic_ver3_sub60\"\n blackberries[u\"3.7\"] = u\"blackberry_generic_ver3_sub70\"\n blackberries[u\"4.1\"] = u\"blackberry_generic_ver4_sub10\"\n blackberries[u\"4.2\"] = u\"blackberry_generic_ver4_sub20\"\n blackberries[u\"4.3\"] = u\"blackberry_generic_ver4_sub30\"\n blackberries[u\"4.5\"] = u\"blackberry_generic_ver4_sub50\"\n blackberries[u\"4.6\"] = u\"blackberry_generic_ver4_sub60\"\n blackberries[u\"4.7\"] = u\"blackberry_generic_ver4_sub70\"\n blackberries[u\"4.\"] = u\"blackberry_generic_ver4\"\n blackberries[u\"5.\"] = u\"blackberry_generic_ver5\"\n blackberries[u\"6.\"] = u\"blackberry_generic_ver6\"\n\n blackberry_os_re = re.compile(r\".*Black[Bb]erry[^/\\s]+/(\\d\\.\\d)\")\n\n def can_handle(self, user_agent):\n return u\"BlackBerry\" in user_agent or u\"Blackberry\" in user_agent\n\n def recovery_match(self, user_agent):\n match = u\"generic\"\n version = self.blackberry_os_version(user_agent)\n if version:\n match = self.blackberries.get(version, u\"generic\")\n if match == u\"generic\":\n match = self.blackberries.get(version[:-1], u\"generic\")\n return match\n\n def blackberry_os_version(self, user_agent):\n match = self.blackberry_os_re.match(user_agent)\n if match:\n return match.group(1)\n\n\nclass BotMatcher(AbstractMatcher):\n bots = (u\"bot\", u\"crawler\", u\"spider\", u\"novarra\", u\"transcoder\",\n u\"yahoo! searchmonkey\", u\"yahoo! 
slurp\", u\"feedfetcher-google\",\n u\"toolbar\", u\"mowser\", u\"mediapartners-google\", u\"azureus\",\n u\"inquisitor\", u\"baiduspider\", u\"baidumobaider\", u\"indy library\",\n u\"slurp\", u\"crawl\", u\"wget\", u\"ucweblient\", u\"snoopy\",\n u\"mozfdsilla\", u\"ask jeeves\", u\"jeeves/teoma\", u\"mechanize\",\n u\"http client\", u\"servicemonitor\", u\"httpunit\", u\"hatena\",\n u\"ichiro\")\n\n BOT_TOLERANCE = 4\n\n def can_handle(self, user_agent):\n user_agent = user_agent.lower()\n for bot in self.bots:\n if bot in user_agent:\n return True\n return False\n\n def find_matching_ua(self, user_agent):\n match = self.ld_matcher(user_agent, self.BOT_TOLERANCE)\n return match\n\n def recovery_match(self, user_agent):\n return u\"generic_web_crawler\"\n\n\nclass CatchAllMatcher(AbstractMatcher):\n MOZILLA_LD_TOLERANCE = 4\n\n def can_handle(self, user_agent):\n return True\n\n def find_matching_ua(self, user_agent):\n if user_agent.startswith(u\"Mozilla\"):\n if user_agent.startswith(u\"Mozilla/4\"):\n match = ld_match(self.extract_uas(u\"Mozilla/4\"), user_agent,\n self.MOZILLA_LD_TOLERANCE)\n elif user_agent.startswith(u\"Mozilla/5\"):\n match = ld_match(self.extract_uas(u\"Mozilla/5\"), user_agent,\n self.MOZILLA_LD_TOLERANCE)\n else:\n match = ld_match(self.extract_uas(u\"Mozilla\"), user_agent,\n self.MOZILLA_LD_TOLERANCE)\n else:\n match = super(CatchAllMatcher, self).find_matching_ua(user_agent)\n #print \"CatchAllMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n def extract_uas(self, start):\n return (x for x in self.user_agents if x.startswith(start))\n\n\nclass ChromeMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return not is_mobile_browser(user_agent) and u\"Chrome\" in user_agent\n\n\nclass DoCoMoMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"DoCoMo\")\n\n def find_matching_ua(self, user_agent):\n return u\"\"\n\n def recovery_match(self, user_agent):\n if user_agent.startswith(u\"DoCoMo/2\"):\n return u\"docomo_generic_jap_ver2\"\n return u\"docomo_generic_jap_ver1\"\n\n\nclass FirefoxMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return not is_mobile_browser(user_agent) and u\"Firefox\" in user_agent\n\n\nclass GrundigMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"Grundig\") or\n user_agent.startswith(u\"GRUNDIG\"))\n\n\nclass HTCMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"HTC\") or u\"XV6875.1\" in user_agent\n\n\nclass KDDIMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return u\"KDDI\" in user_agent\n\n def find_matching_ua(self, user_agent):\n if user_agent.startswith(u\"KDDI/\"):\n tolerance = second_slash(user_agent)\n elif user_agent.startswith(u\"KDDI\"):\n tolerance = first_slash(user_agent)\n else:\n tolerance = iol(user_agent, \")\")\n match = self.ris_matcher(user_agent, tolerance)\n #print \"KDDIMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n def recovery_match(self, user_agent):\n if u\"Opera\" in user_agent:\n return u\"opera\"\n return u\"opwv_v62_generic\"\n\n\nclass KonquerorMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return not is_mobile_browser(user_agent) and u\"Konqueror\" in user_agent\n\n\nclass KyoceraMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"kyocera\") or\n user_agent.startswith(u\"QC-\") or\n 
user_agent.startswith(u\"KWC-\"))\n\n\nclass LGMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"lg\") or u\"LG-\" in user_agent or\n u\"LGE\" in user_agent)\n\n def find_matching_ua(self, user_agent):\n tolerance = iol(user_agent, u\"/\",\n start_index=user_agent.upper().index(u\"LG\"))\n match = self.ris_matcher(user_agent, tolerance)\n return match\n\n\nclass LGUPLUSMatcher(AbstractMatcher):\n lgpluses = (\n (u\"generic_lguplus_rexos_facebook_browser\",\n (u\"Windows NT 5\", u\"POLARIS\")),\n (u\"generic_lguplus_rexos_webviewer_browser\",\n (u\"Windows NT 5\",)),\n (u\"generic_lguplus_winmo_facebook_browser\",\n (u\"Windows CE\", u\"POLARIS\")),\n (u\"generic_lguplus_android_webkit_browser\",\n (u\"Android\", u\"AppleWebKit\")))\n\n def can_handle(self, user_agent):\n return u\"lgtelecom\" in user_agent or u\"LGUPLUS\" in user_agent\n\n def conclusive_match(self, user_agent):\n return u\"generic\"\n\n def recovery_match(self, user_agent):\n for wid, searches in self.lgpluses:\n for search in searches:\n if search not in user_agent:\n break\n else:\n return wid\n return u\"generic_lguplus\"\n\n\nclass MaemoMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return u\"Maemo \" in user_agent\n\n def find_matching_ua(self, user_agent):\n tolerance = first_space(user_agent)\n match = self.ris_matcher(user_agent, tolerance)\n return match\n\n\nclass MitsubishiMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"Mitsu\")\n\n\nclass MotorolaMatcher(AbstractMatcher):\n MOTOROLA_TOLERANCE = 5\n\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"Mot-\") or\n u\"MOT-\" in user_agent or\n u\"Motorola\" in user_agent)\n\n def find_matching_ua(self, user_agent):\n if (user_agent.startswith(u\"Mot-\") or user_agent.startswith(u\"MOT-\") or\n user_agent.startswith(u\"Motorola\")):\n match = super(MotorolaMatcher, self).find_matching_ua(user_agent)\n else:\n match = self.ld_matcher(user_agent, self.MOTOROLA_TOLERANCE)\n #print \"MotorolaMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n def recovery_match(self, user_agent):\n match = u\"generic\"\n if u\"MIB/2.2\" in user_agent or u\"MIB/BER2.2\" in user_agent:\n match = u\"mot_mib22_generic\"\n return match\n\n\nclass MSIEMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (not is_mobile_browser(user_agent) and\n user_agent.startswith(u\"Mozilla\") and\n u\"MSIE\" in user_agent)\n\n\nclass NecMatcher(AbstractMatcher):\n NEC_LD_TOLERANCE = 2\n\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"NEC\") or user_agent.startswith(u\"KGT\")\n\n def find_matching_ua(self, user_agent):\n if user_agent.startswith(u\"NEC\"):\n match = super(NecMatcher, self).find_matching_ua(user_agent)\n else:\n match = self.ld_matcher(user_agent, self.NEC_LD_TOLERANCE)\n #print \"NecMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n\nclass NokiaMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return u\"Nokia\" in user_agent\n\n def find_matching_ua(self, user_agent):\n tol1 = iol(user_agent, u\"/\", start_index=user_agent.index(u\"Nokia\"))\n tol2 = iol(user_agent, u\" \", start_index=user_agent.index(u\"Nokia\"))\n tolerance = tol1 if tol1 < tol2 else tol2\n #print \"NokiaMatcher tolerance %s\" % tolerance\n match = self.ris_matcher(user_agent, tolerance)\n #print \"NokiaMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n def recovery_match(self, 
user_agent):\n match = u\"generic\"\n if u\"Series60\" in user_agent:\n match = u\"nokia_generic_series60\"\n elif u\"Series80\" in user_agent:\n match = u\"nokia_generic_series80\"\n return match\n\n\nclass OperaMatcher(AbstractMatcher):\n OPERA_TOLERANCE = 1\n\n operas = {}\n operas[\"7\"] = u\"opera_7\"\n operas[\"8\"] = u\"opera_8\"\n operas[\"9\"] = u\"opera_9\"\n operas[\"10\"] = u\"opera_10\"\n\n opera_re = re.compile(r\".*Opera[\\s/](\\d+).*\")\n\n def can_handle(self, user_agent):\n return not is_mobile_browser(user_agent) and u\"Opera\" in user_agent\n\n def find_matching_ua(self, user_agent):\n match = self.ld_matcher(user_agent, self.OPERA_TOLERANCE)\n #print \"OperaMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n def recovery_match(self, user_agent):\n match = self.opera_re.match(user_agent)\n if match:\n return self.operas.get(match.group(1), u\"opera\")\n return u\"opera\"\n\n\nclass OperaMiniMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return u\"Opera Mini\" in user_agent\n\n def recovery_match(self, user_agent):\n match = u\"\"\n if u\"Opera Mini/1\" in user_agent:\n match = u\"browser_opera_mini_release1\"\n elif u\"Opera Mini/2\" in user_agent:\n match = u\"browser_opera_mini_release2\"\n elif u\"Opera Mini/3\" in user_agent:\n match = u\"browser_opera_mini_release3\"\n elif u\"Opera Mini/4\" in user_agent:\n match = u\"browser_opera_mini_release4\"\n elif u\"Opera Mini/5\" in user_agent:\n match = u\"browser_opera_mini_release5\"\n return match\n\n\nclass PanasonicMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"Panasonic\")\n\n\nclass PantechMatcher(AbstractMatcher):\n PANTECH_LD_TOLERANCE = 4\n\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"Pantech\") or\n user_agent.startswith(u\"PT-\") or\n user_agent.startswith(u\"PANTECH\") or\n user_agent.startswith(u\"PG-\"))\n\n def find_matching_ua(self, user_agent):\n if user_agent.startswith(u\"Pantech\"):\n match = self.ld_matcher(user_agent, self.PANTECH_LD_TOLERANCE)\n else:\n match = super(PantechMatcher, self).find_matching_ua(user_agent)\n #print \"PantechMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n\nclass PhilipsMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"Philips\") or\n user_agent.startswith(u\"PHILIPS\"))\n\n\nclass PortalmmmMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"portalmmm\")\n\n def find_matching_ua(self, user_agent):\n return u\"\"\n\n\nclass QtekMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"Qtek\")\n\n\nclass SafariMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (not is_mobile_browser(user_agent) and\n user_agent.startswith(u\"Mozilla\") and\n u\"Safari\" in user_agent)\n\n def recovery_match(self, user_agent):\n if u\"Macintosh\" in user_agent or u\"Windows\" in user_agent:\n match = u\"generic_web_browser\"\n else:\n match = u\"generic\"\n return match\n\n\nclass SagemMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"Sagem\") or\n user_agent.startswith(u\"SAGEM\"))\n\n\nclass SamsungMatcher(AbstractMatcher):\n SAMSUNGS = [u\"SEC-\", u\"SAMSUNG-\", u\"SCH\", u\"Samsung\", u\"SPH\", u\"SGH\",\n u\"SAMSUNG/\"]\n\n def can_handle(self, user_agent):\n return (u\"Samsung/SGH\" in user_agent or\n u\"Samsung\" in user_agent or\n 
user_agent.startswith(u\"SEC-\") or\n user_agent.startswith(u\"SAMSUNG\") or\n user_agent.startswith(u\"SPH\") or\n user_agent.startswith(u\"SGH\") or\n user_agent.startswith(u\"SCH\"))\n\n def find_matching_ua(self, user_agent):\n for sams in self.SAMSUNGS:\n if sams in user_agent:\n tol1 = iol(user_agent, u\"/\", start_index=user_agent.index(sams))\n tol2 = iol(user_agent, u\" \", start_index=user_agent.index(sams))\n tolerance = tol1 if tol1 < tol2 else tol2\n break\n else:\n tolerance = len(user_agent)\n\n match = self.ris_matcher(user_agent, tolerance)\n #print \"SamsungMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n\nclass SanyoMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"Sanyo\") or\n user_agent.startswith(u\"SANYO\"))\n\n\nclass SharpMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return (user_agent.startswith(u\"Sharp\") or\n user_agent.startswith(u\"SHARP\"))\n\n\nclass SiemensMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"SIE-\")\n\n\nclass SonyEricssonMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return u\"SonyEricsson\" in user_agent\n\n def find_matching_ua(self, user_agent):\n if user_agent.startswith(u\"SonyEricsson\"):\n match = super(SonyEricssonMatcher, self).find_matching_ua(user_agent)\n else:\n tolerance = second_slash(user_agent)\n match = self.ris_matcher(user_agent, tolerance)\n #print \"SonyEricssonMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n\nclass SPVMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return u\"SPV\" in user_agent\n\n def find_matching_ua(self, user_agent):\n tolerance = iol(user_agent, u\";\", start_index=iol(user_agent, u\"SPV\"))\n match = self.ris_matcher(user_agent, tolerance)\n return match\n\n\nclass ToshibaMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"Toshiba\")\n\n\nclass VodafoneMatcher(AbstractMatcher):\n def can_handle(self, user_agent):\n return user_agent.startswith(u\"Vodafone\")\n\n def find_matching_ua(self, user_agent):\n tolerance = iol(user_agent, u\"/\", 3)\n match = self.ris_matcher(user_agent, tolerance)\n #print \"VodafoneMatcher %s -> f_m_ua -> %s\" % (user_agent, match)\n return match\n\n\nclass WindowsCEMatcher(AbstractMatcher):\n WINDOWS_CE_TOLERANCE = 3\n\n def can_handle(self, user_agent):\n return (u\"Mozilla/\" in user_agent and (u\"Windows CE\" in user_agent or\n u\"WindowsCE\" in user_agent or\n u\"ZuneWP7\" in user_agent))\n\n def find_matching_ua(self, user_agent):\n match = self.ld_matcher(user_agent, self.WINDOWS_CE_TOLERANCE)\n return match\n\n def recovery_match(self, user_agent):\n return u\"generic_ms_mobile_browser_ver1\"\n\n\nhandlers = [NokiaMatcher(),\n LGUPLUSMatcher(),\n AndroidMatcher(normalizers.android),\n SonyEricssonMatcher(),\n MotorolaMatcher(),\n BlackberryMatcher(),\n SiemensMatcher(),\n SagemMatcher(),\n SamsungMatcher(),\n PanasonicMatcher(),\n NecMatcher(),\n QtekMatcher(),\n MitsubishiMatcher(),\n PhilipsMatcher(),\n LGMatcher(normalizers.lg),\n AppleMatcher(),\n KyoceraMatcher(),\n AlcatelMatcher(),\n SharpMatcher(),\n SanyoMatcher(),\n BenQMatcher(),\n PantechMatcher(),\n ToshibaMatcher(),\n GrundigMatcher(),\n HTCMatcher(),\n BotMatcher(),\n SPVMatcher(),\n WindowsCEMatcher(),\n PortalmmmMatcher(),\n DoCoMoMatcher(),\n KDDIMatcher(),\n VodafoneMatcher(),\n OperaMiniMatcher(),\n MaemoMatcher(normalizers.maemo),\n 
ChromeMatcher(normalizers.chrome),\n AOLMatcher(),\n OperaMatcher(),\n KonquerorMatcher(normalizers.konqueror),\n SafariMatcher(normalizers.safari),\n FirefoxMatcher(normalizers.firefox),\n MSIEMatcher(normalizers.msie),\n CatchAllMatcher()]\n\n","repo_name":"sven-mayer/pywurfl","sub_path":"pywurfl/algorithms/wurfl/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":25864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"29724258527","text":"\"\"\"\n=====================\nL2 solver comparison\n=====================\n\nThis example compares different solvers with L2 regularization.\n\"\"\"\nprint(__doc__)\n\nimport sys\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.datasets import fetch_20newsgroups_vectorized\n\nfrom lightning.classification import SVRGClassifier\nfrom lightning.classification import SDCAClassifier\nfrom lightning.classification import CDClassifier\nfrom lightning.classification import AdaGradClassifier\nfrom lightning.classification import SAGAClassifier, SAGClassifier\n\nfrom lightning.impl.adagrad_fast import _proj_elastic_all\n\nclass Callback(object):\n\n def __init__(self, X, y):\n self.X = X\n self.y = y\n self.obj = []\n self.times = []\n self.start_time = time.time()\n self.test_time = 0\n\n def __call__(self, clf, t=None):\n test_time = time.time()\n\n if hasattr(clf, \"_finalize_coef\"):\n clf._finalize_coef()\n\n if t is not None:\n _proj_elastic_all(clf.eta, t, clf.g_sum_[0], clf.g_norms_[0],\n alpha1=0, alpha2=clf.alpha, delta=0,\n w=clf.coef_[0])\n\n\n y_pred = clf.decision_function(self.X).ravel()\n loss = (np.maximum(1 - self.y * y_pred, 0) ** 2).mean()\n coef = clf.coef_.ravel()\n regul = 0.5 * clf.alpha * np.dot(coef, coef)\n self.obj.append(loss + regul)\n self.test_time += time.time() - test_time\n self.times.append(time.time() - self.start_time - self.test_time)\n\ntry:\n dataset = sys.argv[1]\nexcept IndexError:\n dataset = \"synthetic\"\n\nif dataset == \"news20\":\n bunch = fetch_20newsgroups_vectorized(subset=\"all\")\n X = bunch.data\n y = bunch.target\n y[y >= 1] = 1\n alpha = 1e-4\n eta_svrg = 1e-1\n eta_adagrad = 1\n xlim = (0, 20)\n\nelse:\n X, y = make_classification(n_samples=10000,\n n_features=100,\n n_classes=2,\n random_state=0)\n alpha = 1e-2\n eta_svrg = 1e-3\n eta_adagrad = 1e-2\n xlim = [0, 2]\n\ny = y * 2 - 1\n\n# make sure the method does not stop prematurely, we want to see\n# the full convergence path\ntol = 1e-24\n\nclf1 = SVRGClassifier(loss=\"squared_hinge\", alpha=alpha, eta=eta_svrg,\n n_inner=1.0, max_iter=100, random_state=0, tol=1e-24)\nclf2 = SDCAClassifier(loss=\"squared_hinge\", alpha=alpha,\n max_iter=100, n_calls=X.shape[0]/2, random_state=0, tol=tol)\nclf3 = CDClassifier(loss=\"squared_hinge\", alpha=alpha, C=1.0/X.shape[0],\n max_iter=50, n_calls=X.shape[1]/3, random_state=0, tol=tol)\nclf4 = AdaGradClassifier(loss=\"squared_hinge\", alpha=alpha, eta=eta_adagrad,\n n_iter=100, n_calls=X.shape[0]/2, random_state=0)\nclf5 = SAGAClassifier(loss=\"squared_hinge\", alpha=alpha,\n max_iter=100, random_state=0, tol=tol)\nclf6 = SAGClassifier(loss=\"squared_hinge\", alpha=alpha,\n max_iter=100, random_state=0, tol=tol)\n\nplt.figure()\n\ndata = {}\nfor clf, name in ((clf1, \"SVRG\"),\n (clf2, \"SDCA\"),\n (clf3, \"PCD\"),\n (clf4, \"AdaGrad\"),\n (clf5, \"SAGA\"),\n (clf6, \"SAG\")\n ):\n print(name)\n cb = Callback(X, y)\n clf.callback = cb\n\n if name == \"PCD\" 
and hasattr(X, \"tocsc\"):\n clf.fit(X.tocsc(), y)\n else:\n clf.fit(X, y)\n data[name] = (cb.times, np.array(cb.obj))\n\n# get best value\nfmin = min([np.min(a[1]) for a in data.values()])\nfor name in data:\n plt.plot(data[name][0], data[name][1] - fmin, label=name, lw=3)\n\nplt.xlim(xlim)\nplt.yscale('log')\nplt.xlabel(\"CPU time\")\nplt.ylabel(\"Objective value minus optimum\")\nplt.legend()\nplt.grid()\n\nplt.show()\n","repo_name":"scikit-learn-contrib/lightning","sub_path":"examples/plot_l2_solvers.py","file_name":"plot_l2_solvers.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":1685,"dataset":"github-code","pt":"28"} +{"seq_id":"37788972595","text":"\"\"\"Reproducing Figure 2: plotting\"\"\"\nfrom plotting.params import COLORS\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nrc = {\n \"pdf.fonttype\": 42,\n \"text.usetex\": True,\n \"font.size\": 14,\n \"xtick.labelsize\": 12,\n \"ytick.labelsize\": 12,\n}\nplt.rcParams.update(rc)\n\nall_flow_times = np.load(\"../experiments/data/n_50_vary_lambdas_logscale.npy\")\nprint(np.shape(all_flow_times))\nn, _, n_lambda = np.shape(all_flow_times)\nflow_times = np.mean(all_flow_times, axis=0)\nflow_times_low = np.zeros_like(flow_times)\nflow_times_high = np.zeros_like(flow_times)\n\ndeviation = np.std(all_flow_times, axis=0) / np.sqrt(n)\n\nfor i in range(6):\n for j in range(n_lambda):\n flow_times[i, j] = (flow_times[i, j] - flow_times[-2, j]) / flow_times[-1, j]\n flow_times_low[i, j] = (\n flow_times[i, j] - 2 * deviation[i, j] / flow_times[-1, j]\n )\n flow_times_high[i, j] = (\n flow_times[i, j] + 2 * deviation[i, j] / flow_times[-1, j]\n )\n\nlambda1 = [0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 0.9, 0.95, 1.0]\nplt.figure()\nplt.rcParams[\"text.usetex\"] = True\nplt.ylim(top=5, bottom=10 ** (-4))\nfor i, algo in enumerate([\"ETC-U\", \"ETC-RR\", \"UCB-U\", \"UCB-RR\", \"RR\", \"FTPP\"]):\n if algo != \"FTPP\":\n plt.yscale(\"log\")\n plt.semilogx(lambda1, flow_times[i], label=algo, color=COLORS[algo])\n # plt.plot(lambda1, flow_times[i], label=algo, color=COLORS[algo])\n plt.fill_between(\n lambda1,\n flow_times_low[i],\n flow_times_high[i],\n alpha=0.3,\n color=COLORS[algo],\n )\n # plt.plot(lambda1,lambda1,'o')\n # plt.axvline(1 / (1 + 2 * np.sqrt(2)), color=\"black\", linestyle=\"--\")\n plt.xlabel(r\"Mean processing time of job 1 ($\\lambda_1$)\")\n plt.ylabel(r\"$(E[C_A]-E[C_{FTPP}])/E[C_{OPT}]$\")\nplt.legend()\nplt.savefig(\"../figures/n_50_vary_lambda_logscale.pdf\", bbox_inches=\"tight\")\nplt.savefig(\"../figures/n_50_vary_lambda_logscale.png\", bbox_inches=\"tight\")\n","repo_name":"hugorichard/ml4a-scheduling","sub_path":"plotting/plot_n_50_vary_lambda_logscale.py","file_name":"plot_n_50_vary_lambda_logscale.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"43203321202","text":"import os\nfrom unittest import mock\n\nfrom django.test import SimpleTestCase as DjangoSimpleTestCase\nfrom django.urls import reverse\nfrom django.utils.deprecation import MiddlewareMixin\nfrom kubernetes.client.configuration import Configuration\n\nfrom mtp_common.analytics import AnalyticsPolicy\n\n\nclass SimpleTestCase(DjangoSimpleTestCase):\n @mock.patch('tests.urls.mocked_context')\n @mock.patch('tests.urls.mocked_template')\n def load_mocked_template(self, template, context, mocked_template, mocked_context, **extra):\n mocked_template.return_value = template\n 
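# note: 'dummy' (reversed below) is assumed to be the test URL name that renders the patched tests.urls template/context helpers\n 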
mocked_context.return_value = context\n return self.client.get(reverse('dummy'), **extra)\n\n @classmethod\n def setup_k8s_incluster_config(cls, mock_config, pod_name):\n os.environ['KUBERNETES_SERVICE_HOST'] = '127.0.0.1'\n os.environ['KUBERNETES_SERVICE_PORT'] = '9988'\n os.environ['POD_NAME'] = pod_name\n configuration = Configuration()\n configuration.host = 'http://127.0.0.1:9988'\n configuration.api_key = {'authorization': 'bearer T0ken'}\n Configuration.set_default(configuration)\n mock_config.return_value = None\n\n\nclass TestAcceptingCookiePolicyMiddleware(MiddlewareMixin):\n \"\"\"\n Used in tests that mimic a user clicking accept on a cookie prompt\n \"\"\"\n\n def process_response(self, request, response):\n AnalyticsPolicy(request).set_cookie_policy(response, True)\n return response\n","repo_name":"ministryofjustice/money-to-prisoners-common","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"31164705022","text":"import torch\nfrom torch import nn\nimport sys\nimport logging\nimport torch.nn.functional as F\nfrom einops import rearrange\n\nlogger = logging.getLogger('model.ByteCNN_RF')\n\nclass CNNYSJ(nn.Module):\n def __init__(self, nclass):\n super().__init__()\n self.cnn = CNN_YSJ(nclass)\n self.padding_len = 1000\n\n def _pad_fixed(self, x: torch.Tensor, padding_len):\n \"\"\"\n pad pdf file to fit the grid shape\n \"\"\"\n seq_len = x.size()[0]\n \n need = padding_len - seq_len\n logger.debug('need {} size {}'.format(need, seq_len))\n if need < 0:\n x_padded = x.narrow(0, 0, padding_len)\n else:\n x_padded = F.pad(x,(0,need))\n return x_padded\n\n def forward(self, x):\n outs = []\n for seq in x:\n s0 = seq[0]\n logger.debug(f's0 {s0.shape}')\n seq_padded = self._pad_fixed(seq[0], self.padding_len).view(1,-1)\n logger.debug(f'seq_padded {seq_padded.shape}')\n out = self.cnn(seq_padded)\n outs.append(out)\n outs = torch.vstack(outs)\n return outs\n\n\nclass CNN_YSJ(nn.Module):\n def __init__(self, nclass):\n super().__init__()\n # E, L: E=25, \n self.embedding = nn.Embedding(256,25)\n self.conv1 = nn.Conv2d(1,32, kernel_size=(3,25), stride=(1,25),padding=1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32,64,kernel_size=(3,1),stride=(1,1),padding=(1,0))\n self.bn2 = nn.BatchNorm2d(64)\n self.pooling = nn.MaxPool2d(kernel_size=(100,1),stride=(100,1))\n self.flatten = nn.Flatten(1)\n self.relu = nn.ReLU(inplace=True)\n self.fc1 = nn.Linear(640, 128)\n self.dropout = nn.Dropout(p=0.25)\n self.fc2 = nn.Linear(128,nclass)\n\n def forward(self, x):\n x = self.embedding(x)\n logger.debug(f'embed {x.shape}')\n x = self.relu(x)\n x = rearrange(x, 'b d l -> b 1 d l')\n x = self.conv1(x)\n x = self.bn1(x)\n logger.debug(f'conv1 {x.shape}')\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n logger.debug(f'conv2 out {x.shape}')\n x = self.pooling(x)\n logger.debug(f'pool out {x.shape}')\n x = self.relu(x)\n x = self.flatten(x)\n logger.debug(f'flatten out {x.shape}')\n x = self.fc1(x)\n x = self.dropout(x)\n x = self.fc2(x)\n return x\n\n\nif __name__ == '__main__':\n logger = logging.getLogger()\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(message)s')\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n logger.setLevel(logging.DEBUG)\n i = [[torch.randint(0,255,(4248001,)),torch.randint(0,255,(2124000,))]]\n\n 
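# smoke test: the two random byte tensors above stand in for raw file byte streams of different lengths;\n # CNNYSJ pads or truncates each one to padding_len (1000) before the embedding and convolutions run\n 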
from torch.profiler import profile, ProfilerActivity, record_function\n\n pcnn = CNNYSJ(2)\n with profile(activities=[ProfilerActivity.CPU], profile_memory=True, record_shapes=True) as prof:\n with record_function(\"model_inference\"):\n print(pcnn(i).shape)\n print(prof.key_averages().table(sort_by=\"cpu_time_total\", row_limit=10))","repo_name":"zhiruiluo/pdf_mal_dection","sub_path":"src/model/ByteCNN_YSJ.py","file_name":"ByteCNN_YSJ.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"40267240088","text":"import socket\nimport json\nimport pymysql\nimport re\nimport time\nimport traceback\nimport os\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nlocal_hostname = socket.gethostname()\nlocal_fqdn = socket.getfqdn()\nip_address = socket.gethostbyname(local_hostname)\n# print (\"working on %s (%s) with %s\" % (local_hostname, local_fqdn, ip_address))\nserver_address = (ip_address, 12345)\ndb = pymysql.connect(\"localhost\", \"ur_admin\", \"ur_pwd\", \"field_1\")\n# print ('starting up on %s port %s' % server_address)\nsock.bind(server_address)\nsock.listen(1)\nprint ('等待連線中...')\nconnection, client_address = sock.accept()\nprint ('已連線 IP :', client_address)\ncursor = db.cursor()\nwhile True:\n try:\n data = connection.recv(1024)\n # dataTo = str(data, encoding=\"utf-8\")\n msg = data.decode('gbk')\n msg = msg.replace(\" \", \"\")\n msg = msg.replace(\"\\x00\", \"\")\n\n if msg != '':\n msgToJson = json.loads(msg)\n else:\n print(\"已斷開連線...\")\n break\n\n # print (\"收到資料 :\",dataTo, \"|| 資料型別 :\", type(dataTo))\n\n chOn = []\n data_int = []\n idIndex = 0\n serial = msgToJson[\"Serial\"]\n for id in msgToJson[\"Data\"]:\n nowTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n idToInt = int(msgToJson[\"Data\"][idIndex]['ID'])\n idToStr = str(idToInt)\n data_str = msgToJson[\"Data\"][idIndex][\"value\"]\n data_int = [int(i) for i in data_str]\n chOn.append(idToInt)\n sql = 'INSERT INTO data (Time, serial, xhot_id, high_temp, lower_temp, pres_id, conn_status, temp_status) VALUES (\"%s\",\"%s\",\"%d\",\"%d\",\"%d\",\"%d\",\"%d\",\"%d\")' % (\n nowTime, serial, idToInt, data_int[0], data_int[1], data_int[2], data_int[3], data_int[4])\n cursor.execute(sql)\n db.commit()\n idIndex += 1\n # print(chOn)\n # print(data_int)\n print(\"目前開啟的 Channel :\", chOn)\n print(\"資料 :\", data_int)\n # print('已寫入', cursor.rowcount, '筆資料')\n except Exception as e:\n # print(\"錯誤資訊 :\" + str(e))\n print(traceback.format_exc())\n\n# restart this script: a TCP re-connect corrupts the msg reception, so after the loop breaks we relaunch ourselves and wait for a new connection\nos.startfile('pyTCP.py')\n","repo_name":"Allenci/Python-TCP-UDP-Listener","sub_path":"tcpListener.py","file_name":"tcpListener.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"669175701","text":"import abc\nimport datetime\nfrom decimal import *\nfrom typing import List\n\nfrom fiscal_crypt.platforms.abs_platforms import PlatformInterface\nfrom fiscal_crypt.fcrypt_logging import fcrypt_log\n\n\nclass TaxProcessing(abc.ABC):\n \"\"\"\n Abstract class giving a model for the implementation of tax processing.\n Of course, the tax processing is country dependent.\n For each country we want to use a sub-class of \"TaxProcessing\"\n \"\"\"\n\n def __init__(self, currency: str, platforms_ls: List[PlatformInterface]) -> None:\n # Save the list of all platforms where the user owns crypto-currencies\n 
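# (each platform is expected to expose the PlatformInterface buy/sell generators used by _load_and_sort_all_transactions)\n 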
self.platforms_list = platforms_ls\n\n # Save the currency to calculate in\n self.currency = currency\n self.buy_transactions = []\n self.sell_transactions = []\n\n def _convert_to_printable_decimal(self, number: Decimal) -> Decimal:\n \"\"\"\n This function converts a Decimal to a printable currency value (with two digits after the decimal point)\n\n :param number: Number to convert to printable\n :type number: Decimal\n \"\"\"\n # Normalized value\n norm_nb = number.normalize()\n\n # Convert to a Decimal with two digits\n TWOPLACES = Decimal(10) ** -2\n display_value = norm_nb.quantize(TWOPLACES)\n\n return display_value\n\n def _load_and_sort_all_transactions(self, end_time: datetime.datetime) -> None:\n \"\"\"\n This function loads into memory all the transactions made by the user up to end_time\n\n :param end_time: Datetime corresponding to the end of the tax period\n :type end_time: datetime.datetime\n \"\"\"\n\n # Logs info\n fcrypt_log.info(\"[TAXES PROCESSING] Loading all buy and sell transactions...\")\n\n # Get the buy and sell transactions\n for crypto_platform in self.platforms_list:\n\n # First the buy transactions\n for buy in crypto_platform.all_buy_transactions_generator(self.currency, end_time):\n self.buy_transactions.append(buy)\n\n # Then the sell transactions\n for sell in crypto_platform.all_sell_transactions_generator(self.currency, end_time):\n self.sell_transactions.append(sell)\n\n # Logs info\n fcrypt_log.info(f\"[TAXES PROCESSING] Number of platforms scanned: {len(self.platforms_list)}\")\n fcrypt_log.info(f\"[TAXES PROCESSING] Number of \\\"BUY\\\" transactions found: {len(self.buy_transactions)}\")\n fcrypt_log.info(f\"[TAXES PROCESSING] Number of \\\"SELL\\\" transactions found: {len(self.sell_transactions)}\")\n\n # Logs info\n fcrypt_log.info(\"[TAXES PROCESSING] Load done\")\n\n def _get_overall_wallets_value(self, time: datetime.datetime) -> Decimal:\n \"\"\"\n This function returns the overall value of all the wallets held by the crypto-owner.\n Basically, it sums the values of all the crypto-owner's wallets.\n\n :param time: Datetime wanted for the values evaluation\n :type time: datetime.datetime\n :returns: Decimal -- Decimal value of the wallets\n \"\"\"\n # Initialize the result\n result = Decimal(0)\n\n # Go over the different platforms\n for crypto_platform in self.platforms_list:\n tmp_result = crypto_platform.get_all_wallets_value(self.currency, time)\n result += tmp_result\n\n return result\n\n @abc.abstractmethod\n def get_tax_declaration_for(self, fiat_currency: str, start_time: datetime.datetime,\n end_time: datetime.datetime) -> List[dict]:\n \"\"\"\n This function returns exactly what the user needs to declare to the tax service of his government\n\n :param fiat_currency: Fiat currency of the country\n :type fiat_currency: str\n :param start_time: Datetime from which the tax declaration takes effect\n :type start_time: datetime.datetime\n :param end_time: Datetime until which the tax declaration takes effect\n :type end_time: datetime.datetime\n :returns: List[dict] -- List of each declaration to report to the government\n \"\"\"\n pass\n","repo_name":"ArmandBENETEAU/fiscal-crypt","sub_path":"fiscal_crypt/tax_processing/abs_tax_processing.py","file_name":"abs_tax_processing.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"28"} +{"seq_id":"32999336299","text":"import re\nwith open(\"input.txt\", \"r\") as f:\n hoz, dep = 0, 0\n res = 
f.read().replace(\"forward\", \"hoz+=\").replace(\"down\", \"dep+=\").replace(\"up\", \"dep-=\")\n exec(res)\n #pt1\n print(hoz*dep)\nwith open(\"input.txt\", \"r\") as f:\n hoz, dep, aim=0,0,0\n res = f.read().replace(\"down\", \"aim+=\").replace(\"up\", \"aim-=\")\n res = re.sub(r\"forward (\\d)\", r\"hoz+=\\1;dep+=aim*\\1\", res)\n exec(res)\n print(hoz, dep, hoz*dep)","repo_name":"Nigma1337/advent_of_code_2021","sub_path":"day2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"387898625","text":"def construindo_perfil(primeiro, last, **user_info): # dois asterísco fazem python criar um dicionário vazio e colocar quaisquer pares nome-valor recebidos nesse dicionário.\r\n\tperfil = {}\r\n\tperfil['primeiro_nome'] = primeiro\r\n\tperfil['last_name'] = last\r\n\t\r\n\tfor key, value in user_info.items():\r\n\t\tperfil[key] = value \r\n\treturn perfil\r\n\r\n\r\nperfil_usuario = construindo_perfil('davi', 'silva',\r\n\t\t\t\t\t\t\t\t\tlocation = 'fatec',\r\n\t\t\t\t\t\t\t\t\tocupacao = 'análise e desenvolvimento de sistemas',\r\n\t\t\t\t\t\t\t\t\tstatus = 'felicidade pura')\r\nprint(perfil_usuario)\r\n","repo_name":"davicosta12/python_work","sub_path":"Part_01/Cap_08/user_profile.py","file_name":"user_profile.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"17908530449","text":"from time import sleep\n\nimport pygame\n\nfrom arcanoid_classes import Picture\n\npygame.init()\n\nback = (200, 255, 255) # background color\nmw = pygame.display.set_mode((500, 500)) # main window\nmw.fill(back)\nclock = pygame.time.Clock()\n\n# platform coordinates\nracket_x = 200\nracket_y = 330\n# ---------------------------------------------------------\n# ball moving variables\nball_x = 3\nball_y = 3\n# ---------------------------------------------------------\n# moving flags\nmoving_left = False\nmoving_right = False\n# ---------------------------------------------------------\n# end game flag\ngame_over = False\n\n# create objects: ball and platform\nball = Picture('rebel_sticker.png', 160, 200, 50, 50)\nplatform = Picture('yoda_5050.png', racket_x, racket_y, 50, 50)\nend_label = Picture('end.png', 125, 125, 300, 300)\ngo_label = Picture('go_sign.png', 0, 0, 500, 500)\nready = Picture('ready.png', 0, 0, 500, 500)\n\n# create enemies\nstart_x = 5 # first enemy coord\nstart_y = 5\nenemies_count = 9 # enemies in the first raw\nenemies = [] # enemies list\n\nfor j in range(3): # create enemies cycle\n y_coord = start_y + (55 * j) # shift every next raw on 55 px by axis y\n x_coord = start_x + (27.5 * j) # and 27.5 by x\n\n for i in range(enemies_count): # create raw of enemies same as count\n enemy = Picture('imperial_soldier.png', x_coord, y_coord, 50, 50)\n enemies.append(enemy) # add to list\n x_coord += 55 # next enemy x coordinate\n enemies_count -= 1 # reduce next raw on 1 enemy\n\n# start game cycle\nready.draw()\npygame.display.update()\nsleep(2)\nmw.fill(back)\ngo_label.draw()\npygame.display.update()\nmw.fill(back)\nsleep(2)\nwhile not game_over:\n ball.fill()\n platform.fill()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n # -------------------------------------------\n # Check buttons and change move flags\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n moving_right = True\n if event.key == 
pygame.K_LEFT:\n                moving_left = True\n        elif event.type == pygame.KEYUP:\n            if event.key == pygame.K_RIGHT:\n                moving_right = False\n            if event.key == pygame.K_LEFT:\n                moving_left = False\n    # -----------------------------------------\n    # left and right moving\n    if moving_left:\n        platform.rect.x -= 3\n\n    if moving_right:\n        platform.rect.x += 3\n    # ----------------------------------------\n    # permanent ball moving\n    ball.rect.x += ball_x\n    ball.rect.y += ball_y\n    # ----------------------------------------\n    # check borders, change direction if needed\n    if ball.rect.y < 0:\n        ball_y *= -1\n\n    if ball.rect.x > 450 or ball.rect.x < 0:\n        ball_x *= -1\n    # check minimal y_coordinate\n    if ball.rect.y > 350 or len(enemies) == 0:\n        enemies = []\n        mw.fill(back)\n        end_label.fill()\n        end_label.draw()\n        pygame.display.update()\n        sleep(2)\n        game_over = True\n        break\n    # ----------------------------------------\n    # check if the ball touches the platform and change direction:\n    if ball.rect.colliderect(platform.rect):\n        ball_y *= - 1\n    # ----------------------------------------\n    # draw enemies from the list\n    for enemy in enemies:\n        enemy.draw()\n        # ---------------------------------------\n        # check if the ball has the same coordinates as enemy\n        if enemy.rect.colliderect(ball.rect):\n            enemies.remove(enemy)\n            enemy.fill()\n            ball_y *= -1\n    # draw platform and ball\n    platform.draw()\n    ball.draw()\n    # renew scene\n    pygame.display.update()\n    clock.tick(40)\n","repo_name":"zerg959/starwars_arcanoid","sub_path":"arcanoid.py","file_name":"arcanoid.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"27142573984","text":"from pyface.workbench.api import IView\nfrom traits.api import Delegate, Instance\n\n\nfrom .workbench_action import WorkbenchAction\n\n\nclass ToggleViewVisibilityAction(WorkbenchAction):\n    \"\"\" An action that toggles a view's visibility (ie. hides/shows it). \"\"\"\n\n    # 'Action' interface ---------------------------------------------------\n\n    # The action's unique identifier (may be None).\n    id = Delegate(\"view\", modify=True)\n\n    # The action's name (displayed on menus/tool bar tools etc).\n    name = Delegate(\"view\", modify=True)\n\n    # The action's style.\n    style = \"toggle\"\n\n    # 'ViewAction' interface -----------------------------------------------\n\n    # The view that we toggle the visibility for.\n    view = Instance(IView)\n\n    # ------------------------------------------------------------------------\n    # 'Action' interface.\n    # ------------------------------------------------------------------------\n\n    def destroy(self):\n        \"\"\" Called when the action is no longer required. \"\"\"\n\n        if self.view is not None:\n            self._remove_view_listeners(self.view)\n\n    def perform(self, event):\n        \"\"\" Perform the action. \"\"\"\n\n        self._toggle_view_visibility(self.view)\n\n        return\n\n    # ------------------------------------------------------------------------\n    # Private interface.\n    # ------------------------------------------------------------------------\n\n    # Trait change handlers ------------------------------------------------\n\n    def _view_changed(self, old, new):\n        \"\"\" Static trait change handler. 
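Rewires the 'visible' and 'window' listeners from the old view to the new one and refreshes the checked state. 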
\"\"\"\n\n if old is not None:\n self._remove_view_listeners(old)\n\n if new is not None:\n self._add_view_listeners(new)\n\n self._refresh_checked()\n\n return\n\n # Methods -------------------------------------------------------------#\n\n def _add_view_listeners(self, view):\n \"\"\" Add listeners for trait events on a view. \"\"\"\n\n view.observe(self._refresh_checked, \"visible\")\n view.observe(self._refresh_checked, \"window\")\n\n def _remove_view_listeners(self, view):\n \"\"\" Add listeners for trait events on a view. \"\"\"\n\n view.observe(self._refresh_checked, \"visible\", remove=True)\n view.observe(self._refresh_checked, \"window\", remove=True)\n\n def _refresh_checked(self, event=None):\n \"\"\" Refresh the checked state of the action. \"\"\"\n\n self.checked = (\n self.view is not None\n and self.view.window is not None\n and self.view.visible\n )\n\n def _toggle_view_visibility(self, view):\n \"\"\" Toggle the visibility of a view. \"\"\"\n\n if view.visible:\n view.hide()\n\n else:\n view.show()\n\n return\n","repo_name":"enthought/pyface","sub_path":"pyface/workbench/action/toggle_view_visibility_action.py","file_name":"toggle_view_visibility_action.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"28"} +{"seq_id":"3173463646","text":"import unittest\n\"\"\"\nhttps://leetcode.com/problems/construct-binary-search-tree-from-preorder-traversal/\n\nhttps://leetcode.com/submissions/detail/214138493/\n\"\"\"\n\nfrom typing import List\nfrom common.tree_node import TreeNode\nfrom common.tree_node_to_list import treeNodeToList\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution:\n def bstFromPreorder(self, preorder: List[int]) -> TreeNode:\n if len(preorder) == 0:\n return None\n if len(preorder) == 1:\n return TreeNode(preorder[0])\n ans = TreeNode(preorder[0])\n idx = 1\n for i in range(1, len(preorder)):\n idx = i\n if preorder[i] > preorder[0]:\n break\n if preorder[idx] < preorder[0]:\n idx += 1\n ans.left = self.bstFromPreorder(preorder[1:idx])\n ans.right = self.bstFromPreorder(preorder[idx:])\n return ans\n\n\nclass Test(unittest.TestCase):\n def test(self):\n solution = Solution()\n self.assertEqual(treeNodeToList(solution.bstFromPreorder(\n [8, 5, 1, 7, 10, 12])), [8, 5, 10, 1, 7, None, 12])\n self.assertEqual(treeNodeToList(\n solution.bstFromPreorder([4, 2])), [4, 2])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"vivaxy/algorithms","sub_path":"python/problems/construct_binary_search_tree_from_preorder_traversal.py","file_name":"construct_binary_search_tree_from_preorder_traversal.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"24552523619","text":"import torch\nimport gpytorch\nimport random\nimport time\nfrom matplotlib import pyplot as plt\nimport sys\nsys.path.append(\"../models/\")\nsys.path.append(\"../utils\")\nfrom dsvgp import init_gp, train_gp, eval_gp\nfrom metrics import MSE\nimport testfun\nimport pickle as pkl\n\ntrain_n = 600\ntest_n = 1000\ndim = 2\nnum_inducing = 20\nnum_directions = 2\nnum_epochs = 400\nlearning_rate = 0.01\ngamma = 0.2\nmll_type = \"ELBO\"\nverbose = True\nload_model = False\nsave_model = False\ntorch.random.manual_seed(0)\n\n\n\n# TODO: check if data is already saved (check if data file exists), load data 
\n\n\n# Hints for data saving and loading\n# save training data (x and y)\n# Example: pkl.dump(train_x, open('./train_x.pkl' ,'wb'))\n\n# read training data (x and y)\n# Example: train_x = pkl.load(open('./train_x.pkl' ,'rb'))\n\n\n# generate training and testing data\ntrain_x = torch.rand(train_n,dim)\ntest_x = torch.rand(test_n,dim)\ntrain_y = testfun.f(train_x, deriv=True)\ntest_y = testfun.f(test_x, deriv=True)\n\n\n\nif torch.cuda.is_available():\n    train_x, train_y, test_x, test_y = train_x.cuda(), train_y.cuda(), test_x.cuda(), test_y.cuda()\n\n\n# initialize model \n# initialize inducing points and directions from data\nrand_index = random.sample(range(train_n), num_inducing)\ninducing_points = train_x[rand_index, :]\n\n# initialize model\nmodel, likelihood = init_gp(dim, num_inducing, num_directions, inducing_points=inducing_points)\n\n# train\nif load_model:\n    # TODO: load model\n    # replace \"pass\" by something like model.load_state() to load model\n    pass\nelse:\n    print(\"\\n\\n---DirectionalGradVGP---\")\n    print(f\"Start training with {train_n} training data of dim {dim}\")\n    print(f\"VI setups: {num_inducing} inducing points, {num_directions} inducing directions\")\n    args={\"verbose\":True}\n    t1 = time.time()\t\n    model,likelihood = train_gp(\n        model,\n        likelihood,\n        train_x,\n        train_y,\n        num_directions=num_directions,\n        num_epochs=num_epochs, \n        learning_rate=learning_rate,\n        verbose=verbose,\n    )\n    t2 = time.time()\t\n\n\nif save_model:\n    # TODO: save the model\n    # replace pass by something like torch.save(model, path)\n    pass\n\n\n# test\nmeans, variances = eval_gp( \n    model,\n    likelihood,\n    test_x,\n    test_y,\n    num_directions=num_directions,\n)\nt3 = time.time()\t\n\n# compute MSE\ntest_y = test_y.cpu()\ntest_mse = MSE(test_y[:,0],means[::num_directions+1])\n# compute mean negative predictive density\ntest_nll = -torch.distributions.Normal(means[::num_directions+1], variances.sqrt()[::num_directions+1]).log_prob(test_y[:,0]).mean()\nprint(f\"At {test_n} testing points, MSE: {test_mse:.4e}, nll: {test_nll:.4e}.\")\nprint(f\"Training time: {(t2-t1):.2f} sec, testing time: {(t3-t2):.2f} sec\")\n\n# TODO: call plot_testfun to plot the results\n\n\n\nplot=0\nif plot == 1:\n    from mpl_toolkits.mplot3d import axes3d\n    import matplotlib.pyplot as plt\n    fig = plt.figure(figsize=(12,6))\n    ax = fig.add_subplot(111, projection='3d')\n    ax.scatter(test_x[:,0],test_x[:,1],test_y[:,0], color='k')\n    ax.scatter(test_x[:,0],test_x[:,1],means[::num_directions+1], color='b')\n    plt.title(\"f(x,y) variational fit; actual curve is black, variational is blue\")\n    plt.show()\n    fig = plt.figure(figsize=(12,6))\n    ax = fig.add_subplot(111, projection='3d')\n    ax.scatter(test_x[:,0],test_x[:,1],test_y[:,1], color='k')\n    ax.scatter(test_x[:,0],test_x[:,1],means[1::num_directions+1], color='b')\n    plt.title(\"df/dx variational fit; actual curve is black, variational is blue\")\n    plt.show()\n    fig = plt.figure(figsize=(12,6))\n    ax = fig.add_subplot(111, projection='3d')\n    ax.scatter(test_x[:,0],test_x[:,1],test_y[:,2], color='k')\n    ax.scatter(test_x[:,0],test_x[:,1],means[2::num_directions+1], color='b')\n    plt.title(\"df/dy variational fit; actual curve is black, variational is blue\")\n    plt.show()\n","repo_name":"xinranzhu/DSVGP-demo","sub_path":"tests/test_dsvgp.py","file_name":"test_dsvgp.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"}
+{"seq_id":"13612729482","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport csv\nimport os\n\nclass CSVWriter:\n    postpix = '_{0}'\n    def __init__(self, maxlines, filename, delimiter=',',quotechar='\"', codec_bom = None, headers = None):\n        self.maxlines = maxlines\n        self.filename = filename + CSVWriter.postpix + '.csv'\n        self.delimiter = delimiter\n        self.quotechar = quotechar\n        self.quoting = csv.QUOTE_ALL\n        self.codec_bom = codec_bom\n        self.headers = headers\n        self.cur_csvfile = None\n        self.cur_csvwriter = None\n        self.part_counter = 1\n        self.line_counter = 0\n\n    def _fileOpen(self):\n        try:\n            filename = self.filename.format(self.part_counter)\n            if not os.path.exists(os.path.dirname(filename)):\n                os.makedirs(os.path.dirname(filename))\n            self.cur_csvfile = open(filename, 'w', newline='', encoding='utf-8-sig')\n        except IOError:\n            self.cur_csvfile = None\n            return\n        if self.codec_bom:\n            self.cur_csvfile.write(self.codec_bom)\n\n    def _fileClose(self):\n        if self.cur_csvfile:\n            self.cur_csvfile.close()\n            self.cur_csvfile = None\n\n    def _initCsvWriter(self):\n        if not self.cur_csvfile:\n            self._fileOpen()\n        self.cur_csvwriter = csv.writer(self.cur_csvfile,\n                                        delimiter=self.delimiter,\n                                        quotechar=self.quotechar,\n                                        quoting=self.quoting)\n        if self.headers:\n            self._writeRow(self.headers)\n\n    def _clearCsvWriter(self):\n        if self.cur_csvwriter:\n            self._fileClose()\n            self.cur_csvwriter = None\n\n    def _writeRow(self, array):\n        self.cur_csvwriter.writerow(array)\n        self.line_counter += 1\n\n    def setHeaders(self, headers):\n        self.headers = headers\n\n    def writeRow(self, array):\n        if not self.cur_csvwriter:\n            self._initCsvWriter()\n        if self.cur_csvwriter:\n            self._writeRow(array)\n        if self.line_counter==self.maxlines:\n            self._clearCsvWriter()\n            self.line_counter = 0\n            self.part_counter += 1\n\n\n    def close(self):\n        self._clearCsvWriter()\n","repo_name":"jrimeh/determine_templates","sub_path":"csv_writer.py","file_name":"csv_writer.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"8740579967","text":"'''\nCS 5001, Fall 2022\nHUI, Macarious Kin Fung\n\nData Dashboard Final Project\n\nModule Name -- storefront_factory\n\nThis file contains functions that create 'Storefront' objects.\n\nThis file is used by the driver file:\ngraphical_user_interface.py\n'''\n\n# Modules\nimport model.dataset_downloader as dataset_downloader\nimport json\n\n\n# Classes\nfrom model.storefront import Storefront\n\n\ndef create_storefront_list_from_url(dataset_descriptor):\n    '''\n    Function Name: create_storefront_list_from_url\n    Create a list of 'Storefront' objects from a url containing a text\n    file with multiple rows, where each row contains data separated by\n    delimiters\n    \n    Parameters:\n    dataset_descriptor -- DatasetDescriptor, object containing properties\n    relating to a dataset\n    \n    Raises:\n    TypeError -- raises if the attribute 'url' of class 'DatasetDescriptor' is not a string\n    \n    Returns:\n    list of Storefront -- list of storefront objects created from the dataset\n    '''\n    if type(dataset_descriptor.url) is not str:\n        raise TypeError(\"TypeError: The attribute 'url' of class 'DatasetDescriptor' must be a string\")\n\n    response_text = dataset_downloader.read_url(dataset_descriptor.url)\n    response_text_row = response_text.split('\\n')\n\n    # First extract header list from 0th row\n    header_list = create_header_list(response_text_row[0], dataset_descriptor)\n    storefront_list = []\n\n    # Instantiate Storefront objects from 1st row onward\n    for i in range(1, len(response_text_row)):\n        if 
response_text_row[i] != '':  # Skip empty rows\n            storefront_object = create_storefront_from_individual_entry(response_text_row[i], header_list, dataset_descriptor)\n            storefront_list.append(storefront_object)\n\n    return storefront_list\n\n\ndef create_storefront_from_individual_entry(entry_of_data, header_list, dataset_descriptor):\n    '''\n    Function Name: create_storefront_from_individual_entry\n    Instantiates 'Storefront' objects by reading the data from a row of text\n    separated by delimiters\n    \n    Parameters:\n    entry_of_data -- str, row of text, which contains data separated by delimiters\n    header_list -- list of str, list of headers, read from the first row of text\n    dataset_descriptor -- DatasetDescriptor, object containing properties\n    relating to a dataset\n    \n    Raises:\n    TypeError -- raises if the parameter 'entry_of_data' is not a string\n    TypeError -- raises if the parameter 'header_list' is not a list\n    ValueError -- raises if the parameter 'header_list' is an empty list\n    TypeError -- raises if the parameter 'header_list' does not contain elements of type string\n    TypeError -- raises if the attribute 'delimiter' of class 'DatasetDescriptor' is not a string\n    TypeError -- raises if the attribute 'expected_headers' of class 'DatasetDescriptor' is not a dictionary\n    \n    Returns:\n    Storefront, object representing a storefront\n    '''   \n    if type(entry_of_data) is not str:\n        raise TypeError(\"TypeError: The parameter 'entry_of_data' must be a string\")\n\n    if type(header_list) is not list:\n        raise TypeError(\"TypeError: The parameter 'header_list' must be a list\")\n\n    if len(header_list) == 0:\n        raise ValueError(\"ValueError: The parameter 'header_list' cannot be empty\")\n\n    if not all(type(value) is str for value in header_list):\n        raise TypeError(\"TypeError: The parameter 'header_list' must contain elements of type string\")\n\n    if type(dataset_descriptor.delimiter) is not str:\n        raise TypeError(\"TypeError: The attribute 'delimiter' of class 'DatasetDescriptor' must be a string\")\n\n    if type(dataset_descriptor.expected_headers) is not dict:\n        raise TypeError(\"TypeError: The attribute 'expected_headers' of class 'DatasetDescriptor' must be a dictionary\")\n\n    entry_of_data_list = entry_of_data.strip().split(dataset_descriptor.delimiter)\n\n    store_id = None\n    business_name = None\n    address_unit = None\n    address_number = None\n    address_street = None\n    retail_category = None\n    coordinates = None\n    local_area = None\n\n    # Map each field to the correct object attribute by matching its column index\n    for i in range(len(entry_of_data_list)):\n        if i == header_list.index(dataset_descriptor.expected_headers['store id']):\n            store_id = int(entry_of_data_list[i])\n\n        elif i == header_list.index(dataset_descriptor.expected_headers['business name']):\n            business_name = entry_of_data_list[i]\n\n        elif i == header_list.index(dataset_descriptor.expected_headers['address unit']):\n            address_unit = entry_of_data_list[i]\n\n        elif i == header_list.index(dataset_descriptor.expected_headers['address number']):\n            address_number = int(entry_of_data_list[i])\n\n        elif i == header_list.index(dataset_descriptor.expected_headers['address street']):\n            address_street = entry_of_data_list[i]\n\n        elif i == header_list.index(dataset_descriptor.expected_headers['retail category']):\n            retail_category = entry_of_data_list[i]\n\n        elif i == header_list.index(dataset_descriptor.expected_headers['coordinates']):\n            coordinates = extract_coordinates(entry_of_data_list[i])\n\n        elif i == 
header_list.index(dataset_descriptor.expected_headers['local area']):\n            local_area = entry_of_data_list[i]\n\n    return Storefront(\n        store_id, business_name, address_unit, address_number, address_street,\n        retail_category, coordinates, local_area\n    )\n\n\ndef create_header_list(start_entry_of_data, dataset_descriptor):\n    '''\n    Function Name: create_header_list\n    Creates a list of headers from a row of text separated by delimiters\n    \n    Parameters:\n    start_entry_of_data -- str, the first row of text, which contains the headers\n    dataset_descriptor -- DatasetDescriptor, object containing dataset properties\n    \n    Raises:\n    TypeError -- raises if the parameter 'start_entry_of_data' is not a string\n    TypeError -- raises if the attribute 'delimiter' of class 'DatasetDescriptor' is not a string\n    \n    Returns:\n    list of str, list of headers\n    '''\n    if type(start_entry_of_data) is not str:\n        raise TypeError(\"TypeError: The parameter 'start_entry_of_data' must be a string\")\n    \n    if type(dataset_descriptor.delimiter) is not str:\n        raise TypeError(\"TypeError: The attribute 'delimiter' from 'dataset_descriptor' object must be a string\")\n    \n    header_list = start_entry_of_data.strip().split(dataset_descriptor.delimiter)\n    return header_list\n\n    \ndef extract_coordinates(coordinates_json):\n    '''\n    Function Name: extract_coordinates\n    Extract coordinates from the json format data read from the text file\n    \n    Parameters:\n    coordinates_json -- str, coordinates data read from text file\n    \n    Raises:\n    TypeError -- raises if parameter 'coordinates_json' is not a string\n    \n    Returns:\n    tuple of (float, float), coordinates extracted from raw coordinates data\n    '''\n    if type(coordinates_json) is not str:\n        raise TypeError(\"TypeError: The parameter 'coordinates_json' must be a string\")\n\n    # Remove extra double-quotations (added when reading the data with requests) from raw data\n    # and deserialize the json-format string to a dictionary\n    coordinates_json_cleaned = coordinates_json.replace('\"\"', '\"').strip('\"')\n    coordinates_dictionary = json.loads(coordinates_json_cleaned)\n\n    return tuple(coordinates_dictionary['coordinates'])","repo_name":"macarious/Vancouver-Store-Search","sub_path":"model/storefront_factory.py","file_name":"storefront_factory.py","file_ext":"py","file_size_in_byte":7559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"35564216248","text":"# coding=utf-8\nimport pymysql\n\n\ndef attendance_results_sql(table_name, date):\n    conn = pymysql.connect(host='106.54.119.102', port=2707, user='root', password='Luohongsheng336!', db='attendance',\n                           charset='utf8')\n    try:\n        cursor = conn.cursor()\n        sql1 = \"SELECT * FROM `{}` WHERE date = '{}';\".format(table_name, date)\n        sql2 = \"SHOW full COLUMNS FROM `{}`\".format(table_name)\n        cursor.execute(sql1)\n        conn.commit()\n        tup1 = cursor.fetchall()\n        cursor.execute(sql2)\n        conn.commit()\n        tup2 = cursor.fetchall()\n\n        all_list = []  # list of result rows\n        tup2list = []  # list of column names\n        for item in tup2:  # extract the column names into the list\n            tup2list.append(item[0])\n\n        for i in range(len(tup1)):  # convert each row into a dictionary\n            tup_list = list(tup1[i])\n            all_dict = dict(zip(tup2list, tup_list))\n            all_list.append(all_dict)\n        status = 1\n    except Exception as e:\n        all_list = []\n        status = 0\n        print(e)\n    finally:\n        cursor.close()\n        conn.close()\n\n    return all_list, 
status\n","repo_name":"comtumacy/face_flask","sub_path":"templates/teacher/attendance_results_teacher/attendance_results_sql.py","file_name":"attendance_results_sql.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"3940355666","text":"# define the variables that collect the inputs\nha = int(input(\"Number of inhabitants of city A: \"))\nhb = int(input(\"Number of inhabitants of city B: \"))\nca = float(input(\"Growth percentage of city A: \"))\ncb = float(input(\"Growth percentage of city B: \"))\n\nt = 0 #accumulates the elapsed time in years\n\nwhile(ha <= hb): #loop condition: population of city A not above that of city B\n\tha = ha + (ha*ca/100) #population growth of city A\n\thb = hb + (hb*cb/100) #population growth of city B\n\tt = t+1 #time increment\nprint(t) #print the result ","repo_name":"JosephLevinthal/Research-projects","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4443/codes/1716_2498.py","file_name":"1716_2498.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"28"} +{"seq_id":"19200782238","text":"#This function library was written as part of a bachelor thesis at the\n#Technical University of Denmark (DTU) during the Spring semester of 2022.\n#The code is meant to be easily readable and is built around the use of dictionaries\n#and dataframes. As of now, systems are loaded directly from PandaPower, but \n#expanding to load custom systems should be relatively straightforward.\n\n#Author: Markus Hvid Monin (s194011)\n\nimport numpy as np\nimport pandas as pd\nimport pandapower as pp\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gsp\n\n\n# =============================================================================\n# Functions for loading and tweaking PandaPower systems\n\ndef load_pandapower_case(network, enforce_q_limits=False, distributed_slack = False, \n                         slack_gens = np.array([]), participation_factors = np.array([]),\n                         ref_bus_pset = 0, original_ref_bus = False):\n    baseMVA = network.sn_mva #power base for system\n    freq = network.f_hz\n\n    #run PandaPower power flow\n    pp.runpp(network, enforce_q_lims = enforce_q_limits, trafo_model='pi', trafo_loading='power', max_iteration=25)\n    #Saving PandaPower results and per-unitizing power values\n    pandapower_results = network.res_bus\n    pandapower_results['p_pu'] = pandapower_results.p_mw/baseMVA\n    pandapower_results['q_pu'] = pandapower_results.q_mvar/baseMVA\n    pandapower_results = pandapower_results[['vm_pu','va_degree','p_pu','q_pu']]\n\n    ybus = network._ppc[\"internal\"][\"Ybus\"].todense() #extract Ybus after running power flow\n    ybus = np.asarray(ybus) #from matrix to array object\n    gen = network.gen #voltage controlled generators\n    sgen = network.sgen #static generators (PQ)\n    load = network.load\n    slack = network.ext_grid\n    buses = network.bus #bus parameters\n    lines = network.line #line parameters\n    shunts = network.shunt #shunt elements for reactive power control\n    trafo = network.trafo #transformers\n\n    #Reformatting slack and generator dataframes and per-unitizing values\n    slack = slack.rename(columns={'vm_pu':'vset', 'max_p_mw':'pmax', 'min_p_mw':'pmin',\n                                  'max_q_mvar':'qmax', 'min_q_mvar':'qmin'})\n    slack = slack[['in_service', 'bus', 'vset', 'pmax', 'pmin', 'qmax','qmin']]\n    slack[['pmax', 'pmin', 'qmax', 'qmin']] = slack[['pmax', 'pmin','qmax', 'qmin']] / baseMVA\n
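\n    #worked example of the per-unitizing above (numbers are illustrative only):\n    #with s_base = 100 MVA, limits entered as max_p_mw = 520 and max_q_mvar = 300\n    #become pmax = 5.2 pu and qmax = 3.0 pu after the division by baseMVA\n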
\n    gen['type'] = 'pv'\n    gen['qset'] = None\n    gen = gen.rename(columns = {'p_mw':'pset', 'max_p_mw':'pmax', 'min_p_mw':'pmin',\n                                'max_q_mvar':'qmax', 'min_q_mvar':'qmin', 'vm_pu':'vset'})\n    gen = gen[['in_service', 'bus', 'type', 'vset', 'pset', 'qset', 'pmax', 'pmin', 'qmax', 'qmin']]\n    #Per-unitizing values\n    gen[['pset', 'pmax', 'pmin', 'qmax', 'qmin']] = gen[['pset', 'pmax', 'pmin',\n                                                       'qmax', 'qmin']] / baseMVA\n\n    if len(sgen.index) == 0:\n        gens = gen #if there are no static generators\n    else:\n        #else, adding static generators to generator list as PQ-generators\n        sgen['vset'] = None\n        sgen['type'] = 'pq'\n        sgen = sgen.rename(columns = {'p_mw':'pset', 'q_mvar':'qset', 'max_p_mw':'pmax', 'min_p_mw':'pmin',\n                                'max_q_mvar':'qmax', 'min_q_mvar':'qmin'})\n\n        sgen = sgen[['in_service', 'bus', 'type', 'vset', 'pset', 'qset', 'pmax', 'pmin', 'qmax', 'qmin']]\n        #Per-unitizing values\n        sgen[['pset', 'qset', 'pmax', 'pmin', 'qmax', 'qmin']] = sgen[['pset', 'qset', 'pmax', 'pmin',\n                                                       'qmax', 'qmin']] / baseMVA\n        gens = pd.concat([gen,sgen]) \n\n\n    gens = gens.sort_values(by=['bus'])\n    gens = gens.reset_index(drop=True)\n\n    #Reformatting load and shunt dataframes\n    load = load.rename(columns={'p_mw':'p', 'q_mvar':'q'})\n    load = load[['in_service','bus', 'p', 'q']]\n    load[['p','q']] = load[['p','q']] / baseMVA\n\n    shunts = shunts.rename(columns={'p_mw':'p', 'q_mvar':'q'})\n    shunts = shunts[['in_service','bus', 'p', 'q']]\n    shunts[['p','q']] = shunts[['p','q']] / baseMVA\n\n    #Note! Shunts are handled as loads in this code.\n    #PandaPower implements shunts in a different way - they essentially do not \n    #affect bus voltages, but instead their power consumption is calculated \n    #and added to the bus power consumption\n    #more on this: https://pandapower.readthedocs.io/en/v2.8.0/elements/shunt.html \n\n\n    #Reformatting lines dataframe\n    lines = lines.rename(columns={'from_bus':'from', 'to_bus':'to', 'length_km':'length',\n                             'max_i_ka':'ampacity_ka'})\n    lines = lines[['in_service','from','to','parallel','length','c_nf_per_km', 'g_us_per_km', 'r_ohm_per_km',\n                   'x_ohm_per_km','ampacity_ka']]\n\n    \n    #Calculating transformer short circuit impedance and magnetizing admittance\n    #used for building admittance matrix through custom code\n    #based on https://pandapower.readthedocs.io/en/develop/elements/trafo.html \n    #\"The values calculated in that way are relative to the rated values of the transformer. 
\n # To transform them into the per unit system, \n # they have to be converted to the rated values of the network.\"\n \n z = trafo['vk_percent']/100 * baseMVA/trafo['sn_mva']\n r = trafo['vkr_percent']/100 * baseMVA/trafo['sn_mva'] \n x = np.sqrt(z ** 2 - r ** 2)\n z_k = (1 + 0j)*r + 1j*x \n trafo['z_k'] = round(z_k,6)\n \n y = trafo['i0_percent']/100\n g = trafo['pfe_kw']/(trafo['sn_mva']*1e3) * baseMVA/trafo['sn_mva']\n b = np.sqrt(y ** 2 - g ** 2)\n y_m = (1 + 0j)*g + 1j*b\n trafo['y_m'] = round(y_m,6)\n \n #Reformatting transformers dataframe\n trafo = trafo.rename(columns={'sn_mva':'s_rated'})\n trafo = trafo[['in_service', 'lv_bus', 'hv_bus', 'parallel', 's_rated', 'tap_pos', 'tap_min', 'tap_max',\n 'tap_side', 'tap_step_percent','z_k', 'y_m','vk_percent','vkr_percent','i0_percent','pfe_kw']]\n\n buses = buses[['in_service','vn_kv', 'max_vm_pu', 'min_vm_pu','name']]\n \n #loading power factors for loads\n load['pf'] = load['p']/(np.sqrt(load['p']**2 + load['q']**2))\n \n #Setup system dictionary\n system = {'n_buses':ybus.shape[0],'distributed_slack':distributed_slack, 'admmat':ybus,'slack':slack,\n 'iteration_limit':15,'tolerance':1e-3, 's_base':baseMVA, 'frequency':freq}\n system.update({'generators':gen})\n system.update({'loads':load})\n system.update({'shunts':shunts})\n system.update({'buses':buses})\n system.update({'lines':lines})\n system.update({'transformers':trafo})\n\n\n if distributed_slack:\n #Note: If a single slack bus is entered, the single slack power flow is obtained for that\n #generator.\n #Changing slack bus to PV-bus\n #The new P-setpoint of the original slack bus generator is the difference between \n #total load and total generation unless a custom setpoint is entered\n \n if ref_bus_pset != 0:\n slack_pset = ref_bus_pset\n else:\n load_sum = 0\n gen_sum = 0\n for i in range(len(load.index)):\n load_sum += load.p[i]\n for i in range(len(gens.index)):\n gen_sum += gens.pset[i]\n slack_pset = load_sum - gen_sum\n \n slack_to_gen = {'in_service':slack.in_service[0], 'bus':slack.bus[0], 'type':'pv', 'vset':slack.vset[0], \n 'pset':slack_pset, 'qset':None, 'pmax':slack.pmax[0], \n 'pmin':slack.pmin[0], 'qmax':slack.qmax[0], \n 'qmin':slack.qmin[0]}\n\n #Adding slack bus to generator dataframe and re-sorting by bus\n gens = gens.append(slack_to_gen, ignore_index = True)\n gens = gens.sort_values(by=['bus'])\n gens = gens.reset_index(drop=True)\n \n gens['slack'] = True #if no specific slack generators are entered, every generator participates\n gens['participation_factor'] = 0.0\n \n #Checking for entered slack generators and inactive generators\n for i in range(len((gens.index))):\n if np.size(slack_gens) != 0:\n if i in slack_gens:\n gens.slack[i] = True\n else: \n gens.slack[i] = False\n if not gens.in_service[i]:\n gens.slack[i] = False\n \n system.update({'generators':gens})\n \n if (np.size(slack_gens) == 1) and (slack.bus[0] != gens.bus[slack_gens[0]]) and not original_ref_bus:\n #If a single slack gen is specified, it is saved as the angle reference bus\n system.update({'reference_bus':system.get('generators').bus[slack_gens[0]]})\n else:\n #otherwise the original single slack bus is kept as angle reference\n system.update({'reference_bus':slack.bus[0]})\n \n del system['slack'] #removing separate slack bus description from system dictionary\n \n load_participation_factors(system, p_factors=participation_factors) #loading either equal p-factors or custom ones\n\n return (system, pandapower_results)\n\n\ndef load_participation_factors(system, p_factors = 
np.array([])):\n    #accepts an array of participation factors ordered by increasing generator bus indices\n    #if no array is entered, slack is distributed evenly among generators participating in slack\n    \n    #the size of the p-factor vector must be the number of controllable generators\n    #the sum of the p-factors must be 1\n    \n    gens = system.get('generators')\n    num_gens = len(gens.index)\n    \n    if np.size(p_factors) == 0: #standard case for no input - equal factors\n        participation_factors = np.ones(num_gens)\n        participation_factors = participation_factors / num_gens\n    \n    elif np.any(p_factors < 0):\n        print('Error loading participation factors - all values must be non-negative.')\n        print('Set to equal factors (standard case).\\n')\n        participation_factors = np.ones(num_gens)\n        participation_factors = participation_factors / num_gens\n        \n    elif np.size(p_factors) != num_gens:\n        print('Error loading participation factors - array length not equal to number of generators.')\n        print('Set to equal factors (standard case).\\n')\n        participation_factors = np.ones(num_gens)\n        participation_factors = participation_factors / num_gens\n        \n    elif round(sum(p_factors),3) != 1.0:\n        #if the sum of the factors is not 1, the vector is normalized to enforce this.\n        print('Error loading participation factors - sum (%f) not equal to 1.' % sum(p_factors))\n        print('Input array normalized.\\n')\n        participation_factors = p_factors / np.sum(p_factors)\n    else:\n        participation_factors = p_factors.copy()\n    \n    \n    #Checking validity of participation factors against generator status and slack participation\n    for i in range(len(gens.index)):\n        if (not gens.in_service[i]) and (participation_factors[i] != 0.0):\n            print('Non-zero participation for inactive generator. Zero value enforced and array re-normalized.\\n')\n            participation_factors[i] = 0.0\n            participation_factors = participation_factors / np.sum(participation_factors)\n        elif (not gens.slack[i]) and (participation_factors[i] != 0.0):\n            print('Non-zero participation for non-slack generator. Zero value enforced and array re-normalized.\\n')\n            participation_factors[i] = 0.0\n            participation_factors = participation_factors / np.sum(participation_factors)\n    \n    #Loading the participation factors into the generator dataframe \n    for i in range(len(gens.index)):\n        gens.participation_factor[i] = participation_factors[i]\n    \n    system.update({'generators':gens})\n    return\n\n
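#usage sketch (hypothetical three-generator case): the factors are ordered by\n#increasing generator bus index, must be non-negative and must sum to 1, e.g.\n#    load_participation_factors(system, p_factors = np.array([0.5, 0.3, 0.2]))\n#omitting p_factors falls back to equal factors for all participating generators\n\n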
def set_reference_bus_power(system, pset):\n    #Function used to adjust the power setpoint of the reference bus,\n    #which is often the original slack bus for converted single slack bus systems\n    if not system.get('distributed_slack'):\n        print(\"Error - single slack bus system. Cannot set slack generator power.\\n\")\n        return\n    else:\n        gens = system.get('generators')\n        idx = gens[gens.bus == system.get('reference_bus')].index[0]\n        gens.pset[idx] = pset\n        system.update({'generators':gens.copy()})\n    return\n    \ndef new_england_39_new_voltages(network):\n    #https://pdfcoffee.com/39-bus-new-england-system-pdf-free.html\n    #The PandaPower nominal bus voltages are all set to 345 kV\n    #the change makes little to no difference for power flow, but it may be more\n    #accurate - for example when calculating currents in amperes.\n    \n    #Fixing bus base voltages\n    network.bus['vn_kv'][11] = 138\n    network.bus['vn_kv'][19] = 230\n    network.bus['vn_kv'][29] = 16.5\n    network.bus['vn_kv'][30] = 16.5\n    network.bus['vn_kv'][31] = 16.5\n    network.bus['vn_kv'][32] = 16.5\n    network.bus['vn_kv'][33] = 16.5\n    network.bus['vn_kv'][34] = 16.5\n    network.bus['vn_kv'][35] = 16.5\n    network.bus['vn_kv'][36] = 16.5\n    network.bus['vn_kv'][37] = 16.5\n\n    #Updating transformer voltages\n    network.trafo['vn_lv_kv'][0] = 16.5\n    network.trafo['vn_lv_kv'][1] = 16.5\n    network.trafo['vn_lv_kv'][2] = 16.5\n    network.trafo['vn_lv_kv'][3] = 138\n    network.trafo['vn_lv_kv'][4] = 138\n    network.trafo['vn_lv_kv'][5] = 230\n    network.trafo['vn_lv_kv'][6] = 16.5\n    network.trafo['vn_lv_kv'][7] = 16.5\n    network.trafo['vn_hv_kv'][7] = 230\n    network.trafo['vn_lv_kv'][8] = 16.5\n    network.trafo['vn_lv_kv'][9] = 16.5\n    network.trafo['vn_lv_kv'][10] = 16.5\n\n    #Fixing erroneously defined transformer LV/HV sides\n    network.trafo['lv_bus'][3] = 11\n    network.trafo['hv_bus'][3] = 10\n    network.trafo['lv_bus'][4] = 11\n    network.trafo['hv_bus'][4] = 12\n    \n    return network\n\n\ndef new_england_case_line_fix(system):\n    #Change transformer erroneously defined as transmission line in PandaPower\n    lines = system.get('lines')\n    #saving short circuit impedance value\n    z_k = np.round(complex(lines['r_ohm_per_km'][29],\n                           lines['x_ohm_per_km'][29]) / (345000**2 / 100000000),6)\n    lines = lines.drop(labels=29, axis=0)\n    lines = lines.reset_index(drop=True)\n    system.update({'lines':lines})\n\n    trafo = system.get('transformers')\n    swap_trafo = trafo[trafo.index == 2].reset_index(drop=True) #copy other trafo with same rating - see matpower desc\n    swap_trafo['hv_bus'][0] = 22\n    swap_trafo['lv_bus'][0] = 35\n    swap_trafo['z_k'] = z_k\n    swap_trafo['in_service'] = True\n    trafo = trafo.append(swap_trafo)\n    trafo = trafo.sort_values(by=['hv_bus'])\n    trafo = trafo.reset_index(drop=True)\n    system.update({'transformers':trafo})\n    \n    return\n\n# =============================================================================\n# Functions for extracting system information\n\n\ndef process_admittance_mat(system):\n    #extract the number of buses and real and imaginary parts of the Ybus\n    ybus = system.get('admmat')\n    n_buses = ybus.shape[0]\n    g = np.real(ybus)\n    b = np.imag(ybus)\n    return n_buses, g, b\n\ndef get_pv_idx(system):\n    #returns an array containing the indices of PV-buses\n    \n    inactive_buses = inactive_bus_idx(system)\n    gens = system.get('generators')\n    pv_idx = gens[gens.in_service & (gens.type=='pv')].bus.to_numpy()\n    #accounting for inactive buses effectively reducing the bus indices\n    for i in range(np.size(inactive_buses)):\n        pv_idx[pv_idx > inactive_buses[i]] -= 1\n    \n    return pv_idx\n\ndef slack_idx(system):\n    #returns the bus index of the slack or reference bus of the system\n    inactive_buses = inactive_bus_idx(system)\n    \n    if system.get('distributed_slack'):\n        bus = system.get('reference_bus')\n    else:\n        bus = system.get('slack').bus[0]\n    \n    #accounting for inactive buses\n
    for i in range(np.size(inactive_buses)):\n        if bus > inactive_buses[i]:\n            bus -= 1\n    \n    return bus\n    \n\ndef inactive_bus_idx(system):\n    #returns an array of the indices of inactive buses in the system\n    #this is used to allow adjusting power flow calculations for inactive buses\n    buses = system.get('buses')\n    return buses[buses.in_service == False].index.to_numpy()\n\n\ndef build_admittance_matrix(system):\n# =============================================================================\n#     UNFINISHED - does not match pandapower values for transformers in the matrix\n#     also, it currently does not account for inactive buses\n# =============================================================================\n    \n    #Calculates the bus admittance matrix based exclusively on\n    #transmission line and transformer data.\n    #line model: pi\n    #trafo model: series inductance / pi (simple - no tap changing)\n    \n    #1 - calculate per unitized values according to base impedances\n    #2 - build off-diagonals\n    #3 - build diagonals\n    \n    \n    n_buses = len(system.get('buses').index)\n    line = system.get('lines')\n    trafo = system.get('transformers')\n    bus = system.get('buses')\n    s_base = system.get('s_base')\n    \n    ybus = np.zeros((n_buses, n_buses), dtype = complex)\n    \n    for i in range(len(line.index)):\n        if line['in_service'][i]:\n            fr = line['from'][i]\n            to = line['to'][i]\n            z_base = ((bus.vn_kv[fr] * 1e3) ** 2) / (s_base * 1e6)\n            \n            z_line_pu = complex(line['r_ohm_per_km'][i], \n                                line['x_ohm_per_km'][i]) * line.length[i] / z_base\n            y_line_pu = complex(line['g_us_per_km'][i], \n                                2*np.pi*system.get('frequency')*line['c_nf_per_km'][i]*1e-9\n                                ) * line.length[i] * z_base\n            \n            #diagonals\n            ybus[fr, fr] += 1 / z_line_pu + 0.5 * y_line_pu\n            ybus[to, to] += 1 / z_line_pu + 0.5 * y_line_pu\n            \n            #off-diagonals\n            ybus[fr, to] -= 1 / z_line_pu\n            ybus[to, fr] -= 1 / z_line_pu\n    \n    \n    for i in range(len(trafo.index)):\n        #Note: values of z_k and y_m for the transformers are per unitized \n        #with respect to the low voltage side\n        #Per unit impedance remains unchanged when referred from one side to the other\n        \n        if trafo['in_service'][i]:\n            lv = trafo['lv_bus'][i]\n            hv = trafo['hv_bus'][i]\n            \n            # #per-unitization of transformer values\n            # #based on PandaPower documentation\n            # z_base_lv = bus.vn_kv[lv] ** 2 / s_base\n            # z_ref_trafo = bus.vn_kv[lv] ** 2 * s_base / trafo.s_rated[i]\n            \n            # # z_pu = trafo.z_k[i] * z_ref_trafo/z_base_lv\n            # # y_pu = trafo.y_m[i] * z_base_lv/z_ref_trafo\n            z_pu = trafo.z_k[i]\n            y_pu = trafo.y_m[i]\n            \n            #diagonals\n            ybus[lv, lv] += 1 / z_pu + 0.5 * y_pu\n            ybus[hv, hv] += 1 / z_pu + 0.5 * y_pu\n            \n            #off-diagonals\n            ybus[lv, hv] -= 1 / z_pu\n            ybus[hv, lv] -= 1 / z_pu\n    \n    \n    #Delete rows and columns for inactive buses\n    #easiest way is probably to delete rows and columns where the diagonal elements\n    #are equal to zero, since this means that nothing is connected to the bus\n    \n    return ybus\n\n\n# =============================================================================\n# Functions for Newton-Raphson power flow calculations\n\ndef init_voltage_vecs(system):\n    n_buses = system.get('n_buses')\n    #vectors containing voltage magnitude and angle information on all busses\n    vmag_full = np.ones((n_buses,1))\n    delta_full = np.zeros((n_buses,1))\n\n    #Checking for PV-busses in order to simplify eventual calculations\n    pv_idx = get_pv_idx(system)\n    vset = np.empty((1,0), dtype=int)\n    gens = system.get('generators')\n    \n    if np.size(pv_idx) != 0:\n        #loading voltage setpoints for PV generators\n        for i in 
range(len(gens.index)):\n if (gens.type[i] == 'pv') and gens.in_service[i]:\n vset = np.append(vset, gens.vset[i])\n \n vset = np.reshape(vset, (np.size(vset),1))\n vmag_full[pv_idx] = vset\n \n #simplifying vectors according to PV and slack buses\n if system.get('distributed_slack'):\n vmag = np.delete(vmag_full, pv_idx, 0) #removing known PV bus voltage magnitudes\n else:\n vmag_full[slack_idx(system)] = system.get('slack').vset[0]#setting slack bus voltage magnitude\n pv_slack_idx = np.sort(np.append(pv_idx, slack_idx(system))) #pv and slack indices\n vmag = np.delete(vmag_full, pv_slack_idx, 0) #removing slack bus and PV busses\n \n delta = np.delete(delta_full, slack_idx(system), 0) #reference voltage angle\n \n return vmag, delta, vmag_full, delta_full\n\n\ndef calc_power_vecs(system, vmag, delta, g, b):\n n_buses = g.shape[0] \n \n # g = np.asarray(g)\n # b = np.asarray(b)\n \n #vectors containing power injection information about every bus\n p_full = np.zeros((n_buses,1))\n q_full = np.zeros((n_buses,1))\n \n # #calculate full power vectors based on the voltages of the system \n # for k in range(n_buses): \n # psum = 0\n # qsum = 0\n # for n in range(n_buses):\n # psum += vmag[n] * (g[k,n]*(np.cos(delta[k] - delta[n])) + b[k,n]*np.sin(delta[k] - delta[n]))\n # qsum += vmag[n] * (g[k,n]*(np.sin(delta[k] - delta[n])) - b[k,n]*np.cos(delta[k] - delta[n]))\n # p_full[k] = vmag[k] * psum\n # q_full[k] = vmag[k] * qsum\n \n #rewritten for speed \n for k in range(n_buses): \n delta_k = np.ones((n_buses,1)) * delta[k]\n \n psum = vmag * (g[k,:].reshape((n_buses,1)) * (np.cos(delta_k - delta)) + b[k,:].reshape((n_buses,1)) * np.sin(delta_k - delta))\n \n qsum = vmag * (g[k,:].reshape((n_buses,1)) * (np.sin(delta_k - delta)) - b[k,:].reshape((n_buses,1)) * np.cos(delta_k - delta))\n \n p_full[k] = vmag[k] * np.sum(psum)\n q_full[k] = vmag[k] * np.sum(qsum)\n \n #Checking for PV-busses in order to simplify eventual calculations\n pv_idx = get_pv_idx(system)\n \n if system.get('distributed_slack'):\n q = np.delete(q_full, pv_idx, 0) #removing the pv bus indices\n p = p_full\n else:\n pv_slack_idx = np.sort(np.append(pv_idx, slack_idx(system))) #pv and slack indices\n q = np.delete(q_full, pv_slack_idx, 0) #removing the pv and slack bus indices after calculation\n p = np.delete(p_full, slack_idx(system), 0) #removing slack bus index from power vector\n \n return p, q, p_full, q_full\n\n\ndef calc_power_setpoints(system):\n n_buses = system.get('n_buses')\n \n inactive_buses = inactive_bus_idx(system)\n offset = j = 0\n \n pset = np.zeros((n_buses,1))\n qset = np.zeros((n_buses,1))\n # pset = np.zeros(n_buses)\n # qset = np.zeros(n_buses)\n \n gens = system.get('generators')\n loads = system.get('loads')\n shunts = system.get('shunts')\n #Shunts are as of now handled as loads \n #this allows them to affect bus voltages\n \n \n #loading bus setpoints\n for i in range(len(loads.index)):\n if loads.in_service[i]:\n if (np.size(inactive_buses) > j) and (loads.bus[i] > inactive_buses[j]):\n #To account for inactive buses, which essentialy reduces the \n #amount of buses in the system from a calculations standpoint\n offset += 1\n j += 1\n k = loads.bus[i] - offset\n #load is a negative power injection\n pset[k] -= loads.p[i] \n qset[k] -= loads.q[i]\n \n offset = j = 0\n for i in range(len(shunts.index)):\n if shunts.in_service[i]:\n if (np.size(inactive_buses) > j) and (shunts.bus[i] > inactive_buses[j]):\n offset += 1\n j += 1\n k = shunts.bus[i] - offset\n #shunt values are consumption (load 
convention)\n pset[k] -= shunts.p[i] \n qset[k] -= shunts.q[i]\n \n offset = j = 0\n for i in range(len(gens.index)):\n if gens.in_service[i]:\n if (np.size(inactive_buses) > j) and (gens.bus[i] > inactive_buses[j]):\n offset += 1\n j += 1\n k = gens.bus[i] - offset\n pset[k] += gens.pset[i] #generator is a positive injection\n if gens.type[i] == 'pq':\n qset[k] += gens.qset[i]\n \n #rewritten for speed\n # bus_idx = loads[loads.in_service == True].bus.to_numpy()\n # p_load = loads[loads.in_service == True].p.to_numpy()\n # q_load = loads[loads.in_service == True].q.to_numpy()\n \n # for i in range(np.size(inactive_buses)):\n # bus_idx[bus_idx > inactive_buses[i]] -= 1\n # pset[bus_idx] -= p_load\n # qset[bus_idx] -= q_load\n \n # bus_idx = shunts[shunts.in_service == True].bus.to_numpy()\n # p_shunt = shunts[shunts.in_service == True].p.to_numpy()\n # q_shunt = shunts[shunts.in_service == True].q.to_numpy()\n \n # for i in range(np.size(inactive_buses)):\n # bus_idx[bus_idx > inactive_buses[i]] -= 1\n # pset[bus_idx] -= p_shunt\n # qset[bus_idx] -= q_shunt\n \n # bus_idx = gens[gens.in_service == True].bus.to_numpy()\n # p_gen = gens[gens.in_service == True].pset.to_numpy()\n # q_gen = gens[gens.in_service == True].qset.to_numpy()\n \n # for i in range(np.size(inactive_buses)):\n # bus_idx[bus_idx > inactive_buses[i]] -= 1\n # pset[bus_idx] += p_gen\n # q_gen[q_gen == None] = 0 #handling no specified q setpoint\n # qset[bus_idx] += q_gen.astype(float)\n \n pv_idx = get_pv_idx(system)\n if system.get('distributed_slack'):\n qset = np.delete(qset, pv_idx, 0) #removing PV bus indices\n else:\n pv_slack_idx = np.sort(np.append(pv_idx, slack_idx(system))) #pv and slack indices\n qset = np.delete(qset, pv_slack_idx, 0) #removing PV and slack bus indices\n pset = np.delete(pset, slack_idx(system), 0) #removing slack bus index\n \n # return pset.reshape((np.size(pset),1)), qset.reshape((np.size(qset),1))\n return pset, qset\n\n\ndef calc_mismatch_vecs(system, p, q):\n #calculates power mismatch vectors\n #(setpoints minus calculated power vectors)\n (pset, qset) = calc_power_setpoints(system)\n del_p = pset - p\n del_q = qset - q\n return del_p, del_q\n\n\ndef calc_jacobian(system, vmag, delta, g, b, p, q):\n #calculates the full jacobian matrix for the power flow iteration\n n_buses = system.get('n_buses')\n \n jacobian = np.zeros((2*(n_buses),2*(n_buses)))\n #Pointing to the submatrices\n j1 = jacobian[0:(n_buses),0:(n_buses)]\n j2 = jacobian[0:(n_buses),(n_buses):(2*(n_buses))]\n j3 = jacobian[(n_buses):(2*(n_buses)),0:(n_buses)]\n j4 = jacobian[(n_buses):(2*(n_buses)),(n_buses):(2*(n_buses))]\n\n #Calculating Jacobian matrix\n # for k in range(n_buses):\n # for n in range(n_buses):\n # if k == n: #diagonal elements\n # j1[k,n] = -q[k] - b[k,k] * vmag[k]**2\n # j2[k,n] = p[k] / vmag[k] + g[k,k] * vmag[k]\n # j3[k,n] = p[k] - g[k,k] * vmag[k]**2\n # j4[k,n] = q[k] / vmag[k] - b[k,k] * vmag[k]\n\n # else: #off-diagonal elements\n # j1[k,n] = vmag[k] * vmag[n] * (g[k,n]*(np.sin(delta[k] - delta[n])) - b[k,n]*np.cos(delta[k] - delta[n]))\n # j2[k,n] = vmag[k] * (g[k,n]*(np.cos(delta[k] - delta[n])) + b[k,n]*np.sin(delta[k] - delta[n]))\n # j3[k,n] = -vmag[k] * vmag[n] * (g[k,n]*(np.cos(delta[k] - delta[n])) + b[k,n]*np.sin(delta[k] - delta[n]))\n # j4[k,n] = vmag[k] * (g[k,n]*(np.sin(delta[k] - delta[n])) - b[k,n]*np.cos(delta[k] - delta[n]))\n \n \n #rewritten for speed\n #using the power of numpy vectorization to avoid nested for loop\n #to ensure vector shapes align, reshape is used, \n #and 
flatten at the end to create a flat one-dimensional array\n for k in range(n_buses):\n delta_k = np.ones((n_buses,1)) * delta[k]\n \n #off-diagonal elements\n j1[k,:] = (vmag[k] * vmag * (g[k,:].reshape((n_buses,1))*(np.sin(delta_k - delta)) - b[k,:].reshape((n_buses,1))*np.cos(delta_k - delta))).flatten()\n j2[k,:] = (vmag[k] * (g[k,:].reshape((n_buses,1))*(np.cos(delta_k - delta)) + b[k,:].reshape((n_buses,1))*np.sin(delta_k - delta))).flatten()\n j3[k,:] = (-vmag[k] * vmag * (g[k,:].reshape((n_buses,1))*(np.cos(delta_k - delta)) + b[k,:].reshape((n_buses,1))*np.sin(delta_k - delta))).flatten()\n j4[k,:] = (vmag[k] * (g[k,:].reshape((n_buses,1))*(np.sin(delta_k - delta)) - b[k,:].reshape((n_buses,1))*np.cos(delta_k - delta))).flatten()\n \n #diagonal elements\n j1[k,k] = -q[k] - b[k,k] * vmag[k]**2\n j2[k,k] = p[k] / vmag[k] + g[k,k] * vmag[k]\n j3[k,k] = p[k] - g[k,k] * vmag[k]**2\n j4[k,k] = q[k] / vmag[k] - b[k,k] * vmag[k]\n \n \n if system.get('distributed_slack'):\n #loading vector of participation factors from the system\n part_facts = np.zeros((2 * n_buses, 1)) #the full vector containing zeros for non-slack buses\n gens = system.get('generators')\n \n inactive_buses = inactive_bus_idx(system)\n offset = j = 0\n for i in range(len(gens.index)):\n #loading generator participation into the jacobian\n #and accounting for inactive buses\n if (np.size(inactive_buses) > j) and (gens.bus[i] > inactive_buses[j]):\n offset += 1\n j += 1\n k = gens.bus[i] - offset\n part_facts[k] = gens.participation_factor[i]\n \n jacobian = np.append(jacobian, part_facts, axis = 1)\n \n return jacobian\n\ndef jacobian_calc_simplify(system, jacobian):\n #simplifies the jacobian matrix for calculations if possible\n n_buses = system.get('n_buses')\n pv_idx = get_pv_idx(system) #indices of PV-busses\n row_remove = np.array([], dtype=int) #used to track which rows to remove\n col_remove = np.array([], dtype=int) #used to track which columns to remove\n\n if system.get('distributed_slack'):\n ref_idx = system.get('reference_bus')\n #voltage angle is assumed known at the reference bus\n #the corresponding column of J1 and J3 may therefore be removed\n col_remove = np.append(col_remove, ref_idx)\n \n #removing appropriate rows and columns for PV buses\n col_remove = np.append(col_remove, pv_idx + n_buses) #offset of n_buses to reach J2 and J4\n row_remove = np.append(row_remove, pv_idx + n_buses) #offset of n_buses to reach J3 and J4\n \n else: #single slack\n slack_index = slack_idx(system)\n \n #removing rows and columns related to the slack bus\n col_remove = np.append(col_remove, slack_index)\n col_remove = np.append(col_remove, slack_index + n_buses)\n row_remove = np.append(row_remove, slack_index)\n row_remove = np.append(row_remove, slack_index + n_buses)\n \n #PV bus simplification\n col_remove = np.append(col_remove, pv_idx + n_buses) #offset of n_buses to reach J2 and J4\n row_remove = np.append(row_remove, pv_idx + n_buses) #offset of n_buses to reach J3 and J4\n \n #Deleting rows and columns and returning simplified jacobian \n jacobian_calc = np.delete(jacobian, row_remove, 0) \n jacobian_calc = np.delete(jacobian_calc, col_remove, 1)\n \n return jacobian_calc\n\ndef next_iteration(dist_slack, jacobian, vmag, delta, k_g, del_p, del_q):\n #single function for both single and distributed slack\n #calculates the next iteration of the power flow for distributed slack\n #based on inversion of the jacobian and matrix multiplication\n x = np.row_stack((delta, vmag))\n if dist_slack:\n x = np.append(x, 
[[k_g]], axis = 0) #append the slack parameter to iteration vector\n \n y = np.row_stack((del_p, del_q))\n\n # x_next = x + np.matmul(np.linalg.inv(jacobian), y) #calculating next iteration\n x_next = x + np.linalg.solve(jacobian, y) #calculating next iteration\n \n #separating variables\n delta_next = x_next[0:np.size(delta)]\n if dist_slack:\n vmag_next = x_next[np.size(delta):(np.size(x_next) - 1)]\n k_g_next = x_next[-1][0]\n else:\n #k_g_next is always returned, but is 0 for single slack (unused)\n k_g_next = 0 \n vmag_next = x_next[np.size(delta):]\n \n return delta_next, vmag_next, k_g_next\n\ndef check_convergence(dist_slack, delta_next, vmag_next, delta, vmag, k_g_next, k_g, threshold):\n #single function for checking both single and distributed slack\n #returns true or false based on magnitude in change of iteration values for voltages\n #iteration step-based convergence criteria are most common, but it can also\n #be based on magnitudes of mismatch vectors\n x_next = np.row_stack((delta_next, vmag_next))\n x = np.row_stack((delta, vmag))\n if dist_slack:\n x = np.append(x, [[k_g]], axis = 0)\n x_next = np.append(x_next, [[k_g_next]], axis = 0)\n\n # checkvec = np.ones((x.shape))\n # for i in range(np.size(x)):\n # if abs(x[i]) > 0: #avoid division by zero\n # checkvec[i] = (x_next[i] - x[i])/x[i]\n \n #rewritten for speed\n #division by zero is handled by setting the index to = 1, not zero\n #as that would place the index value below the convergence threshold \n checkvec = np.divide(((x_next-x)), x, out=np.ones_like(x_next), where=x!=0)\n \n return np.all(np.absolute(checkvec) < threshold)\n\ndef check_pv_bus(system, n_buses, q_full, print_results):\n #check if PV bus reactive power is within specified limits\n #if not, set bus(es) to PQ at Q limit and return a bool to specify \n #whether recalculation should be performed\n limit_violation = False\n \n inactive_buses = inactive_bus_idx(system)\n #Only the generator outputs should be considered, so load at the generator bus must be subtracted\n #when checking the limit violation for reactive power! 
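\n    #(worked example with made-up numbers: a bus injection of q_full[k] = 0.8 pu\n    # with a 0.4 pu reactive load at the bus gives q_loads[k] = -0.4, so the\n    # generator itself delivers q_gen = 0.8 - (-0.4) = 1.2 pu, and that value is\n    # the one held against qmin/qmax below)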
\n    q_loads = np.zeros((n_buses + np.size(inactive_buses),1))\n    loads = system.get('loads')\n    gens = system.get('generators')\n    shunts = system.get('shunts')\n    \n    for i in range(len(loads.index)):\n        if loads.in_service[i]:\n            k = loads.bus[i]\n            q_loads[k] -= loads.q[i]\n    \n    for i in range(len(shunts.index)):\n        if shunts.in_service[i]:\n            k = shunts.bus[i]\n            q_loads[k] -= shunts.q[i]\n    \n    q_loads = np.delete(q_loads, inactive_buses, axis=0)\n    offset = j = 0\n    \n    for i in range(len(gens.index)):\n        if (gens.type[i] == 'pv') and (gens.in_service[i]): #only considering in service PV-busses\n            if (np.size(inactive_buses) > j) and (gens.bus[i] > inactive_buses[j]):\n                #adjusting for inactive buses\n                offset += 1\n                j += 1 \n            k = gens.bus[i] - offset   \n            \n            #Checking for static generators on the PV gen bus \n            #(not relevant for New England system)\n            sgen = gens[(gens.bus == gens.bus[i]) & (gens.type == 'pq')].reset_index(drop=True)\n            if len(sgen.index) != 0:\n                q_sgen = 0\n                #loop variable m (not j) so the inactive-bus counter above is not clobbered\n                for m in range(len(sgen.index)):\n                    q_sgen += sgen.qset[m]\n                q_gen = q_full[k] - q_loads[k] - q_sgen\n            else:\n                q_gen = q_full[k] - q_loads[k]\n            \n            #if limit is violated, flag for recalculation and exit for-loop\n            if q_gen < gens.qmin[i]:\n                qset = gens.qmin[i]\n                gens.qset[i] = qset\n                gens.type[i] = 'pq'\n                limit_violation = True\n                break\n            \n            elif q_gen > gens.qmax[i]:\n                qset = gens.qmax[i]\n                gens.qset[i] = qset\n                gens.type[i] = 'pq'\n                limit_violation = True\n                break\n\n    if limit_violation == True:\n        system.update({'generators':gens})\n        if print_results: #toggles printing of information\n            print('Generator reactive power limit violated at bus %d (%f pu).\\nType set to PQ with generator reactive power setpoint of %.2f pu.\\n' % (gens.bus[i], q_gen, qset))\n    \n    return limit_violation\n\ndef run_power_flow(system, enforce_q_limits, print_results=True, print_bus_type=True):\n    #runs the Newton-Raphson based power flow algorithm on a valid system dictionary\n    \n    iteration_limit = system.get('iteration_limit')\n    tolerance = system.get('tolerance')\n    dist_slack = system.get('distributed_slack')\n    recalculate = True\n    recalcs = 0\n    \n    if dist_slack:\n        print(\"-------------------------------------------------------------\")\n        print(\"Calculating power flow (distributed slack bus)...\\n\")\n    else:\n        print(\"-------------------------------------------------------------\")\n        print(\"Calculating power flow (single slack bus)...\\n\")\n    \n    #while Q-limits are violated, power flow is recalculated according to adjustments\n    #if limits are not enforced, recalculate is set to False upon convergence of power flow\n    while recalculate == True: \n        (n_buses, g, b) = process_admittance_mat(system)\n        \n        (vmag, delta, vmag_full, delta_full) = init_voltage_vecs(system)\n        \n        k_g = k_g_next = 0.0 #used for distributed slack\n        \n        (p, q, p_full, q_full) = calc_power_vecs(system, vmag_full, delta_full, g, b)\n        \n        jacobian = calc_jacobian(system, vmag_full, delta_full, g, b, p_full, q_full)\n        \n        jacobian_calc = jacobian_calc_simplify(system, jacobian)\n        \n        (pset, qset) = calc_power_setpoints(system)\n        \n        # (del_p, del_q) = calc_mismatch_vecs(system, p, q)\n        del_p = pset - p\n        del_q = qset - q\n        \n        \n        #obtaining list of non-PV and non-slack busses\n        pv_idx = get_pv_idx(system)\n        pq_idx = np.arange(n_buses)\n        non_ref_idx = np.delete(pq_idx, slack_idx(system), 0)\n        pq_idx = np.delete(pq_idx, pv_idx, 0)\n        if not dist_slack:\n            pq_idx = pq_idx[pq_idx != slack_idx(system)]\n        \n        gens = system.get('generators')\n        \n        for i in range(1, iteration_limit + 1):\n            #iterates on the power flow until convergence or until the\n
until convergence or until the maximum \n #number of iterations is reached\n (delta_next, vmag_next, k_g_next) = next_iteration(dist_slack, jacobian_calc, \n vmag, delta, k_g, del_p, del_q)\n \n if check_convergence(dist_slack, delta_next, vmag_next, delta, vmag, k_g_next, k_g, tolerance):\n #If power flow has converged\n if enforce_q_limits:\n recalculate = check_pv_bus(system, n_buses, q_full, print_results)\n else:\n recalculate = False\n \n if recalculate: \n #if Q limits are violated, restart calculations after adjustments\n if print_results:\n print('Recalculating power flow...\\n')\n break\n else:\n print(\"Power flow converged at %d iterations (tolerance of %.12f).\\n\" % (i, tolerance))\n \n delta_full[non_ref_idx] = delta_next #updating voltage angles on all busses except ref/slack bus\n vmag_full[pq_idx] = vmag_next #updating voltage magnitudes on non-PV busses (and non-slack)\n if dist_slack: \n k_g = k_g_next\n \n #calculating final power vectors and mismatches\n (p, q, p_full, q_full) = calc_power_vecs(system, vmag_full, delta_full, g, b)\n del_p = pset - p\n del_q = qset - q\n \n if print_bus_type:\n #reading bus types into list for saving results\n typelist = ['' for i in range(n_buses)]\n \n inactive_buses = inactive_bus_idx(system)\n \n if not dist_slack: \n #since the distributed slack case has no explicit slack bus\n offset = 0\n for i in range(np.size(inactive_buses)):\n if system.get('slack').bus[0] > inactive_buses[i]:\n offset += 1\n typelist[system.get('slack').bus[0] - offset] = 'SLACK'\n \n offset = j = 0\n for i in range(len(gens.index)):\n if (np.size(inactive_buses) > j) and (gens.bus[i] >= inactive_buses[j]):\n #account for inactive buses\n offset += 1\n j += 1\n k = gens.bus[i] - offset \n typelist[k] = gens.type[i].upper()\n \n for i in range(n_buses):\n if typelist[i] == '':\n typelist[i] = 'PQ' \n \n d = {'vmag_pu':vmag_full.flatten(), 'delta_deg':delta_full.flatten()*180/np.pi, 'p_pu':p_full.flatten(), 'q_pu':q_full.flatten(), 'type':typelist}\n else:\n d = {'vmag_pu':vmag_full.flatten(), 'delta_deg':delta_full.flatten()*180/np.pi, 'p_pu':p_full.flatten(), 'q_pu':q_full.flatten()}\n \n #saving results in dataframe\n df = pd.DataFrame(data=d, index = np.arange(n_buses))\n df.index.name = 'bus'\n break\n \n elif i == iteration_limit: #no convergence\n print(\"Power flow did not converge after %d iterations (tolerance of %.12f).\\n\" % (i, tolerance))\n return None\n\n delta_full[non_ref_idx] = delta_next \n vmag_full[pq_idx] = vmag_next \n \n delta = np.copy(delta_next)\n vmag = np.copy(vmag_next)\n if dist_slack: \n k_g = k_g_next\n \n (p, q, p_full, q_full) = calc_power_vecs(system, vmag_full, delta_full, g, b)\n \n jacobian = calc_jacobian(system, vmag_full, delta_full, g, b, p_full, q_full)\n \n jacobian_calc = jacobian_calc_simplify(system, jacobian)\n \n if dist_slack:\n del_p = pset - (p + slack_distribution(system, k_g))\n del_q = qset - q\n else:\n del_p = pset - p\n del_q = qset - q\n # (del_p, del_q) = calc_mismatch_vecs(system, p, q)\n \n #Tracking how many times the while-loop has run to avoid endless loop\n #if the number of recalculations exceeds the number of generators,\n #something has most likely gone wrong, since at that point, they should all\n #be set to PQ and thus nothing more can be done by the check_pv_bus function\n recalcs += 1\n if recalcs > (len(gens.index)): \n print('\\nError - endless loop. 
Calculation terminated.\\n')\n break\n \n #Saving and exporting power flow results as dictionary\n vmag_res = pd.Series.to_numpy(df['vmag_pu'])\n delta_res = pd.Series.to_numpy(df['delta_deg']) * np.pi / 180\n\n inactive_buses = inactive_bus_idx(system)\n for i in range(np.size(inactive_buses)):\n #re-inserting inactive buses in results dataframe to reobtain\n #actual number of buses and illustrate results for inactive buses\n empty_row = pd.DataFrame({\"vmag_pu\": np.nan, \"delta_deg\": np.nan, \"p_pu\":0, \"q_pu\":0, \"type\":np.nan},index=[0])\n df = pd.concat([df.iloc[:inactive_buses[i]], empty_row, \n df.iloc[inactive_buses[i]:]]).reset_index(drop=True)\n \n p_loss = calc_system_losses(system, vmag_res, delta_res) \n\n line_flows = calc_line_flows(system, df)\n \n trafo_flows = calc_transformer_loadings(system, df)\n \n if dist_slack:\n #Calculating slack distribution and accounting for inactive buses\n active_gen_indices = gens[gens.in_service].index.to_numpy()\n indices = np.arange(system.get('n_buses'))\n \n #reverse loop to get proper correction of bus indices\n for i in range(np.size(inactive_buses)-1, -1, -1):\n indices[indices >= inactive_buses[i]] += 1\n \n slack_distribution_df = pd.DataFrame(data={'p_pu':(-1)*slack_distribution(system, k_g).flatten()}, \n index = indices)\n slack_distribution_df.index.name = 'bus'\n \n slack_distribution_df = slack_distribution_df.filter(items = gens['bus'].to_numpy()[active_gen_indices], axis = 0)\n slack_distribution_df['\\u03C0'] = gens['participation_factor'].to_numpy()[active_gen_indices]\n \n #some pandapower systems have multiple generators at a single bus (in case of static gens)\n #the line below is a workaround to avoid showing multiple busses and too much slack\n slack_distribution_df = slack_distribution_df.groupby(level=0).mean()\n \n #export results as dictionary\n results = {'bus_results':df, 'line_flows':line_flows, 'total_losses_pu':p_loss, 'transformer_flows':trafo_flows,\n 'mismatches':(del_p, del_q), 'slack_distribution':slack_distribution_df}\n else:\n results = {'bus_results':df, 'line_flows':line_flows, 'transformer_flows':trafo_flows, \n 'total_losses_pu':p_loss, 'mismatches':(del_p, del_q)}\n \n \n gen_res = get_generator_results(system, results)\n results.update({'generator_results':gen_res})\n \n if print_results: \n if dist_slack:\n print(\"\\nSlack (%f p.u.) 
distribution across slack generators:\\n\" % (-1*k_g))\n print(slack_distribution_df)\n #prints bus results to terminal\n print(\"\\nTable of results (power values are injections):\\n\")\n print(df)\n \n #prints warnings about limit violations\n print(\"\\nWarnings:\\n\")\n check_p_limits(system, results)\n check_q_limits(system, results)\n check_bus_voltage(system, results)\n check_line_trafo_loading(system, results)\n \n return results \n \n\n# =============================================================================\n# Functions for evaluating power flow results\n\ndef get_generator_results(system, results):\n #NB: As of now, results are only valid for systems with \n #no more than one generator on a single bus\n \n p_gen = np.copy(results.get('bus_results').p_pu.to_numpy())\n q_gen = np.copy(results.get('bus_results').q_pu.to_numpy())\n shunts = system.get('shunts')\n gens = system.get('generators')\n loads = system.get('loads')\n\n #Calculating vector of generator outputs\n for i in range(len(loads.index)):\n k = loads.bus[i]\n p_gen[k] += loads.p[i] #removing the negative load injections from the power vector\n q_gen[k] += loads.q[i] \n \n for i in range(len(shunts.index)):\n k = shunts.bus[i]\n q_gen[k] += shunts.q[i] \n\n gen_buses = gens['bus'].to_numpy()\n vres = np.copy(results.get('bus_results').vmag_pu.to_numpy()[gen_buses])\n deltares = np.copy(results.get('bus_results').delta_deg.to_numpy()[gen_buses])\n \n #saving results to dataframe\n data = {'bus':gens.bus.copy(), 'p_pu':p_gen[gen_buses], 'q_pu':q_gen[gen_buses], \n 'vmag_pu':vres, 'delta_deg':deltares, 'pmax':gens.pmax.copy()}\n gen_res = pd.DataFrame(data=data, index = gens.index)\n return gen_res\n\ndef check_p_limits(system, results):\n #NB: As of now, results are only valid for systems with \n #no more than one generator on a single bus\n gens = system.get('generators')\n p_gen = results.get('generator_results').p_pu\n \n p_limits = np.copy(gens.pmax)\n\n for i in range(np.size(p_gen)):\n if gens.in_service[i]:\n if p_gen[i] > p_limits[i]:\n k = gens.bus[i]\n magnitude = p_gen[i] - p_limits[i]\n print(\"\\nGenerator(s) real power limit exceeded at bus %i by %f pu.\\n\" \n % (k, magnitude))\n return\n\n\ndef check_q_limits(system, results):\n #Only relevant if reactive power limits are not enforced in the power flow\n \n #NB: As of now, results are only valid for systems with \n #no more than one generator on a single bus\n q_gen = results.get('generator_results').q_pu\n gens = system.get('generators')\n q_max = np.copy(gens.qmax)\n q_min = np.copy(gens.qmin)\n\n for i in range(np.size(q_gen)):\n if gens.in_service[i]:\n if round(q_gen[i],4) > round(q_max[i],4):\n magnitude = q_gen[i] - q_max[i]\n k = gens.bus[i]\n print(\"\\nGenerator(s) reactive power upper limit exceeded at bus %i by %f pu.\\n\" \n % (k, magnitude))\n elif round(q_gen[i],4) < round(q_min[i],4):\n magnitude = abs(q_min[i] - q_gen[i])\n k = gens.bus[i]\n print(\"\\nGenerator(s) reactive power lower limit exceeded at bus %i by %f pu.\\n\" \n % (k, magnitude)) \n return\n\ndef check_bus_voltage(system, results):\n bus = system.get('buses')\n vmag = results.get('bus_results').vmag_pu\n \n for i in range(len(bus.index)):\n if bus.in_service[i]:\n if vmag[i] > bus.max_vm_pu[i]:\n magnitude = vmag[i] - bus.max_vm_pu[i]\n print(\"\\nBus voltage upper limit exceeded at bus %i by %f pu.\\n\" \n % (i, magnitude))\n elif vmag[i] < bus.min_vm_pu[i]:\n magnitude = bus.min_vm_pu[i] - vmag[i]\n print(\"\\nBus voltage lower limit 
exceeded at bus %i by %f pu.\\n\" \n % (i, magnitude))\n return\n\ndef check_line_trafo_loading(system, results):\n lines = system.get('lines')\n trafo = system.get('transformers')\n l_flows = results.get('line_flows')\n t_flows = results.get('transformer_flows')\n \n for i in range(len(l_flows.index)):\n if l_flows.loading_percent[i] > 100:\n f = lines['from'][i]\n t = lines['to'][i]\n print(\"\\nLine %i (bus %i to %i) overloaded at %f %%.\\n\" \n % (i, f, t, l_flows.loading_percent[i]))\n elif (100 - l_flows.loading_percent[i]) < 5:\n f = lines['from'][i]\n t = lines['to'][i]\n print(\"\\nLine %i (bus %i to %i) near limit at %f %%.\\n\" \n % (i, f, t, l_flows.loading_percent[i]))\n \n for i in range(len(t_flows.index)):\n if t_flows.loading_percent[i] > 100:\n lv = trafo.lv_bus[i]\n hv = trafo.hv_bus[i]\n print(\"\\nTransformer %i (bus %i to %i) overloaded at %f %%.\\n\" \n % (i, lv, hv, t_flows.loading_percent[i]))\n elif (100 - t_flows.loading_percent[i]) < 5:\n lv = trafo.lv_bus[i]\n hv = trafo.hv_bus[i]\n print(\"\\nTransformer %i (bus %i to %i) near limit at %f %%.\\n\" \n % (i, lv, hv, t_flows.loading_percent[i]))\n \n return\n\n\ndef get_phasor(vmag, delta_rad, bus):\n #returns a voltage phasor\n return complex(vmag[bus]*np.cos(delta_rad[bus]),vmag[bus]*np.sin(delta_rad[bus]))\n\n\ndef calc_line_flows(system, bus_results):\n #Line flows: Current, real, reactive, apparent power at each end of lines\n #P_ft, P_tf, Q_ft, Q_tf, I_ft, I_tf, S_ft, S_tf\n #where ft = from/to and tf = to/from\n \n vmag = bus_results.vmag_pu.to_numpy()\n delta = bus_results.delta_deg.to_numpy() * np.pi / 180\n \n s_base = system.get('s_base')\n freq = system.get('frequency')\n \n lines = system.get('lines')\n n_lines = len(lines.index)\n\n #initializing empty arrays for storing data\n i_ft_pu = np.zeros(n_lines, dtype = complex)\n i_tf_pu = np.zeros(n_lines, dtype = complex)\n\n s_ft_pu = np.zeros(n_lines, dtype = complex)\n s_tf_pu = np.zeros(n_lines, dtype = complex)\n\n i_ka = np.zeros(n_lines, dtype = complex)\n fr = np.zeros(n_lines, dtype = int)\n to = np.zeros(n_lines, dtype = int)\n loading_percent = np.zeros(n_lines)\n \n fr = np.copy(lines['from'].to_numpy())\n to = np.copy(lines['to'].to_numpy()) \n \n for i in range(n_lines):\n if lines.in_service[i]:\n l = lines.length[i]\n parallel = lines.parallel[i] #number of lines in parallel\n \n #relevant base values for per unit calculations\n v_base = system.get('buses').vn_kv[fr[i]]\n z_base = (v_base ** 2) / (s_base) #voltage in kV and power in MVA\n # I_base = S_base_3ph / (sqrt(3) * V_base_LL)\n i_base_ka = s_base * 1e3 / (np.sqrt(3) * v_base * 1e3) #base current in kA (power base multiplied by 1e3 instead of 1e6)\n \n \n y_shunt = complex(lines['g_us_per_km'][i] * 1e-6, \n 2 * np.pi * freq * lines['c_nf_per_km'][i]*1e-9) * l * parallel\n y_shunt_pu = y_shunt * z_base # Y = 1/Z, so Y_pu = 1/Z_pu = Y * Z_base\n \n z_line = complex(lines['r_ohm_per_km'][i], lines['x_ohm_per_km'][i]) * l / parallel\n z_line_pu = z_line / z_base\n \n #loading voltage magnitude and phase angle as phasor\n v_1 = get_phasor(vmag, delta, fr[i])\n v_2 = get_phasor(vmag, delta, to[i])\n \n # I_12 = (V_1 - V_2) / Z_12 + V_1 * (Y_sh / 2)\n \n i_ft_pu[i] = ((v_1 - v_2) / z_line_pu + v_1 * (y_shunt_pu / 2))\n i_tf_pu[i] = ((v_2 - v_1) / z_line_pu + v_2 * (y_shunt_pu / 2))\n \n s_ft_pu[i] = v_1 * np.conj(i_ft_pu[i])\n s_tf_pu[i] = v_2 * np.conj(i_tf_pu[i])\n \n i_ka[i] = max(np.abs(i_ft_pu[i]), np.abs(i_tf_pu[i])) * i_base_ka\n \n loading_percent[i] = (np.abs(i_ka[i]) / 
lines['ampacity_ka'][i]) * 100\n \n \n p_ft_pu = np.real(s_ft_pu)\n p_tf_pu = np.real(s_tf_pu)\n\n q_ft_pu = np.imag(s_ft_pu)\n q_tf_pu = np.imag(s_tf_pu)\n\n p_loss = p_ft_pu + p_tf_pu\n \n #saving results to dataframe\n d = {'from':lines['from'].to_numpy(),'to':lines['to'].to_numpy(),\n 'loading_percent':loading_percent, 'i_ka':np.abs(i_ka), 'p_ft_pu':p_ft_pu, 'p_tf_pu':p_tf_pu, \n 'p_loss_pu':p_loss, 'q_ft_pu':q_ft_pu, 'q_tf_pu':q_tf_pu, 'i_ft_pu':np.abs(i_ft_pu), \n 'i_tf_pu':np.abs(i_tf_pu), 's_ft_pu':np.abs(s_ft_pu), \n 's_tf_pu':np.abs(s_tf_pu)}\n df = pd.DataFrame(data=d, index = np.arange(n_lines))\n df.index.name = 'line'\n \n return df\n\n\ndef calc_transformer_loadings(system, bus_results):\n #Note: Simplified representation of transformer as a series impedance between busses\n #(typical per-unit representation)\n\n vmag = bus_results.vmag_pu.to_numpy()\n delta = bus_results.delta_deg.to_numpy() * np.pi / 180\n \n trafo = system.get('transformers')\n bus = system.get('buses')\n ybus = system.get('admmat')\n s_base = system.get('s_base')\n n_trafo = len(trafo.index)\n \n #initializing empty arrays for storing data\n i_lv_pu = np.zeros(n_trafo, dtype = complex)\n i_hv_pu = np.zeros(n_trafo, dtype = complex)\n \n i_lv_ka = np.zeros(n_trafo, dtype = complex)\n i_hv_ka = np.zeros(n_trafo, dtype = complex)\n\n s_lv_pu = np.zeros(n_trafo, dtype = complex)\n s_hv_pu = np.zeros(n_trafo, dtype = complex)\n \n loading_percent = np.zeros(n_trafo)\n \n lv = np.copy(trafo['lv_bus'].to_numpy())\n hv = np.copy(trafo['hv_bus'].to_numpy())\n \n #Adjusting HV/LV indices according to inactive buses\n #to avoid errors when using the Ybus matrix for flow calculations\n #since the Ybus is reduced for inactive buses\n if np.size(inactive_bus_idx(system)) > 0:\n inactive_buses = inactive_bus_idx(system)\n vmag = np.delete(vmag, inactive_buses)\n delta = np.delete(delta, inactive_buses)\n \n for i in range(np.size(inactive_buses)-1, -1, -1):\n lv[lv > inactive_buses[i]] -= 1\n hv[hv > inactive_buses[i]] -= 1\n \n for i in range(n_trafo):\n if not trafo.in_service[i]:\n pass #do nothing - all zeros\n else:\n v_lv = get_phasor(vmag, delta, lv[i])\n v_hv = get_phasor(vmag, delta, hv[i])\n #loading the per unit series impedance from the admittance matrix\n x_t = 1 / (-1 * ybus[lv[i], hv[i]]) \n \n i_lv_pu[i] = (v_lv - v_hv) / x_t\n i_hv_pu[i] = (v_hv - v_lv) / x_t\n i_lv_ka[i] = i_lv_pu[i] * s_base / (np.sqrt(3) * bus.vn_kv[lv[i]])\n i_hv_ka[i] = i_hv_pu[i] * s_base / (np.sqrt(3) * bus.vn_kv[hv[i]])\n \n s_lv_pu[i] = v_lv * np.conj(i_lv_pu[i])\n s_hv_pu[i] = v_hv * np.conj(i_hv_pu[i])\n \n s_mva = max(abs(s_lv_pu[i]), abs(s_hv_pu[i])) * s_base #apparent power in MVA\n \n #calculating loading percentage based on transformer power rating\n loading_percent[i] = (s_mva / trafo['s_rated'][i]) * 100\n \n d = {'lv':trafo['lv_bus'].to_numpy(),'hv':trafo['hv_bus'].to_numpy(),'loading_percent':loading_percent, 'p_lv_pu':np.real(s_lv_pu), \n 'p_hv_pu':np.real(s_hv_pu), 'q_lv_pu':np.imag(s_lv_pu), \n 'q_hv_pu':np.imag(s_hv_pu), 'i_lv_ka':np.abs(i_lv_ka), \n 'i_hv_ka':np.abs(i_hv_ka), 's_lv_pu':np.abs(s_lv_pu), \n 's_hv_pu':np.abs(s_hv_pu)}\n df = pd.DataFrame(data=d, index = np.arange(n_trafo))\n df.index.name = 'trafo'\n \n return df\n\ndef calc_system_losses(system, vmag, delta):\n #Computes the system real power losses based on the loss function\n #especially relevant for distributed slack\n (n_buses, g, b) = process_admittance_mat(system)\n losses = 0\n\n # for k in range(n_buses):\n # losses += vmag[k] ** 2 * 
g[k,k]\n # for n in range(k + 1, n_buses): #starts at n = k + 1 to avoid n == k as well as repeating behavior\n # losses += 2 * vmag[k] * vmag[n] * g[k,n] * np.cos(delta[k] - delta[n]) \n \n #rewritten for speed\n for k in range(n_buses):\n losses += vmag[k] ** 2 * g[k,k]\n delta_k = delta[k]\n #the slice starts at n = k + 1 to avoid n == k as well as repeating behavior\n losses += np.sum(2 * vmag[k] * vmag[(k+1):] * g[k,(k+1):] * np.cos(delta_k - delta[(k+1):])) \n \n return losses\n\ndef slack_distribution(system, k_g):\n #returns a vector of the distribution of system slack across buses\n gens = system.get('generators')\n slackvec = np.zeros((system.get('n_buses'), 1))\n inactive_buses = inactive_bus_idx(system)\n offset = j = 0\n \n for i in range(len(gens.index)):\n if gens.slack[i]:\n if (np.size(inactive_buses) > j) and (gens.bus[i] > inactive_buses[j]):\n offset += 1\n j += 1\n k = gens.bus[i] - offset \n p_fact = gens.participation_factor[i]\n #k_g is a negative injection, but the absolute value is taken here\n #because the vector denotes how much each slack generator injects\n #to compensate for losses\n slackvec[k] = p_fact * k_g\n \n return slackvec\n\n\n# =============================================================================\n# Functions for plotting results: Bus voltages, line/trafo loadings\n\n\ndef plot_results(system, results, angle=False, name='', save_directory='', plot='', axis_values=[0,0,0,0,0], lg_lim=[0,0]):\n #Note: need small changes if the system does not have transformers \n #or if the system has different bus voltage limits for each bus\n #but the functionality is based on the New England 39 bus system\n \n #axis_values[0]: min y-value for voltage magnitude\n #axis_values[1]: max y-value for voltage magnitude\n #axis_values[2]: min/max value for voltage angle\n #axis_values[3]: max y-value for line loading\n #axis_values[4]: max y-value for transformer loading\n mpl.rcParams[\"axes.titlesize\"] = 19\n mpl.rcParams[\"axes.labelsize\"] = 18\n mpl.rcParams[\"xtick.labelsize\"] = 15\n mpl.rcParams[\"ytick.labelsize\"] = 16\n if plot == 'lines':\n fig = plt.figure(dpi=200)\n fig.set_figheight(10)\n fig.set_figwidth(12)\n plt.bar(results.get('line_flows').index, results.get('line_flows')['loading_percent'], \n color='teal')\n # plt.scatter(results.get('line_flows').index, np.ones(len(results.get('line_flows').index))*100, marker=\"_\", color='tab:red',s=30)\n plt.axhline(y=100, color='tab:red', linestyle='--')\n if max(results.get('line_flows')['loading_percent']) > 110:\n plt.ylim(0,max(results.get('line_flows')['loading_percent']) + 5)\n else:\n plt.ylim(0,110)\n plt.title('Line Loading')\n plt.ylabel('Percentage')\n plt.xlabel('Line')\n plt.xticks(range(0, len(results.get('line_flows').index), 2))\n plt.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n plt.margins(x=0.025)\n if name != '':\n plt.title('%s\\n\\n' % name, fontweight='bold', fontsize=14)\n elif plot == 'generators':\n fig = plt.figure(dpi=200)\n fig.set_figheight(11)\n fig.set_figwidth(11)\n gen_loadings = (results.get('generator_results')['p_pu'].to_numpy()/system.get('generators')['pmax'].to_numpy())*100\n plt.bar(results.get('generator_results').index, \n gen_loadings, \n color='darkcyan')\n # plt.scatter(results.get('line_flows').index, np.ones(len(results.get('line_flows').index))*100, marker=\"_\", color='tab:red',s=30)\n plt.axhline(y=100, color='tab:red', linestyle='--')\n if max(gen_loadings) > 110:\n plt.ylim(0,max(gen_loadings) + 5)\n else:\n plt.ylim(0,110)\n plt.title('Generator Loading 
(P)')\n plt.ylabel('Percentage')\n plt.xlabel('Generator')\n plt.xticks(range(0, np.size(gen_loadings), 2))\n plt.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n plt.margins(x=0.025)\n if name != '':\n plt.title('%s\\n\\n' % name, fontweight='bold', fontsize=14)\n elif plot == 'lg': #lines and generators\n gs = gsp.GridSpec(2, 1)\n fig = plt.figure(dpi=200)\n fig.set_figheight(11)\n fig.set_figwidth(11)\n if name != '':\n plt.title('%s\\n\\n' % name, fontweight='bold', fontsize=14)\n ax = plt.gca()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n \n ax1 = fig.add_subplot(gs[0, :]) # row 0, col 0\n gen_loadings = (results.get('generator_results')['p_pu'].to_numpy()/system.get('generators')['pmax'].to_numpy())*100\n ax1.bar(results.get('generator_results').index, gen_loadings, color='midnightblue')\n ax1.axhline(y=100, color='tab:red', linestyle='--')\n ax1.title.set_text('Generator Loading (P)')\n ax1.set_ylabel('Percentage')\n ax1.set_xlabel('Generator')\n ax1.set_xticks(range(0, len(gen_loadings), 2))\n if lg_lim[0] != 0:\n ax1.set_ylim(0, lg_lim[0])\n ax1.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax1.margins(x=0.025)\n \n ax2 = fig.add_subplot(gs[1, 0])\n ax2.bar(results.get('line_flows').index, results.get('line_flows')['loading_percent'], \n color='teal')\n # plt.scatter(results.get('line_flows').index, np.ones(len(results.get('line_flows').index))*100, marker=\"_\", color='tab:red',s=30)\n ax2.axhline(y=100, color='tab:red', linestyle='--')\n if lg_lim[1] != 0:\n ax2.set_ylim(0,lg_lim[1])\n elif max(results.get('line_flows')['loading_percent']) > 110:\n ax2.set_ylim(0,max(results.get('line_flows')['loading_percent']) + 5)\n else:\n ax2.set_ylim(0,110)\n ax2.title.set_text('Line Loading')\n ax2.set_ylabel('Percentage')\n ax2.set_xlabel('Line')\n ax2.set_xticks(range(0, len(results.get('line_flows').index), 2))\n ax2.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax2.margins(x=0.025) \n \n else:\n if angle:\n gs = gsp.GridSpec(3, 2)\n else:\n gs = gsp.GridSpec(2, 2)\n fig = plt.figure(dpi=200)\n fig.set_figheight(11)\n fig.set_figwidth(11)\n if name != '':\n plt.title('%s\\n\\n' % name, fontweight='bold', fontsize=14)\n ax = plt.gca()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n \n ax1 = fig.add_subplot(gs[0, :]) # row 0, col 0\n ax1.scatter(results.get('bus_results').index, results.get('bus_results')['vmag_pu'], marker=\"D\", \n color='darkblue',s=25)\n # ax1.scatter(system.get('buses').index, system.get('buses')['max_vm_pu'], marker=\"_\", color='tab:red',s=30)\n # ax1.scatter(system.get('buses').index, system.get('buses')['min_vm_pu'], marker=\"_\", color='tab:red',s=30)\n ax1.axhline(y=system.get('buses')['max_vm_pu'][0], color='tab:red', linestyle='--')\n ax1.axhline(y=system.get('buses')['min_vm_pu'][0], color='tab:red', linestyle='--')\n ax1.title.set_text('Voltage Magnitude')\n ax1.set_ylabel('Magnitude [p.u.]')\n ax1.set_xlabel('Bus')\n ax1.set_xticks(range(0, len(results.get('bus_results').index), 2))\n if (axis_values[0] != 0) and (axis_values[1] != 0):\n ax1.set_ylim(axis_values[0], axis_values[1])\n ax1.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax1.margins(x=0.025)\n \n if angle:\n ax2 = 
fig.add_subplot(gs[2, 0])\n else:\n ax2 = fig.add_subplot(gs[1, 0])\n ax2.bar(results.get('line_flows').index, results.get('line_flows')['loading_percent'], \n color='teal')\n # plt.scatter(results.get('line_flows').index, np.ones(len(results.get('line_flows').index))*100, marker=\"_\", color='tab:red',s=30)\n ax2.axhline(y=100, color='tab:red', linestyle='--')\n ax2.title.set_text('Line Loading')\n ax2.set_ylabel('Percentage')\n ax2.set_xlabel('Line')\n ax2.set_xticks(range(0, len(results.get('line_flows').index), 2))\n if axis_values[3] != 0:\n ax2.set_ylim(0, axis_values[3])\n elif max(results.get('line_flows')['loading_percent']) > 110:\n ax2.set_ylim(0,max(results.get('line_flows')['loading_percent']) + 5)\n else:\n ax2.set_ylim(0,110)\n \n ax2.tick_params(axis='x', labelsize=mpl.rcParams[\"xtick.labelsize\"] - 2)\n ax2.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax2.margins(x=0.025)\n \n if angle:\n ax3 = fig.add_subplot(gs[2,1])\n else:\n ax3 = fig.add_subplot(gs[1,1])\n ax3.bar(results.get('transformer_flows').index, results.get('transformer_flows')['loading_percent'], \n color='darkgreen')\n # plt.scatter(results.get('transformer_flows').index, np.ones(len(results.get('transformer_flows').index))*100, marker=\"_\", color='tab:red',s=60)\n ax3.axhline(y=100, color='tab:red', linestyle='--')\n ax3.title.set_text('Transformer Loading')\n ax3.set_ylabel('Percentage')\n ax3.set_xlabel('Transformer')\n ax3.set_xticks(range(0, len(results.get('transformer_flows').index), 1))\n if axis_values[4] != 0:\n ax3.set_ylim(0, axis_values[4])\n elif max(results.get('transformer_flows')['loading_percent']) > 110:\n ax3.set_ylim(0,max(results.get('transformer_flows')['loading_percent']) + 5)\n else:\n ax3.set_ylim(0,110)\n ax3.tick_params(axis='x', labelsize=mpl.rcParams[\"xtick.labelsize\"] - 2)\n ax3.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax3.margins(x=0.025)\n \n if angle:\n ax4 = fig.add_subplot(gs[1,:])\n ax4.bar(results.get('bus_results').index, results.get('bus_results')['delta_deg'], \n color='darkslateblue')\n ax4.axhline(y=0, color='darkslategray', linestyle='-')\n ax4.title.set_text('Voltage Phase Angle')\n ax4.set_ylabel('Phase Angle [Deg.]')\n ax4.set_xlabel('Bus')\n ax4.set_xticks(range(0, len(results.get('bus_results').index), 2))\n ax4.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax4.margins(x=0.025)\n if axis_values[2] != 0:\n ax4.set_ylim(-axis_values[2], axis_values[2])\n else:\n ax4.set_ylim(-(max(abs(results.get('bus_results')['delta_deg']))+1), \n max(abs(results.get('bus_results')['delta_deg']))+1)\n fig.tight_layout()\n \n if save_directory != '':\n fig.savefig(save_directory)\n return\n\ndef plot_result_comparison(results1, results2, angle=False, name = '', fixed_y_axis_values=[0,0,0,0]):\n #Plots differences for bus voltages, line loadings etc. 
between two result dictionaries\n #The fixed y axis values list indices correspond to the following plot y limits:\n #0: voltage magnitude, 1: line loading, 2: transformer loading, 3: voltage angle\n #The same limit is set for positive and negative values\n mpl.rcParams[\"axes.titlesize\"] = 19\n mpl.rcParams[\"axes.labelsize\"] = 18\n mpl.rcParams[\"xtick.labelsize\"] = 15\n mpl.rcParams[\"ytick.labelsize\"] = 16\n \n #Bus voltages\n vmag1 = results1.get('bus_results')['vmag_pu'].to_numpy()\n vmag2 = results2.get('bus_results')['vmag_pu'].to_numpy()\n vmag_diff = np.zeros(np.size(vmag1))\n \n for i in range(np.size(vmag1)):\n # if vmag1[i] > vmag2[i]:\n # vmag_diff[i] = vmag1[i] - vmag2[i]\n # elif vmag2[i] > vmag1[i]:\n # vmag_diff[i] = vmag2[i] - vmag1[i]\n vmag_diff[i] = vmag2[i] - vmag1[i]\n \n #Line loadings\n l1 = results1.get('line_flows')['loading_percent'].to_numpy()\n l2 = results2.get('line_flows')['loading_percent'].to_numpy()\n l_diff = np.zeros(np.size(l1))\n \n for i in range(np.size(l1)):\n # if l1[i] > l2[i]:\n # l_diff[i] = l1[i] - l2[i]\n # elif l2[i] > l1[i]:\n # l_diff[i] = l2[i] - l1[i]\n l_diff[i] = l2[i] - l1[i]\n \n #Transformer loadings\n t1 = results1.get('transformer_flows')['loading_percent'].to_numpy()\n t2 = results2.get('transformer_flows')['loading_percent'].to_numpy()\n t_diff = np.zeros(np.size(t1))\n \n for i in range(np.size(t1)):\n # if t1[i] > t2[i]:\n # t_diff[i] = t1[i] - t2[i]\n # elif t2[i] > t1[i]:\n # t_diff[i] = t2[i] - t1[i]\n t_diff[i] = t2[i] - t1[i]\n \n \n if angle:\n gs = gsp.GridSpec(3, 2)\n else:\n gs = gsp.GridSpec(2, 2)\n fig = plt.figure(dpi=200)\n fig.set_figheight(10)\n fig.set_figwidth(12)\n if name != '':\n plt.title('%s\\n\\n' % name, fontweight='bold', fontsize=14)\n ax = plt.gca()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n \n ax1 = fig.add_subplot(gs[0, :]) # row 0, col 0\n ax1.bar(np.arange(0,np.size(vmag_diff)), vmag_diff, color='darkblue')\n ax1.axhline(y=0, color='darkslategray', linestyle='-')\n ax1.title.set_text('Difference in Voltage Magnitude')\n ax1.set_ylabel('\\u0394 [p.u.]')\n ax1.set_xlabel('Bus')\n ax1.set_xticks(range(0, np.size(vmag_diff), 2))\n ax1.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax1.margins(x=0.025)\n if fixed_y_axis_values[0] == 0:\n ax1.set_ylim(-max(abs(vmag_diff)), max(abs(vmag_diff)))\n else:\n ax1.set_ylim(-fixed_y_axis_values[0], fixed_y_axis_values[0])\n \n if angle:\n ax2 = fig.add_subplot(gs[2, 0])\n else:\n ax2 = fig.add_subplot(gs[1, 0])\n ax2.bar(np.arange(0,np.size(l_diff)), l_diff, color='teal')\n ax2.axhline(y=0, color='darkslategray', linestyle='-')\n ax2.title.set_text('Difference in Line Loading')\n ax2.set_ylabel('\\u0394 [%]')\n ax2.set_xlabel('Line')\n ax2.set_xticks(range(0, np.size(l_diff), 2))\n ax2.tick_params(axis='x', labelsize=mpl.rcParams[\"xtick.labelsize\"] - 2)\n ax2.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax2.margins(x=0.025)\n if fixed_y_axis_values[1] == 0:\n ax2.set_ylim(-max(abs(l_diff)), max(abs(l_diff)))\n else:\n ax2.set_ylim(-fixed_y_axis_values[1], fixed_y_axis_values[1])\n \n if angle:\n ax3 = fig.add_subplot(gs[2,1])\n else:\n ax3 = fig.add_subplot(gs[1,1])\n ax3.bar(np.arange(0,np.size(t_diff)), t_diff, color='darkgreen')\n ax3.axhline(y=0, color='darkslategray', linestyle='-')\n ax3.title.set_text('Difference in Transformer Loading')\n 
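#positive bars mean higher loading in results2 than in results1\n 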
ax3.set_ylabel('\\u0394 [%]')\n ax3.set_xlabel('Transformer')\n ax3.set_xticks(range(0, np.size(t_diff), 2))\n ax3.tick_params(axis='x', labelsize=mpl.rcParams[\"xtick.labelsize\"] - 2)\n ax3.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax3.margins(x=0.025)\n if fixed_y_axis_values[2] == 0:\n ax3.set_ylim(-max(abs(t_diff)), max(abs(t_diff)))\n else:\n ax3.set_ylim(-fixed_y_axis_values[2], fixed_y_axis_values[2])\n \n if angle:\n #Bus voltage angles\n delta1 = results1.get('bus_results')['delta_deg'].to_numpy()\n delta2 = results2.get('bus_results')['delta_deg'].to_numpy()\n delta_diff = np.zeros(np.size(delta1))\n \n for i in range(np.size(delta1)):\n delta_diff[i] = delta2[i] - delta1[i]\n \n ax4 = fig.add_subplot(gs[1, :]) # row 0, col 0\n ax4.bar(np.arange(0,np.size(delta_diff)), delta_diff, color='darkslateblue')\n ax4.axhline(y=0, color='darkslategray', linestyle='-')\n ax4.set_ylabel('\\u0394 [Deg.]')\n # ax1.set_xlabel('')\n # ax1.axes.get_xaxis().get_label().set_visible(False)\n ax4.title.set_text('Difference in Voltage Phase Angle')\n ax4.set_xlabel('Bus')\n ax4.set_xticks(range(0, np.size(delta_diff), 2))\n ax4.grid(linestyle='--', linewidth=0.5, alpha=0.65)\n ax4.margins(x=0.025)\n if fixed_y_axis_values[3] == 0:\n ax4.set_ylim(-max(abs(delta_diff)), max(abs(delta_diff)))\n else:\n ax4.set_ylim(-fixed_y_axis_values[3], fixed_y_axis_values[3])\n \n fig.tight_layout()\n return\n\n\n# =============================================================================\n# Functions used for thesis test cases\n\ndef load_variation(system, load_indices=np.array([]), scalings=np.array([]), const_pf = True, all_loads = False, scale = 1):\n #accepts an array of load indices to scale and an array of the \n #corresponding scaling factors\n #Scale of 1.0 = 100%, 1.10 = 110% (+10%) etc.\n \n loads = system.get('loads')\n psi_load = 0\n \n if all_loads:\n for i in range(len(loads.index)):\n p_old = loads.p[i]\n p_new = p_old * scale\n loads.p[i] = p_new\n psi_load += p_new - p_old\n \n if const_pf:\n q_old = loads.q[i]\n q_new = q_old * scale\n loads.q[i] = q_new\n \n loads.pf[i] = loads.p[i]/(np.sqrt(loads.p[i]**2 + loads.q[i]**2)) #update pf\n \n print(\"\\nUniform load change of %f %%.\" % ((scale - 1)*100))\n \n else: \n j = 0 \n for i in load_indices:\n p_old = loads.p[i]\n p_new = p_old * scalings[j]\n loads.p[i] = p_new\n psi_load += p_new - p_old\n \n if const_pf:\n q_old = loads.q[i]\n q_new = q_old * scalings[j]\n loads.q[i] = q_new\n # print(\"\\nLoad at bus %i changed from %f to %f (real power)\\nAnd %f to %f (reactive power).\" % (loads.bus[i], p_old, p_new, q_old, q_new))\n # else:\n # print(\"\\nLoad at bus %i changed from %f to %f (real power).\\n\" % (loads.bus[i], p_old, p_new))\n loads.pf[i] = loads.p[i]/(np.sqrt(loads.p[i]**2 + loads.q[i]**2)) #update pf\n j += 1\n \n print(\"\\nTotal variation in real power load: %f pu\\n\" % psi_load)\n \n system.update({'loads':loads})\n \n return\n\n\ndef gen_variation(system, gen_indices=np.array([]), scalings=np.array([])):\n #accepts an array of gen indices to scale and an array of the \n #corresponding scaling factors\n #Scale of 1.0 = 100%, 1.10 = 110% (+10%) etc.\n \n gens = system.get('generators')\n psi_gen = 0\n \n \n j = 0 \n for i in gen_indices:\n p_old = gens.pset[i]\n p_new = p_old * scalings[j]\n gens.pset[i] = p_new\n psi_gen += p_new - p_old\n j += 1\n \n print(\"\\nTotal variation in real power generation: %f pu\\n\" % psi_gen)\n \n system.update({'generators':gens})\n \n return \n\n\ndef panda_disable_bus(network, 
bus_idx):\n #Fast track function to disable all network elements associated with the given bus\n #of a pandapower network\n line = network.line\n trafo = network.trafo\n gen = network.gen\n load = network.load\n shunt = network.shunt\n \n line_idx = line[(line.from_bus == bus_idx) | (line.to_bus == bus_idx)].index.tolist()\n trafo_idx = trafo[(trafo.lv_bus == bus_idx) | (trafo.hv_bus == bus_idx)].index.tolist()\n gen_idx = gen[gen.bus == bus_idx].index.tolist()\n load_idx = load[load.bus == bus_idx].index.tolist()\n shunt_idx = shunt[shunt.bus == bus_idx].index.tolist()\n \n line.in_service[line_idx] = False\n trafo.in_service[trafo_idx] = False\n gen.in_service[gen_idx] = False\n load.in_service[load_idx] = False\n shunt.in_service[shunt_idx] = False\n network.bus.in_service[bus_idx] = False\n return\n\n\ndef line_loading_metric_old(results_list):\n #array of zeros of length equal to the number of lines, \n #assuming that all results are from the same system\n phi_lines = np.zeros(len(results_list[0].get('line_flows').index))\n \n \n for j in range(len(results_list)):\n line_flows = results_list[j].get('line_flows').copy()\n \n #computing the metric for line l\n for l in range(np.size(phi_lines)):\n phi_lines[l] += (line_flows['loading_percent'][l] / 100) ** 10\n \n #averaging across all results/contingencies\n phi_lines = phi_lines / len(results_list)\n \n phi = np.sum(phi_lines) / np.size(phi_lines)\n \n return phi\n\ndef line_loading_metric(results):\n #array of zeros of length equal to the number of lines\n phi_lines = np.zeros(len(results.get('line_flows').index))\n \n line_flows = results.get('line_flows').copy()\n \n #computing the metric for line l\n #raised to the power of 30 to strongly penalize lines close to 100% loading\n for l in range(np.size(phi_lines)):\n phi_lines[l] += (line_flows['loading_percent'][l] / 100) ** 30\n \n #averaging over all lines\n phi = np.sum(phi_lines) / np.size(phi_lines)\n \n return phi\n\ndef generator_limit_metric(system, results):\n \n gens = system.get('generators').copy()\n gen_res = results.get('generator_results').copy()\n #array of zeros of length equal to the number of generators\n phi_gens = np.zeros(len(gen_res.index))\n \n #computing the metric for generator g\n #raised to the power of 30 to strongly penalize generators close to their real power limit\n for g in range(len(gen_res.index)):\n if gens.in_service[g]:\n phi_gens[g] += (gen_res['p_pu'][g] / gens['pmax'][g]) ** 30\n \n #averaging over all generators\n phi = np.sum(phi_gens) / np.size(phi_gens)\n \n return phi","repo_name":"Hvid1999/ds_power_flow_python","sub_path":"ds_power_flow.py","file_name":"ds_power_flow.py","file_ext":"py","file_size_in_byte":80855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"70971329674","text":"from django.conf.urls import url\nfrom django.urls import include, path, re_path\n\nfrom content.views import (LandingPageView, ContentPostDetailView,\n ContentPostListView)\n\napp_name='content'\n\nurlpatterns = [\n url(r'^$', LandingPageView.as_view(), name='landing_page'),\n re_path(r'content/details/(?P[-\\w]+)/', ContentPostDetailView.as_view(), name='post_detail'),\n path(\"content/list\", ContentPostListView.as_view(), name='post_list'),\n]\n","repo_name":"cdunn6754/cdunnSite","sub_path":"content/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"28"} +{"seq_id":"42225710642","text":"from __future__ import 
annotations\n\nimport copy\nimport dataclasses\nimport numbers\nimport unittest\nimport warnings\nfrom typing import Any, Collection, Optional\n\nimport numpy as np\nimport onnx\nimport onnx.backend.test.case.node as node_test\nimport onnxruntime as ort\nfrom onnx.onnx_cpp2py_export import checker\nfrom onnxruntime.capi.onnxruntime_pybind11_state import (\n Fail,\n InvalidArgument,\n InvalidGraph,\n)\n\nimport onnxscript\nfrom onnxscript._internal import utils\n\n\n@dataclasses.dataclass(repr=False, eq=False)\nclass FunctionTestParams:\n function: onnxscript.OnnxFunction\n input: list[Any] | dict[str, Any]\n output: list[Any]\n attrs: Optional[dict[str, Any]] = None\n\n\nclass OnnxScriptTestCase(unittest.TestCase):\n local_function_opset_version: int\n atol: float\n rtol: float\n\n @classmethod\n def setUpClass(cls):\n # A function (and node) in a model tells its domain, not its version.\n # When building a model that consumes functions and nodes, model opset_imports\n # indicate domains and versions of nodes and functions that are used.\n # Function version number is needed for a runtime to run it without inlining.\n # Until ONNX IR (or FunctionIR) is updated\n # so that FunctionProto carries a version number, we\n # need to put a default version number here as a workaround.\n cls.local_function_opset_version = 1\n cls.atol = 1e-7\n cls.rtol = 1e-7\n try:\n # experimental version\n # pylint: disable=no-value-for-parameter\n cls.all_test_cases = node_test.collect_testcases() # type: ignore[attr-defined,call-arg]\n # pylint: enable=no-value-for-parameter\n except TypeError:\n # official version\n cls.all_test_cases = node_test.collect_testcases(None) # type: ignore[attr-defined,arg-type]\n\n def _create_model_from_param(\n self, param: FunctionTestParams, onnx_case_model: onnx.ModelProto\n ) -> onnx.ModelProto:\n local_function_proto = param.function.function_ir.to_function_proto()\n if not onnx_case_model:\n input_names = [f\"input_{i}\" for i in range(len(param.input))]\n output_names = [f\"output_{i}\" for i in range(len(param.output))]\n input_value_infos = utils.values_to_value_infos(zip(input_names, param.input))\n elif len(onnx_case_model.graph.input) == len(local_function_proto.input) and all(\n i != \"\" for i in onnx_case_model.graph.input\n ):\n # we want to create a model that onnx_test_runner\n # can run with onnx test case data\n input_names = [i.name for i in onnx_case_model.graph.input]\n output_names = [o.name for o in onnx_case_model.graph.output]\n input_value_infos = utils.values_to_value_infos(zip(input_names, param.input))\n else:\n # in an onnx test case, an optional input with missing input data\n # is dropped if it is a trailing input; otherwise the input is named \"\".\n # a model from script keeps all optional inputs,\n # to run script model with onnx test data, we need to map input test data\n # to the corresponding script model input.\n # take Clip test case for example:\n # clip function input is like: [\"input\", \"min2\", \"max2\"]\n # (1) when min is missing, the test_case_model is [\"x\", \"\", \"max\"]\n # in this case we want to create a model with input being: [\"x\", \"min\", \"max\"]\n # input feed: {x: ?, min: None, max: ?} # ? is a np.array\n # (2) when max is missing, the test_case_model is [\"x\", \"min\"]\n # in this case we want to create a model with input being: [\"x\", \"min\", \"max2\"]\n # input feed: {x: ?, min: ?, max: None} # ? 
is a np.array\n\n # there is another issue: when input data is missing,\n # there is no way from the onnx test case's model and feed to get TypeProto\n # in order to build a model.\n # we have to resolve the TypeProto from the script function.\n local_function_model_proto = param.function.function_ir.to_model_proto()\n input_value_infos = []\n for i, input in enumerate(local_function_model_proto.graph.input):\n vi = copy.deepcopy(input)\n if (\n i < len(onnx_case_model.graph.node[0].input)\n and onnx_case_model.graph.node[0].input[i] != \"\"\n ):\n vi.name = onnx_case_model.graph.node[0].input[i]\n else:\n vi.name = input.name\n input_value_infos.append(vi)\n\n output_names = [o.name for o in onnx_case_model.graph.output]\n\n output_value_infos = utils.values_to_value_infos(zip(output_names, param.output))\n\n return utils.make_model_from_function_proto(\n local_function_proto,\n self.local_function_opset_version,\n input_value_infos,\n output_value_infos,\n **(param.attrs or {}),\n )\n\n def _filter_test_case_by_op_type(self, op_type):\n test_cases = [\n case\n for case in self.all_test_cases # type: ignore[attr-defined]\n if (\n case.kind == \"node\"\n and len(case.model.graph.node) == 1\n and case.model.graph.node[0].op_type == op_type\n )\n ]\n return test_cases\n\n def run_converter_test(\n self, param: FunctionTestParams, onnx_case_model: Optional[onnx.ModelProto] = None\n ):\n # we need the latest version in onnx.ai domain\n # to build a function\n if onnx_case_model:\n model = self._create_model_from_param(param, onnx_case_model)\n else:\n model = param.function.function_ir.to_model_proto(producer_name=\"call_clip\")\n try:\n onnx.checker.check_model(model)\n except checker.ValidationError as e:\n if \"Field 'shape' of 'type' is required but missing\" in str(\n e\n ) or \"Field 'shape' of type is required but missing\" in str(e):\n # input or output shapes are missing because the function\n # was defined with FLOAT[...].\n warnings.warn(str(e), stacklevel=1)\n else:\n raise AssertionError(\"Verification of model failed.\") from e\n\n if isinstance(param.input, dict):\n input = param.input\n else:\n # onnx_case_model is provided when testing with onnx test cases.\n if onnx_case_model:\n input = {}\n feed_index = 0\n for i, model_input in enumerate(model.graph.input):\n # take care of [\"x\", \"\", \"max\"] and [\"x\", \"min\"] cases\n if (\n feed_index < len(param.input)\n and onnx_case_model.graph.node[0].input[i] != \"\"\n ):\n input[model_input.name] = (\n np.array(param.input[feed_index])\n if isinstance(param.input[feed_index], numbers.Number)\n else param.input[feed_index]\n )\n feed_index += 1\n else:\n input[model_input.name] = None\n else:\n input = {\n vi.name: np.array(t) if isinstance(t, numbers.Number) else t\n for vi, t in zip(model.graph.input, param.input)\n }\n try:\n session = ort.InferenceSession(\n model.SerializeToString(), providers=(\"CPUExecutionProvider\",)\n )\n except (Fail, InvalidArgument, InvalidGraph) as e:\n raise AssertionError(f\"Unable to load model\\n{model}\") from e\n actual = session.run(None, input)\n np.testing.assert_allclose(actual, param.output, rtol=self.rtol)\n\n def run_eager_test(\n self,\n param: FunctionTestParams,\n rtol: Optional[float] = None,\n atol: Optional[float] = None,\n ):\n actual = param.function(*param.input, **(param.attrs or {}))\n np.testing.assert_allclose(\n actual if isinstance(actual, list) else [actual],\n param.output,\n rtol=rtol or self.rtol,\n atol=atol or self.atol,\n )\n\n def 
run_onnx_test(\n self,\n function: onnxscript.OnnxFunction,\n rtol: Optional[float] = None,\n atol: Optional[float] = None,\n skip_eager_test: bool = False,\n skip_test_names: Optional[Collection[str]] = None,\n **attrs: Any,\n ) -> None:\n \"\"\"Run ONNX test cases with an onnxscript.OnnxFunction.\n\n The function should have test cases in the ONNX repo.\n For example: in onnx/test/case/node.\n Test case models and data are used to do converter and eager mode tests.\n\n Args:\n function: the function to be tested.\n rtol: relative tolerance. Defaults to None.\n atol: absolute tolerance. Defaults to None.\n skip_eager_test: do not run the eager test if True.\n skip_test_names: names of test cases to skip.\n attrs: default attributes of the function node.\n \"\"\"\n if skip_test_names is None:\n skip_test_names = set()\n else:\n skip_test_names = set(skip_test_names)\n\n cases = self._filter_test_case_by_op_type(function.function_ir.name)\n for case in cases:\n if len(case.model.graph.node) != 1:\n raise ValueError(\n \"run_onnx_test only \\\n tests models with one operator node.\"\n )\n\n if case.name not in skip_test_names:\n test_case_attrs = {\n a.name: onnx.helper.get_attribute_value(a)\n for a in case.model.graph.node[0].attribute\n }\n test_case_attrs = {**attrs, **test_case_attrs}\n\n for ds in case.data_sets:\n param = FunctionTestParams(function, ds[0], ds[1], attrs=test_case_attrs)\n self.run_converter_test(param, case.model)\n if not skip_eager_test:\n self.run_eager_test(param, rtol=rtol, atol=atol)\n","repo_name":"microsoft/onnxscript","sub_path":"onnxscript/tests/common/onnx_script_test_case.py","file_name":"onnx_script_test_case.py","file_ext":"py","file_size_in_byte":10460,"program_lang":"python","lang":"en","doc_type":"code","stars":183,"dataset":"github-code","pt":"28"} +{"seq_id":"71076115275","text":"from tkinter import Label\nfrom EnchantedMushroom import EnchantedMushroom\nclass EnchantedLog (object):\n\n #class variable that will finally contain 2 images of the log: non-enchanted and enchanted \n displayImages: list = []\n \n #construct a log\n def __init__(self, guiElt: Label):\n\n #a property to check the status of enchantment\n self.__enchantment: bool = False\n\n #a property to track the status (enchanted or not) of all the mushroom instances\n self.__enchantedMushroom: list = []\n\n #a property to track the display image of the log\n self.__display: Label = guiElt\n\n #set up the image of the initial log: non-enchanted\n self.__display['image'] = EnchantedLog.displayImages[0]\n\n #set up the click \n self.__display.bind( '